]> git.neil.brown.name Git - history.git/commitdiff
Import 2.3.32pre3 2.3.32pre3
authorLinus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:29:11 +0000 (15:29 -0500)
committerLinus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:29:11 +0000 (15:29 -0500)
210 files changed:
Documentation/Configure.help
MAINTAINERS
Makefile
arch/arm/def-configs/brutus
arch/i386/defconfig
arch/i386/kernel/apm.c
arch/i386/kernel/pci-pc.c
arch/i386/kernel/process.c
arch/i386/mm/init.c
arch/ppc/configs/common_defconfig
arch/ppc/configs/gemini_defconfig
arch/ppc/configs/oak_defconfig
arch/ppc/configs/walnut_defconfig
arch/ppc/defconfig
arch/sh/defconfig
arch/sparc64/defconfig
arch/sparc64/kernel/sys_sparc32.c
drivers/ap1000/ap.c
drivers/ap1000/ddv.c
drivers/block/DAC960.c
drivers/block/DAC960.h
drivers/block/acsi.c
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/cpqarray.c
drivers/block/floppy.c
drivers/block/hd.c
drivers/block/ide-disk.c
drivers/block/ide-probe.c
drivers/block/ide.c
drivers/block/ll_rw_blk.c
drivers/block/loop.c
drivers/block/md.c
drivers/block/nbd.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/ps2esdi.c
drivers/block/rd.c
drivers/block/swim3.c
drivers/block/swim_iop.c
drivers/block/xd.c
drivers/block/xd.h
drivers/block/z2ram.c
drivers/cdrom/aztcd.c
drivers/cdrom/cdu31a.c
drivers/cdrom/cm206.c
drivers/cdrom/gscd.c
drivers/cdrom/mcd.c
drivers/cdrom/mcdx.c
drivers/cdrom/optcd.c
drivers/cdrom/sbpcd.c
drivers/cdrom/sjcd.c
drivers/cdrom/sonycd535.c
drivers/char/Config.in
drivers/char/Makefile
drivers/char/agp/Makefile
drivers/char/agp/agp.h [new file with mode: 0644]
drivers/char/agp/agp_backend.c [deleted file]
drivers/char/agp/agp_backendP.h [deleted file]
drivers/char/agp/agpgart_be.c [new file with mode: 0644]
drivers/char/agp/agpgart_fe.c
drivers/char/drm/drmP.h
drivers/char/drm/fops.c
drivers/char/drm/init.c
drivers/char/synclink.c
drivers/i2o/i2o_block.c
drivers/net/tlan.c
drivers/net/tlan.h
drivers/net/tokenring/ibmtr.c
drivers/net/tokenring/ibmtr.h
drivers/pci/names.c
drivers/scsi/Config.in
drivers/scsi/Makefile
drivers/scsi/advansys.c
drivers/scsi/aha1542.c
drivers/scsi/atp870u.c
drivers/scsi/atp870u.h
drivers/scsi/eata.c
drivers/scsi/eata_dma.c
drivers/scsi/g_NCR5380.c
drivers/scsi/gdth_proc.c
drivers/scsi/hosts.c
drivers/scsi/hosts.h
drivers/scsi/ibmmca.c
drivers/scsi/inia100.c
drivers/scsi/ips.c
drivers/scsi/megaraid.c
drivers/scsi/scsi.c
drivers/scsi/scsi.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_debug.h
drivers/scsi/scsi_error.c
drivers/scsi/scsi_ioctl.c
drivers/scsi/scsi_lib.c [new file with mode: 0644]
drivers/scsi/scsi_merge.c [new file with mode: 0644]
drivers/scsi/scsi_obsolete.c
drivers/scsi/scsi_queue.c
drivers/scsi/scsi_syms.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/sr.h
drivers/scsi/sr_ioctl.c
drivers/scsi/sr_vendor.c
drivers/scsi/st.c
drivers/scsi/u14-34f.c
drivers/sound/sb_card.c
drivers/video/fbgen.c
fs/Config.in
fs/autofs/symlink.c
fs/buffer.c
fs/coda/symlink.c
fs/ext2/inode.c
fs/ext2/namei.c
fs/ext2/symlink.c
fs/fcntl.c
fs/lockd/svclock.c
fs/lockd/xdr.c
fs/locks.c
fs/minix/namei.c
fs/minix/symlink.c
fs/namei.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/symlink.c
fs/nfsd/export.c
fs/nfsd/nfs3proc.c
fs/nfsd/vfs.c
fs/ntfs/fs.c
fs/ntfs/super.c
fs/ntfs/super.h
fs/partitions/Config.in
fs/proc/generic.c
fs/proc/root.c
fs/romfs/inode.c
fs/super.c
fs/sysv/namei.c
fs/sysv/symlink.c
fs/udf/symlink.c
fs/ufs/inode.c
fs/ufs/namei.c
fs/ufs/symlink.c
fs/umsdos/README-WIP.txt
fs/umsdos/check.c
fs/umsdos/dir.c
fs/umsdos/inode.c
fs/umsdos/ioctl.c
fs/umsdos/namei.c
fs/umsdos/rdir.c
include/asm-alpha/fcntl.h
include/asm-alpha/pgalloc.h
include/asm-alpha/posix_types.h
include/asm-alpha/resource.h
include/asm-arm/fcntl.h
include/asm-arm/resource.h
include/asm-i386/bitops.h
include/asm-i386/fcntl.h
include/asm-i386/io.h
include/asm-i386/pgalloc.h
include/asm-i386/processor.h
include/asm-i386/resource.h
include/asm-i386/string-486.h
include/asm-i386/string.h
include/asm-m68k/fcntl.h
include/asm-m68k/resource.h
include/asm-mips/fcntl.h
include/asm-mips/pgtable.h
include/asm-mips/resource.h
include/asm-ppc/fcntl.h
include/asm-ppc/pgalloc.h
include/asm-ppc/resource.h
include/asm-sh/fcntl.h
include/asm-sh/pgtable.h
include/asm-sh/resource.h
include/asm-sparc/resource.h
include/asm-sparc64/fcntl.h
include/asm-sparc64/pgtable.h
include/asm-sparc64/resource.h
include/linux/agp_backend.h
include/linux/agpgart.h
include/linux/blk.h
include/linux/blkdev.h
include/linux/ext2_fs.h
include/linux/fs.h
include/linux/ide.h
include/linux/mm.h
include/linux/module.h
include/linux/nfsd/nfsd.h
include/linux/nfsd/nfsfh.h
include/linux/ntfs_fs_sb.h
include/linux/resource.h
include/linux/sched.h
include/linux/ufs_fs.h
include/linux/umsdos_fs.h
include/linux/umsdos_fs_i.h
include/scsi/scsi.h
kernel/Makefile
kernel/fork.c
kernel/ksyms.c
kernel/module.c
kernel/sched.c
kernel/timer.c [new file with mode: 0644]
mm/bootmem.c
mm/mmap.c
mm/page_alloc.c
net/ipv4/tcp_input.c
net/packet/af_packet.c
scripts/patch-kernel

index 30c8f3901568dc3c26d04260f85f5a4986a965a6..660e5157eb63a8062fc9e670c967098b2a78d30e 100644 (file)
@@ -4081,6 +4081,16 @@ CONFIG_CHR_DEV_SG
   Documentation/scsi.txt. The module will be called sg.o. If unsure,
   say N.
 
+Debug new queueing code for SCSI
+CONFIG_SCSI_DEBUG_QUEUES
+  This option turns on a lot of additional consistency checking for the new
+  queueing code.   This will adversely affect performance, but it is likely
+  that bugs will be caught sooner if this is turned on.   This will typically
+  cause the kernel to panic if an error is detected, but it would have probably
+  crashed if the panic weren't there.   Comments/questions/problems to
+  linux-scsi mailing list please.  See http://www.andante.org/scsi_queue.html
+  for more uptodate information.
+
 Probe all LUNs on each SCSI device
 CONFIG_SCSI_MULTI_LUN
   If you have a SCSI device that supports more than one LUN (Logical
@@ -8307,6 +8317,10 @@ CONFIG_UMSDOS_FS
   MSDOS floppies. You will need a program called umssync in order to
   make use of umsdos; read Documentation/filesystems/umsdos.txt. 
 
+  To get utilities for initializing/checking UMSDOS filesystem, or
+  latest patches and/or information, visit UMSDOS homepage at
+  http://www.voyager.hr/~mnalis/umsdos/ .
+
   This option enlarges your kernel by about 28 KB and it only works if
   you said Y to both "fat fs support" and "msdos fs support" above. If
   you want to compile this as a module ( = code which can be inserted
@@ -8407,14 +8421,6 @@ CONFIG_NFSD
   The module is called nfsd.o. If you want to compile it as a module,
   say M here and read Documentation/modules.txt. If unsure, say N.
 
-Emulate SUN NFS server
-CONFIG_NFSD_SUN
-  If you would like for the server to allow clients to access
-  directories that are mount points on the local filesystem (this is
-  how nfsd behaves on Sun systems), say Y here. 
-  If you use Tru64 clients, say Y.
-  If unsure, say N.
-
 Provide NFSv3 server support (EXPERIMENTAL)
 CONFIG_NFSD_V3
   If you would like to include the NFSv3 server was well as the NFSv2
index d9eac38d8636f3982335a8cd8f6a955c8822f440..a2a2f385a99683ea6fdd0c5bba20703538b70072 100644 (file)
@@ -883,8 +883,10 @@ M: kgb@manjak.knm.org.pl
 S:     Maintained
 
 TLAN NETWORK DRIVER
+P:     Torben Mathiasen
+M:     torben.mathiasen@compaq.com
 L:     tlan@vuser.vu.union.edu
-S:     Orphan
+S:     Maintained
 
 TOKEN-RING NETWORK DRIVER
 P:     Paul Norton
@@ -917,8 +919,9 @@ S:  Maintained
 
 UMSDOS FILESYSTEM
 P:     Matija Nalis
-M:     mnalis@jagor.srce.hr
+M:     Matija Nalis <mnalis-umsdos@voyager.hr>
 L:     linux-kernel@vger.rutgers.edu
+W:     http://www.voyager.hr/~mnalis/umsdos/
 S:     Maintained
 
 UNIFORM CDROM DRIVER
index c6397bb8fa8b3c02db995048234837aa9156f3ec..5c26884b20bbcf647583d6c72299f3806f6ba6e3 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -121,6 +121,10 @@ ifdef CONFIG_DRM
 DRIVERS += drivers/char/drm/drm.o
 endif
 
+ifdef CONFIG_AGP
+DRIVERS += drivers/char/agp/agp.o
+endif
+
 ifdef CONFIG_NUBUS
 DRIVERS := $(DRIVERS) drivers/nubus/nubus.a
 endif
index a700f7d17cdd5484dbd1f21b6a42172c74c3cf38..aade8163981343ee5ef2bb9c8e9eac9bad663bd6 100644 (file)
@@ -203,9 +203,6 @@ CONFIG_EXT2_FS=y
 #
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index 1779090aa96a599b60b6f6d5e69f2db0503cb613..206cad096e1ceb5c82aa6944c22414fdb1b889c0 100644 (file)
@@ -381,6 +381,9 @@ CONFIG_PSMOUSE=y
 # Ftape, the floppy tape device driver
 #
 # CONFIG_FTAPE is not set
+CONFIG_DRM=y
+CONFIG_DRM_TDFX=y
+# CONFIG_DRM_GAMMA is not set
 
 #
 # PCMCIA character device support
@@ -438,9 +441,6 @@ CONFIG_LOCKD=y
 #
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index a2453d9f376958b08c75c26dfdff6e704dc5e440..564e6b42b2ca4350732c97b0eded61f55caadc85 100644 (file)
@@ -380,7 +380,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
-               "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+               "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "; cld\n\t"
                "setc %%al\n\t"
                "popl %%ebp\n\t"
                "popl %%edi\n\t"
@@ -413,7 +413,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
                __asm__ __volatile__(APM_DO_ZERO_SEGS
                        "pushl %%edi\n\t"
                        "pushl %%ebp\n\t"
-                       "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+                       "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry)"; cld\n\t"
                        "setc %%bl\n\t"
                        "popl %%ebp\n\t"
                        "popl %%edi\n\t"
index 2df21faec62606545148dd2c2224956adc085373..9ee1d63557854bbaad90d4c98d2c569214f143b7 100644 (file)
@@ -342,7 +342,7 @@ static unsigned long bios32_service(unsigned long service)
        unsigned long flags;
 
        __save_flags(flags); __cli();
-       __asm__("lcall (%%edi)"
+       __asm__("lcall (%%edi); cld"
                : "=a" (return_code),
                  "=b" (address),
                  "=c" (length),
@@ -383,7 +383,7 @@ static int __init check_pcibios(void)
 
                __save_flags(flags); __cli();
                __asm__(
-                       "lcall (%%edi)\n\t"
+                       "lcall (%%edi); cld\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -427,7 +427,7 @@ static int __init pci_bios_find_device (unsigned short vendor, unsigned short de
        unsigned short bx;
        unsigned short ret;
 
-       __asm__("lcall (%%edi)\n\t"
+       __asm__("lcall (%%edi); cld\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -448,7 +448,7 @@ static int pci_bios_read_config_byte(struct pci_dev *dev, int where, u8 *value)
        unsigned long ret;
        unsigned long bx = (dev->bus->number << 8) | dev->devfn;
 
-       __asm__("lcall (%%esi)\n\t"
+       __asm__("lcall (%%esi); cld\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -466,7 +466,7 @@ static int pci_bios_read_config_word(struct pci_dev *dev, int where, u16 *value)
        unsigned long ret;
        unsigned long bx = (dev->bus->number << 8) | dev->devfn;
 
-       __asm__("lcall (%%esi)\n\t"
+       __asm__("lcall (%%esi); cld\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -484,7 +484,7 @@ static int pci_bios_read_config_dword(struct pci_dev *dev, int where, u32 *value
        unsigned long ret;
        unsigned long bx = (dev->bus->number << 8) | dev->devfn;
 
-       __asm__("lcall (%%esi)\n\t"
+       __asm__("lcall (%%esi); cld\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -502,7 +502,7 @@ static int pci_bios_write_config_byte(struct pci_dev *dev, int where, u8 value)
        unsigned long ret;
        unsigned long bx = (dev->bus->number << 8) | dev->devfn;
 
-       __asm__("lcall (%%esi)\n\t"
+       __asm__("lcall (%%esi); cld\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -520,7 +520,7 @@ static int pci_bios_write_config_word(struct pci_dev *dev, int where, u16 value)
        unsigned long ret;
        unsigned long bx = (dev->bus->number << 8) | dev->devfn;
 
-       __asm__("lcall (%%esi)\n\t"
+       __asm__("lcall (%%esi); cld\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -538,7 +538,7 @@ static int pci_bios_write_config_dword(struct pci_dev *dev, int where, u32 value
        unsigned long ret;
        unsigned long bx = (dev->bus->number << 8) | dev->devfn;
 
-       __asm__("lcall (%%esi)\n\t"
+       __asm__("lcall (%%esi); cld\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -702,7 +702,7 @@ static struct irq_routing_table * __init pcibios_get_irq_routing_table(void)
        __asm__("push %%es\n\t"
                "push %%ds\n\t"
                "pop  %%es\n\t"
-               "lcall (%%esi)\n\t"
+               "lcall (%%esi); cld\n\t"
                "pop %%es\n\t"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
index 680563959e2c47c1a2809a12fad2143ecc915a1d..4f9c943534e8cc2e00f5df48324bc6494ae9b672 100644 (file)
@@ -462,7 +462,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        struct pt_regs * childregs;
 
        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
-       *childregs = *regs;
+       struct_cpy(childregs, regs);
        childregs->eax = 0;
        childregs->esp = esp;
 
@@ -475,7 +475,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        savesegment(gs,p->thread.gs);
 
        unlazy_fpu(current);
-       p->thread.i387 = current->thread.i387;
+       struct_cpy(&p->thread.i387, &current->thread.i387);
 
        return 0;
 }
index b99daee8460ba65f17f48bbeab735ce18cc14b47..7ff12d210739051e0a7199cae2a1d8e2d4bcf1a8 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/e820.h>
 
 unsigned long highstart_pfn, highend_pfn;
+unsigned long *pgd_quicklist = (unsigned long *)0;
 static unsigned long totalram_pages = 0;
 static unsigned long totalhigh_pages = 0;
 
@@ -162,7 +163,10 @@ int do_check_pgt_cache(int low, int high)
        if(pgtable_cache_size > high) {
                do {
                        if(pgd_quicklist)
-                               free_pgd_slow(get_pgd_fast()), freed++;
+                               mmlist_modify_lock(),  \
+                               free_pgd_slow(get_pgd_fast()), \
+                               mmlist_modify_unlock(), \
+                               freed++;
                        if(pmd_quicklist)
                                free_pmd_slow(get_pmd_fast()), freed++;
                        if(pte_quicklist)
index 19404751d5f3a99e1376e364c6e9a1648da4a666..9776262747b0ffb4c11aff9602b6324c47d449c1 100644 (file)
@@ -529,9 +529,6 @@ CONFIG_LOCKD=y
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MAC_PARTITION=y
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index 2b0d3a11c3d9da621d2761f95f05a2864a98a087..b3129c7a54de69cc3f15ae744c847480890dc5d4 100644 (file)
@@ -389,9 +389,6 @@ CONFIG_EXT2_FS=y
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MAC_PARTITION=y
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index eb6d80f54e7aee45cc80f8cfcc58cf7030838fc5..1c2daf125f3daa01bb827285ba2206936bd47398 100644 (file)
@@ -276,9 +276,6 @@ CONFIG_LOCKD=y
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MAC_PARTITION=y
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index 9903c893dda24f5d507b650ce1b5e3a249e2947a..66edd11b61c363178e684d04ca94159254f92d0d 100644 (file)
@@ -276,9 +276,6 @@ CONFIG_LOCKD=y
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MAC_PARTITION=y
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index 671fc7637a7e6cbd6f8d0fa05d519df079fdf108..2a3aac0f73d0597be13cd3cae9456da739929ffe 100644 (file)
@@ -529,9 +529,6 @@ CONFIG_LOCKD=y
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MAC_PARTITION=y
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index 6a65ad4117765980969880d90562ae46a4b5f0df..37440e7c09d53380d9af67da9a48f2a87c1438de 100644 (file)
@@ -82,9 +82,6 @@ CONFIG_EXT2_FS=y
 #
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_NLS is not set
index ef84238fd1a7d0296ca20ee9ec9c4fcb9fd6ce08..904027e0748ee8c5723b9c4d72fcd7d4e855d9c6 100644 (file)
@@ -331,9 +331,6 @@ CONFIG_NCP_FS=m
 #
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_SGI_PARTITION is not set
 CONFIG_SUN_PARTITION=y
 CONFIG_NLS=y
index a1e0f26dd9a6b115f6ce3ebfd9437e823e3df435..e394ec35b2b2a49c63f14f3098e9ae0b19dc6c9b 100644 (file)
@@ -2988,11 +2988,8 @@ qm_deps(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
 
        if (mod->next == NULL)
                return -EINVAL;
-       if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
-               if (put_user(0, ret))
-                       return -EFAULT;
-               else
-                       return 0;
+       if (!MOD_CAN_QUERY(mod))
+               return put_user(0, ret);
 
        space = 0;
        for (i = 0; i < mod->ndeps; ++i) {
@@ -3008,10 +3005,7 @@ qm_deps(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
                space += len;
        }
 
-       if (put_user(i, ret))
-               return -EFAULT;
-       else
-               return 0;
+       return put_user(i, ret);
 
 calc_space_needed:
        space += len;
@@ -3032,7 +3026,7 @@ qm_refs(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
 
        if (mod->next == NULL)
                return -EINVAL;
-       if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+       if (!MOD_CAN_QUERY(mod))
                if (put_user(0, ret))
                        return -EFAULT;
                else
@@ -3076,7 +3070,7 @@ qm_symbols(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret
        char *strings;
        unsigned *vals;
 
-       if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+       if (!MOD_CAN_QUERY(mod))
                if (put_user(0, ret))
                        return -EFAULT;
                else
index 64340bedaedaae77019c225e529d54ee58c55d7e..d7ac5cac419f4ea7837f92340e4daaa80e7ace23 100644 (file)
@@ -53,7 +53,7 @@ static void ap_release(struct inode * inode, struct file * filp)
        MOD_DEC_USE_COUNT;
 }
 
-static void ap_request(void)
+static void ap_request(request_queue_t * q)
 {
   struct cap_request creq;
   unsigned int minor;
@@ -160,7 +160,7 @@ void ap_complete(struct cap_request *creq)
 #endif
   end_request(1);
   request_count--;
-  ap_request();
+  ap_request(NULL);
 }
 
 
@@ -271,7 +271,7 @@ int ap_init(void)
     return -1;
   }
   printk("ap_init: register dev %d\n", MAJOR_NR);
-  blk_dev[MAJOR_NR].request_fn = &ap_request;
+  blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &ap_request);
 
   for (i=0;i<NUM_APDEVS;i++) {
     ap_blocksizes[i] = AP_BLOCK_SIZE;
@@ -307,7 +307,7 @@ void cleanup_module(void)
                invalidate_buffers(MKDEV(MAJOR_NR, i));
 
        unregister_blkdev( MAJOR_NR, "apblock" );
-       blk_dev[MAJOR_NR].request_fn = 0;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
 }
 
 #endif  /* MODULE */
index f868076d392302c044f0ecf859a07dc42be7bf07..ecfcca621dcd6032c0a140302bf35ca07d3458c9 100644 (file)
@@ -620,7 +620,7 @@ static void ddv_request1(void)
 }
 
 
-static void ddv_request(void)
+static void ddv_request(request_queue_t * q)
 {
        cli();
        ddv_request1();
@@ -932,7 +932,7 @@ int ddv_init(void)
        }
 
        printk("ddv_init: register dev %d\n", MAJOR_NR);
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = DDV_READ_AHEAD;
        
        bif_add_debug_key('d',ddv_status,"DDV status");
@@ -1016,7 +1016,7 @@ void cleanup_module(void)
        if (*gdp)
                *gdp = (*gdp)->next;
        free_irq(APOPT0_IRQ, NULL);
-       blk_dev[MAJOR_NR].request_fn = 0;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
 }
 
 #endif  /* MODULE */
index 2da68259ea29290d69a9004444c3d4425be4ad65..6c0120458ea56d53bef2444654595ced79f30946 100644 (file)
@@ -1026,7 +1026,7 @@ static boolean DAC960_ReportDeviceConfiguration(DAC960_Controller_T *Controller)
 
 static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 {
-  static void (*RequestFunctions[DAC960_MaxControllers])(void) =
+  static void (*RequestFunctions[DAC960_MaxControllers])(request_queue_t *) =
     { DAC960_RequestFunction0, DAC960_RequestFunction1,
       DAC960_RequestFunction2, DAC960_RequestFunction3,
       DAC960_RequestFunction4, DAC960_RequestFunction5,
@@ -1046,8 +1046,8 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
   /*
     Initialize the I/O Request Function.
   */
-  blk_dev[MajorNumber].request_fn =
-    RequestFunctions[Controller->ControllerNumber];
+  blk_init_queue(BLK_DEFAULT_QUEUE(MajorNumber), 
+                RequestFunctions[Controller->ControllerNumber]);
   /*
     Initialize the Disk Partitions array, Partition Sizes array, Block Sizes
     array, Max Sectors per Request array, and Max Segments per Request array.
@@ -1113,7 +1113,7 @@ static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller)
   /*
     Remove the I/O Request Function.
   */
-  blk_dev[MajorNumber].request_fn = NULL;
+  blk_cleanup_queue(BLK_DEFAULT_QUEUE(MajorNumber));
   /*
     Remove the Disk Partitions array, Partition Sizes array, Block Sizes
     array, Max Sectors per Request array, and Max Segments per Request array.
@@ -1272,7 +1272,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
                                     boolean WaitForCommand)
 {
   IO_Request_T **RequestQueuePointer =
-    &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].current_request;
+    &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.current_request;
   IO_Request_T *Request;
   DAC960_Command_T *Command;
   char *RequestBuffer;
@@ -1375,7 +1375,7 @@ static inline void DAC960_ProcessRequests(DAC960_Controller_T *Controller)
   DAC960_RequestFunction0 is the I/O Request Function for DAC960 Controller 0.
 */
 
-static void DAC960_RequestFunction0(void)
+static void DAC960_RequestFunction0(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[0];
   ProcessorFlags_T ProcessorFlags;
@@ -1398,7 +1398,7 @@ static void DAC960_RequestFunction0(void)
   DAC960_RequestFunction1 is the I/O Request Function for DAC960 Controller 1.
 */
 
-static void DAC960_RequestFunction1(void)
+static void DAC960_RequestFunction1(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[1];
   ProcessorFlags_T ProcessorFlags;
@@ -1421,7 +1421,7 @@ static void DAC960_RequestFunction1(void)
   DAC960_RequestFunction2 is the I/O Request Function for DAC960 Controller 2.
 */
 
-static void DAC960_RequestFunction2(void)
+static void DAC960_RequestFunction2(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[2];
   ProcessorFlags_T ProcessorFlags;
@@ -1444,7 +1444,7 @@ static void DAC960_RequestFunction2(void)
   DAC960_RequestFunction3 is the I/O Request Function for DAC960 Controller 3.
 */
 
-static void DAC960_RequestFunction3(void)
+static void DAC960_RequestFunction3(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[3];
   ProcessorFlags_T ProcessorFlags;
@@ -1467,7 +1467,7 @@ static void DAC960_RequestFunction3(void)
   DAC960_RequestFunction4 is the I/O Request Function for DAC960 Controller 4.
 */
 
-static void DAC960_RequestFunction4(void)
+static void DAC960_RequestFunction4(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[4];
   ProcessorFlags_T ProcessorFlags;
@@ -1490,7 +1490,7 @@ static void DAC960_RequestFunction4(void)
   DAC960_RequestFunction5 is the I/O Request Function for DAC960 Controller 5.
 */
 
-static void DAC960_RequestFunction5(void)
+static void DAC960_RequestFunction5(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[5];
   ProcessorFlags_T ProcessorFlags;
@@ -1513,7 +1513,7 @@ static void DAC960_RequestFunction5(void)
   DAC960_RequestFunction6 is the I/O Request Function for DAC960 Controller 6.
 */
 
-static void DAC960_RequestFunction6(void)
+static void DAC960_RequestFunction6(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[6];
   ProcessorFlags_T ProcessorFlags;
@@ -1536,7 +1536,7 @@ static void DAC960_RequestFunction6(void)
   DAC960_RequestFunction7 is the I/O Request Function for DAC960 Controller 7.
 */
 
-static void DAC960_RequestFunction7(void)
+static void DAC960_RequestFunction7(request_queue_t * q)
 {
   DAC960_Controller_T *Controller = DAC960_Controllers[7];
   ProcessorFlags_T ProcessorFlags;
index 1696c507ba6233b12b462fa717b43c6c95691037..e93448faba1eda688bf04059a255ae261b118ced 100644 (file)
@@ -2208,14 +2208,14 @@ DAC960_V3_ReadStatusRegister(void *ControllerBaseAddress)
 
 static void DAC960_FinalizeController(DAC960_Controller_T *);
 static int DAC960_Finalize(NotifierBlock_T *, unsigned long, void *);
-static void DAC960_RequestFunction0(void);
-static void DAC960_RequestFunction1(void);
-static void DAC960_RequestFunction2(void);
-static void DAC960_RequestFunction3(void);
-static void DAC960_RequestFunction4(void);
-static void DAC960_RequestFunction5(void);
-static void DAC960_RequestFunction6(void);
-static void DAC960_RequestFunction7(void);
+static void DAC960_RequestFunction0(request_queue_t *);
+static void DAC960_RequestFunction1(request_queue_t *);
+static void DAC960_RequestFunction2(request_queue_t *);
+static void DAC960_RequestFunction3(request_queue_t *);
+static void DAC960_RequestFunction4(request_queue_t *);
+static void DAC960_RequestFunction5(request_queue_t *);
+static void DAC960_RequestFunction6(request_queue_t *);
+static void DAC960_RequestFunction7(request_queue_t *);
 static void DAC960_InterruptHandler(int, void *, Registers_T *);
 static void DAC960_QueueMonitoringCommand(DAC960_Command_T *);
 static void DAC960_MonitoringTimerFunction(unsigned long);
index 80aa524723ef52a07062ebcf913b3bb3bbc5e7f0..d1631c973cbb09baa2572c1eec0ec5d4a66d085f 100644 (file)
@@ -360,7 +360,7 @@ static void acsi_times_out( unsigned long dummy );
 static void copy_to_acsibuffer( void );
 static void copy_from_acsibuffer( void );
 static void do_end_requests( void );
-static void do_acsi_request( void );
+static void do_acsi_request( request_queue_t * );
 static void redo_acsi_request( void );
 static int acsi_ioctl( struct inode *inode, struct file *file, unsigned int
                        cmd, unsigned long arg );
@@ -938,7 +938,7 @@ static void do_end_requests( void )
  *
  ***********************************************************************/
 
-static void do_acsi_request( void )
+static void do_acsi_request( request_queue_t * q )
 
 {
        stdma_lock( acsi_interrupt, NULL );
@@ -1808,7 +1808,7 @@ int acsi_init( void )
        phys_acsi_buffer = virt_to_phys( acsi_buffer );
        STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
        
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = 8;               /* 8 sector (4kB) read-ahead */
        acsi_gendisk.next = gendisk_head;
        gendisk_head = &acsi_gendisk;
@@ -1838,7 +1838,7 @@ void cleanup_module(void)
        struct gendisk ** gdp;
 
        del_timer( &acsi_timer );
-       blk_dev[MAJOR_NR].request_fn = 0;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
        atari_stram_free( acsi_buffer );
 
        if (unregister_blkdev( MAJOR_NR, "ad" ) != 0)
index a3ee727bde292d2fabea1d60b22145eda867b4a8..1c36e09f27cbf9bf13175ae6cfa831117962d3d0 100644 (file)
@@ -1484,7 +1484,7 @@ static void redo_fd_request(void)
        goto repeat;
 }
 
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
 {
        redo_fd_request();
 }
@@ -1869,7 +1869,7 @@ int __init amiga_floppy_init(void)
        post_write_timer.data = 0;
        post_write_timer.function = post_write;
   
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        blksize_size[MAJOR_NR] = floppy_blocksizes;
        blk_size[MAJOR_NR] = floppy_sizes;
 
@@ -1911,7 +1911,7 @@ void cleanup_module(void)
        amiga_chip_free(raw_buf);
        blk_size[MAJOR_NR] = NULL;
        blksize_size[MAJOR_NR] = NULL;
-       blk_dev[MAJOR_NR].request_fn = NULL;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
        unregister_blkdev(MAJOR_NR, "fd");
 }
 #endif
index b8df872053afa41e494191cd5d0267c293fb4d92..b47904cdbc644edb1163ee563669eff8cb0259a2 100644 (file)
@@ -1529,7 +1529,7 @@ repeat:
 }
 
 
-void do_fd_request(void)
+void do_fd_request(request_queue_t * q)
 {
        unsigned long flags;
 
@@ -2051,7 +2051,7 @@ int __init atari_floppy_init (void)
 
        blk_size[MAJOR_NR] = floppy_sizes;
        blksize_size[MAJOR_NR] = floppy_blocksizes;
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
 
        printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
               DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
@@ -2103,7 +2103,7 @@ void cleanup_module (void)
 {
        unregister_blkdev(MAJOR_NR, "fd");
 
-       blk_dev[MAJOR_NR].request_fn = 0;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
        timer_active &= ~(1 << FLOPPY_TIMER);
        timer_table[FLOPPY_TIMER].fn = 0;
        atari_stram_free( DMABuffer );
index 930e4857104c59bbffc948fed5ac93c62c91dbe7..5d5799a89e5f6a9143d2e44cb07a4d1bff493fc2 100644 (file)
@@ -140,14 +140,14 @@ static void do_ida_request(int i);
  */
 #define DO_IDA_REQUEST(x) { do_ida_request(x); }
 
-static void do_ida_request0(void) DO_IDA_REQUEST(0);
-static void do_ida_request1(void) DO_IDA_REQUEST(1);
-static void do_ida_request2(void) DO_IDA_REQUEST(2);
-static void do_ida_request3(void) DO_IDA_REQUEST(3);
-static void do_ida_request4(void) DO_IDA_REQUEST(4);
-static void do_ida_request5(void) DO_IDA_REQUEST(5);
-static void do_ida_request6(void) DO_IDA_REQUEST(6);
-static void do_ida_request7(void) DO_IDA_REQUEST(7);
+static void do_ida_request0(request_queue_t * q) DO_IDA_REQUEST(0);
+static void do_ida_request1(request_queue_t * q) DO_IDA_REQUEST(1);
+static void do_ida_request2(request_queue_t * q) DO_IDA_REQUEST(2);
+static void do_ida_request3(request_queue_t * q) DO_IDA_REQUEST(3);
+static void do_ida_request4(request_queue_t * q) DO_IDA_REQUEST(4);
+static void do_ida_request5(request_queue_t * q) DO_IDA_REQUEST(5);
+static void do_ida_request6(request_queue_t * q) DO_IDA_REQUEST(6);
+static void do_ida_request7(request_queue_t * q) DO_IDA_REQUEST(7);
 
 static void start_io(ctlr_info_t *h);
 
@@ -379,7 +379,7 @@ void cleanup_module(void)
  */
 void __init cpqarray_init(void)
 {
-       void (*request_fns[MAX_CTLR])(void) = {
+       void (*request_fns[MAX_CTLR])(request_queue_t *) = {
                do_ida_request0, do_ida_request1,
                do_ida_request2, do_ida_request3,
                do_ida_request4, do_ida_request5,
@@ -480,7 +480,9 @@ void __init cpqarray_init(void)
                ida_gendisk[i].sizes = ida_sizes + (i*256);
                /* ida_gendisk[i].nr_real is handled by getgeometry */
        
-               blk_dev[MAJOR_NR+i].request_fn = request_fns[i];
+               blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i), request_fns[i]);
+               blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR + i), 0);
+
                blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
                hardsect_size[MAJOR_NR+i] = ida_hardsizes + (i*256);
                read_ahead[MAJOR_NR+i] = READ_AHEAD;
@@ -894,10 +896,13 @@ static void do_ida_request(int ctlr)
        cmdlist_t *c;
        int seg, sect;
        char *lastdataend;
+       request_queue_t * q;
        struct buffer_head *bh;
        struct request *creq;
 
-       creq = blk_dev[MAJOR_NR+ctlr].current_request;
+       q = &blk_dev[MAJOR_NR+ctlr].request_queue;
+
+       creq = q->current_request;
        if (creq == NULL || creq->rq_status == RQ_INACTIVE)
                goto doreq_done;
 
@@ -974,7 +979,7 @@ DBGPX(              printk("More to do on same request %p\n", creq); );
        } else {
 DBGPX(         printk("Done with %p, queueing %p\n", creq, creq->next); );
                creq->rq_status = RQ_INACTIVE;
-               blk_dev[MAJOR_NR+ctlr].current_request = creq->next;
+               q->current_request = creq->next;
                wake_up(&wait_for_request);
        }
 
index 3f08912a4eda075bbdd8cba16706f8fa2baa5153..24dd21c3a8aaca5350ce0bcb15c1c2b32d3072c0 100644 (file)
@@ -2930,7 +2930,7 @@ static void process_fd_request(void)
        schedule_bh( (void *)(void *) redo_fd_request);
 }
 
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
 {
        if(usage_count == 0) {
                printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT);
@@ -4130,7 +4130,7 @@ int __init floppy_init(void)
 
        blk_size[MAJOR_NR] = floppy_sizes;
        blksize_size[MAJOR_NR] = floppy_blocksizes;
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
        config_types();
 
@@ -4159,7 +4159,7 @@ int __init floppy_init(void)
        fdc = 0; /* reset fdc in case of unexpected interrupt */
        if (floppy_grab_irq_and_dma()){
                del_timer(&fd_timeout);
-               blk_dev[MAJOR_NR].request_fn = NULL;
+               blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
                unregister_blkdev(MAJOR_NR,"fd");
                del_timer(&fd_timeout);
                return -EBUSY;
@@ -4225,7 +4225,7 @@ int __init floppy_init(void)
                schedule();
                if (usage_count)
                        floppy_release_irq_and_dma();
-               blk_dev[MAJOR_NR].request_fn = NULL;
+               blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
                unregister_blkdev(MAJOR_NR,"fd");               
        }
        return have_no_fdc;
@@ -4447,7 +4447,7 @@ void cleanup_module(void)
                
        unregister_blkdev(MAJOR_NR, "fd");
 
-       blk_dev[MAJOR_NR].request_fn = 0;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
        /* eject disk, if any */
        dummy = fd_eject(0);
 }
index 49527026a3f65cc45e09afbf51816b6930847420..8cf37c3de59a47befc760defa9dcb1f897e66ea7 100644 (file)
@@ -585,7 +585,7 @@ repeat:
        panic("unknown hd-command");
 }
 
-static void do_hd_request (void)
+static void do_hd_request (request_queue_t * q)
 {
        disable_irq(HD_IRQ);
        hd_request();
@@ -813,7 +813,7 @@ int __init hd_init(void)
                printk("hd: unable to get major %d for hard disk\n",MAJOR_NR);
                return -1;
        }
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = 8;               /* 8 sector (4kB) read-ahead */
        hd_gendisk.next = gendisk_head;
        gendisk_head = &hd_gendisk;
index 15b9cd100351dadef2333ae90ee7a9040a61d3db..133d9cd85ca4730ada6e19435a4bfa25cd522216 100644 (file)
@@ -242,7 +242,10 @@ int ide_multwrite (ide_drive_t *drive, unsigned int mcount)
                rq->sector += nsect;
 #endif
                if ((rq->nr_sectors -= nsect) <= 0)
+               {
+                       spin_unlock_irqrestore(&io_request_lock, flags);
                        break;
+               }
                if ((rq->current_nr_sectors -= nsect) == 0) {
                        if ((rq->bh = rq->bh->b_reqnext) != NULL) {
                                rq->current_nr_sectors = rq->bh->b_size>>9;
index 34ab3eb7b56ffa8d400d7dfc3692e9e06debfb23..c4dc22408c170499a989bbdc5e702f380b68bbd3 100644 (file)
@@ -704,7 +704,8 @@ static void init_gendisk (ide_hwif_t *hwif)
 
 static int hwif_init (ide_hwif_t *hwif)
 {
-       void (*rfn)(void);
+       ide_drive_t *drive;
+       void (*rfn)(request_queue_t *);
        
        if (!hwif->present)
                return 0;
@@ -786,11 +787,24 @@ static int hwif_init (ide_hwif_t *hwif)
        
        init_gendisk(hwif);
        blk_dev[hwif->major].data = hwif;
-       blk_dev[hwif->major].request_fn = rfn;
        blk_dev[hwif->major].queue = ide_get_queue;
        read_ahead[hwif->major] = 8;    /* (4kB) */
        hwif->present = 1;      /* success */
 
+       /*
+        * FIXME(eric) - This needs to be tested.  I *think* that this
+        * is correct.   Also, I believe that there is no longer any
+        * reason to have multiple functions (do_ide[0-7]_request)
+        * functions - the queuedata field could be used to indicate
+        * the correct hardware group - either this, or we could add
+        * a new field to request_queue_t to hold this information.
+        */
+       drive = &hwif->drives[0];
+       blk_init_queue(&drive->queue, rfn);
+
+       drive = &hwif->drives[1];
+       blk_init_queue(&drive->queue, rfn);
+
 #if (DEBUG_SPINLOCK > 0)
 {
        static int done = 0;
index cfd9f36f9d1ce1cfa0d04dd4cae2f176f513d826..0222f8a0c3efb7f27b567ed97372c20acd721c09 100644 (file)
@@ -493,8 +493,8 @@ void ide_end_request (byte uptodate, ide_hwgroup_t *hwgroup)
 
        if (!end_that_request_first(rq, uptodate, hwgroup->drive->name)) {
                add_blkdev_randomness(MAJOR(rq->rq_dev));
-               hwgroup->drive->queue = rq->next;
-               blk_dev[MAJOR(rq->rq_dev)].current_request = NULL;
+               hwgroup->drive->queue.current_request = rq->next;
+               blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
                hwgroup->rq = NULL;
                end_that_request_last(rq);
        }
@@ -755,8 +755,8 @@ void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err)
                }
        }
        spin_lock_irqsave(&io_request_lock, flags);
-       drive->queue = rq->next;
-       blk_dev[MAJOR(rq->rq_dev)].current_request = NULL;
+       drive->queue.current_request = rq->next;
+       blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
        HWGROUP(drive)->rq = NULL;
        rq->rq_status = RQ_INACTIVE;
        spin_unlock_irqrestore(&io_request_lock, flags);
@@ -1059,7 +1059,7 @@ static ide_startstop_t start_request (ide_drive_t *drive)
 {
        ide_startstop_t startstop;
        unsigned long block, blockend;
-       struct request *rq = drive->queue;
+       struct request *rq = drive->queue.current_request;
        unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
        ide_hwif_t *hwif = HWIF(drive);
 
@@ -1142,13 +1142,13 @@ repeat:
        best = NULL;
        drive = hwgroup->drive;
        do {
-               if (drive->queue && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
+               if (drive->queue.current_request && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
                        if (!best
                         || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
                         || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
                        {
                                struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major];
-                               if (bdev->current_request != &bdev->plug)
+                               if( !bdev->request_queue.plugged )
                                        best = drive;
                        }
                }
@@ -1228,8 +1228,8 @@ static void ide_do_request (ide_hwgroup_t *hwgroup)
                        drive = hwgroup->drive;
                        do {
                                bdev = &blk_dev[HWIF(drive)->major];
-                               if (bdev->current_request != &bdev->plug)       /* FIXME: this will do for now */
-                                       bdev->current_request = NULL;           /* (broken since patch-2.1.15) */
+                               if( !bdev->request_queue.plugged )
+                                       bdev->request_queue.current_request = NULL;             /* (broken since patch-2.1.15) */
                                if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
                                        sleep = drive->sleep;
                        } while ((drive = drive->next) != hwgroup->drive);
@@ -1267,9 +1267,9 @@ static void ide_do_request (ide_hwgroup_t *hwgroup)
                drive->service_start = jiffies;
 
                bdev = &blk_dev[hwif->major];
-               if (bdev->current_request == &bdev->plug)       /* FIXME: paranoia */
+               if( bdev->request_queue.plugged )       /* FIXME: paranoia */
                        printk("%s: Huh? nuking plugged queue\n", drive->name);
-               bdev->current_request = hwgroup->rq = drive->queue;
+               bdev->request_queue.current_request = hwgroup->rq = drive->queue.current_request;
                spin_unlock(&io_request_lock);
                if (!hwif->serialized)  /* play it safe with buggy hardware */
                        ide__sti();
@@ -1283,76 +1283,76 @@ static void ide_do_request (ide_hwgroup_t *hwgroup)
 /*
  * ide_get_queue() returns the queue which corresponds to a given device.
  */
-struct request **ide_get_queue (kdev_t dev)
+request_queue_t *ide_get_queue (kdev_t dev)
 {
        ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data;
 
        return &hwif->drives[DEVICE_NR(dev) & 1].queue;
 }
 
-void do_ide0_request (void)
+void do_ide0_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[0].hwgroup);
 }
 
 #if MAX_HWIFS > 1
-void do_ide1_request (void)
+void do_ide1_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[1].hwgroup);
 }
 #endif /* MAX_HWIFS > 1 */
 
 #if MAX_HWIFS > 2
-void do_ide2_request (void)
+void do_ide2_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[2].hwgroup);
 }
 #endif /* MAX_HWIFS > 2 */
 
 #if MAX_HWIFS > 3
-void do_ide3_request (void)
+void do_ide3_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[3].hwgroup);
 }
 #endif /* MAX_HWIFS > 3 */
 
 #if MAX_HWIFS > 4
-void do_ide4_request (void)
+void do_ide4_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[4].hwgroup);
 }
 #endif /* MAX_HWIFS > 4 */
 
 #if MAX_HWIFS > 5
-void do_ide5_request (void)
+void do_ide5_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[5].hwgroup);
 }
 #endif /* MAX_HWIFS > 5 */
 
 #if MAX_HWIFS > 6
-void do_ide6_request (void)
+void do_ide6_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[6].hwgroup);
 }
 #endif /* MAX_HWIFS > 6 */
 
 #if MAX_HWIFS > 7
-void do_ide7_request (void)
+void do_ide7_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[7].hwgroup);
 }
 #endif /* MAX_HWIFS > 7 */
 
 #if MAX_HWIFS > 8
-void do_ide8_request (void)
+void do_ide8_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[8].hwgroup);
 }
 #endif /* MAX_HWIFS > 8 */
 
 #if MAX_HWIFS > 9
-void do_ide9_request (void)
+void do_ide9_request (request_queue_t *q)
 {
        ide_do_request (ide_hwifs[9].hwgroup);
 }
@@ -1576,10 +1576,12 @@ void ide_intr (int irq, void *dev_id, struct pt_regs *regs)
        hwgroup->handler = NULL;
        del_timer(&hwgroup->timer);
        spin_unlock(&io_request_lock);
+
        if (drive->unmask)
                ide__sti();     /* local CPU only */
        startstop = handler(drive);             /* service this interrupt, may set handler for next interrupt */
        spin_lock_irq(&io_request_lock);
+
        /*
         * Note that handler() may have set things up for another
         * interrupt to occur soon, but it cannot happen until
@@ -1683,10 +1685,10 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
        if (action == ide_wait)
                rq->sem = &sem;
        spin_lock_irqsave(&io_request_lock, flags);
-       cur_rq = drive->queue;
+       cur_rq = drive->queue.current_request;
        if (cur_rq == NULL || action == ide_preempt) {
                rq->next = cur_rq;
-               drive->queue = rq;
+               drive->queue.current_request = rq;
                if (action == ide_preempt)
                        hwgroup->rq = NULL;
        } else {
@@ -1993,7 +1995,7 @@ void ide_unregister (unsigned int index)
        kfree(blksize_size[hwif->major]);
        kfree(max_sectors[hwif->major]);
        kfree(max_readahead[hwif->major]);
-       blk_dev[hwif->major].request_fn = NULL;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(hwif->major));
        blk_dev[hwif->major].data = NULL;
        blk_dev[hwif->major].queue = NULL;
        blksize_size[hwif->major] = NULL;
index 1da3702b0b218bd163f7641db42d7dbbf75bdf7e..416586d0b84e59033a97c04fa6423b27132e607a 100644 (file)
@@ -142,14 +142,49 @@ static inline int get_max_segments(kdev_t dev)
  * NOTE: the device-specific queue() functions
  * have to be atomic!
  */
-static inline struct request **get_queue(kdev_t dev)
+static inline request_queue_t *get_queue(kdev_t dev)
 {
        int major = MAJOR(dev);
        struct blk_dev_struct *bdev = blk_dev + major;
 
        if (bdev->queue)
                return bdev->queue(dev);
-       return &blk_dev[major].current_request;
+       return &blk_dev[major].request_queue;
+}
+
+void blk_cleanup_queue(request_queue_t * q)
+{
+       memset(q, 0, sizeof(*q));
+}
+
+void blk_queue_headactive(request_queue_t * q, int active)
+{
+       q->head_active     = active;
+}
+
+void blk_queue_pluggable(request_queue_t * q, int use_plug)
+{
+       q->use_plug        = use_plug;
+}
+
+void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
+{
+       q->request_fn      = rfn;
+       q->current_request = NULL;
+       q->merge_fn        = NULL;
+       q->merge_requests_fn = NULL;
+       q->plug_tq.sync    = 0;
+       q->plug_tq.routine = &unplug_device;
+       q->plug_tq.data    = q;
+       q->plugged         = 0;
+       /*
+        * These booleans describe the queue properties.  We set the
+        * default (and most common) values here.  Other drivers can
+        * use the appropriate functions to alter the queue properties.
+        * as appropriate.
+        */
+       q->use_plug        = 1;
+       q->head_active     = 1;
 }
 
 /*
@@ -157,22 +192,18 @@ static inline struct request **get_queue(kdev_t dev)
  */
 void unplug_device(void * data)
 {
-       struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
-       int queue_new_request=0;
+       request_queue_t * q = (request_queue_t *) data;
        unsigned long flags;
 
        spin_lock_irqsave(&io_request_lock,flags);
-       if (dev->current_request == &dev->plug) {
-               struct request * next = dev->plug.next;
-               dev->current_request = next;
-               if (next || dev->queue) {
-                       dev->plug.next = NULL;
-                       queue_new_request = 1;
+       if( q->plugged )
+       {
+               q->plugged = 0;
+               if( q->current_request != NULL )
+               {
+                       (q->request_fn)(q);
                }
        }
-       if (queue_new_request)
-               (dev->request_fn)();
-
        spin_unlock_irqrestore(&io_request_lock,flags);
 }
 
@@ -184,12 +215,13 @@ void unplug_device(void * data)
  * This is called with interrupts off and no requests on the queue.
  * (and with the request spinlock aquired)
  */
-static inline void plug_device(struct blk_dev_struct * dev)
+static inline void plug_device(request_queue_t * q)
 {
-       if (dev->current_request)
+       if (q->current_request)
                return;
-       dev->current_request = &dev->plug;
-       queue_task(&dev->plug_tq, &tq_disk);
+
+       q->plugged = 1;
+       queue_task(&q->plug_tq, &tq_disk);
 }
 
 /*
@@ -221,6 +253,7 @@ static inline struct request * get_request(int n, kdev_t dev)
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
+       req->special = NULL;
        return req;
 }
 
@@ -335,12 +368,11 @@ static inline void drive_stat_acct(struct request *req,
  * which is important for drive_stat_acct() above.
  */
 
-void add_request(struct blk_dev_struct * dev, struct request * req)
+static void add_request(request_queue_t * q, struct request * req)
 {
        int major = MAJOR(req->rq_dev);
-       struct request * tmp, **current_request;
+       struct request * tmp;
        unsigned long flags;
-       int queue_new_request = 0;
 
        drive_stat_acct(req, req->nr_sectors, 1);
        req->next = NULL;
@@ -349,12 +381,9 @@ void add_request(struct blk_dev_struct * dev, struct request * req)
         * We use the goto to reduce locking complexity
         */
        spin_lock_irqsave(&io_request_lock,flags);
-       current_request = get_queue(req->rq_dev);
 
-       if (!(tmp = *current_request)) {
-               *current_request = req;
-               if (dev->current_request != &dev->plug)
-                       queue_new_request = 1;
+       if (!(tmp = q->current_request)) {
+               q->current_request = req;
                goto out;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
@@ -372,26 +401,34 @@ void add_request(struct blk_dev_struct * dev, struct request * req)
        req->next = tmp->next;
        tmp->next = req;
 
-/* for SCSI devices, call request_fn unconditionally */
-       if (scsi_blk_major(major))
-               queue_new_request = 1;
-       if (major >= COMPAQ_SMART2_MAJOR+0 &&
-           major <= COMPAQ_SMART2_MAJOR+7)
-               queue_new_request = 1;
+       /*
+        * FIXME(eric) I don't understand why there is a need for this
+        * special case code.  It clearly doesn't fit any more with
+        * the new queueing architecture, and it got added in 2.3.10.  
+        * I am leaving this in here until I hear back from the COMPAQ
+        * people.
+        */
+       if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
+       {
+               (q->request_fn)(q);
+       }
+
        if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
-               queue_new_request = 1;
+       {
+               (q->request_fn)(q);
+       }
+
 out:
-       if (queue_new_request)
-               (dev->request_fn)();
        spin_unlock_irqrestore(&io_request_lock,flags);
 }
 
 /*
  * Has to be called with the request spinlock aquired
  */
-static inline void attempt_merge (struct request *req,
-                               int max_sectors,
-                               int max_segments)
+static inline void attempt_merge (request_queue_t * q,
+                                 struct request *req, 
+                                 int max_sectors,
+                                 int max_segments)
 {
        struct request *next = req->next;
        int total_segments;
@@ -407,16 +444,37 @@ static inline void attempt_merge (struct request *req,
                total_segments--;
        if (total_segments > max_segments)
                return;
+
+       if( q->merge_requests_fn != NULL )
+       {
+               /*
+                * If we are not allowed to merge these requests, then
+                * return.  If we are allowed to merge, then the count
+                * will have been updated to the appropriate number,
+                * and we shouldn't do it here too.
+                */
+               if( !(q->merge_requests_fn)(q, req, next) )
+               {
+                       return;
+               }
+       }
+       else
+       {
+               req->nr_segments = total_segments;
+       }
+
        req->bhtail->b_reqnext = next->bh;
        req->bhtail = next->bhtail;
        req->nr_sectors += next->nr_sectors;
-       req->nr_segments = total_segments;
        next->rq_status = RQ_INACTIVE;
        req->next = next->next;
        wake_up (&wait_for_request);
 }
 
-void make_request(int major,int rw, struct buffer_head * bh)
+static void __make_request(request_queue_t * q,
+                          int major,
+                          int rw, 
+                          struct buffer_head * bh)
 {
        unsigned int sector, count;
        struct request * req;
@@ -519,13 +577,20 @@ void make_request(int major,int rw, struct buffer_head * bh)
         * not to schedule or do something nonatomic
         */
        spin_lock_irqsave(&io_request_lock,flags);
-       req = *get_queue(bh->b_rdev);
+       req = q->current_request;
        if (!req) {
                /* MD and loop can't handle plugging without deadlocking */
                if (major != MD_MAJOR && major != LOOP_MAJOR && 
-                   major != DDV_MAJOR && major != NBD_MAJOR)
-                       plug_device(blk_dev + major); /* is atomic */
+                   major != DDV_MAJOR && major != NBD_MAJOR
+                   && q->use_plug)
+                       plug_device(q); /* is atomic */
        } else switch (major) {
+            /*
+             * FIXME(eric) - this entire switch statement is going away
+             * soon, and we will instead key off of q->head_active to decide
+             * whether the top request in the queue is active on the device
+             * or not.
+             */
             case IDE0_MAJOR:   /* same as HD_MAJOR */
             case IDE1_MAJOR:
             case FLOPPY_MAJOR:
@@ -548,7 +613,7 @@ void make_request(int major,int rw, struct buffer_head * bh)
                 * All other drivers need to jump over the first entry, as that
                 * entry may be busy being processed and we thus can't change it.
                 */
-               if (req == blk_dev[major].current_request)
+               if (req == q->current_request)
                        req = req->next;
                if (!req)
                        break;
@@ -592,25 +657,71 @@ void make_request(int major,int rw, struct buffer_head * bh)
                                continue;
                        /* Can we add it to the end of this request? */
                        if (req->sector + req->nr_sectors == sector) {
-                               if (req->bhtail->b_data + req->bhtail->b_size
-                                   != bh->b_data) {
-                                       if (req->nr_segments < max_segments)
-                                               req->nr_segments++;
-                                       else continue;
+                               /*
+                                * The merge_fn is a more advanced way
+                                * of accomplishing the same task.  Instead
+                                * of applying a fixed limit of some sort
+                                * we instead define a function which can
+                                * determine whether or not it is safe to
+                                * merge the request or not.
+                                */
+                               if( q->merge_fn == NULL )
+                               {
+                                       if (req->bhtail->b_data + req->bhtail->b_size
+                                           != bh->b_data) {
+                                               if (req->nr_segments < max_segments)
+                                                       req->nr_segments++;
+                                               else continue;
+                                       }
+                               }
+                               else
+                               {
+                                       /*
+                                        * See if this queue has rules that
+                                        * may suggest that we shouldn't merge
+                                        * this 
+                                        */
+                                       if( !(q->merge_fn)(q, req, bh) )
+                                       {
+                                               continue;
+                                       }
                                }
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                drive_stat_acct(req, count, 0);
                                /* Can we now merge this req with the next? */
-                               attempt_merge(req, max_sectors, max_segments);
+                               attempt_merge(q, req, max_sectors, max_segments);
                        /* or to the beginning? */
                        } else if (req->sector - count == sector) {
-                               if (bh->b_data + bh->b_size
-                                   != req->bh->b_data) {
-                                       if (req->nr_segments < max_segments)
-                                               req->nr_segments++;
-                                       else continue;
+                               /*
+                                * The merge_fn is a more advanced way
+                                * of accomplishing the same task.  Instead
+                                * of applying a fixed limit of some sort
+                                * we instead define a function which can
+                                * determine whether or not it is safe to
+                                * merge the request or not.
+                                */
+                               if( q->merge_fn == NULL )
+                               {
+                                       if (bh->b_data + bh->b_size
+                                           != req->bh->b_data) {
+                                               if (req->nr_segments < max_segments)
+                                                       req->nr_segments++;
+                                               else continue;
+                                       }
+                               }
+                               else
+                               {
+                                       /*
+                                        * See if this queue has rules that
+                                        * may suggest that we shouldn't merge
+                                        * this 
+                                        */
+                                       if( !(q->merge_fn)(q, req, bh) )
+                                       {
+                                               continue;
+                                       }
                                }
                                bh->b_reqnext = req->bh;
                                req->bh = bh;
@@ -645,20 +756,37 @@ void make_request(int major,int rw, struct buffer_head * bh)
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
-       req->nr_segments = 1;
        req->current_nr_sectors = count;
+       req->nr_segments = 1; /* Always 1 for a new request. */
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
-       add_request(major+blk_dev,req);
+       add_request(q, req);
        return;
 
 end_io:
        bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
 }
 
+void make_request(int major,int rw,  struct buffer_head * bh)
+{
+       request_queue_t * q;
+       unsigned long flags;
+       
+       q = get_queue(bh->b_dev);
+
+       __make_request(q, major, rw, bh);
+
+       spin_lock_irqsave(&io_request_lock,flags);
+       if( !q->plugged )
+               (q->request_fn)(q);
+       spin_unlock_irqrestore(&io_request_lock,flags);
+}
+
+
+
 /* This function can be used to request a number of buffers from a block
    device. Currently the only restriction is that all buffers must belong to
    the same device */
@@ -667,13 +795,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
 {
        unsigned int major;
        int correct_size;
-       struct blk_dev_struct * dev;
+       request_queue_t         * q;
+       unsigned long flags;
        int i;
 
-       dev = NULL;
-       if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
-               dev = blk_dev + major;
-       if (!dev || !dev->request_fn) {
+
+       major = MAJOR(bh[0]->b_dev);
+       if (!(q = get_queue(bh[0]->b_dev))) {
                printk(KERN_ERR
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
@@ -726,8 +854,15 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
                        continue;
                }
 #endif
-               make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
+               __make_request(q, MAJOR(bh[i]->b_rdev), rw, bh[i]);
+       }
+
+       spin_lock_irqsave(&io_request_lock,flags);
+       if( !q->plugged )
+       {
+               (q->request_fn)(q);
        }
+       spin_unlock_irqrestore(&io_request_lock,flags);
        return;
 
       sorry:
@@ -801,15 +936,8 @@ int __init blk_dev_init(void)
        struct blk_dev_struct *dev;
 
        for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
-               dev->request_fn      = NULL;
                dev->queue           = NULL;
-               dev->current_request = NULL;
-               dev->plug.rq_status  = RQ_INACTIVE;
-               dev->plug.cmd        = -1;
-               dev->plug.next       = NULL;
-               dev->plug_tq.sync    = 0;
-               dev->plug_tq.routine = &unplug_device;
-               dev->plug_tq.data    = dev;
+               blk_init_queue(&dev->request_queue, NULL);
        }
 
        req = all_requests + NR_REQUEST;
@@ -924,3 +1052,6 @@ int __init blk_dev_init(void)
 EXPORT_SYMBOL(io_request_lock);
 EXPORT_SYMBOL(end_that_request_first);
 EXPORT_SYMBOL(end_that_request_last);
+EXPORT_SYMBOL(blk_init_queue);
+EXPORT_SYMBOL(blk_cleanup_queue);
+EXPORT_SYMBOL(blk_queue_headactive);
index 3459ec1fd01e4bedcca6115058bcfe66fc8a3439..a950172ff2e8e30428105229758cf43a07677899 100644 (file)
@@ -164,7 +164,7 @@ static void figure_loop_size(struct loop_device *lo)
        loop_sizes[lo->lo_number] = size;
 }
 
-static void do_lo_request(void)
+static void do_lo_request(request_queue_t * q)
 {
        int     real_block, block, offset, len, blksize, size;
        char    *dest_addr;
@@ -754,7 +754,7 @@ int __init loop_init(void)
                return -ENOMEM;
        }               
 
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        for (i=0; i < max_loop; i++) {
                memset(&loop_dev[i], 0, sizeof(struct loop_device));
                loop_dev[i].lo_number = i;
index 2a791dede3269b9546ca34a27399ae12c9ece156..b525ef2e973e621b26021c9f4a9b3ece7c04640c 100644 (file)
@@ -761,7 +761,7 @@ int md_make_request (int minor, int rw, struct buffer_head * bh)
        }
 }
 
-static void do_md_request (void)
+static void do_md_request (request_queue_t * q)
 {
   printk ("Got md request, not good...");
   return;
@@ -1274,8 +1274,7 @@ int __init md_init (void)
     return (-1);
   }
 
-  blk_dev[MD_MAJOR].request_fn=DEVICE_REQUEST;
-  blk_dev[MD_MAJOR].current_request=NULL;
+  blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
   read_ahead[MD_MAJOR]=INT_MAX;
   memset(md_dev, 0, MAX_MD_DEV * sizeof (struct md_dev));
   md_gendisk.next=gendisk_head;
index 7c24449ac014eba820294ee47a4c6b9d564d7171..cda45cc0193a3e86c3fe0dc5a10360a6cfc8fcf5 100644 (file)
@@ -290,7 +290,7 @@ void nbd_clear_que(struct nbd_device *lo)
 #undef FAIL
 #define FAIL( s ) { printk( KERN_ERR "NBD, minor %d: " s "\n", dev ); goto error_out; }
 
-static void do_nbd_request(void)
+static void do_nbd_request(request_queue_t * q)
 {
        struct request *req;
        int dev;
@@ -488,7 +488,7 @@ int nbd_init(void)
 #endif
        blksize_size[MAJOR_NR] = nbd_blksizes;
        blk_size[MAJOR_NR] = nbd_sizes;
-       blk_dev[MAJOR_NR].request_fn = do_nbd_request;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_nbd_request);
        for (i = 0; i < MAX_NBD; i++) {
                nbd_dev[i].refcnt = 0;
                nbd_dev[i].file = NULL;
index 96e0e421e82c9b18d5cdc7a45f2987ebbeaa0173..7db6626f46026ce949a653eff69c33bfb922ce53 100644 (file)
@@ -220,7 +220,7 @@ static int pcd_packet(struct cdrom_device_info *cdi,
 static int     pcd_detect(void);
 static void    pcd_probe_capabilities(void);
 static void     do_pcd_read_drq(void);
-static void    do_pcd_request(void);
+static void    do_pcd_request(request_queue_t * q);
 static void    do_pcd_read(void);
 
 static int pcd_blocksizes[PCD_UNITS];
@@ -343,7 +343,7 @@ int pcd_init (void) /* preliminary initialisation */
        for (unit=0;unit<PCD_UNITS;unit++)
                if (PCD.present) register_cdrom(&PCD.info);
 
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
 
        for (i=0;i<PCD_UNITS;i++) pcd_blocksizes[i] = 1024;
@@ -750,7 +750,7 @@ static int pcd_detect( void )
 
 /* I/O request processing */
 
-static void do_pcd_request (void)
+static void do_pcd_request (request_queue_t * q)
 
 {       int unit;
 
@@ -814,7 +814,7 @@ static void pcd_start( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                pcd_busy = 0;
                end_request(0);
-               do_pcd_request();
+               do_pcd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                return;
        }
@@ -838,7 +838,7 @@ static void do_pcd_read( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                end_request(1);
                pcd_busy = 0;
-               do_pcd_request();
+               do_pcd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                return;
        }
@@ -862,14 +862,14 @@ static void do_pcd_read_drq( void )
                pcd_busy = 0;
                pcd_bufblk = -1;
                end_request(0);
-               do_pcd_request();
+               do_pcd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                return;
        }
 
        do_pcd_read();
        spin_lock_irqsave(&io_request_lock,saved_flags);
-       do_pcd_request();
+       do_pcd_request(NULL);
        spin_unlock_irqrestore(&io_request_lock,saved_flags); 
 }
 
index bd28390533dccce094014d92300cb5a4e8216b2c..cf8f1fc2f23def372521264d1465196e2e41c14b 100644 (file)
@@ -265,7 +265,7 @@ void cleanup_module( void );
 #endif
 static void pd_geninit(struct gendisk *ignored);
 static int pd_open(struct inode *inode, struct file *file);
-static void do_pd_request(void);
+static void do_pd_request(request_queue_t * q);
 static int pd_ioctl(struct inode *inode,struct file *file,
                     unsigned int cmd, unsigned long arg);
 static int pd_release (struct inode *inode, struct file *file);
@@ -404,7 +404,7 @@ int pd_init (void)
                         name,major);
                 return -1;
         }
-        blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
         read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
         
        pd_gendisk.major = major;
@@ -894,7 +894,7 @@ static int pd_ready( void )
        return (!(RR(1,6) & STAT_BUSY)) ;
 }
 
-static void do_pd_request (void)
+static void do_pd_request (request_queue_t * q)
 
 {       struct buffer_head * bh;
        struct request * req;
@@ -992,7 +992,7 @@ static void do_pd_read_start( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
-                do_pd_request();
+               do_pd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
         }
@@ -1016,7 +1016,7 @@ static void do_pd_read_drq( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
-                do_pd_request();
+               do_pd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
             }
@@ -1031,7 +1031,7 @@ static void do_pd_read_drq( void )
        spin_lock_irqsave(&io_request_lock,saved_flags);
         end_request(1);
         pd_busy = 0;
-        do_pd_request();
+       do_pd_request(NULL);
        spin_unlock_irqrestore(&io_request_lock,saved_flags);
 }
 
@@ -1058,7 +1058,7 @@ static void do_pd_write_start( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
-                do_pd_request();
+               do_pd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
         }
@@ -1074,7 +1074,7 @@ static void do_pd_write_start( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
-                do_pd_request();
+               do_pd_request(NULL);
                 spin_unlock_irqrestore(&io_request_lock,saved_flags);
                return;
             }
@@ -1103,7 +1103,7 @@ static void do_pd_write_done( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
-                do_pd_request();
+               do_pd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
         }
@@ -1111,7 +1111,7 @@ static void do_pd_write_done( void )
        spin_lock_irqsave(&io_request_lock,saved_flags);
         end_request(1);
         pd_busy = 0;
-        do_pd_request();
+       do_pd_request(NULL);
        spin_unlock_irqrestore(&io_request_lock,saved_flags);
 }
 
index 4dba8c8b5d2dfc63d9216735d7b398be9597aaa0..1b935dc5030a6c1122345ccbf3239b7ac6ba2b86 100644 (file)
@@ -246,7 +246,7 @@ int pf_init(void);
 void cleanup_module( void );
 #endif
 static int pf_open(struct inode *inode, struct file *file);
-static void do_pf_request(void);
+static void do_pf_request(request_queue_t * q);
 static int pf_ioctl(struct inode *inode,struct file *file,
                     unsigned int cmd, unsigned long arg);
 
@@ -365,7 +365,7 @@ int pf_init (void)      /* preliminary initialisation */
                         major);
                 return -1;
         }
-        blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
         read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
         
        for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
@@ -863,7 +863,7 @@ static int pf_ready( void )
        return (((RR(1,6)&(STAT_BUSY|pf_mask)) == pf_mask));
 }
 
-static void do_pf_request (void)
+static void do_pf_request (request_queue_t * q)
 
 {       struct buffer_head * bh;
        struct request * req;
@@ -958,7 +958,7 @@ static void do_pf_read_start( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pf_busy = 0;
-                do_pf_request();
+               do_pf_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
         }
@@ -984,7 +984,7 @@ static void do_pf_read_drq( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pf_busy = 0;
-                do_pf_request();
+               do_pf_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
             }
@@ -999,7 +999,7 @@ static void do_pf_read_drq( void )
        spin_lock_irqsave(&io_request_lock,saved_flags); 
         end_request(1);
         pf_busy = 0;
-        do_pf_request();
+       do_pf_request(NULL);
        spin_unlock_irqrestore(&io_request_lock,saved_flags);
 }
 
@@ -1025,7 +1025,7 @@ static void do_pf_write_start( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pf_busy = 0;
-                do_pf_request();
+               do_pf_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
         }
@@ -1042,7 +1042,7 @@ static void do_pf_write_start( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pf_busy = 0;
-                do_pf_request();
+               do_pf_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
             }
@@ -1072,7 +1072,7 @@ static void do_pf_write_done( void )
                spin_lock_irqsave(&io_request_lock,saved_flags);
                 end_request(0);
                 pf_busy = 0;
-                do_pf_request();
+               do_pf_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                 return;
         }
@@ -1080,7 +1080,7 @@ static void do_pf_write_done( void )
        spin_lock_irqsave(&io_request_lock,saved_flags);
         end_request(1);
         pf_busy = 0;
-        do_pf_request();
+       do_pf_request(NULL);
        spin_unlock_irqrestore(&io_request_lock,saved_flags);
 }
 
index 221a3297acf2f9471bcfc66bcd11e017255f8e66..a90f3bbd6c2a423b3508a0e8d015b51e4a700392 100644 (file)
@@ -70,7 +70,7 @@ int ps2esdi_init(void);
 
 static void ps2esdi_geninit(struct gendisk *ignored);
 
-static void do_ps2esdi_request(void);
+static void do_ps2esdi_request(request_queue_t * q);
 
 static void ps2esdi_readwrite(int cmd, u_char drive, u_int block, u_int count);
 
@@ -188,7 +188,7 @@ int __init ps2esdi_init(void)
                return -1;
        }
        /* set up some global information - indicating device specific info */
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
 
        /* some minor housekeeping - setup the global gendisk structure */
@@ -464,7 +464,7 @@ static void __init ps2esdi_get_device_cfg(void)
 }
 
 /* strategy routine that handles most of the IO requests */
-static void do_ps2esdi_request(void)
+static void do_ps2esdi_request(request_queue_t * q)
 {
        u_int block, count;
        /* since, this routine is called with interrupts cleared - they 
@@ -487,7 +487,7 @@ static void do_ps2esdi_request(void)
                printk("%s: DMA above 16MB not supported\n", DEVICE_NAME);
                end_request(FAIL);
                if (CURRENT)
-                       do_ps2esdi_request();
+                       do_ps2esdi_request(q);
                return;
        }                       /* check for above 16Mb dmas */
        if ((CURRENT_DEV < ps2esdi_drives) &&
@@ -521,7 +521,7 @@ static void do_ps2esdi_request(void)
                        printk("%s: Unknown command\n", DEVICE_NAME);
                        end_request(FAIL);
                        if (CURRENT)
-                               do_ps2esdi_request();
+                               do_ps2esdi_request(q);
                        break;
                }               /* handle different commands */
        }
@@ -531,7 +531,7 @@ static void do_ps2esdi_request(void)
                       CURRENT->sector, ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects);
                end_request(FAIL);
                if (CURRENT)
-                       do_ps2esdi_request();
+                       do_ps2esdi_request(q);
        }
 
 }                              /* main strategy routine */
@@ -598,11 +598,11 @@ static void ps2esdi_readwrite(int cmd, u_char drive, u_int block, u_int count)
        if (ps2esdi_out_cmd_blk(cmd_blk)) {
                printk("%s: Controller failed\n", DEVICE_NAME);
                if ((++CURRENT->errors) < MAX_RETRIES)
-                       return do_ps2esdi_request();
+                       return do_ps2esdi_request(NULL);
                else {
                        end_request(FAIL);
                        if (CURRENT)
-                               do_ps2esdi_request();
+                               do_ps2esdi_request(NULL);
                }
        }
        /* check for failure to put out the command block */ 
@@ -901,11 +901,11 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
                        outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
                        outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
                        if ((++CURRENT->errors) < MAX_RETRIES)
-                               do_ps2esdi_request();
+                               do_ps2esdi_request(NULL);
                        else {
                                end_request(FAIL);
                                if (CURRENT)
-                                       do_ps2esdi_request();
+                                       do_ps2esdi_request(NULL);
                        }
                        break;
                }
@@ -947,11 +947,11 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
                outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
                outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
                if ((++CURRENT->errors) < MAX_RETRIES)
-                       do_ps2esdi_request();
+                       do_ps2esdi_request(NULL);
                else {
                        end_request(FAIL);
                        if (CURRENT)
-                               do_ps2esdi_request();
+                               do_ps2esdi_request(NULL);
                }
                break;
 
@@ -961,7 +961,7 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
                outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
                end_request(FAIL);
                if (CURRENT)
-                       do_ps2esdi_request();
+                       do_ps2esdi_request(NULL);
                break;
 
        case INT_CMD_FORMAT:
@@ -993,11 +993,11 @@ static void ps2esdi_continue_request(void)
        if (CURRENT->nr_sectors -= CURRENT->current_nr_sectors) {
                CURRENT->buffer += CURRENT->current_nr_sectors * SECT_SIZE;
                CURRENT->sector += CURRENT->current_nr_sectors;
-               do_ps2esdi_request();
+               do_ps2esdi_request(NULL);
        } else {
                end_request(SUCCES);
                if (CURRENT)
-                       do_ps2esdi_request();
+                       do_ps2esdi_request(NULL);
        }
 }
 
index 75bbae97befc20e0269de7279b33bc2ebffd8e56..f83a7616299bbff39d06183c0fdf4f1e683d9645 100644 (file)
@@ -181,7 +181,7 @@ __setup("ramdisk_size=", ramdisk_size2);
  *  allocated size, we must get rid of it...
  *
  */
-static void rd_request(void)
+static void rd_request(request_queue_t * q)
 {
        unsigned int minor;
        unsigned long offset, len;
@@ -350,7 +350,7 @@ static void __exit rd_cleanup (void)
                invalidate_buffers(MKDEV(MAJOR_NR, i));
 
        unregister_blkdev( MAJOR_NR, "ramdisk" );
-       blk_dev[MAJOR_NR].request_fn = 0;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
 }
 
 /* This is the registration and initialization section of the RAM disk driver */
@@ -371,7 +371,7 @@ int __init rd_init (void)
                return -EIO;
        }
 
-       blk_dev[MAJOR_NR].request_fn = &rd_request;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &rd_request);
 
        for (i = 0; i < NUM_RAMDISKS; i++) {
                /* rd_size is given in kB */
index cee5493da0ec4359c23b539e360ed4c841d29d6a..06cd279af91b7ffe9510851e0e2ac8687ccc3dcd 100644 (file)
@@ -219,7 +219,7 @@ static unsigned short write_postamble[] = {
 static void swim3_select(struct floppy_state *fs, int sel);
 static void swim3_action(struct floppy_state *fs, int action);
 static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(void);
+static void do_fd_request(request_queue_t * q);
 static void start_request(struct floppy_state *fs);
 static void set_timeout(struct floppy_state *fs, int nticks,
                        void (*proc)(unsigned long));
@@ -290,7 +290,7 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
        return (stat & DATA) == 0;
 }
 
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
 {
        int i;
        for(i=0;i<floppy_count;i++)
@@ -1089,7 +1089,7 @@ int swim3_init(void)
                               MAJOR_NR);
                        return -EBUSY;
                }
-               blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+               blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
                blksize_size[MAJOR_NR] = floppy_blocksizes;
                blk_size[MAJOR_NR] = floppy_sizes;
        }
index 462ef7f26e7bf7eac8545d294662e83276f56b37..f15f590a4255ee4edc5b1d581c27eb96d0b88735 100644 (file)
@@ -121,7 +121,7 @@ static void release_drive(struct floppy_state *fs);
 static void set_timeout(struct floppy_state *fs, int nticks,
                        void (*proc)(unsigned long));
 static void fd_request_timeout(unsigned long);
-static void do_fd_request(void);
+static void do_fd_request(request_queue_t * q);
 static void start_request(struct floppy_state *fs);
 
 static struct file_operations floppy_fops = {
@@ -163,7 +163,7 @@ int swimiop_init(void)
                       MAJOR_NR);
                return -EBUSY;
        }
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        blksize_size[MAJOR_NR] = floppy_blocksizes;
        blk_size[MAJOR_NR] = floppy_sizes;
 
@@ -566,7 +566,7 @@ static void set_timeout(struct floppy_state *fs, int nticks,
        restore_flags(flags);
 }
 
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
 {
        int i;
 
index 56b4d63cfbf83910faacc129ef9b4e2261d4532f..6d2bde45a4e5d6b554ec69dd8667826e768c6c36 100644 (file)
@@ -185,7 +185,7 @@ int __init xd_init (void)
                printk("xd: Unable to get major number %d\n",MAJOR_NR);
                return -1;
        }
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
        xd_gendisk.next = gendisk_head;
        gendisk_head = &xd_gendisk;
@@ -284,7 +284,7 @@ static int xd_open (struct inode *inode,struct file *file)
 }
 
 /* do_xd_request: handle an incoming request */
-static void do_xd_request (void)
+static void do_xd_request (request_queue_t * q)
 {
        u_int block,count,retry;
        int code;
@@ -1143,7 +1143,7 @@ static void xd_done (void)
        struct gendisk ** gdp;
        
        blksize_size[MAJOR_NR] = NULL;
-       blk_dev[MAJOR_NR].request_fn = NULL;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
        blk_size[MAJOR_NR] = NULL;
        hardsect_size[MAJOR_NR] = NULL;
        read_ahead[MAJOR_NR] = 0;
index b45fae4e70812acca50d2fd38c9e805eabb981d3..c121e8e9f949d302d59da71b6bebda96eee59ef4 100644 (file)
@@ -112,7 +112,7 @@ static u_char xd_initdrives (void (*init_drive)(u_char drive));
 static void xd_geninit (struct gendisk *);
 
 static int xd_open (struct inode *inode,struct file *file);
-static void do_xd_request (void);
+static void do_xd_request (request_queue_t * q);
 static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
 static int xd_release (struct inode *inode,struct file *file);
 static int xd_reread_partitions (kdev_t dev);
index f75150e4a4f54636fca9dcd25063cf3fa4a6619e..cceb82dd5204317d0a0d5c7a440d9e4fcaec7aa8 100644 (file)
@@ -68,7 +68,7 @@ static int list_count       = 0;
 static int current_device   = -1;
 
 static void
-do_z2_request( void )
+do_z2_request( request_queue_t * q )
 {
     u_long start, len, addr, size;
 
@@ -373,7 +373,7 @@ z2_init( void )
            }
     }    
    
-    blk_dev[ MAJOR_NR ].request_fn = DEVICE_REQUEST;
+    blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
     blksize_size[ MAJOR_NR ] = z2_blocksizes;
     blk_size[ MAJOR_NR ] = z2_sizes;
 
index 4c03f4917b121eb4a88370c637590443e4b6e72d..e548170a0da2b82eb31bd14eb4aa35ba74eeb09a 100644 (file)
@@ -355,7 +355,7 @@ void        aztcd_setup(char *str, int *ints);
 static int  check_aztcd_media_change(kdev_t full_dev);
 static int  aztcd_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
 static void azt_transfer(void);
-static void do_aztcd_request(void);
+static void do_aztcd_request(request_queue_t *);
 static void azt_invalidate_buffers(void);
 int         aztcd_open(struct inode *ip, struct file *fp);
 
@@ -1478,7 +1478,7 @@ static void azt_transfer(void)
   }
 }
 
-static void do_aztcd_request(void)
+static void do_aztcd_request(request_queue_t * q)
 {
 #ifdef AZT_TEST
   printk(" do_aztcd_request(%ld+%ld) Time:%li\n", CURRENT -> sector, CURRENT -> nr_sectors,jiffies);
@@ -1798,7 +1798,7 @@ int __init aztcd_init(void)
                       MAJOR_NR);
                 return -EIO;
        }
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
 #ifndef AZT_KERNEL_PRIOR_2_1
        blksize_size[MAJOR_NR] = aztcd_blocksizes;
 #endif
index edb90ee76e45c2d82bea509bf50723ea758e0b39..8e4a8536bc3e83403b089162d82934d17919c220 100644 (file)
@@ -1641,7 +1641,7 @@ read_data_block(char          *buffer,
  * data access on a CD is done sequentially, this saves a lot of operations.
  */
 static void
-do_cdu31a_request(void)
+do_cdu31a_request(request_queue_t * q)
 {
    int block;
    int nblock;
@@ -3497,7 +3497,7 @@ cdu31a_init(void)
 
       is_a_cdu31a = strcmp("CD-ROM CDU31A", drive_config.product_id) == 0;
 
-      blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+      blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
       read_ahead[MAJOR_NR] = CDU31A_READAHEAD;
       cdu31a_block_size = 1024; /* 1kB default block size */
       /* use 'mount -o block=2048' */
index 3ab0533f8bc575f6cb95d9c942f5683124d10ce6..edb0ab34ae198864ce5ec53c981383c850602f11 100644 (file)
@@ -209,6 +209,8 @@ static int auto_probe=1;    /* Yes, why not? */
 
 static int cm206_base = CM206_BASE;
 static int cm206_irq = CM206_IRQ; 
+static int cm206[2] = {0,0};   /* for compatible `insmod' parameter passing */
+
 MODULE_PARM(cm206_base, "i");  /* base */
 MODULE_PARM(cm206_irq, "i");   /* irq */
 MODULE_PARM(cm206, "1-2i");    /* base,irq or irq,base */
@@ -801,7 +803,7 @@ int try_adapter(int sector)
 /* This is not a very smart implementation. We could optimize for 
    consecutive block numbers. I'm not convinced this would really
    bring down the processor load. */
-static void do_cm206_request(void)
+static void do_cm206_request(request_queue_t * q)
 {
   long int i, cd_sec_no;
   int quarter, error; 
@@ -1394,7 +1396,7 @@ int __init cm206_init(void)
     cleanup(3);
     return -EIO;
   }    
-  blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+  blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
   blksize_size[MAJOR_NR] = cm206_blocksizes;
   read_ahead[MAJOR_NR] = 16;   /* reads ahead what? */
   init_bh(CM206_BH, cm206_bh);
@@ -1411,7 +1413,6 @@ int __init cm206_init(void)
 
 #ifdef MODULE
 
-static int cm206[2] = {0,0};   /* for compatible `insmod' parameter passing */
 
 void __init parse_options(void)
 {
index 07c36f51b3ca04ca963d3b883112f5190587e107..7913f7f0db900a1485d54fc227352367bd728e8f 100644 (file)
@@ -86,7 +86,8 @@ static void gscd_bin2bcd          (unsigned char *p);
 
 /* Schnittstellen zum Kern/FS */
 
-static void do_gscd_request       (void);
+static void do_gscd_request       (request_queue_t *);
+static void __do_gscd_request     (void);
 static int  gscd_ioctl            (struct inode *, struct file *, unsigned int, unsigned long);
 static int  gscd_open             (struct inode *, struct file *);
 static int  gscd_release          (struct inode *, struct file *);
@@ -260,7 +261,12 @@ long offs;
  * I/O request routine called from Linux kernel.
  */
 
-static void do_gscd_request (void)
+static void do_gscd_request (request_queue_t * q)
+{
+  __do_gscd_request();
+}
+
+static void __do_gscd_request (void)
 {
 unsigned int block,dev;
 unsigned int nsect;
@@ -355,7 +361,7 @@ char   cmd[] = { CMD_READ, 0x80, 0,0,0, 0,1 }; /* cmd mode M-S-F secth sectl */
               end_request(1);
           }
        }
-       SET_TIMER(do_gscd_request, 1);
+       SET_TIMER(__do_gscd_request, 1);
 }
 
 
@@ -1060,7 +1066,7 @@ int result;
                return -EIO;
        }
 
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        blksize_size[MAJOR_NR] = gscd_blocksizes;
        read_ahead[MAJOR_NR] = 4;
         
index 81f9bd7626a386cb02dc09ec9ba6f40198bf53f0..c8adc0200b91d743ab3d3afc063dab069a2a00db 100644 (file)
@@ -648,7 +648,7 @@ mcd_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 
 
 static void
-do_mcd_request(void)
+do_mcd_request(request_queue_t * q)
 {
 #ifdef TEST2
   printk(" do_mcd_request(%ld+%ld)\n", CURRENT -> sector, CURRENT -> nr_sectors);
@@ -1179,7 +1179,7 @@ int __init mcd_init(void)
        }
 
        blksize_size[MAJOR_NR] = mcd_blocksizes;
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = 4;
 
        /* check for card */
index fa3f363103565a159d82b88859345d1015d457b2..38f838797b8b67573f228fe7df0ed40fdb6ea3d2 100644 (file)
@@ -208,7 +208,7 @@ struct s_drive_stuff {
 
 /* declared in blk.h */
 int mcdx_init(void);
-void do_mcdx_request(void);
+void do_mcdx_request(request_queue_t * q);
 
 /* already declared in init/main */
 void mcdx_setup(char *, int *);
@@ -521,7 +521,7 @@ static int mcdx_audio_ioctl(struct cdrom_device_info * cdi, unsigned int cmd,
        }
 }
 
-void do_mcdx_request()
+void do_mcdx_request(request_queue_t * q)
 {
     int dev;
     struct s_drive_stuff *stuffp;
@@ -1116,7 +1116,7 @@ int __init mcdx_init_drive(int drive)
                return 1;
        }
 
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = READ_AHEAD;
        blksize_size[MAJOR_NR] = mcdx_blocksizes;
 
index 68a22cbd11606ccd16139af87bab2cc01a318c9d..94ad9f091494c8b7901a44083a4e3ee84ac1cfbf 100644 (file)
@@ -1360,7 +1360,7 @@ static void poll(void)
 }
 
 
-static void do_optcd_request(void)
+static void do_optcd_request(request_queue_t * q)
 {
        DEBUG((DEBUG_REQUEST, "do_optcd_request(%ld+%ld)",
               CURRENT -> sector, CURRENT -> nr_sectors));
@@ -2067,7 +2067,7 @@ int __init optcd_init(void)
 
        hardsect_size[MAJOR_NR] = &hsecsize;
        blksize_size[MAJOR_NR] = &blksize;
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = 4;
        request_region(optcd_port, 4, "optcd");
 
index 3e704b0d81a22911c887243ae3ae8cc17d21479a..7b0fcc10ceea02564f40447b976f5e8b1b31648b 100644 (file)
@@ -4794,7 +4794,7 @@ static inline void sbpcd_end_request(struct request *req, int uptodate) {
 /*
  *  I/O request routine, called from Linux kernel.
  */
-static void DO_SBPCD_REQUEST(void)
+static void DO_SBPCD_REQUEST(request_queue_t * q)
 {
        u_int block;
        u_int nsect;
@@ -5725,7 +5725,7 @@ int __init SBPCD_INIT(void)
                goto init_done;
 #endif MODULE
        }
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
        read_ahead[MAJOR_NR] = buffers * (CD_FRAMESIZE / 512);
        
        request_region(CDo_command,4,major_name);
index 35697369fec70095dcaa1ca42431dcf58205818a..b5edc20574c394f5eab63ea446ffc87a286b7b69 100644 (file)
@@ -1272,7 +1272,7 @@ static void sjcd_poll( void ){
   SJCD_SET_TIMER( sjcd_poll, 1 );
 }
 
-static void do_sjcd_request( void ){
+static void do_sjcd_request( request_queue_t * q ){
 #if defined( SJCD_TRACE )
   printk( "SJCD: do_sjcd_request(%ld+%ld)\n",
         CURRENT->sector, CURRENT->nr_sectors );
@@ -1475,7 +1475,7 @@ int __init sjcd_init( void ){
     return( -EIO );
   }
   
-  blk_dev[ MAJOR_NR ].request_fn = DEVICE_REQUEST;
+  blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
   read_ahead[ MAJOR_NR ] = 4;
   
   if( check_region( sjcd_base, 4 ) ){
index 79bad95de934d9afe389a808f2306a891daff0c5..947277f3d219080f1be15656bfcc56e141c56765 100644 (file)
@@ -781,7 +781,7 @@ size_to_buf(unsigned int size, Byte *buf)
  * data access on a CD is done sequentially, this saves a lot of operations.
  */
 static void
-do_cdu535_request(void)
+do_cdu535_request(request_queue_t * q)
 {
        unsigned int dev;
        unsigned int read_size;
@@ -1601,7 +1601,7 @@ sony535_init(void)
                                                        MAJOR_NR, CDU535_MESSAGE_NAME);
                                        return -EIO;
                                }
-                               blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+                               blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
                                blksize_size[MAJOR_NR] = &sonycd535_block_size;
                                read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read-ahead */
 
index cd81857786661d560bf89ac63f092338b43a127f..b32ea52fe907427094ee60334899b074e8c4f3a8 100644 (file)
@@ -214,12 +214,10 @@ if [ "$CONFIG_FTAPE" != "n" ]; then
 fi
 endmenu
 
-if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
-   bool 'Direct Rendering Manager (XFree86 DRI support) (EXPERIMENTAL)' CONFIG_DRM
-   dep_tristate '  3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM
-   if [ "$CONFIG_DRM" = "y" ]; then
-      dep_tristate '  3dlabs GMX 2000' CONFIG_DRM_GAMMA m
-   fi
+bool 'Direct Rendering Manager (XFree86 DRI support) (EXPERIMENTAL)' CONFIG_DRM
+dep_tristate '  3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM
+if [ "$CONFIG_DRM" = "y" ]; then
+  dep_tristate '  3dlabs GMX 2000' CONFIG_DRM_GAMMA m
 fi
 
 if [ "$CONFIG_PCMCIA" != "n" ]; then
@@ -227,8 +225,8 @@ if [ "$CONFIG_PCMCIA" != "n" ]; then
 fi
 
 if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
-   dep_tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP m
-   if [ "$CONFIG_AGP" = "m" ]; then
+   tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP
+   if [ "$CONFIG_AGP" != "n" ]; then
      bool '   Intel 440LX/BX/GX support' CONFIG_AGP_INTEL
      bool '   Intel I810/I810 DC100/I810e support' CONFIG_AGP_I810
      bool '   VIA VP3/MVP3/Apollo Pro support' CONFIG_AGP_VIA
index 38c4bc55fd40e9fcb6d75ec479f56ba3fca4682f..899679f79442011f0b884cd0adac41c1881ca8d5 100644 (file)
@@ -636,9 +636,15 @@ else
   endif
 endif
 
-ifeq ($(CONFIG_AGP), m)
+ifeq ($(CONFIG_AGP), y)
+  SUB_DIRS += agp
   ALL_SUB_DIRS += agp
   MOD_SUB_DIRS += agp
+else
+  ifeq ($(CONFIG_AGP), m)
+    ALL_SUB_DIRS += agp
+    MOD_SUB_DIRS += agp
+  endif
 endif
 
 include $(TOPDIR)/Rules.make
index c03310f6e432cf3d1584b5a4b59a820e3813f712..39a0e3672ad820a55c331b7f1014a274d7b42c24 100644 (file)
@@ -3,30 +3,17 @@
 # space ioctl interface to use agp memory.  It also adds a kernel interface
 # that other drivers could use to manipulate agp memory.
 
-M_OBJS         := agpgart.o
+O_TARGET       := agp.o
 
-CFLAGS_agp_backend.o   :=
-
-ifdef CONFIG_AGP_I810
-CFLAGS_agp_backend.o   += -DAGP_BUILD_INTEL_I810
-endif
-ifdef CONFIG_AGP_INTEL
-CFLAGS_agp_backend.o   += -DAGP_BUILD_INTEL_GENERIC
-endif
-ifdef CONFIG_AGP_VIA
-CFLAGS_agp_backend.o   += -DAGP_BUILD_VIA_GENERIC
-endif
-ifdef CONFIG_AGP_AMD
-CFLAGS_agp_backend.o   += -DAGP_BUILD_AMD_IRONGATE
-endif
-ifdef CONFIG_AGP_SIS
-CFLAGS_agp_backend.o   += -DAGP_BUILD_SIS_GENERIC
-endif
-ifdef CONFIG_AGP_ALI
-CFLAGS_agp_backend.o   += -DAGP_BUILD_ALI_M1541
+ifeq ($(CONFIG_AGP),y)
+  O_OBJS += agpgart.o
+else
+  ifeq ($(CONFIG_AGP), m)
+    M_OBJS += agpgart.o
+  endif
 endif
 
 include $(TOPDIR)/Rules.make
 
-agpgart.o: agp_backend.o agpgart_fe.o
-       $(LD) $(LD_RFLAG) -r -o $@ agp_backend.o agpgart_fe.o
+agpgart.o: agpgart_be.o agpgart_fe.o
+       $(LD) $(LD_RFLAG) -r -o $@ agpgart_be.o agpgart_fe.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
new file mode 100644 (file)
index 0000000..55d00d8
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+enum aper_size_type {
+       U8_APER_SIZE,
+       U16_APER_SIZE,
+       U32_APER_SIZE,
+       FIXED_APER_SIZE
+};
+
+typedef struct _gatt_mask {
+       unsigned long mask;
+       u32 type;
+       /* totally device specific, for integrated chipsets that 
+        * might have different types of memory masks.  For other
+        * devices this will probably be ignored */
+} gatt_mask;
+
+typedef struct _aper_size_info_8 {
+       int size;
+       int num_entries;
+       int page_order;
+       u8 size_value;
+} aper_size_info_8;
+
+typedef struct _aper_size_info_16 {
+       int size;
+       int num_entries;
+       int page_order;
+       u16 size_value;
+} aper_size_info_16;
+
+typedef struct _aper_size_info_32 {
+       int size;
+       int num_entries;
+       int page_order;
+       u32 size_value;
+} aper_size_info_32;
+
+typedef struct _aper_size_info_fixed {
+       int size;
+       int num_entries;
+       int page_order;
+} aper_size_info_fixed;
+
+struct agp_bridge_data {
+       agp_version *version;
+       void *aperture_sizes;
+       void *previous_size;
+       void *current_size;
+       void *dev_private_data;
+       struct pci_dev *dev;
+       gatt_mask *masks;
+       unsigned long *gatt_table;
+       unsigned long *gatt_table_real;
+       unsigned long scratch_page;
+       unsigned long gart_bus_addr;
+       unsigned long gatt_bus_addr;
+       u32 mode;
+       enum chipset_type type;
+       enum aper_size_type size_type;
+       u32 *key_list;
+       atomic_t current_memory_agp;
+       atomic_t agp_in_use;
+       int max_memory_agp;     /* in number of pages */
+       int needs_scratch_page;
+       int aperture_size_idx;
+       int num_aperture_sizes;
+       int num_of_masks;
+       int capndx;
+
+       /* Links to driver specific functions */
+
+       int (*fetch_size) (void);
+       int (*configure) (void);
+       void (*agp_enable) (u32);
+       void (*cleanup) (void);
+       void (*tlb_flush) (agp_memory *);
+       unsigned long (*mask_memory) (unsigned long, int);
+       void (*cache_flush) (void);
+       int (*create_gatt_table) (void);
+       int (*free_gatt_table) (void);
+       int (*insert_memory) (agp_memory *, off_t, int);
+       int (*remove_memory) (agp_memory *, off_t, int);
+       agp_memory *(*alloc_by_type) (size_t, int);
+       void (*free_by_type) (agp_memory *);
+
+       /* Links to vendor/device specific setup functions */
+#ifdef CONFIG_AGP_INTEL
+       void (*intel_generic_setup) (void);
+#endif
+#ifdef CONFIG_AGP_I810
+       void (*intel_i810_setup) (struct pci_dev *);
+#endif
+#ifdef CONFIG_AGP_VIA
+       void (*via_generic_setup) (void);
+#endif
+#ifdef CONFIG_AGP_SIS
+       void (*sis_generic_setup) (void);
+#endif
+#ifdef CONFIG_AGP_AMD
+       void (*amd_irongate_setup) (void);
+#endif
+#ifdef CONFIG_AGP_ALI
+       void (*ali_generic_setup) (void);
+#endif
+};
+
+#define OUTREG32(mmap, addr, val)   *(volatile u32 *)(mmap + (addr)) = (val)
+#define OUTREG16(mmap, addr, val)   *(volatile u16 *)(mmap + (addr)) = (val)
+#define OUTREG8 (mmap, addr, val)   *(volatile u8 *) (mmap + (addr)) = (val)
+
+#define INREG32(mmap, addr)         *(volatile u32 *)(mmap + (addr))
+#define INREG16(mmap, addr)         *(volatile u16 *)(mmap + (addr))
+#define INREG8 (mmap, addr)         *(volatile u8 *) (mmap + (addr))
+
+#define CACHE_FLUSH    agp_bridge.cache_flush
+#define A_SIZE_8(x)    ((aper_size_info_8 *) x)
+#define A_SIZE_16(x)   ((aper_size_info_16 *) x)
+#define A_SIZE_32(x)   ((aper_size_info_32 *) x)
+#define A_SIZE_FIX(x)  ((aper_size_info_fixed *) x)
+#define A_IDX8()       (A_SIZE_8(agp_bridge.aperture_sizes) + i)
+#define A_IDX16()      (A_SIZE_16(agp_bridge.aperture_sizes) + i)
+#define A_IDX32()      (A_SIZE_32(agp_bridge.aperture_sizes) + i)
+#define A_IDXFIX()     (A_SIZE_FIX(agp_bridge.aperture_sizes) + i)
+#define MAXKEY         (4096 * 32)
+
+#ifndef min
+#define min(a,b)       (((a)<(b))?(a):(b))
+#endif
+
+#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page)
+
+#ifndef PCI_DEVICE_ID_VIA_82C691_0
+#define PCI_DEVICE_ID_VIA_82C691_0      0x0691
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C691_1
+#define PCI_DEVICE_ID_VIA_82C691_1      0x8691
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_0
+#define PCI_DEVICE_ID_INTEL_810_0       0x7120
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_DC100_0
+#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_E_0
+#define PCI_DEVICE_ID_INTEL_810_E_0     0x7124
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82443GX_0
+#define PCI_DEVICE_ID_INTEL_82443GX_0   0x71a0
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_1
+#define PCI_DEVICE_ID_INTEL_810_1       0x7121
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_DC100_1
+#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_E_1
+#define PCI_DEVICE_ID_INTEL_810_E_1     0x7125
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82443GX_1
+#define PCI_DEVICE_ID_INTEL_82443GX_1   0x71a1
+#endif
+#ifndef PCI_DEVICE_ID_AMD_IRONGATE_0
+#define PCI_DEVICE_ID_AMD_IRONGATE_0    0x7006
+#endif
+#ifndef PCI_VENDOR_ID_AL
+#define PCI_VENDOR_ID_AL               0x10b9
+#endif
+#ifndef PCI_DEVICE_ID_AL_M1541_0
+#define PCI_DEVICE_ID_AL_M1541_0       0x1541
+#endif
+
+/* intel register */
+#define INTEL_APBASE    0x10
+#define INTEL_APSIZE    0xb4
+#define INTEL_ATTBASE   0xb8
+#define INTEL_AGPCTRL   0xb0
+#define INTEL_NBXCFG    0x50
+#define INTEL_ERRSTS    0x91
+
+/* intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE          0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL         0x00000002
+#define I810_PTE_VALID         0x00000001
+#define I810_SMRAM_MISCC       0x70
+#define I810_GFX_MEM_WIN_SIZE  0x00010000
+#define I810_GFX_MEM_WIN_32M   0x00010000
+#define I810_GMS               0x000000c0
+#define I810_GMS_DISABLE       0x00000000
+#define I810_PGETBL_CTL        0x2020
+#define I810_PGETBL_ENABLED    0x00000001
+#define I810_DRAM_CTL          0x3000
+#define I810_DRAM_ROW_0        0x00000001
+#define I810_DRAM_ROW_0_SDRAM  0x00000001
+
+/* VIA register */
+#define VIA_APBASE      0x10
+#define VIA_GARTCTRL    0x80
+#define VIA_APSIZE      0x84
+#define VIA_ATTBASE     0x88
+
+/* SiS registers */
+#define SIS_APBASE      0x10
+#define SIS_ATTBASE     0x90
+#define SIS_APSIZE      0x94
+#define SIS_TLBCNTRL    0x97
+#define SIS_TLBFLUSH    0x98
+
+/* AMD registers */
+#define AMD_APBASE      0x10
+#define AMD_MMBASE      0x14
+#define AMD_APSIZE      0xac
+#define AMD_MODECNTL    0xb0
+#define AMD_GARTENABLE  0x02   /* In mmio region (16-bit register) */
+#define AMD_ATTBASE     0x04   /* In mmio region (32-bit register) */
+#define AMD_TLBFLUSH    0x0c   /* In mmio region (32-bit register) */
+#define AMD_CACHEENTRY  0x10   /* In mmio region (32-bit register) */
+
+/* ALi registers */
+#define ALI_APBASE     0x10
+#define ALI_AGPCTRL    0xb8
+#define ALI_ATTBASE    0xbc
+#define ALI_TLBCTRL    0xc0
+
+#endif                         /* _AGP_BACKEND_PRIV_H */
diff --git a/drivers/char/agp/agp_backend.c b/drivers/char/agp/agp_backend.c
deleted file mode 100644 (file)
index 246cbbb..0000000
+++ /dev/null
@@ -1,1987 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#define EXPORT_SYMTAB
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/malloc.h>
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/miscdevice.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/page.h>
-
-#include <linux/agp_backend.h>
-#include "agp_backendP.h"
-
-static struct agp_bridge_data agp_bridge;
-
-#define CACHE_FLUSH agp_bridge.cache_flush
-
-MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
-MODULE_PARM(agp_try_unsupported, "1i");
-EXPORT_SYMBOL(agp_free_memory);
-EXPORT_SYMBOL(agp_allocate_memory);
-EXPORT_SYMBOL(agp_copy_info);
-EXPORT_SYMBOL(agp_bind_memory);
-EXPORT_SYMBOL(agp_unbind_memory);
-EXPORT_SYMBOL(agp_enable);
-EXPORT_SYMBOL(agp_backend_acquire);
-EXPORT_SYMBOL(agp_backend_release);
-
-static int agp_try_unsupported __initdata = 0;
-
-#ifdef __SMP__
-static atomic_t cpus_waiting;
-#endif
-
-int agp_backend_acquire(void)
-{
-       atomic_inc(&(agp_bridge.agp_in_use));
-
-       if (atomic_read(&(agp_bridge.agp_in_use)) != 1) {
-               atomic_dec(&(agp_bridge.agp_in_use));
-               return -EBUSY;
-       }
-       MOD_INC_USE_COUNT;
-       return 0;
-}
-
-void agp_backend_release(void)
-{
-       atomic_dec(&(agp_bridge.agp_in_use));
-       MOD_DEC_USE_COUNT;
-}
-
-static void flush_cache(void)
-{
-       asm volatile ("wbinvd":::"memory");
-}
-
-#ifdef __SMP__
-static void ipi_handler(void *null)
-{
-       flush_cache();
-       atomic_dec(&cpus_waiting);
-       while (atomic_read(&cpus_waiting) > 0)
-               barrier();
-}
-
-static void smp_flush_cache(void)
-{
-       atomic_set(&cpus_waiting, smp_num_cpus - 1);
-       if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
-               panic("agpgart: timed out waiting for the other CPUs!\n");
-       flush_cache();
-       while (atomic_read(&cpus_waiting) > 0)
-               barrier();
-}
-#endif
-
-/* 
- * Basic Page Allocation Routines -
- * These routines handle page allocation
- * and by default they reserve the allocated 
- * memory.  They also handle incrementing the
- * current_memory_agp value, Which is checked
- * against a maximum value.
- */
-
-static void *agp_alloc_page(void)
-{
-       void *pt;
-
-       pt = (void *) __get_free_page(GFP_KERNEL);
-       if (pt == NULL) {
-               return NULL;
-       }
-       atomic_inc(&(mem_map[MAP_NR(pt)].count));
-       set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
-       atomic_inc(&(agp_bridge.current_memory_agp));
-       return pt;
-}
-
-static void agp_destroy_page(void *pt)
-{
-       if (pt == NULL)
-               return;
-
-       atomic_dec(&(mem_map[MAP_NR(pt)].count));
-       clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
-       wake_up(&mem_map[MAP_NR(pt)].wait);
-       free_page((unsigned long) pt);
-       atomic_dec(&(agp_bridge.current_memory_agp));
-}
-
-/* End Basic Page Allocation Routines */
-
-/* 
- * Generic routines for handling agp_memory structures -
- * They use the basic page allocation routines to do the
- * brunt of the work.
- */
-
-#define MAXKEY (4096 * 32)
-
-static void agp_free_key(int key)
-{
-
-       if (key < 0) {
-               return;
-       }
-       if (key < MAXKEY) {
-               clear_bit(key, agp_bridge.key_list);
-       }
-}
-
-static int agp_get_key(void)
-{
-       int bit;
-
-       bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
-       if (bit < MAXKEY) {
-               set_bit(bit, agp_bridge.key_list);
-               return bit;
-       }
-       return -1;
-}
-
-static agp_memory *agp_create_memory(int scratch_pages)
-{
-       agp_memory *new;
-
-       new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
-
-       if (new == NULL) {
-               return NULL;
-       }
-       memset(new, 0, sizeof(agp_memory));
-       new->key = agp_get_key();
-
-       if (new->key < 0) {
-               kfree(new);
-               return NULL;
-       }
-       new->memory = vmalloc(PAGE_SIZE * scratch_pages);
-
-       if (new->memory == NULL) {
-               agp_free_key(new->key);
-               kfree(new);
-               return NULL;
-       }
-       new->num_scratch_pages = scratch_pages;
-       return new;
-}
-
-void agp_free_memory(agp_memory * curr)
-{
-       int i;
-
-       if (curr == NULL) {
-               return;
-       }
-       if (curr->is_bound == TRUE) {
-               agp_unbind_memory(curr);
-       }
-       if (curr->type != 0) {
-               agp_bridge.free_by_type(curr);
-               MOD_DEC_USE_COUNT;
-               return;
-       }
-       if (curr->page_count != 0) {
-               for (i = 0; i < curr->page_count; i++) {
-                       curr->memory[i] &= ~(0x00000fff);
-                       agp_destroy_page((void *) phys_to_virt(curr->memory[i]));
-               }
-       }
-       agp_free_key(curr->key);
-       vfree(curr->memory);
-       kfree(curr);
-       MOD_DEC_USE_COUNT;
-}
-
-#define ENTRIES_PER_PAGE               (PAGE_SIZE / sizeof(unsigned long))
-
-agp_memory *agp_allocate_memory(size_t page_count, u32 type)
-{
-       int scratch_pages;
-       agp_memory *new;
-       int i;
-
-       if ((atomic_read(&(agp_bridge.current_memory_agp)) + page_count) >
-           agp_bridge.max_memory_agp) {
-               return NULL;
-       }
-       if (type != 0) {
-               new = agp_bridge.alloc_by_type(page_count, type);
-               return new;
-       }
-       scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
-
-       new = agp_create_memory(scratch_pages);
-
-       if (new == NULL) {
-               return NULL;
-       }
-       for (i = 0; i < page_count; i++) {
-               new->memory[i] = (unsigned long) agp_alloc_page();
-
-               if ((void *) new->memory[i] == NULL) {
-                       /* Free this structure */
-                       agp_free_memory(new);
-                       return NULL;
-               }
-               new->memory[i] =
-                   agp_bridge.mask_memory(virt_to_phys((void *) new->memory[i]), type);
-               new->page_count++;
-       }
-
-       MOD_INC_USE_COUNT;
-       return new;
-}
-
-/* End - Generic routines for handling agp_memory structures */
-
-static int agp_return_size(void)
-{
-       int current_size;
-       void *temp;
-
-       temp = agp_bridge.current_size;
-
-       switch (agp_bridge.size_type) {
-       case U8_APER_SIZE:
-               current_size = ((aper_size_info_8 *) temp)->size;
-               break;
-       case U16_APER_SIZE:
-               current_size = ((aper_size_info_16 *) temp)->size;
-               break;
-       case U32_APER_SIZE:
-               current_size = ((aper_size_info_32 *) temp)->size;
-               break;
-       case FIXED_APER_SIZE:
-               current_size = ((aper_size_info_fixed *) temp)->size;
-               break;
-       default:
-               current_size = 0;
-               break;
-       }
-
-       return current_size;
-}
-
-/* Routine to copy over information structure */
-
-void agp_copy_info(agp_kern_info * info)
-{
-       memset(info, 0, sizeof(agp_kern_info));
-       info->version.major = agp_bridge.version->major;
-       info->version.minor = agp_bridge.version->minor;
-       info->device = agp_bridge.dev;
-       info->chipset = agp_bridge.type;
-       info->mode = agp_bridge.mode;
-       info->aper_base = agp_bridge.gart_bus_addr;
-       info->aper_size = agp_return_size();
-       info->max_memory = agp_bridge.max_memory_agp;
-       info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
-}
-
-/* End - Routine to copy over information structure */
-
-/*
- * Routines for handling swapping of agp_memory into the GATT -
- * These routines take agp_memory and insert them into the GATT.
- * They call device specific routines to actually write to the GATT.
- */
-
-int agp_bind_memory(agp_memory * curr, off_t pg_start)
-{
-       int ret_val;
-
-       if ((curr == NULL) || (curr->is_bound == TRUE)) {
-               return -EINVAL;
-       }
-       if (curr->is_flushed == FALSE) {
-               CACHE_FLUSH();
-               curr->is_flushed = TRUE;
-       }
-       ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
-
-       if (ret_val != 0) {
-               return ret_val;
-       }
-       curr->is_bound = TRUE;
-       curr->pg_start = pg_start;
-       return 0;
-}
-
-int agp_unbind_memory(agp_memory * curr)
-{
-       int ret_val;
-
-       if (curr == NULL) {
-               return -EINVAL;
-       }
-       if (curr->is_bound != TRUE) {
-               return -EINVAL;
-       }
-       ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
-
-       if (ret_val != 0) {
-               return ret_val;
-       }
-       curr->is_bound = FALSE;
-       curr->pg_start = 0;
-       return 0;
-}
-
-/* End - Routines for handling swapping of agp_memory into the GATT */
-
-/* 
- * Driver routines - start
- * Currently this module supports the 
- * i810, 440lx, 440bx, 440gx, via vp3, via mvp3,
- * amd irongate, ALi M1541 and generic support for the
- * SiS chipsets.
- */
-
-/* Generic Agp routines - Start */
-
-static void agp_generic_agp_enable(u32 mode)
-{
-       struct pci_dev *device = NULL;
-       u32 command, scratch, cap_id;
-       u8 cap_ptr;
-
-       pci_read_config_dword(agp_bridge.dev,
-                             agp_bridge.capndx + 4,
-                             &command);
-
-       /*
-        * PASS1: go throu all devices that claim to be
-        *        AGP devices and collect their data.
-        */
-
-       while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) {
-               pci_read_config_dword(device, 0x04, &scratch);
-
-               if (!(scratch & 0x00100000))
-                       continue;
-
-               pci_read_config_byte(device, 0x34, &cap_ptr);
-
-               if (cap_ptr != 0x00) {
-                       do {
-                               pci_read_config_dword(device, cap_ptr, &cap_id);
-
-                               if ((cap_id & 0xff) != 0x02)
-                                       cap_ptr = (cap_id >> 8) & 0xff;
-                       }
-                       while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
-               }
-               if (cap_ptr != 0x00) {
-                       /*
-                        * Ok, here we have a AGP device. Disable impossible settings,
-                        * and adjust the readqueue to the minimum.
-                        */
-
-                       pci_read_config_dword(device, cap_ptr + 4, &scratch);
-
-                       /* adjust RQ depth */
-                       command =
-                           ((command & ~0xff000000) |
-                            min((mode & 0xff000000), min((command & 0xff000000), (scratch & 0xff000000))));
-
-                       /* disable SBA if it's not supported */
-                       if (!((command & 0x00000200) && (scratch & 0x00000200) && (mode & 0x00000200)))
-                               command &= ~0x00000200;
-
-                       /* disable FW if it's not supported */
-                       if (!((command & 0x00000010) && (scratch & 0x00000010) && (mode & 0x00000010)))
-                               command &= ~0x00000010;
-
-                       if (!((command & 4) && (scratch & 4) && (mode & 4)))
-                               command &= ~0x00000004;
-
-                       if (!((command & 2) && (scratch & 2) && (mode & 2)))
-                               command &= ~0x00000002;
-
-                       if (!((command & 1) && (scratch & 1) && (mode & 1)))
-                               command &= ~0x00000001;
-               }
-       }
-       /*
-        * PASS2: Figure out the 4X/2X/1X setting and enable the
-        *        target (our motherboard chipset).
-        */
-
-       if (command & 4) {
-               command &= ~3;  /* 4X */
-       }
-       if (command & 2) {
-               command &= ~5;  /* 2X */
-       }
-       if (command & 1) {
-               command &= ~6;  /* 1X */
-       }
-       command |= 0x00000100;
-
-       pci_write_config_dword(agp_bridge.dev,
-                              agp_bridge.capndx + 8,
-                              command);
-
-       /*
-        * PASS3: Go throu all AGP devices and update the
-        *        command registers.
-        */
-
-       while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) {
-               pci_read_config_dword(device, 0x04, &scratch);
-
-               if (!(scratch & 0x00100000))
-                       continue;
-
-               pci_read_config_byte(device, 0x34, &cap_ptr);
-
-               if (cap_ptr != 0x00) {
-                       do {
-                               pci_read_config_dword(device, cap_ptr, &cap_id);
-
-                               if ((cap_id & 0xff) != 0x02)
-                                       cap_ptr = (cap_id >> 8) & 0xff;
-                       }
-                       while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
-               }
-               if (cap_ptr != 0x00)
-                       pci_write_config_dword(device, cap_ptr + 8, command);
-       }
-}
-
-static int agp_generic_create_gatt_table(void)
-{
-       char *table;
-       char *table_end;
-       int size;
-       int page_order;
-       int num_entries;
-       int i;
-       void *temp;
-
-       table = NULL;
-       i = agp_bridge.aperture_size_idx;
-       temp = agp_bridge.current_size;
-       size = page_order = num_entries = 0;
-
-       if (agp_bridge.size_type != FIXED_APER_SIZE) {
-               do {
-                       switch (agp_bridge.size_type) {
-                       case U8_APER_SIZE:
-                               size = ((aper_size_info_8 *) temp)->size;
-                               page_order = ((aper_size_info_8 *) temp)->page_order;
-                               num_entries = ((aper_size_info_8 *) temp)->num_entries;
-                               break;
-                       case U16_APER_SIZE:
-                               size = ((aper_size_info_16 *) temp)->size;
-                               page_order = ((aper_size_info_16 *) temp)->page_order;
-                               num_entries = ((aper_size_info_16 *) temp)->num_entries;
-                               break;
-                       case U32_APER_SIZE:
-                               size = ((aper_size_info_32 *) temp)->size;
-                               page_order = ((aper_size_info_32 *) temp)->page_order;
-                               num_entries = ((aper_size_info_32 *) temp)->num_entries;
-                               break;
-                               /* This case will never really happen */
-                       case FIXED_APER_SIZE:
-                       default:
-                               size = page_order = num_entries = 0;
-                               break;
-                       }
-
-                       table = (char *) __get_free_pages(GFP_KERNEL, page_order);
-
-                       if (table == NULL) {
-                               i++;
-
-                               switch (agp_bridge.size_type) {
-                               case U8_APER_SIZE:
-                                       agp_bridge.current_size = (((aper_size_info_8 *) agp_bridge.aperture_sizes) + i);
-                                       break;
-                               case U16_APER_SIZE:
-                                       agp_bridge.current_size = (((aper_size_info_16 *) agp_bridge.aperture_sizes) + i);
-                                       break;
-                               case U32_APER_SIZE:
-                                       agp_bridge.current_size = (((aper_size_info_32 *) agp_bridge.aperture_sizes) + i);
-                                       break;
-                                       /* This case will never really happen */
-                               case FIXED_APER_SIZE:
-                               default:
-                                       size = page_order = num_entries = 0;
-                                       break;
-                               }
-                       } else {
-                               agp_bridge.aperture_size_idx = i;
-                       }
-               } while ((table == NULL) && (i < agp_bridge.num_aperture_sizes));
-       } else {
-               size = ((aper_size_info_fixed *) temp)->size;
-               page_order = ((aper_size_info_fixed *) temp)->page_order;
-               num_entries = ((aper_size_info_fixed *) temp)->num_entries;
-               table = (char *) __get_free_pages(GFP_KERNEL, page_order);
-       }
-
-       if (table == NULL) {
-               return -ENOMEM;
-       }
-       table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
-
-       for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
-               set_bit(PG_reserved, &mem_map[i].flags);
-       }
-
-       agp_bridge.gatt_table_real = (unsigned long *) table;
-       CACHE_FLUSH();
-       agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
-                                       (PAGE_SIZE * (1 << page_order)));
-       CACHE_FLUSH();
-
-       if (agp_bridge.gatt_table == NULL) {
-               for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
-                       clear_bit(PG_reserved, &mem_map[i].flags);
-               }
-
-               free_pages((unsigned long) table, page_order);
-
-               return -ENOMEM;
-       }
-       agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
-
-       for (i = 0; i < num_entries; i++) {
-               agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
-       }
-
-       return 0;
-}
-
-static int agp_generic_free_gatt_table(void)
-{
-       int i;
-       int page_order;
-       char *table, *table_end;
-       void *temp;
-
-       temp = agp_bridge.current_size;
-
-       switch (agp_bridge.size_type) {
-       case U8_APER_SIZE:
-               page_order = ((aper_size_info_8 *) temp)->page_order;
-               break;
-       case U16_APER_SIZE:
-               page_order = ((aper_size_info_16 *) temp)->page_order;
-               break;
-       case U32_APER_SIZE:
-               page_order = ((aper_size_info_32 *) temp)->page_order;
-               break;
-       case FIXED_APER_SIZE:
-               page_order = ((aper_size_info_fixed *) temp)->page_order;
-               break;
-       default:
-               page_order = 0;
-               break;
-       }
-
-       /* Do not worry about freeing memory, because if this is
-        * called, then all agp memory is deallocated and removed
-        * from the table.
-        */
-
-       iounmap(agp_bridge.gatt_table);
-       table = (char *) agp_bridge.gatt_table_real;
-       table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
-
-       for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
-               clear_bit(PG_reserved, &mem_map[i].flags);
-       }
-
-       free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
-       return 0;
-}
-
-static int agp_generic_insert_memory(agp_memory * mem,
-                                    off_t pg_start, int type)
-{
-       int i, j, num_entries;
-       void *temp;
-
-       temp = agp_bridge.current_size;
-
-       switch (agp_bridge.size_type) {
-       case U8_APER_SIZE:
-               num_entries = ((aper_size_info_8 *) temp)->num_entries;
-               break;
-       case U16_APER_SIZE:
-               num_entries = ((aper_size_info_16 *) temp)->num_entries;
-               break;
-       case U32_APER_SIZE:
-               num_entries = ((aper_size_info_32 *) temp)->num_entries;
-               break;
-       case FIXED_APER_SIZE:
-               num_entries = ((aper_size_info_fixed *) temp)->num_entries;
-               break;
-       default:
-               num_entries = 0;
-               break;
-       }
-
-       if (type != 0 || mem->type != 0) {
-               /* The generic routines know nothing of memory types */
-               return -EINVAL;
-       }
-       if ((pg_start + mem->page_count) > num_entries) {
-               return -EINVAL;
-       }
-       j = pg_start;
-
-       while (j < (pg_start + mem->page_count)) {
-               if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
-                       return -EBUSY;
-               }
-               j++;
-       }
-
-       if (mem->is_flushed == FALSE) {
-               CACHE_FLUSH();
-               mem->is_flushed = TRUE;
-       }
-       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               agp_bridge.gatt_table[j] = mem->memory[i];
-       }
-
-       agp_bridge.tlb_flush(mem);
-       return 0;
-}
-
-static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
-                                    int type)
-{
-       int i;
-
-       if (type != 0 || mem->type != 0) {
-               /* The generic routines know nothing of memory types */
-               return -EINVAL;
-       }
-       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-               agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
-       }
-
-       agp_bridge.tlb_flush(mem);
-       return 0;
-}
-
-static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
-{
-       return NULL;
-}
-
-static void agp_generic_free_by_type(agp_memory * curr)
-{
-       if (curr->memory != NULL) {
-               vfree(curr->memory);
-       }
-       agp_free_key(curr->key);
-       kfree(curr);
-}
-
-void agp_enable(u32 mode)
-{
-       agp_bridge.agp_enable(mode);
-}
-
-/* End - Generic Agp routines */
-
-#ifdef AGP_BUILD_INTEL_I810
-
-static aper_size_info_fixed intel_i810_sizes[] =
-{
-       {64, 16384, 4},
-     /* The 32M mode still requires a 64k gatt */
-       {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-
-static gatt_mask intel_i810_masks[] =
-{
-       {I810_PTE_VALID, 0},
-       {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY}
-};
-
-static struct _intel_i810_private {
-       struct pci_dev *i810_dev;       /* device one */
-       volatile unsigned char *registers;
-       int num_dcache_entries;
-} intel_i810_private;
-
-static int intel_i810_fetch_size(void)
-{
-       u32 smram_miscc;
-       aper_size_info_fixed *values;
-
-       pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
-       values = (aper_size_info_fixed *) agp_bridge.aperture_sizes;
-
-       if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-               printk("agpgart: i810 is disabled\n");
-               return 0;
-       }
-       if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-               agp_bridge.previous_size =
-                   agp_bridge.current_size = (void *) (values + 1);
-               agp_bridge.aperture_size_idx = 1;
-               return values[1].size;
-       } else {
-               agp_bridge.previous_size =
-                   agp_bridge.current_size = (void *) (values);
-               agp_bridge.aperture_size_idx = 0;
-               return values[0].size;
-       }
-
-       return 0;
-}
-
-static int intel_i810_configure(void)
-{
-       aper_size_info_fixed *current_size;
-       u32 temp;
-       int i;
-
-       current_size = (aper_size_info_fixed *) agp_bridge.current_size;
-
-       pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
-       temp &= 0xfff80000;
-
-       intel_i810_private.registers =
-           (volatile unsigned char *) ioremap(temp, 128 * 4096);
-
-       if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
-            & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
-               /* This will need to be dynamically assigned */
-               printk("agpgart: detected 4MB dedicated video ram.\n");
-               intel_i810_private.num_dcache_entries = 1024;
-       }
-       pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
-       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-       OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
-                agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
-       CACHE_FLUSH();
-
-       if (agp_bridge.needs_scratch_page == TRUE) {
-               for (i = 0; i < current_size->num_entries; i++) {
-                       OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4),
-                                agp_bridge.scratch_page);
-               }
-       }
-       return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
-       OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
-       iounmap((void *) intel_i810_private.registers);
-}
-
-static void intel_i810_tlbflush(agp_memory * mem)
-{
-       return;
-}
-
-static void intel_i810_agp_enable(u32 mode)
-{
-       return;
-}
-
-static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
-                                    int type)
-{
-       int i, j, num_entries;
-       void *temp;
-
-       temp = agp_bridge.current_size;
-       num_entries = ((aper_size_info_fixed *) temp)->num_entries;
-
-       if ((pg_start + mem->page_count) > num_entries) {
-               return -EINVAL;
-       }
-       for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-               if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
-                       return -EBUSY;
-               }
-       }
-
-       if (type != 0 || mem->type != 0) {
-               if ((type == AGP_DCACHE_MEMORY) &&
-                   (mem->type == AGP_DCACHE_MEMORY)) {
-                       /* special insert */
-
-                       for (i = pg_start; i < (pg_start + mem->page_count); i++) {
-                               OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4),
-                                        (i * 4096) | I810_PTE_LOCAL | I810_PTE_VALID);
-                       }
-
-                       agp_bridge.tlb_flush(mem);
-                       return 0;
-               }
-               return -EINVAL;
-       }
-       if (mem->is_flushed == FALSE) {
-               CACHE_FLUSH();
-               mem->is_flushed = TRUE;
-       }
-       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               OUTREG32(intel_i810_private.registers,
-                        I810_PTE_BASE + (j * 4), mem->memory[i]);
-       }
-
-       agp_bridge.tlb_flush(mem);
-       return 0;
-}
-
-static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
-                                    int type)
-{
-       int i;
-
-       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-               OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4),
-                        agp_bridge.scratch_page);
-       }
-
-       agp_bridge.tlb_flush(mem);
-       return 0;
-}
-
-static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
-       agp_memory *new;
-
-       if (type == AGP_DCACHE_MEMORY) {
-               if (pg_count != intel_i810_private.num_dcache_entries) {
-                       return NULL;
-               }
-               new = agp_create_memory(1);
-
-               if (new == NULL) {
-                       return NULL;
-               }
-               new->type = AGP_DCACHE_MEMORY;
-               new->page_count = pg_count;
-               new->num_scratch_pages = 0;
-               vfree(new->memory);
-               return new;
-       }
-       return NULL;
-}
-
-static void intel_i810_free_by_type(agp_memory * curr)
-{
-       agp_free_key(curr->key);
-       kfree(curr);
-}
-
-static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
-{
-       /* Type checking must be done elsewhere */
-       return addr | agp_bridge.masks[type].mask;
-}
-
-static void intel_i810_setup(struct pci_dev *i810_dev)
-{
-       intel_i810_private.i810_dev = i810_dev;
-
-       agp_bridge.masks = intel_i810_masks;
-       agp_bridge.num_of_masks = 2;
-       agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
-       agp_bridge.size_type = FIXED_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 2;
-       agp_bridge.dev_private_data = (void *) &intel_i810_private;
-       agp_bridge.needs_scratch_page = TRUE;
-       agp_bridge.configure = intel_i810_configure;
-       agp_bridge.fetch_size = intel_i810_fetch_size;
-       agp_bridge.cleanup = intel_i810_cleanup;
-       agp_bridge.tlb_flush = intel_i810_tlbflush;
-       agp_bridge.mask_memory = intel_i810_mask_memory;
-       agp_bridge.agp_enable = intel_i810_agp_enable;
-#ifdef __SMP__
-       agp_bridge.cache_flush = smp_flush_cache;
-#else
-       agp_bridge.cache_flush = flush_cache;
-#endif
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = intel_i810_insert_entries;
-       agp_bridge.remove_memory = intel_i810_remove_entries;
-       agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
-       agp_bridge.free_by_type = intel_i810_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_INTEL_GENERIC
-
-static int intel_fetch_size(void)
-{
-       int i;
-       u16 temp;
-       aper_size_info_16 *values;
-
-       pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
-       (void *) values = agp_bridge.aperture_sizes;
-
-       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-               if (temp == values[i].size_value) {
-                       agp_bridge.previous_size =
-                           agp_bridge.current_size = (void *) (values + i);
-                       agp_bridge.aperture_size_idx = i;
-                       return values[i].size;
-               }
-       }
-
-       return 0;
-}
-
-static void intel_tlbflush(agp_memory * mem)
-{
-       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
-       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
-}
-
-static void intel_cleanup(void)
-{
-       u16 temp;
-       aper_size_info_16 *previous_size;
-
-       previous_size = (aper_size_info_16 *) agp_bridge.previous_size;
-       pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
-       pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
-       pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, previous_size->size_value);
-}
-
-static int intel_configure(void)
-{
-       u32 temp;
-       u16 temp2;
-       aper_size_info_16 *current_size;
-
-       current_size = (aper_size_info_16 *) agp_bridge.current_size;
-
-       /* aperture size */
-       pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, current_size->size_value);
-
-       /* address to map to */
-       pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-       /* attbase - aperture base */
-       pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr);
-
-       /* agpctrl */
-       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
-
-       /* paccfg/nbxcfg */
-       pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
-       pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9));
-       /* clear any possible error conditions */
-       pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
-       return 0;
-}
-
-static unsigned long intel_mask_memory(unsigned long addr, int type)
-{
-       /* Memory type is ignored */
-
-       return addr | agp_bridge.masks[0].mask;
-}
-
-
-/* Setup function */
-static gatt_mask intel_generic_masks[] =
-{
-       {0x00000017, 0}
-};
-
-static aper_size_info_16 intel_generic_sizes[7] =
-{
-       {256, 65536, 6, 0},
-       {128, 32768, 5, 32},
-       {64, 16384, 4, 48},
-       {32, 8192, 3, 56},
-       {16, 4096, 2, 60},
-       {8, 2048, 1, 62},
-       {4, 1024, 0, 63}
-};
-
-static void intel_generic_setup(void)
-{
-       agp_bridge.masks = intel_generic_masks;
-       agp_bridge.num_of_masks = 1;
-       agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
-       agp_bridge.size_type = U16_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 7;
-       agp_bridge.dev_private_data = NULL;
-       agp_bridge.needs_scratch_page = FALSE;
-       agp_bridge.configure = intel_configure;
-       agp_bridge.fetch_size = intel_fetch_size;
-       agp_bridge.cleanup = intel_cleanup;
-       agp_bridge.tlb_flush = intel_tlbflush;
-       agp_bridge.mask_memory = intel_mask_memory;
-       agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
-       agp_bridge.cache_flush = smp_flush_cache;
-#else
-       agp_bridge.cache_flush = flush_cache;
-#endif
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = agp_generic_insert_memory;
-       agp_bridge.remove_memory = agp_generic_remove_memory;
-       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-       agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_VIA_GENERIC
-
-static int via_fetch_size(void)
-{
-       int i;
-       u8 temp;
-       aper_size_info_8 *values;
-
-       (void *) values = agp_bridge.aperture_sizes;
-       pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
-       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-               if (temp == values[i].size_value) {
-                       agp_bridge.previous_size =
-                           agp_bridge.current_size = (void *) (values + i);
-                       agp_bridge.aperture_size_idx = i;
-                       return values[i].size;
-               }
-       }
-
-       return 0;
-}
-
-static int via_configure(void)
-{
-       u32 temp;
-       aper_size_info_8 *current_size;
-
-       current_size = (aper_size_info_8 *) agp_bridge.current_size;
-       /* aperture size */
-       pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, current_size->size_value);
-       /* address to map too */
-       pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
-       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-       /* GART control register */
-       pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
-
-       /* attbase - aperture GATT base */
-       pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
-                           (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
-       return 0;
-}
-
-static void via_cleanup(void)
-{
-       aper_size_info_8 *previous_size;
-
-       previous_size = (aper_size_info_8 *) agp_bridge.previous_size;
-       pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
-       pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, previous_size->size_value);
-}
-
-static void via_tlbflush(agp_memory * mem)
-{
-       pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
-       pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
-}
-
-static unsigned long via_mask_memory(unsigned long addr, int type)
-{
-       /* Memory type is ignored */
-
-       return addr | agp_bridge.masks[0].mask;
-}
-
-static aper_size_info_8 via_generic_sizes[7] =
-{
-       {256, 65536, 6, 0},
-       {128, 32768, 5, 128},
-       {64, 16384, 4, 192},
-       {32, 8192, 3, 224},
-       {16, 4096, 2, 240},
-       {8, 2048, 1, 248},
-       {4, 1024, 0, 252}
-};
-
-static gatt_mask via_generic_masks[] =
-{
-       {0x00000000, 0}
-};
-
-static void via_generic_setup(void)
-{
-       agp_bridge.masks = via_generic_masks;
-       agp_bridge.num_of_masks = 1;
-       agp_bridge.aperture_sizes = (void *) via_generic_sizes;
-       agp_bridge.size_type = U8_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 7;
-       agp_bridge.dev_private_data = NULL;
-       agp_bridge.needs_scratch_page = FALSE;
-       agp_bridge.configure = via_configure;
-       agp_bridge.fetch_size = via_fetch_size;
-       agp_bridge.cleanup = via_cleanup;
-       agp_bridge.tlb_flush = via_tlbflush;
-       agp_bridge.mask_memory = via_mask_memory;
-       agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
-       agp_bridge.cache_flush = smp_flush_cache;
-#else
-       agp_bridge.cache_flush = flush_cache;
-#endif
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = agp_generic_insert_memory;
-       agp_bridge.remove_memory = agp_generic_remove_memory;
-       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-       agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_SIS_GENERIC
-
-static int sis_fetch_size(void)
-{
-       u8 temp_size;
-       int i;
-       aper_size_info_8 *values;
-
-       pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
-       (void *) values = agp_bridge.aperture_sizes;
-       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-               if ((temp_size == values[i].size_value) ||
-                   ((temp_size & ~(0x03)) == (values[i].size_value & ~(0x03)))) {
-                       agp_bridge.previous_size =
-                           agp_bridge.current_size = (void *) (values + i);
-
-                       agp_bridge.aperture_size_idx = i;
-                       return values[i].size;
-               }
-       }
-
-       return 0;
-}
-
-
-static void sis_tlbflush(agp_memory * mem)
-{
-       pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
-}
-
-static int sis_configure(void)
-{
-       u32 temp;
-       aper_size_info_8 *current_size;
-
-       current_size = (aper_size_info_8 *) agp_bridge.current_size;
-       pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
-       pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
-       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-       pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, agp_bridge.gatt_bus_addr);
-       pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, current_size->size_value);
-       return 0;
-}
-
-static void sis_cleanup(void)
-{
-       aper_size_info_8 *previous_size;
-
-       previous_size = (aper_size_info_8 *) agp_bridge.previous_size;
-       pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, (previous_size->size_value & ~(0x03)));
-}
-
-static unsigned long sis_mask_memory(unsigned long addr, int type)
-{
-       /* Memory type is ignored */
-
-       return addr | agp_bridge.masks[0].mask;
-}
-
-static aper_size_info_8 sis_generic_sizes[7] =
-{
-       {256, 65536, 6, 99},
-       {128, 32768, 5, 83},
-       {64, 16384, 4, 67},
-       {32, 8192, 3, 51},
-       {16, 4096, 2, 35},
-       {8, 2048, 1, 19},
-       {4, 1024, 0, 3}
-};
-
-static gatt_mask sis_generic_masks[] =
-{
-       {0x00000000, 0}
-};
-
-static void sis_generic_setup(void)
-{
-       agp_bridge.masks = sis_generic_masks;
-       agp_bridge.num_of_masks = 1;
-       agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
-       agp_bridge.size_type = U8_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 7;
-       agp_bridge.dev_private_data = NULL;
-       agp_bridge.needs_scratch_page = FALSE;
-       agp_bridge.configure = sis_configure;
-       agp_bridge.fetch_size = sis_fetch_size;
-       agp_bridge.cleanup = sis_cleanup;
-       agp_bridge.tlb_flush = sis_tlbflush;
-       agp_bridge.mask_memory = sis_mask_memory;
-       agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
-       agp_bridge.cache_flush = smp_flush_cache;
-#else
-       agp_bridge.cache_flush = flush_cache;
-#endif
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = agp_generic_insert_memory;
-       agp_bridge.remove_memory = agp_generic_remove_memory;
-       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-       agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_AMD_IRONGATE
-
-static struct _amd_irongate_private {
-       volatile unsigned char *registers;
-} amd_irongate_private;
-
-static int amd_irongate_fetch_size(void)
-{
-       int i;
-       u32 temp;
-       aper_size_info_32 *values;
-
-       pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
-       temp = (temp & 0x0000000e);
-       (void *) values = agp_bridge.aperture_sizes;
-       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-               if (temp == values[i].size_value) {
-                       agp_bridge.previous_size =
-                           agp_bridge.current_size = (void *) (values + i);
-
-                       agp_bridge.aperture_size_idx = i;
-                       return values[i].size;
-               }
-       }
-
-       return 0;
-}
-
-static int amd_irongate_configure(void)
-{
-       aper_size_info_32 *current_size;
-       u32 temp;
-       u16 enable_reg;
-
-       current_size = (aper_size_info_32 *) agp_bridge.current_size;
-
-       /* Get the memory mapped registers */
-       pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
-       temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-       amd_irongate_private.registers = (volatile unsigned char *) ioremap(temp, 4096);
-
-       /* Write out the address of the gatt table */
-       OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, agp_bridge.gatt_bus_addr);
-
-       /* Write the Sync register */
-       pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);
-
-       /* Write the enable register */
-       enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
-       enable_reg = (enable_reg | 0x0004);
-       OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
-
-       /* Write out the size register */
-       pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
-       temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001);
-       pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
-
-       /* Flush the tlb */
-       OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
-
-       /* Get the address for the gart region */
-       pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
-       temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-       agp_bridge.gart_bus_addr = temp;
-       return 0;
-}
-
-static void amd_irongate_cleanup(void)
-{
-       aper_size_info_32 *previous_size;
-       u32 temp;
-       u16 enable_reg;
-
-       previous_size = (aper_size_info_32 *) agp_bridge.previous_size;
-
-       enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
-       enable_reg = (enable_reg & ~(0x0004));
-       OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
-
-       /* Write back the previous size and disable gart translation */
-       pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
-       temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
-       pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
-       iounmap((void *) amd_irongate_private.registers);
-}
-
-/*
- * This routine could be implemented by taking the addresses
- * written to the GATT, and flushing them individually.  However
- * currently it just flushes the whole table.  Which is probably
- * more efficent, since agp_memory blocks can be a large number of
- * entries.
- */
-
-static void amd_irongate_tlbflush(agp_memory * temp)
-{
-       OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
-}
-
-static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
-{
-       /* Only type 0 is supported by the irongate */
-
-       return addr | agp_bridge.masks[0].mask;
-}
-
-static aper_size_info_32 amd_irongate_sizes[7] =
-{
-       {2048, 524288, 9, 0x0000000c},
-       {1024, 262144, 8, 0x0000000a},
-       {512, 131072, 7, 0x00000008},
-       {256, 65536, 6, 0x00000006},
-       {128, 32768, 5, 0x00000004},
-       {64, 16384, 4, 0x00000002},
-       {32, 8192, 3, 0x00000000}
-};
-
-static gatt_mask amd_irongate_masks[] =
-{
-       {0x00000001, 0}
-};
-
-static void amd_irongate_setup(void)
-{
-       agp_bridge.masks = amd_irongate_masks;
-       agp_bridge.num_of_masks = 1;
-       agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
-       agp_bridge.size_type = U32_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 7;
-       agp_bridge.dev_private_data = (void *) &amd_irongate_private;
-       agp_bridge.needs_scratch_page = FALSE;
-       agp_bridge.configure = amd_irongate_configure;
-       agp_bridge.fetch_size = amd_irongate_fetch_size;
-       agp_bridge.cleanup = amd_irongate_cleanup;
-       agp_bridge.tlb_flush = amd_irongate_tlbflush;
-       agp_bridge.mask_memory = amd_irongate_mask_memory;
-       agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
-       agp_bridge.cache_flush = smp_flush_cache;
-#else
-       agp_bridge.cache_flush = flush_cache;
-#endif
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = agp_generic_insert_memory;
-       agp_bridge.remove_memory = agp_generic_remove_memory;
-       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-       agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_ALI_M1541
-
-static int ali_fetch_size(void)
-{
-       int i;
-       u32 temp;
-       aper_size_info_32 *values;
-
-       pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
-       temp &= ~(0xfffffff0);
-       (void *) values = agp_bridge.aperture_sizes;
-
-       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-               if (temp == values[i].size_value) {
-                       agp_bridge.previous_size =
-                           agp_bridge.current_size = (void *) (values + i);
-                       agp_bridge.aperture_size_idx = i;
-                       return values[i].size;
-               }
-       }
-
-       return 0;
-}
-
-static void ali_tlbflush(agp_memory * mem)
-{
-       u32 temp;
-
-       pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
-       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
-                              ((temp & 0xffffff00) | 0x00000090));
-       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
-                              ((temp & 0xffffff00) | 0x00000010));
-}
-
-static void ali_cleanup(void)
-{
-       aper_size_info_32 *previous_size;
-       u32 temp;
-
-       previous_size = (aper_size_info_32 *) agp_bridge.previous_size;
-
-       pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
-       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
-                              ((temp & 0xffffff00) | 0x00000090));
-       pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, previous_size->size_value);
-}
-
-static int ali_configure(void)
-{
-       u32 temp;
-       aper_size_info_32 *current_size;
-
-       current_size = (aper_size_info_32 *) agp_bridge.current_size;
-
-       /* aperture size and gatt addr */
-       pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
-                   agp_bridge.gatt_bus_addr | current_size->size_value);
-
-       /* tlb control */
-       pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
-       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
-                              ((temp & 0xffffff00) | 0x00000010));
-
-       /* address to map to */
-       pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
-       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-       return 0;
-}
-
-static unsigned long ali_mask_memory(unsigned long addr, int type)
-{
-       /* Memory type is ignored */
-
-       return addr | agp_bridge.masks[0].mask;
-}
-
-
-/* Setup function */
-static gatt_mask ali_generic_masks[] =
-{
-       {0x00000000, 0}
-};
-
-static aper_size_info_32 ali_generic_sizes[7] =
-{
-       {256, 65536, 6, 10},
-       {128, 32768, 5, 9},
-       {64, 16384, 4, 8},
-       {32, 8192, 3, 7},
-       {16, 4096, 2, 6},
-       {8, 2048, 1, 4},
-       {4, 1024, 0, 3}
-};
-
-static void ali_generic_setup(void)
-{
-       agp_bridge.masks = ali_generic_masks;
-       agp_bridge.num_of_masks = 1;
-       agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
-       agp_bridge.size_type = U32_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 7;
-       agp_bridge.dev_private_data = NULL;
-       agp_bridge.needs_scratch_page = FALSE;
-       agp_bridge.configure = ali_configure;
-       agp_bridge.fetch_size = ali_fetch_size;
-       agp_bridge.cleanup = ali_cleanup;
-       agp_bridge.tlb_flush = ali_tlbflush;
-       agp_bridge.mask_memory = ali_mask_memory;
-       agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
-       agp_bridge.cache_flush = smp_flush_cache;
-#else
-       agp_bridge.cache_flush = flush_cache;
-#endif
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = agp_generic_insert_memory;
-       agp_bridge.remove_memory = agp_generic_remove_memory;
-       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-       agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-
-
-/* Supported Device Scanning routine */
-
-static void agp_find_supported_device(void)
-{
-       struct pci_dev *dev = NULL;
-       u8 cap_ptr = 0x00;
-       u32 cap_id, scratch;
-
-       if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) {
-               agp_bridge.type = NOT_SUPPORTED;
-               return;
-       }
-       agp_bridge.dev = dev;
-
-       /* Need to test for I810 here */
-#ifdef AGP_BUILD_INTEL_I810
-       if (dev->vendor == PCI_VENDOR_ID_INTEL) {
-               struct pci_dev *i810_dev;
-
-               switch (dev->device) {
-               case PCI_DEVICE_ID_INTEL_810_0:
-                       i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-                                              PCI_DEVICE_ID_INTEL_810_1,
-                                                  NULL);
-                       if (i810_dev == NULL) {
-                               printk("agpgart: Detected an Intel i810, but could not find the secondary device.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-                       printk("agpgart: Detected an Intel i810 Chipset.\n");
-                       agp_bridge.type = INTEL_I810;
-                       agp_bridge.intel_i810_setup(i810_dev);
-                       return;
-
-               case PCI_DEVICE_ID_INTEL_810_DC100_0:
-                       i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-                                        PCI_DEVICE_ID_INTEL_810_DC100_1,
-                                                  NULL);
-                       if (i810_dev == NULL) {
-                               printk("agpgart: Detected an Intel i810 DC100, but could not find the secondary device.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-                       printk("agpgart: Detected an Intel i810 DC100 Chipset.\n");
-                       agp_bridge.type = INTEL_I810;
-                       agp_bridge.intel_i810_setup(i810_dev);
-                       return;
-
-               case PCI_DEVICE_ID_INTEL_810_E_0:
-                       i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-                                            PCI_DEVICE_ID_INTEL_810_E_1,
-                                                  NULL);
-                       if (i810_dev == NULL) {
-                               printk("agpgart: Detected an Intel i810 E, but could not find the secondary device.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-                       printk("agpgart: Detected an Intel i810 E Chipset.\n");
-                       agp_bridge.type = INTEL_I810;
-                       agp_bridge.intel_i810_setup(i810_dev);
-                       return;
-               default:
-                       break;
-               }
-       }
-#endif
-       /* find capndx */
-       pci_read_config_dword(dev, 0x04, &scratch);
-
-       if (!(scratch & 0x00100000)) {
-               agp_bridge.type = NOT_SUPPORTED;
-               return;
-       }
-       pci_read_config_byte(dev, 0x34, &cap_ptr);
-
-       if (cap_ptr != 0x00) {
-               do {
-                       pci_read_config_dword(dev, cap_ptr, &cap_id);
-
-                       if ((cap_id & 0xff) != 0x02)
-                               cap_ptr = (cap_id >> 8) & 0xff;
-               }
-               while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
-       }
-       if (cap_ptr == 0x00) {
-               agp_bridge.type = NOT_SUPPORTED;
-               return;
-       }
-       agp_bridge.capndx = cap_ptr;
-
-       /* Fill in the mode register */
-       pci_read_config_dword(agp_bridge.dev,
-                             agp_bridge.capndx + 4,
-                             &agp_bridge.mode);
-
-       switch (dev->vendor) {
-#ifdef AGP_BUILD_INTEL_GENERIC
-       case PCI_VENDOR_ID_INTEL:
-               switch (dev->device) {
-               case PCI_DEVICE_ID_INTEL_82443LX_0:
-                       agp_bridge.type = INTEL_LX;
-                       printk("agpgart: Detected an Intel 440LX Chipset.\n");
-                       agp_bridge.intel_generic_setup();
-                       return;
-
-               case PCI_DEVICE_ID_INTEL_82443BX_0:
-                       agp_bridge.type = INTEL_BX;
-                       printk("agpgart: Detected an Intel 440BX Chipset.\n");
-                       agp_bridge.intel_generic_setup();
-                       return;
-
-               case PCI_DEVICE_ID_INTEL_82443GX_0:
-                       agp_bridge.type = INTEL_GX;
-                       printk("agpgart: Detected an Intel 440GX Chipset.\n");
-                       agp_bridge.intel_generic_setup();
-                       return;
-
-               default:
-                       if (agp_try_unsupported != 0) {
-                               printk("agpgart: Trying generic intel routines for device id: %x\n", dev->device);
-                               agp_bridge.type = INTEL_GENERIC;
-                               agp_bridge.intel_generic_setup();
-                               return;
-                       } else {
-                               printk("agpgart: Unsupported intel chipset, you might want to try agp_try_unsupported=1.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-               }
-               break;
-#endif
-
-#ifdef AGP_BUILD_VIA_GENERIC
-       case PCI_VENDOR_ID_VIA:
-               switch (dev->device) {
-               case PCI_DEVICE_ID_VIA_82C597_0:
-                       agp_bridge.type = VIA_VP3;
-                       printk("agpgart: Detected a VIA VP3 Chipset.\n");
-                       agp_bridge.via_generic_setup();
-                       return;
-
-               case PCI_DEVICE_ID_VIA_82C598_0:
-                       agp_bridge.type = VIA_MVP3;
-                       printk("agpgart: Detected a VIA MVP3 Chipset.\n");
-                       agp_bridge.via_generic_setup();
-                       return;
-
-               case PCI_DEVICE_ID_VIA_82C691_0:
-                       agp_bridge.type = VIA_APOLLO_PRO;
-                       printk("agpgart: Detected a VIA Apollo Pro Chipset.\n");
-                       agp_bridge.via_generic_setup();
-                       return;
-
-               default:
-                       if (agp_try_unsupported != 0) {
-                               printk("agpgart: Trying generic VIA routines for device id: %x\n", dev->device);
-                               agp_bridge.type = VIA_GENERIC;
-                               agp_bridge.via_generic_setup();
-                               return;
-                       } else {
-                               printk("agpgart: Unsupported VIA chipset, you might want to try agp_try_unsupported=1.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-               }
-               break;
-#endif
-
-#ifdef AGP_BUILD_SIS_GENERIC
-       case PCI_VENDOR_ID_SI:
-               switch (dev->device) {
-                       /* ToDo need to find out the specific devices supported */
-               default:
-                       if (agp_try_unsupported != 0) {
-                               printk("agpgart: Trying generic SiS routines for device id: %x\n", dev->device);
-                               agp_bridge.type = SIS_GENERIC;
-                               agp_bridge.sis_generic_setup();
-                               return;
-                       } else {
-                               printk("agpgart: Unsupported SiS chipset, you might want to try agp_try_unsupported=1.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-               }
-               break;
-#endif
-
-#ifdef AGP_BUILD_AMD_IRONGATE
-       case PCI_VENDOR_ID_AMD:
-               switch (dev->device) {
-               case PCI_DEVICE_ID_AMD_IRONGATE_0:
-                       agp_bridge.type = AMD_IRONGATE;
-                       printk("agpgart: Detected an AMD Irongate Chipset.\n");
-                       agp_bridge.amd_irongate_setup();
-                       return;
-
-               default:
-                       if (agp_try_unsupported != 0) {
-                               printk("agpgart: Trying Amd irongate routines for device id: %x\n", dev->device);
-                               agp_bridge.type = AMD_GENERIC;
-                               agp_bridge.amd_irongate_setup();
-                               return;
-                       } else {
-                               printk("agpgart: Unsupported Amd chipset, you might want to try agp_try_unsupported=1.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-               }
-               break;
-#endif
-
-#ifdef AGP_BUILD_ALI_M1541
-       case PCI_VENDOR_ID_AL:
-               switch (dev->device) {
-               case PCI_DEVICE_ID_AL_M1541_0:
-                       agp_bridge.type = ALI_M1541;
-                       printk("agpgart: Detected an ALi M1541 Chipset\n");
-                       agp_bridge.ali_generic_setup();
-                       return;
-               default:
-                       if (agp_try_unsupported != 0) {
-                               printk("agpgart: Trying ALi generic routines for device id: %x\n", dev->device);
-                               agp_bridge.type = ALI_GENERIC;
-                               agp_bridge.ali_generic_setup();
-                               return;
-                       } else {
-                               printk("agpgart: Unsupported ALi chipset, you might want to type agp_try_unsupported=1.\n");
-                               agp_bridge.type = NOT_SUPPORTED;
-                               return;
-                       }
-               }
-               break;
-#endif
-       default:
-               agp_bridge.type = NOT_SUPPORTED;
-               return;
-       }
-}
-
-struct agp_max_table {
-       int mem;
-       int agp;
-};
-
-static struct agp_max_table agp_maxes_table[9] =
-{
-       {0, 0},
-       {32, 4},
-       {64, 28},
-       {128, 96},
-       {256, 204},
-       {512, 440},
-       {1024, 942},
-       {2048, 1920},
-       {4096, 3932}
-};
-
-static int agp_find_max(void)
-{
-       int memory;
-       float t;
-       int index;
-       int result;
-
-       memory = virt_to_phys(high_memory) / 0x100000;
-       index = 0;
-
-       while ((memory > agp_maxes_table[index].mem) &&
-              (index < 8)) {
-               index++;
-       }
-
-       t = (memory - agp_maxes_table[index - 1].mem) /
-           (agp_maxes_table[index].mem - agp_maxes_table[index - 1].mem);
-
-       result = agp_maxes_table[index - 1].agp +
-           (t * (agp_maxes_table[index].agp - agp_maxes_table[index - 1].agp));
-
-       printk("agpgart: Maximum main memory to use for agp memory: %dM\n", result);
-       result = (result * 0x100000) / 4096;
-       return result;
-}
-
-#define AGPGART_VERSION_MAJOR 0
-#define AGPGART_VERSION_MINOR 99
-
-static agp_version agp_current_version =
-{
-       AGPGART_VERSION_MAJOR,
-       AGPGART_VERSION_MINOR
-};
-
-static int agp_backend_initialize(void)
-{
-       int size_value;
-
-       memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
-       agp_bridge.type = NOT_SUPPORTED;
-#ifdef AGP_BUILD_INTEL_GENERIC
-       agp_bridge.intel_generic_setup = intel_generic_setup;
-#endif
-#ifdef AGP_BUILD_INTEL_I810
-       agp_bridge.intel_i810_setup = intel_i810_setup;
-#endif
-#ifdef AGP_BUILD_VIA_GENERIC
-       agp_bridge.via_generic_setup = via_generic_setup;
-#endif
-#ifdef AGP_BUILD_SIS_GENERIC
-       agp_bridge.sis_generic_setup = sis_generic_setup;
-#endif
-#ifdef AGP_BUILD_AMD_IRONGATE
-       agp_bridge.amd_irongate_setup = amd_irongate_setup;
-#endif
-#ifdef AGP_BUILD_ALI_M1541
-       agp_bridge.ali_generic_setup = ali_generic_setup;
-#endif
-       agp_bridge.max_memory_agp = agp_find_max();
-       agp_bridge.version = &agp_current_version;
-       agp_find_supported_device();
-
-       if (agp_bridge.needs_scratch_page == TRUE) {
-               agp_bridge.scratch_page = (unsigned long) agp_alloc_page();
-
-               if ((void *) (agp_bridge.scratch_page) == NULL) {
-                       printk("agpgart: unable to get memory for scratch page.\n");
-                       return -ENOMEM;
-               }
-               agp_bridge.scratch_page = virt_to_phys((void *) agp_bridge.scratch_page);
-               agp_bridge.scratch_page = agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
-       }
-       if (agp_bridge.type == NOT_SUPPORTED) {
-               printk("agpgart: no supported devices found.\n");
-               return -EINVAL;
-       }
-       size_value = agp_bridge.fetch_size();
-
-       if (size_value == 0) {
-               printk("agpgart: unable to detrimine aperture size.\n");
-               return -EINVAL;
-       }
-       if (agp_bridge.create_gatt_table()) {
-               printk("agpgart: unable to get memory for graphics translation table.\n");
-               return -ENOMEM;
-       }
-       agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
-
-       if (agp_bridge.key_list == NULL) {
-               printk("agpgart: error allocating memory for key lists.\n");
-               agp_bridge.free_gatt_table();
-               return -ENOMEM;
-       }
-       memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
-
-       if (agp_bridge.configure()) {
-               printk("agpgart: error configuring host chipset.\n");
-               agp_bridge.free_gatt_table();
-               vfree(agp_bridge.key_list);
-               return -EINVAL;
-       }
-       printk("agpgart: Physical address of the agp aperture: 0x%lx\n", agp_bridge.gart_bus_addr);
-       printk("agpgart: Agp aperture is %dM in size.\n", size_value);
-       return 0;
-}
-
-static void agp_backend_cleanup(void)
-{
-       agp_bridge.cleanup();
-       agp_bridge.free_gatt_table();
-       vfree(agp_bridge.key_list);
-
-       if (agp_bridge.needs_scratch_page == TRUE) {
-               agp_bridge.scratch_page &= ~(0x00000fff);
-               agp_destroy_page((void *) phys_to_virt(agp_bridge.scratch_page));
-       }
-}
-
-extern int agp_frontend_initialize(void);
-extern void agp_frontend_cleanup(void);
-
-#ifdef MODULE
-int init_module(void)
-{
-       int ret_val;
-
-       printk("Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
-              AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
-       ret_val = agp_backend_initialize();
-
-       if (ret_val != 0) {
-               return ret_val;
-       }
-       ret_val = agp_frontend_initialize();
-
-       if (ret_val != 0) {
-               agp_backend_cleanup();
-               return ret_val;
-       }
-       return 0;
-}
-
-void cleanup_module(void)
-{
-       agp_frontend_cleanup();
-       agp_backend_cleanup();
-}
-
-#endif
diff --git a/drivers/char/agp/agp_backendP.h b/drivers/char/agp/agp_backendP.h
deleted file mode 100644 (file)
index 59beb02..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _AGP_BACKEND_PRIV_H
-#define _AGP_BACKEND_PRIV_H 1
-
-enum aper_size_type {
-       U8_APER_SIZE,
-       U16_APER_SIZE,
-       U32_APER_SIZE,
-       FIXED_APER_SIZE
-};
-
-typedef struct _gatt_mask {
-       unsigned long mask;
-       u32 type;
-       /* totally device specific, for integrated chipsets that 
-        * might have different types of memory masks.  For other
-        * devices this will probably be ignored */
-} gatt_mask;
-
-typedef struct _aper_size_info_8 {
-       int size;
-       int num_entries;
-       int page_order;
-       u8 size_value;
-} aper_size_info_8;
-
-typedef struct _aper_size_info_16 {
-       int size;
-       int num_entries;
-       int page_order;
-       u16 size_value;
-} aper_size_info_16;
-
-typedef struct _aper_size_info_32 {
-       int size;
-       int num_entries;
-       int page_order;
-       u32 size_value;
-} aper_size_info_32;
-
-typedef struct _aper_size_info_fixed {
-       int size;
-       int num_entries;
-       int page_order;
-} aper_size_info_fixed;
-
-struct agp_bridge_data {
-       agp_version *version;
-       void *aperture_sizes;
-       void *previous_size;
-       void *current_size;
-       void *dev_private_data;
-       struct pci_dev *dev;
-       gatt_mask *masks;
-       unsigned long *gatt_table;
-       unsigned long *gatt_table_real;
-       unsigned long scratch_page;
-       unsigned long gart_bus_addr;
-       unsigned long gatt_bus_addr;
-       u32 mode;
-       enum chipset_type type;
-       enum aper_size_type size_type;
-       u32 *key_list;
-       atomic_t current_memory_agp;
-       atomic_t agp_in_use;
-       int max_memory_agp;     /* in number of pages */
-       int needs_scratch_page;
-       int aperture_size_idx;
-       int num_aperture_sizes;
-       int num_of_masks;
-       int capndx;
-
-       /* Links to driver specific functions */
-
-       int (*fetch_size) (void);       /* returns the index into the size table */
-       int (*configure) (void);
-       void (*agp_enable) (u32);
-       void (*cleanup) (void);
-       void (*tlb_flush) (agp_memory *);
-       unsigned long (*mask_memory) (unsigned long, int);
-       void (*cache_flush) (void);
-       int (*create_gatt_table) (void);
-       int (*free_gatt_table) (void);
-       int (*insert_memory) (agp_memory *, off_t, int);
-       int (*remove_memory) (agp_memory *, off_t, int);
-       agp_memory *(*alloc_by_type) (size_t, int);
-       void (*free_by_type) (agp_memory *);
-
-       /* Links to vendor/device specific setup functions */
-#ifdef AGP_BUILD_INTEL_GENERIC
-       void (*intel_generic_setup) (void);
-#endif
-#ifdef AGP_BUILD_INTEL_I810
-       void (*intel_i810_setup) (struct pci_dev *);
-#endif
-#ifdef AGP_BUILD_VIA_GENERIC
-       void (*via_generic_setup) (void);
-#endif
-#ifdef AGP_BUILD_SIS_GENERIC
-       void (*sis_generic_setup) (void);
-#endif
-#ifdef AGP_BUILD_AMD_IRONGATE
-       void (*amd_irongate_setup) (void);
-#endif
-#ifdef AGP_BUILD_ALI_M1541
-       void (*ali_generic_setup) (void);
-#endif
-};
-
-#define OUTREG32(mmap, addr, val)   *(volatile u32 *)(mmap + (addr)) = (val)
-#define OUTREG16(mmap, addr, val)   *(volatile u16 *)(mmap + (addr)) = (val)
-#define OUTREG8 (mmap, addr, val)   *(volatile u8 *) (mmap + (addr)) = (val)
-
-#define INREG32(mmap, addr)         *(volatile u32 *)(mmap + (addr))
-#define INREG16(mmap, addr)         *(volatile u16 *)(mmap + (addr))
-#define INREG8 (mmap, addr)         *(volatile u8 *) (mmap + (addr))
-
-#ifndef min
-#define min(a,b) (((a)<(b))?(a):(b))
-#endif
-
-#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page)
-
-#ifndef PCI_DEVICE_ID_VIA_82C691_0
-#define PCI_DEVICE_ID_VIA_82C691_0      0x0691
-#endif
-#ifndef PCI_DEVICE_ID_VIA_82C691_1
-#define PCI_DEVICE_ID_VIA_82C691_1      0x8691
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_0
-#define PCI_DEVICE_ID_INTEL_810_0       0x7120
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_DC100_0
-#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_E_0
-#define PCI_DEVICE_ID_INTEL_810_E_0     0x7124
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_82443GX_0
-#define PCI_DEVICE_ID_INTEL_82443GX_0   0x71a0
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_1
-#define PCI_DEVICE_ID_INTEL_810_1       0x7121
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_DC100_1
-#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_E_1
-#define PCI_DEVICE_ID_INTEL_810_E_1     0x7125
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_82443GX_1
-#define PCI_DEVICE_ID_INTEL_82443GX_1   0x71a1
-#endif
-#ifndef PCI_DEVICE_ID_AMD_IRONGATE_0
-#define PCI_DEVICE_ID_AMD_IRONGATE_0    0x7006
-#endif
-#ifndef PCI_VENDOR_ID_AL
-#define PCI_VENDOR_ID_AL               0x10b9
-#endif
-#ifndef PCI_DEVICE_ID_AL_M1541_0
-#define PCI_DEVICE_ID_AL_M1541_0       0x1541
-#endif
-
-/* intel register */
-#define INTEL_APBASE    0x10
-#define INTEL_APSIZE    0xb4
-#define INTEL_ATTBASE   0xb8
-#define INTEL_AGPCTRL   0xb0
-#define INTEL_NBXCFG    0x50
-#define INTEL_ERRSTS    0x91
-
-/* intel i810 registers */
-#define I810_GMADDR 0x10
-#define I810_MMADDR 0x14
-#define I810_PTE_BASE          0x10000
-#define I810_PTE_MAIN_UNCACHED 0x00000000
-#define I810_PTE_LOCAL         0x00000002
-#define I810_PTE_VALID         0x00000001
-#define I810_SMRAM_MISCC       0x70
-#define I810_GFX_MEM_WIN_SIZE  0x00010000
-#define I810_GFX_MEM_WIN_32M   0x00010000
-#define I810_GMS               0x000000c0
-#define I810_GMS_DISABLE       0x00000000
-#define I810_PGETBL_CTL        0x2020
-#define I810_PGETBL_ENABLED    0x00000001
-#define I810_DRAM_CTL          0x3000
-#define I810_DRAM_ROW_0        0x00000001
-#define I810_DRAM_ROW_0_SDRAM  0x00000001
-
-/* VIA register */
-#define VIA_APBASE      0x10
-#define VIA_GARTCTRL    0x80
-#define VIA_APSIZE      0x84
-#define VIA_ATTBASE     0x88
-
-/* SiS registers */
-#define SIS_APBASE      0x10
-#define SIS_ATTBASE     0x90
-#define SIS_APSIZE      0x94
-#define SIS_TLBCNTRL    0x97
-#define SIS_TLBFLUSH    0x98
-
-/* AMD registers */
-#define AMD_APBASE      0x10
-#define AMD_MMBASE      0x14
-#define AMD_APSIZE      0xac
-#define AMD_MODECNTL    0xb0
-#define AMD_GARTENABLE  0x02   /* In mmio region (16-bit register) */
-#define AMD_ATTBASE     0x04   /* In mmio region (32-bit register) */
-#define AMD_TLBFLUSH    0x0c   /* In mmio region (32-bit register) */
-#define AMD_CACHEENTRY  0x10   /* In mmio region (32-bit register) */
-
-/* ALi registers */
-#define ALI_APBASE     0x10
-#define ALI_AGPCTRL    0xb8
-#define ALI_ATTBASE    0xbc
-#define ALI_TLBCTRL    0xc0
-
-#endif                         /* _AGP_BACKEND_PRIV_H */
diff --git a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c
new file mode 100644 (file)
index 0000000..72f8779
--- /dev/null
@@ -0,0 +1,2053 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#define EXPORT_SYMTAB
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
+MODULE_PARM(agp_try_unsupported, "1i");
+EXPORT_SYMBOL(agp_free_memory);
+EXPORT_SYMBOL(agp_allocate_memory);
+EXPORT_SYMBOL(agp_copy_info);
+EXPORT_SYMBOL(agp_bind_memory);
+EXPORT_SYMBOL(agp_unbind_memory);
+EXPORT_SYMBOL(agp_enable);
+EXPORT_SYMBOL(agp_backend_acquire);
+EXPORT_SYMBOL(agp_backend_release);
+
+static void flush_cache(void);
+
+static struct agp_bridge_data agp_bridge;
+static int agp_try_unsupported __initdata = 0;
+#ifdef __SMP__
+static atomic_t cpus_waiting;
+
+static void ipi_handler(void *null)
+{
+       flush_cache();
+       atomic_dec(&cpus_waiting);
+       while (atomic_read(&cpus_waiting) > 0)
+               barrier();
+}
+
+static void smp_flush_cache(void)
+{
+       atomic_set(&cpus_waiting, smp_num_cpus - 1);
+       if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
+               panic("agpgart: timed out waiting for the other CPUs!\n");
+       flush_cache();
+       while (atomic_read(&cpus_waiting) > 0)
+               barrier();
+}
+#define global_cache_flush smp_flush_cache
+#else                          /* __SMP__ */
+#define global_cache_flush flush_cache
+#endif                         /* __SMP__ */
+
+static void flush_cache(void)
+{
+       asm volatile ("wbinvd":::"memory");
+}
+
+
+int agp_backend_acquire(void)
+{
+       atomic_inc(&agp_bridge.agp_in_use);
+
+       if (atomic_read(&agp_bridge.agp_in_use) != 1) {
+               atomic_dec(&agp_bridge.agp_in_use);
+               return -EBUSY;
+       }
+       MOD_INC_USE_COUNT;
+       return 0;
+}
+
+void agp_backend_release(void)
+{
+       atomic_dec(&agp_bridge.agp_in_use);
+       MOD_DEC_USE_COUNT;
+}
+
+/* 
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation
+ * and by default they reserve the allocated 
+ * memory.  They also handle incrementing the
+ * current_memory_agp value, Which is checked
+ * against a maximum value.
+ */
+
+static unsigned long agp_alloc_page(void)
+{
+       void *pt;
+
+       pt = (void *) __get_free_page(GFP_KERNEL);
+       if (pt == NULL) {
+               return 0;
+       }
+       atomic_inc(&mem_map[MAP_NR(pt)].count);
+       set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+       atomic_inc(&agp_bridge.current_memory_agp);
+       return (unsigned long) pt;
+}
+
+static void agp_destroy_page(unsigned long page)
+{
+       void *pt = (void *) page;
+
+       if (pt == NULL) {
+               return;
+       }
+       atomic_dec(&mem_map[MAP_NR(pt)].count);
+       clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+       wake_up(&mem_map[MAP_NR(pt)].wait);
+       free_page((unsigned long) pt);
+       atomic_dec(&agp_bridge.current_memory_agp);
+}
+
+/* End Basic Page Allocation Routines */
+
+/* 
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the
+ * brunt of the work.
+ */
+
+
+static void agp_free_key(int key)
+{
+
+       if (key < 0) {
+               return;
+       }
+       if (key < MAXKEY) {
+               clear_bit(key, agp_bridge.key_list);
+       }
+}
+
+static int agp_get_key(void)
+{
+       int bit;
+
+       bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
+       if (bit < MAXKEY) {
+               set_bit(bit, agp_bridge.key_list);
+               return bit;
+       }
+       return -1;
+}
+
+static agp_memory *agp_create_memory(int scratch_pages)
+{
+       agp_memory *new;
+
+       new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
+
+       if (new == NULL) {
+               return NULL;
+       }
+       memset(new, 0, sizeof(agp_memory));
+       new->key = agp_get_key();
+
+       if (new->key < 0) {
+               kfree(new);
+               return NULL;
+       }
+       new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+       if (new->memory == NULL) {
+               agp_free_key(new->key);
+               kfree(new);
+               return NULL;
+       }
+       new->num_scratch_pages = scratch_pages;
+       return new;
+}
+
+void agp_free_memory(agp_memory * curr)
+{
+       int i;
+
+       if (curr == NULL) {
+               return;
+       }
+       if (curr->is_bound == TRUE) {
+               agp_unbind_memory(curr);
+       }
+       if (curr->type != 0) {
+               agp_bridge.free_by_type(curr);
+               MOD_DEC_USE_COUNT;
+               return;
+       }
+       if (curr->page_count != 0) {
+               for (i = 0; i < curr->page_count; i++) {
+                       curr->memory[i] &= ~(0x00000fff);
+                       agp_destroy_page((unsigned long)
+                                        phys_to_virt(curr->memory[i]));
+               }
+       }
+       agp_free_key(curr->key);
+       vfree(curr->memory);
+       kfree(curr);
+       MOD_DEC_USE_COUNT;
+}
+
+#define ENTRIES_PER_PAGE               (PAGE_SIZE / sizeof(unsigned long))
+
+agp_memory *agp_allocate_memory(size_t page_count, u32 type)
+{
+       int scratch_pages;
+       agp_memory *new;
+       int i;
+
+       if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
+           agp_bridge.max_memory_agp) {
+               return NULL;
+       }
+       if (type != 0) {
+               new = agp_bridge.alloc_by_type(page_count, type);
+               return new;
+       }
+       scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+       new = agp_create_memory(scratch_pages);
+
+       if (new == NULL) {
+               return NULL;
+       }
+       for (i = 0; i < page_count; i++) {
+               new->memory[i] = agp_alloc_page();
+
+               if (new->memory[i] == 0) {
+                       /* Free this structure */
+                       agp_free_memory(new);
+                       return NULL;
+               }
+               new->memory[i] =
+                   agp_bridge.mask_memory(
+                                  virt_to_phys((void *) new->memory[i]),
+                                                 type);
+               new->page_count++;
+       }
+
+       MOD_INC_USE_COUNT;
+       return new;
+}
+
+/* End - Generic routines for handling agp_memory structures */
+
+static int agp_return_size(void)
+{
+       int current_size;
+       void *temp;
+
+       temp = agp_bridge.current_size;
+
+       switch (agp_bridge.size_type) {
+       case U8_APER_SIZE:
+               current_size = A_SIZE_8(temp)->size;
+               break;
+       case U16_APER_SIZE:
+               current_size = A_SIZE_16(temp)->size;
+               break;
+       case U32_APER_SIZE:
+               current_size = A_SIZE_32(temp)->size;
+               break;
+       case FIXED_APER_SIZE:
+               current_size = A_SIZE_FIX(temp)->size;
+               break;
+       default:
+               current_size = 0;
+               break;
+       }
+
+       return current_size;
+}
+
+/* Routine to copy over information structure */
+
+/* Fill the caller-provided agp_kern_info with a snapshot of the
+ * bridge state (version, device, chipset type, mode, aperture base
+ * and size, and the max/current AGP memory counters). */
+void agp_copy_info(agp_kern_info * info)
+{
+       memset(info, 0, sizeof(agp_kern_info));
+       info->version.major = agp_bridge.version->major;
+       info->version.minor = agp_bridge.version->minor;
+       info->device = agp_bridge.dev;
+       info->chipset = agp_bridge.type;
+       info->mode = agp_bridge.mode;
+       info->aper_base = agp_bridge.gart_bus_addr;
+       info->aper_size = agp_return_size();
+       info->max_memory = agp_bridge.max_memory_agp;
+       info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
+}
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+/* Bind @curr into the GATT at page offset @pg_start via the
+ * chipset's insert_memory hook.  Returns -EINVAL for a NULL or
+ * already-bound block, or the hook's error code on failure. */
+int agp_bind_memory(agp_memory * curr, off_t pg_start)
+{
+       int ret_val;
+
+       if ((curr == NULL) || (curr->is_bound == TRUE)) {
+               return -EINVAL;
+       }
+       /* Flush CPU caches once before the pages first become visible
+        * through the aperture. */
+       if (curr->is_flushed == FALSE) {
+               CACHE_FLUSH();
+               curr->is_flushed = TRUE;
+       }
+       ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
+
+       if (ret_val != 0) {
+               return ret_val;
+       }
+       curr->is_bound = TRUE;
+       curr->pg_start = pg_start;
+       return 0;
+}
+
+/* Remove a previously bound @curr from the GATT via the chipset's
+ * remove_memory hook.  Returns -EINVAL for a NULL or unbound block,
+ * or the hook's error code on failure. */
+int agp_unbind_memory(agp_memory * curr)
+{
+       int ret_val;
+
+       if (curr == NULL) {
+               return -EINVAL;
+       }
+       if (curr->is_bound != TRUE) {
+               return -EINVAL;
+       }
+       ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
+
+       if (ret_val != 0) {
+               return ret_val;
+       }
+       curr->is_bound = FALSE;
+       curr->pg_start = 0;
+       return 0;
+}
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+/* 
+ * Driver routines - start
+ * Currently this module supports the 
+ * i810, 440lx, 440bx, 440gx, via vp3, via mvp3,
+ * amd irongate, ALi M1541 and generic support for the
+ * SiS chipsets.
+ */
+
+/* Generic Agp routines - Start */
+
+/* Negotiate and enable AGP for the bridge and all AGP-capable VGA
+ * devices.  Three passes: (1) intersect the requested @mode with the
+ * capabilities of every AGP device, (2) pick the best common data
+ * rate and enable the bridge, (3) write the final command word to
+ * every AGP device's command register. */
+static void agp_generic_agp_enable(u32 mode)
+{
+       struct pci_dev *device = NULL;
+       u32 command, scratch, cap_id;
+       u8 cap_ptr;
+
+       pci_read_config_dword(agp_bridge.dev,
+                             agp_bridge.capndx + 4,
+                             &command);
+
+       /*
+        * PASS1: go through all devices that claim to be
+        *        AGP devices and collect their data.
+        */
+
+       while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+                                       device)) != NULL) {
+               /* Skip devices without a capability list (status bit
+                * 0x00100000 in the command/status dword at 0x04). */
+               pci_read_config_dword(device, 0x04, &scratch);
+
+               if (!(scratch & 0x00100000))
+                       continue;
+
+               /* Walk the PCI capability list (pointer at 0x34)
+                * looking for capability ID 0x02 (AGP). */
+               pci_read_config_byte(device, 0x34, &cap_ptr);
+
+               if (cap_ptr != 0x00) {
+                       do {
+                               pci_read_config_dword(device,
+                                                     cap_ptr, &cap_id);
+
+                               if ((cap_id & 0xff) != 0x02)
+                                       cap_ptr = (cap_id >> 8) & 0xff;
+                       }
+                       while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+               }
+               if (cap_ptr != 0x00) {
+                       /*
+                        * Ok, here we have an AGP device. Disable impossible 
+                        * settings, and adjust the readqueue to the minimum.
+                        */
+
+                       pci_read_config_dword(device, cap_ptr + 4, &scratch);
+
+                       /* adjust RQ depth */
+                       command =
+                           ((command & ~0xff000000) |
+                            min((mode & 0xff000000),
+                                min((command & 0xff000000),
+                                    (scratch & 0xff000000))));
+
+                       /* disable SBA if it's not supported */
+                       if (!((command & 0x00000200) &&
+                             (scratch & 0x00000200) &&
+                             (mode & 0x00000200)))
+                               command &= ~0x00000200;
+
+                       /* disable FW if it's not supported */
+                       if (!((command & 0x00000010) &&
+                             (scratch & 0x00000010) &&
+                             (mode & 0x00000010)))
+                               command &= ~0x00000010;
+
+                       /* keep a data-rate bit (4X/2X/1X) only if the
+                        * bridge, the device, and the caller all set it */
+                       if (!((command & 4) &&
+                             (scratch & 4) &&
+                             (mode & 4)))
+                               command &= ~0x00000004;
+
+                       if (!((command & 2) &&
+                             (scratch & 2) &&
+                             (mode & 2)))
+                               command &= ~0x00000002;
+
+                       if (!((command & 1) &&
+                             (scratch & 1) &&
+                             (mode & 1)))
+                               command &= ~0x00000001;
+               }
+       }
+       /*
+        * PASS2: Figure out the 4X/2X/1X setting and enable the
+        *        target (our motherboard chipset).
+        */
+
+       if (command & 4) {
+               command &= ~3;  /* 4X */
+       }
+       if (command & 2) {
+               command &= ~5;  /* 2X */
+       }
+       if (command & 1) {
+               command &= ~6;  /* 1X */
+       }
+       /* set the AGP-enable bit */
+       command |= 0x00000100;
+
+       pci_write_config_dword(agp_bridge.dev,
+                              agp_bridge.capndx + 8,
+                              command);
+
+       /*
+        * PASS3: Go through all AGP devices and update the
+        *        command registers.
+        */
+
+       while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+                                       device)) != NULL) {
+               pci_read_config_dword(device, 0x04, &scratch);
+
+               if (!(scratch & 0x00100000))
+                       continue;
+
+               pci_read_config_byte(device, 0x34, &cap_ptr);
+
+               if (cap_ptr != 0x00) {
+                       do {
+                               pci_read_config_dword(device,
+                                                     cap_ptr, &cap_id);
+
+                               if ((cap_id & 0xff) != 0x02)
+                                       cap_ptr = (cap_id >> 8) & 0xff;
+                       }
+                       while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+               }
+               if (cap_ptr != 0x00)
+                       pci_write_config_dword(device, cap_ptr + 8, command);
+       }
+}
+
+/* Allocate the GATT: for variable-size bridges, try the configured
+ * aperture size and fall back to successively smaller sizes until the
+ * page allocation succeeds; for fixed-size bridges allocate directly.
+ * The table pages are marked reserved, remapped uncached, and every
+ * entry is initialized to the scratch page.  Returns 0 or -ENOMEM. */
+static int agp_generic_create_gatt_table(void)
+{
+       char *table;
+       char *table_end;
+       int size;
+       int page_order;
+       int num_entries;
+       int i;
+       void *temp;
+
+       table = NULL;
+       i = agp_bridge.aperture_size_idx;
+       temp = agp_bridge.current_size;
+       size = page_order = num_entries = 0;
+
+       if (agp_bridge.size_type != FIXED_APER_SIZE) {
+               do {
+                       switch (agp_bridge.size_type) {
+                       case U8_APER_SIZE:
+                               size = A_SIZE_8(temp)->size;
+                               page_order =
+                                   A_SIZE_8(temp)->page_order;
+                               num_entries =
+                                   A_SIZE_8(temp)->num_entries;
+                               break;
+                       case U16_APER_SIZE:
+                               size = A_SIZE_16(temp)->size;
+                               page_order = A_SIZE_16(temp)->page_order;
+                               num_entries = A_SIZE_16(temp)->num_entries;
+                               break;
+                       case U32_APER_SIZE:
+                               size = A_SIZE_32(temp)->size;
+                               page_order = A_SIZE_32(temp)->page_order;
+                               num_entries = A_SIZE_32(temp)->num_entries;
+                               break;
+                               /* This case will never really happen. */
+                       case FIXED_APER_SIZE:
+                       default:
+                               size = page_order = num_entries = 0;
+                               break;
+                       }
+
+                       table = (char *) __get_free_pages(GFP_KERNEL,
+                                                         page_order);
+
+                       if (table == NULL) {
+                               /* Allocation failed: step to the next
+                                * (smaller) aperture size and retry.
+                                * NOTE(review): on the final failed
+                                * iteration current_size is advanced
+                                * past the table before the loop bound
+                                * is checked -- confirm A_IDX*() never
+                                * dereferences out of range here. */
+                               i++;
+                               switch (agp_bridge.size_type) {
+                               case U8_APER_SIZE:
+                                       agp_bridge.current_size = A_IDX8();
+                                       break;
+                               case U16_APER_SIZE:
+                                       agp_bridge.current_size = A_IDX16();
+                                       break;
+                               case U32_APER_SIZE:
+                                       agp_bridge.current_size = A_IDX32();
+                                       break;
+                                       /* This case will never really 
+                                        * happen. 
+                                        */
+                               case FIXED_APER_SIZE:
+                               default:
+                                       agp_bridge.current_size =
+                                           agp_bridge.current_size;
+                                       break;
+                               }
+                       } else {
+                               agp_bridge.aperture_size_idx = i;
+                       }
+               } while ((table == NULL) &&
+                        (i < agp_bridge.num_aperture_sizes));
+       } else {
+               size = ((aper_size_info_fixed *) temp)->size;
+               page_order = ((aper_size_info_fixed *) temp)->page_order;
+               num_entries = ((aper_size_info_fixed *) temp)->num_entries;
+               table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+       }
+
+       if (table == NULL) {
+               return -ENOMEM;
+       }
+       table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+       /* Reserve the table's pages so they are never swapped/freed
+        * behind our back. */
+       for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
+               set_bit(PG_reserved, &mem_map[i].flags);
+       }
+
+       agp_bridge.gatt_table_real = (unsigned long *) table;
+       CACHE_FLUSH();
+       /* Access the GATT through an uncached mapping so PTE writes
+        * reach memory immediately. */
+       agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
+                                       (PAGE_SIZE * (1 << page_order)));
+       CACHE_FLUSH();
+
+       if (agp_bridge.gatt_table == NULL) {
+               for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
+                       clear_bit(PG_reserved, &mem_map[i].flags);
+               }
+
+               free_pages((unsigned long) table, page_order);
+
+               return -ENOMEM;
+       }
+       agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
+
+       /* Point every entry at the scratch page so stray aperture
+        * accesses hit harmless memory. */
+       for (i = 0; i < num_entries; i++) {
+               agp_bridge.gatt_table[i] =
+                   (unsigned long) agp_bridge.scratch_page;
+       }
+
+       return 0;
+}
+
+/* Tear down the GATT created by agp_generic_create_gatt_table():
+ * unmap the uncached view, clear PG_reserved on the table's pages,
+ * and free them.  Always returns 0. */
+static int agp_generic_free_gatt_table(void)
+{
+       int i;
+       int page_order;
+       char *table, *table_end;
+       void *temp;
+
+       temp = agp_bridge.current_size;
+
+       switch (agp_bridge.size_type) {
+       case U8_APER_SIZE:
+               page_order = A_SIZE_8(temp)->page_order;
+               break;
+       case U16_APER_SIZE:
+               page_order = A_SIZE_16(temp)->page_order;
+               break;
+       case U32_APER_SIZE:
+               page_order = A_SIZE_32(temp)->page_order;
+               break;
+       case FIXED_APER_SIZE:
+               page_order = A_SIZE_FIX(temp)->page_order;
+               break;
+       default:
+               page_order = 0;
+               break;
+       }
+
+       /* Do not worry about freeing memory, because if this is
+        * called, then all agp memory is deallocated and removed
+        * from the table.
+        */
+
+       iounmap(agp_bridge.gatt_table);
+       table = (char *) agp_bridge.gatt_table_real;
+       table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+       for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
+               clear_bit(PG_reserved, &mem_map[i].flags);
+       }
+
+       free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
+       return 0;
+}
+
+/* Write @mem's pre-masked page entries into the GATT starting at
+ * @pg_start.  Rejects typed memory (-EINVAL), out-of-range requests
+ * (-EINVAL), and ranges that overlap existing entries (-EBUSY);
+ * flushes the TLB afterwards. */
+static int agp_generic_insert_memory(agp_memory * mem,
+                                    off_t pg_start, int type)
+{
+       int i, j, num_entries;
+       void *temp;
+
+       temp = agp_bridge.current_size;
+
+       switch (agp_bridge.size_type) {
+       case U8_APER_SIZE:
+               num_entries = A_SIZE_8(temp)->num_entries;
+               break;
+       case U16_APER_SIZE:
+               num_entries = A_SIZE_16(temp)->num_entries;
+               break;
+       case U32_APER_SIZE:
+               num_entries = A_SIZE_32(temp)->num_entries;
+               break;
+       case FIXED_APER_SIZE:
+               num_entries = A_SIZE_FIX(temp)->num_entries;
+               break;
+       default:
+               num_entries = 0;
+               break;
+       }
+
+       if (type != 0 || mem->type != 0) {
+               /* The generic routines know nothing of memory types */
+               return -EINVAL;
+       }
+       if ((pg_start + mem->page_count) > num_entries) {
+               return -EINVAL;
+       }
+       j = pg_start;
+
+       /* Make sure the whole target range is currently unused. */
+       while (j < (pg_start + mem->page_count)) {
+               if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+                       return -EBUSY;
+               }
+               j++;
+       }
+
+       if (mem->is_flushed == FALSE) {
+               CACHE_FLUSH();
+               mem->is_flushed = TRUE;
+       }
+       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+               agp_bridge.gatt_table[j] = mem->memory[i];
+       }
+
+       agp_bridge.tlb_flush(mem);
+       return 0;
+}
+
+/* Undo agp_generic_insert_memory(): repoint @mem's GATT range back
+ * at the scratch page and flush the TLB.  Typed memory -> -EINVAL. */
+static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
+                                    int type)
+{
+       int i;
+
+       if (type != 0 || mem->type != 0) {
+               /* The generic routines know nothing of memory types */
+               return -EINVAL;
+       }
+       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+               agp_bridge.gatt_table[i] =
+                   (unsigned long) agp_bridge.scratch_page;
+       }
+
+       agp_bridge.tlb_flush(mem);
+       return 0;
+}
+
+/* The generic backend supports no special memory types. */
+static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+       return NULL;
+}
+
+/* Release a typed agp_memory block: free its page array (if any),
+ * return its key to the key pool, and free the structure itself. */
+static void agp_generic_free_by_type(agp_memory * curr)
+{
+       if (curr->memory != NULL) {
+               vfree(curr->memory);
+       }
+       agp_free_key(curr->key);
+       kfree(curr);
+}
+
+/* Public entry point: delegate AGP enabling to the chipset hook. */
+void agp_enable(u32 mode)
+{
+       agp_bridge.agp_enable(mode);
+}
+
+/* End - Generic Agp routines */
+
+#ifdef CONFIG_AGP_I810
+/* Fixed aperture modes for the i810 (fields per aper_size_info_fixed,
+ * declared elsewhere). */
+static aper_size_info_fixed intel_i810_sizes[] =
+{
+       {64, 16384, 4},
+     /* The 32M mode still requires a 64k gatt */
+       {32, 8192, 4}
+};
+
+/* Driver-private memory type for the i810's on-chip dcache pages. */
+#define AGP_DCACHE_MEMORY 1
+
+/* PTE masks: index 0 = normal memory, index 1 = local dcache. */
+static gatt_mask intel_i810_masks[] =
+{
+       {I810_PTE_VALID, 0},
+       {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY}
+};
+
+/* Per-device state for the i810 backend. */
+static struct _intel_i810_private {
+       struct pci_dev *i810_dev;       /* device one */
+       volatile u8 *registers;
+       int num_dcache_entries;
+} intel_i810_private;
+
+/* Read I810_SMRAM_MISCC to select between the two fixed aperture
+ * modes; records current/previous size and the size index, and
+ * returns the size.  Returns 0 if the graphics memory is disabled. */
+static int intel_i810_fetch_size(void)
+{
+       u32 smram_miscc;
+       aper_size_info_fixed *values;
+
+       pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
+       values = A_SIZE_FIX(agp_bridge.aperture_sizes);
+
+       if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+               printk("agpgart: i810 is disabled\n");
+               return 0;
+       }
+       if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+               agp_bridge.previous_size =
+                   agp_bridge.current_size = (void *) (values + 1);
+               agp_bridge.aperture_size_idx = 1;
+               return values[1].size;
+       } else {
+               agp_bridge.previous_size =
+                   agp_bridge.current_size = (void *) (values);
+               agp_bridge.aperture_size_idx = 0;
+               return values[0].size;
+       }
+
+       /* NOTE(review): unreachable -- both branches above return. */
+       return 0;
+}
+
+/* Program the i810: map its MMIO registers, detect dedicated video
+ * ram via I810_DRAM_CTL, record the aperture bus address, point the
+ * chip at our GATT (I810_PGETBL_CTL), and optionally seed every PTE
+ * with the scratch page.  Always returns 0. */
+static int intel_i810_configure(void)
+{
+       aper_size_info_fixed *current_size;
+       u32 temp;
+       int i;
+
+       current_size = A_SIZE_FIX(agp_bridge.current_size);
+
+       pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
+       temp &= 0xfff80000;
+
+       /* Map 128 pages of chip registers. */
+       intel_i810_private.registers =
+           (volatile u8 *) ioremap(temp, 128 * 4096);
+
+       if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
+            & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+               /* This will need to be dynamically assigned */
+               printk(KERN_INFO
+                      "agpgart: detected 4MB dedicated video ram.\n");
+               intel_i810_private.num_dcache_entries = 1024;
+       }
+       pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
+       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+       OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
+                agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
+       CACHE_FLUSH();
+
+       if (agp_bridge.needs_scratch_page == TRUE) {
+               for (i = 0; i < current_size->num_entries; i++) {
+                       OUTREG32(intel_i810_private.registers,
+                                I810_PTE_BASE + (i * 4),
+                                agp_bridge.scratch_page);
+               }
+       }
+       return 0;
+}
+
+/* Disable the page-table control register and unmap the MMIO view. */
+static void intel_i810_cleanup(void)
+{
+       OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
+       iounmap((void *) intel_i810_private.registers);
+}
+
+/* No-op: the i810 needs no explicit TLB flush here. */
+static void intel_i810_tlbflush(agp_memory * mem)
+{
+       return;
+}
+
+/* No-op: the i810 backend does not use the generic AGP enable. */
+static void intel_i810_agp_enable(u32 mode)
+{
+       return;
+}
+
+/* Write PTEs for @mem directly into the i810's MMIO PTE window.
+ * AGP_DCACHE_MEMORY gets a special local-memory mapping; any other
+ * nonzero type is rejected.  Returns 0, -EINVAL, or -EBUSY. */
+static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
+                                    int type)
+{
+       int i, j, num_entries;
+       void *temp;
+
+       temp = agp_bridge.current_size;
+       num_entries = A_SIZE_FIX(temp)->num_entries;
+
+       if ((pg_start + mem->page_count) > num_entries) {
+               return -EINVAL;
+       }
+       /* Refuse to overwrite entries that are already in use. */
+       for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+               if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+                       return -EBUSY;
+               }
+       }
+
+       if (type != 0 || mem->type != 0) {
+               if ((type == AGP_DCACHE_MEMORY) &&
+                   (mem->type == AGP_DCACHE_MEMORY)) {
+                       /* special insert */
+
+                       for (i = pg_start;
+                            i < (pg_start + mem->page_count); i++) {
+                               OUTREG32(intel_i810_private.registers,
+                                        I810_PTE_BASE + (i * 4),
+                                        (i * 4096) | I810_PTE_LOCAL |
+                                        I810_PTE_VALID);
+                       }
+
+                       agp_bridge.tlb_flush(mem);
+                       return 0;
+               }
+               return -EINVAL;
+       }
+       if (mem->is_flushed == FALSE) {
+               CACHE_FLUSH();
+               mem->is_flushed = TRUE;
+       }
+       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+               OUTREG32(intel_i810_private.registers,
+                        I810_PTE_BASE + (j * 4), mem->memory[i]);
+       }
+
+       agp_bridge.tlb_flush(mem);
+       return 0;
+}
+
+/* Repoint @mem's range of i810 PTEs at the scratch page and flush
+ * the TLB.  Always returns 0. */
+static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
+                                    int type)
+{
+       int i;
+
+       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+               OUTREG32(intel_i810_private.registers,
+                        I810_PTE_BASE + (i * 4),
+                        agp_bridge.scratch_page);
+       }
+
+       agp_bridge.tlb_flush(mem);
+       return 0;
+}
+
+/* Allocate an agp_memory descriptor for the i810's dedicated dcache.
+ * Only AGP_DCACHE_MEMORY with exactly num_dcache_entries pages is
+ * supported; no backing page array is kept (dcache is on-chip). */
+static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+       agp_memory *new;
+
+       if (type == AGP_DCACHE_MEMORY) {
+               if (pg_count != intel_i810_private.num_dcache_entries) {
+                       return NULL;
+               }
+               new = agp_create_memory(1);
+
+               if (new == NULL) {
+                       return NULL;
+               }
+               new->type = AGP_DCACHE_MEMORY;
+               new->page_count = pg_count;
+               new->num_scratch_pages = 0;
+               vfree(new->memory);
+               /* NOTE(review): new->memory is left dangling after the
+                * vfree above -- confirm no caller dereferences it for
+                * DCACHE-typed blocks. */
+               return new;
+       }
+       return NULL;
+}
+
+/* Free a dcache descriptor: no page array to release, just the key
+ * and the structure. */
+static void intel_i810_free_by_type(agp_memory * curr)
+{
+       agp_free_key(curr->key);
+       kfree(curr);
+}
+
+/* OR the per-type i810 PTE bits into a physical address. */
+static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
+{
+       /* Type checking must be done elsewhere */
+       return addr | agp_bridge.masks[type].mask;
+}
+
+/* Install the i810 driver methods and tables into agp_bridge. */
+static void intel_i810_setup(struct pci_dev *i810_dev)
+{
+       intel_i810_private.i810_dev = i810_dev;
+
+       agp_bridge.masks = intel_i810_masks;
+       agp_bridge.num_of_masks = 2;
+       agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
+       agp_bridge.size_type = FIXED_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 2;
+       agp_bridge.dev_private_data = (void *) &intel_i810_private;
+       agp_bridge.needs_scratch_page = TRUE;
+       agp_bridge.configure = intel_i810_configure;
+       agp_bridge.fetch_size = intel_i810_fetch_size;
+       agp_bridge.cleanup = intel_i810_cleanup;
+       agp_bridge.tlb_flush = intel_i810_tlbflush;
+       agp_bridge.mask_memory = intel_i810_mask_memory;
+       agp_bridge.agp_enable = intel_i810_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = intel_i810_insert_entries;
+       agp_bridge.remove_memory = intel_i810_remove_entries;
+       agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
+       agp_bridge.free_by_type = intel_i810_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_INTEL
+
+/* Match the INTEL_APSIZE register against the size table; on a hit,
+ * record current/previous size and the index and return the size.
+ * Returns 0 if no table entry matches. */
+static int intel_fetch_size(void)
+{
+       int i;
+       u16 temp;
+       aper_size_info_16 *values;
+
+       pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
+       values = A_SIZE_16(agp_bridge.aperture_sizes);
+
+       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+               if (temp == values[i].size_value) {
+                       agp_bridge.previous_size =
+                           agp_bridge.current_size = (void *) (values + i);
+                       agp_bridge.aperture_size_idx = i;
+                       return values[i].size;
+               }
+       }
+
+       return 0;
+}
+
+/* Pulse the AGP control register to flush the chipset TLB. */
+static void intel_tlbflush(agp_memory * mem)
+{
+       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
+       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+}
+
+/* Restore the previous aperture size and clear bit 9 of NBXCFG
+ * (undoing intel_configure()). */
+static void intel_cleanup(void)
+{
+       u16 temp;
+       aper_size_info_16 *previous_size;
+
+       previous_size = A_SIZE_16(agp_bridge.previous_size);
+       pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
+       pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
+       pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+                             previous_size->size_value);
+}
+
+/* Program the Intel 440-class bridge: aperture size, aperture base,
+ * GATT base, AGP control, NBXCFG bits, and clear pending errors.
+ * Always returns 0. */
+static int intel_configure(void)
+{
+       u32 temp;
+       u16 temp2;
+       aper_size_info_16 *current_size;
+
+       current_size = A_SIZE_16(agp_bridge.current_size);
+
+       /* aperture size */
+       pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+                             current_size->size_value);
+
+       /* address to map to */
+       pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+       /* attbase - aperture base */
+       pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+                              agp_bridge.gatt_bus_addr);
+
+       /* agpctrl */
+       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+
+       /* paccfg/nbxcfg */
+       pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
+       pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
+                             (temp2 & ~(1 << 10)) | (1 << 9));
+       /* clear any possible error conditions */
+       pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
+       return 0;
+}
+
+/* OR the chipset's single GATT mask into a physical address. */
+static unsigned long intel_mask_memory(unsigned long addr, int type)
+{
+       /* Memory type is ignored */
+
+       return addr | agp_bridge.masks[0].mask;
+}
+
+
+/* Setup function */
+/* Single GATT mask used for all Intel generic memory. */
+static gatt_mask intel_generic_masks[] =
+{
+       {0x00000017, 0}
+};
+
+/* Aperture size table for generic Intel bridges (fields per
+ * aper_size_info_16, declared elsewhere). */
+static aper_size_info_16 intel_generic_sizes[7] =
+{
+       {256, 65536, 6, 0},
+       {128, 32768, 5, 32},
+       {64, 16384, 4, 48},
+       {32, 8192, 3, 56},
+       {16, 4096, 2, 60},
+       {8, 2048, 1, 62},
+       {4, 1024, 0, 63}
+};
+
+/* Install the generic Intel driver methods and tables into agp_bridge. */
+static void intel_generic_setup(void)
+{
+       agp_bridge.masks = intel_generic_masks;
+       agp_bridge.num_of_masks = 1;
+       agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
+       agp_bridge.size_type = U16_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 7;
+       agp_bridge.dev_private_data = NULL;
+       agp_bridge.needs_scratch_page = FALSE;
+       agp_bridge.configure = intel_configure;
+       agp_bridge.fetch_size = intel_fetch_size;
+       agp_bridge.cleanup = intel_cleanup;
+       agp_bridge.tlb_flush = intel_tlbflush;
+       agp_bridge.mask_memory = intel_mask_memory;
+       agp_bridge.agp_enable = agp_generic_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = agp_generic_insert_memory;
+       agp_bridge.remove_memory = agp_generic_remove_memory;
+       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+       agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_VIA
+
+/* Match the VIA_APSIZE register against the size table; on a hit,
+ * record current/previous size and the index and return the size.
+ * Returns 0 if no table entry matches. */
+static int via_fetch_size(void)
+{
+       int i;
+       u8 temp;
+       aper_size_info_8 *values;
+
+       values = A_SIZE_8(agp_bridge.aperture_sizes);
+       pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
+       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+               if (temp == values[i].size_value) {
+                       agp_bridge.previous_size =
+                           agp_bridge.current_size = (void *) (values + i);
+                       agp_bridge.aperture_size_idx = i;
+                       return values[i].size;
+               }
+       }
+
+       return 0;
+}
+
+/* Program the VIA bridge: aperture size, aperture base, GART control,
+ * and the GATT base (low 3 bits of ATTBASE set to 3).  Returns 0. */
+static int via_configure(void)
+{
+       u32 temp;
+       aper_size_info_8 *current_size;
+
+       current_size = A_SIZE_8(agp_bridge.current_size);
+       /* aperture size */
+       pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
+                             current_size->size_value);
+       /* address to map to */
+       pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
+       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+       /* GART control register */
+       pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
+
+       /* attbase - aperture GATT base */
+       pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
+                           (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
+       return 0;
+}
+
+/* Clear the GATT base and restore the previous aperture size
+ * (undoing via_configure()). */
+static void via_cleanup(void)
+{
+       aper_size_info_8 *previous_size;
+
+       previous_size = A_SIZE_8(agp_bridge.previous_size);
+       pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
+       pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
+                             previous_size->size_value);
+}
+
+/* Pulse the GART control register to flush the chipset TLB. */
+static void via_tlbflush(agp_memory * mem)
+{
+       pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
+       pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
+}
+
+/* OR the chipset's single GATT mask into a physical address. */
+static unsigned long via_mask_memory(unsigned long addr, int type)
+{
+       /* Memory type is ignored */
+
+       return addr | agp_bridge.masks[0].mask;
+}
+
+/* Aperture size table for generic VIA bridges (fields per
+ * aper_size_info_8, declared elsewhere). */
+static aper_size_info_8 via_generic_sizes[7] =
+{
+       {256, 65536, 6, 0},
+       {128, 32768, 5, 128},
+       {64, 16384, 4, 192},
+       {32, 8192, 3, 224},
+       {16, 4096, 2, 240},
+       {8, 2048, 1, 248},
+       {4, 1024, 0, 252}
+};
+
+/* Single (empty) GATT mask used for all VIA memory. */
+static gatt_mask via_generic_masks[] =
+{
+       {0x00000000, 0}
+};
+
+/* Install the generic VIA driver methods and tables into agp_bridge. */
+static void via_generic_setup(void)
+{
+       agp_bridge.masks = via_generic_masks;
+       agp_bridge.num_of_masks = 1;
+       agp_bridge.aperture_sizes = (void *) via_generic_sizes;
+       agp_bridge.size_type = U8_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 7;
+       agp_bridge.dev_private_data = NULL;
+       agp_bridge.needs_scratch_page = FALSE;
+       agp_bridge.configure = via_configure;
+       agp_bridge.fetch_size = via_fetch_size;
+       agp_bridge.cleanup = via_cleanup;
+       agp_bridge.tlb_flush = via_tlbflush;
+       agp_bridge.mask_memory = via_mask_memory;
+       agp_bridge.agp_enable = agp_generic_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = agp_generic_insert_memory;
+       agp_bridge.remove_memory = agp_generic_remove_memory;
+       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+       agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_SIS
+
+/* Match the SIS_APSIZE register against the size table, also
+ * accepting a match with the low two bits masked off; on a hit,
+ * record current/previous size and the index and return the size.
+ * Returns 0 if no table entry matches. */
+static int sis_fetch_size(void)
+{
+       u8 temp_size;
+       int i;
+       aper_size_info_8 *values;
+
+       pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
+       values = A_SIZE_8(agp_bridge.aperture_sizes);
+       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+               if ((temp_size == values[i].size_value) ||
+                   ((temp_size & ~(0x03)) ==
+                    (values[i].size_value & ~(0x03)))) {
+                       agp_bridge.previous_size =
+                           agp_bridge.current_size = (void *) (values + i);
+
+                       agp_bridge.aperture_size_idx = i;
+                       return values[i].size;
+               }
+       }
+
+       return 0;
+}
+
+
+/* Write the SiS TLB-flush register to flush the chipset TLB. */
+static void sis_tlbflush(agp_memory * mem)
+{
+       pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
+}
+
+/* Program the SiS bridge: TLB control, aperture base, GATT base,
+ * and aperture size.  Always returns 0. */
+static int sis_configure(void)
+{
+       u32 temp;
+       aper_size_info_8 *current_size;
+
+       current_size = A_SIZE_8(agp_bridge.current_size);
+       pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
+       pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
+       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+       pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
+                              agp_bridge.gatt_bus_addr);
+       pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
+                             current_size->size_value);
+       return 0;
+}
+
+/* Restore the previous aperture size (low two control bits cleared),
+ * undoing sis_configure(). */
+static void sis_cleanup(void)
+{
+       aper_size_info_8 *previous_size;
+
+       previous_size = A_SIZE_8(agp_bridge.previous_size);
+       pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
+                             (previous_size->size_value & ~(0x03)));
+}
+
+/* OR the chipset's single GATT mask into a physical address. */
+static unsigned long sis_mask_memory(unsigned long addr, int type)
+{
+       /* Memory type is ignored */
+
+       return addr | agp_bridge.masks[0].mask;
+}
+
+/* Aperture sizes the SiS bridge supports; sis_fetch_size() matches the
+ * hardware register against the size_value field of each entry. */
+static aper_size_info_8 sis_generic_sizes[7] =
+{
+       {256, 65536, 6, 99},
+       {128, 32768, 5, 83},
+       {64, 16384, 4, 67},
+       {32, 8192, 3, 51},
+       {16, 4096, 2, 35},
+       {8, 2048, 1, 19},
+       {4, 1024, 0, 3}
+};
+
+/* SiS sets no extra bits in GATT entries (see sis_mask_memory). */
+static gatt_mask sis_generic_masks[] =
+{
+       {0x00000000, 0}
+};
+
+/* Install the SiS-specific callbacks into agp_bridge; everything not
+ * chipset-specific uses the agp_generic_* implementations. */
+static void sis_generic_setup(void)
+{
+       agp_bridge.masks = sis_generic_masks;
+       agp_bridge.num_of_masks = 1;
+       agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
+       agp_bridge.size_type = U8_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 7;
+       agp_bridge.dev_private_data = NULL;
+       agp_bridge.needs_scratch_page = FALSE;
+       agp_bridge.configure = sis_configure;
+       agp_bridge.fetch_size = sis_fetch_size;
+       agp_bridge.cleanup = sis_cleanup;
+       agp_bridge.tlb_flush = sis_tlbflush;
+       agp_bridge.mask_memory = sis_mask_memory;
+       agp_bridge.agp_enable = agp_generic_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = agp_generic_insert_memory;
+       agp_bridge.remove_memory = agp_generic_remove_memory;
+       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+       agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_AMD
+
+/* Per-driver state: the ioremap()ed Irongate MMIO register page,
+ * mapped in amd_irongate_configure() and unmapped in cleanup. */
+static struct _amd_irongate_private {
+       volatile u8 *registers;
+} amd_irongate_private;
+
+/*
+ * Read the Irongate aperture-size field (bits 1-3 of AMD_APSIZE) and
+ * look it up in the size table.  Returns the aperture size in MB, or 0
+ * if no table entry matches.
+ */
+static int amd_irongate_fetch_size(void)
+{
+       int i;
+       u32 temp;
+       aper_size_info_32 *values;
+
+       pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+       temp = (temp & 0x0000000e);
+       values = A_SIZE_32(agp_bridge.aperture_sizes);
+       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+               if (temp == values[i].size_value) {
+                       agp_bridge.previous_size =
+                           agp_bridge.current_size = (void *) (values + i);
+
+                       agp_bridge.aperture_size_idx = i;
+                       return values[i].size;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Program the AMD Irongate northbridge: map its MMIO register page,
+ * point the hardware at the GATT, enable GART translation, program the
+ * aperture size and record the aperture bus address.
+ * Returns 0 on success, -ENOMEM if the register page cannot be mapped.
+ */
+static int amd_irongate_configure(void)
+{
+       aper_size_info_32 *current_size;
+       u32 temp;
+       u16 enable_reg;
+
+       current_size = A_SIZE_32(agp_bridge.current_size);
+
+       /* Get the memory mapped registers */
+       pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
+       temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+       amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);
+       if (amd_irongate_private.registers == NULL) {
+               /* ioremap() can fail; previously a NULL mapping would
+                * have been dereferenced by the first OUTREG32 below. */
+               return -ENOMEM;
+       }
+
+       /* Write out the address of the gatt table */
+       OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
+                agp_bridge.gatt_bus_addr);
+
+       /* Write the Sync register */
+       pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);
+
+       /* Write the enable register */
+       enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
+       enable_reg = (enable_reg | 0x0004);
+       OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
+
+       /* Write out the size register: replace the size field (bits 1-3)
+        * and set the enable bit (bit 0), preserving everything else. */
+       pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+       temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+               | 0x00000001);
+       pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
+
+       /* Flush the tlb */
+       OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
+
+       /* Get the address for the gart region */
+       pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
+       temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+       agp_bridge.gart_bus_addr = temp;
+       return 0;
+}
+
+/*
+ * Undo amd_irongate_configure(): disable GART translation, restore the
+ * previous aperture size register and unmap the MMIO register page.
+ */
+static void amd_irongate_cleanup(void)
+{
+       aper_size_info_32 *previous_size;
+       u32 temp;
+       u16 enable_reg;
+
+       previous_size = A_SIZE_32(agp_bridge.previous_size);
+
+       enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
+       enable_reg = (enable_reg & ~(0x0004));
+       OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
+
+       /* Write back the previous size and disable gart translation */
+       pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+       temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+       pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
+       iounmap((void *) amd_irongate_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT, and flushing them individually.  However
+ * currently it just flushes the whole table.  Which is probably
+ * more efficient, since agp_memory blocks can be a large number of
+ * entries.
+ */
+
+static void amd_irongate_tlbflush(agp_memory * temp)
+{
+       OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
+}
+
+/* OR the Irongate GATT-entry valid bit into a physical address.
+ * The memory type argument is ignored (only type 0 is supported). */
+static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
+{
+       /* Only type 0 is supported by the irongate */
+
+       return addr | agp_bridge.masks[0].mask;
+}
+
+/* Aperture sizes the Irongate supports; amd_irongate_fetch_size()
+ * matches AMD_APSIZE bits 1-3 against the size_value field. */
+static aper_size_info_32 amd_irongate_sizes[7] =
+{
+       {2048, 524288, 9, 0x0000000c},
+       {1024, 262144, 8, 0x0000000a},
+       {512, 131072, 7, 0x00000008},
+       {256, 65536, 6, 0x00000006},
+       {128, 32768, 5, 0x00000004},
+       {64, 16384, 4, 0x00000002},
+       {32, 8192, 3, 0x00000000}
+};
+
+/* Bit 0 (valid) is set in every GATT entry (see amd_irongate_mask_memory). */
+static gatt_mask amd_irongate_masks[] =
+{
+       {0x00000001, 0}
+};
+
+/* Install the AMD Irongate callbacks into agp_bridge; non-chipset-
+ * specific operations use the agp_generic_* implementations. */
+static void amd_irongate_setup(void)
+{
+       agp_bridge.masks = amd_irongate_masks;
+       agp_bridge.num_of_masks = 1;
+       agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
+       agp_bridge.size_type = U32_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 7;
+       agp_bridge.dev_private_data = (void *) &amd_irongate_private;
+       agp_bridge.needs_scratch_page = FALSE;
+       agp_bridge.configure = amd_irongate_configure;
+       agp_bridge.fetch_size = amd_irongate_fetch_size;
+       agp_bridge.cleanup = amd_irongate_cleanup;
+       agp_bridge.tlb_flush = amd_irongate_tlbflush;
+       agp_bridge.mask_memory = amd_irongate_mask_memory;
+       agp_bridge.agp_enable = agp_generic_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = agp_generic_insert_memory;
+       agp_bridge.remove_memory = agp_generic_remove_memory;
+       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+       agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_ALI
+
+/*
+ * Read the low nibble of ALI_ATTBASE (the size field) and look it up in
+ * the size table.  Returns the aperture size in MB, or 0 if no table
+ * entry matches.
+ */
+static int ali_fetch_size(void)
+{
+       int i;
+       u32 temp;
+       aper_size_info_32 *values;
+
+       pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
+       /* Keep only the low 4 bits: the rest of the register holds the
+        * GATT base address (see ali_configure). */
+       temp &= ~(0xfffffff0);
+       values = A_SIZE_32(agp_bridge.aperture_sizes);
+
+       for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+               if (temp == values[i].size_value) {
+                       agp_bridge.previous_size =
+                           agp_bridge.current_size = (void *) (values + i);
+                       agp_bridge.aperture_size_idx = i;
+                       return values[i].size;
+               }
+       }
+
+       return 0;
+}
+
+/* Flush the ALi GART TLB by pulsing the low byte of ALI_TLBCTRL
+ * (0x90 then 0x10), preserving the upper bytes of the register.
+ * The agp_memory argument is unused (whole-TLB flush). */
+static void ali_tlbflush(agp_memory * mem)
+{
+       u32 temp;
+
+       pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+                              ((temp & 0xffffff00) | 0x00000090));
+       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+                              ((temp & 0xffffff00) | 0x00000010));
+}
+
+/*
+ * Undo ali_configure(): flush/disable the TLB and restore ALI_ATTBASE
+ * to the original size value (clearing the GATT base address bits).
+ */
+static void ali_cleanup(void)
+{
+       aper_size_info_32 *previous_size;
+       u32 temp;
+
+       previous_size = A_SIZE_32(agp_bridge.previous_size);
+
+       pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+                              ((temp & 0xffffff00) | 0x00000090));
+       pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
+                              previous_size->size_value);
+}
+
+/*
+ * Program the ALi bridge: ALI_ATTBASE holds both the GATT bus address
+ * (upper bits) and the aperture size (low nibble); then enable the TLB
+ * and record the aperture bus address.  Always returns 0.
+ */
+static int ali_configure(void)
+{
+       u32 temp;
+       aper_size_info_32 *current_size;
+
+       current_size = A_SIZE_32(agp_bridge.current_size);
+
+       /* aperture size and gatt addr */
+       pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
+                   agp_bridge.gatt_bus_addr | current_size->size_value);
+
+       /* tlb control */
+       pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+       pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+                              ((temp & 0xffffff00) | 0x00000010));
+
+       /* address to map to */
+       pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
+       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+       return 0;
+}
+
+/* OR the chipset's GATT-entry mask bits into a physical address.
+ * The memory type argument is ignored on ALi. */
+static unsigned long ali_mask_memory(unsigned long addr, int type)
+{
+       /* Memory type is ignored */
+
+       return addr | agp_bridge.masks[0].mask;
+}
+
+
+/* Tables used by ali_generic_setup() below. */
+
+/* ALi sets no extra bits in GATT entries (see ali_mask_memory). */
+static gatt_mask ali_generic_masks[] =
+{
+       {0x00000000, 0}
+};
+
+/* Aperture sizes the ALi bridge supports; ali_fetch_size() matches the
+ * low nibble of ALI_ATTBASE against the size_value field. */
+static aper_size_info_32 ali_generic_sizes[7] =
+{
+       {256, 65536, 6, 10},
+       {128, 32768, 5, 9},
+       {64, 16384, 4, 8},
+       {32, 8192, 3, 7},
+       {16, 4096, 2, 6},
+       {8, 2048, 1, 4},
+       {4, 1024, 0, 3}
+};
+
+/* Install the ALi-specific callbacks into agp_bridge; everything not
+ * chipset-specific uses the agp_generic_* implementations. */
+static void ali_generic_setup(void)
+{
+       agp_bridge.masks = ali_generic_masks;
+       agp_bridge.num_of_masks = 1;
+       agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
+       agp_bridge.size_type = U32_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 7;
+       agp_bridge.dev_private_data = NULL;
+       agp_bridge.needs_scratch_page = FALSE;
+       agp_bridge.configure = ali_configure;
+       agp_bridge.fetch_size = ali_fetch_size;
+       agp_bridge.cleanup = ali_cleanup;
+       agp_bridge.tlb_flush = ali_tlbflush;
+       agp_bridge.mask_memory = ali_mask_memory;
+       agp_bridge.agp_enable = agp_generic_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = agp_generic_insert_memory;
+       agp_bridge.remove_memory = agp_generic_remove_memory;
+       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+       agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+
+
+/* Supported Device Scanning routine */
+
+static void agp_find_supported_device(void)
+{
+       struct pci_dev *dev = NULL;
+       u8 cap_ptr = 0x00;
+       u32 cap_id, scratch;
+
+       if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) {
+               agp_bridge.type = NOT_SUPPORTED;
+               return;
+       }
+       agp_bridge.dev = dev;
+
+       /* Need to test for I810 here */
+#ifdef CONFIG_AGP_I810
+       if (dev->vendor == PCI_VENDOR_ID_INTEL) {
+               struct pci_dev *i810_dev;
+
+               switch (dev->device) {
+               case PCI_DEVICE_ID_INTEL_810_0:
+                       i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+                                              PCI_DEVICE_ID_INTEL_810_1,
+                                                  NULL);
+                       if (i810_dev == NULL) {
+                               printk("agpgart: Detected an Intel i810,"
+                                      " but could not find the secondary"
+                                      " device.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+                       printk(KERN_INFO "agpgart: Detected an Intel "
+                              "i810 Chipset.\n");
+                       agp_bridge.type = INTEL_I810;
+                       agp_bridge.intel_i810_setup(i810_dev);
+                       return;
+
+               case PCI_DEVICE_ID_INTEL_810_DC100_0:
+                       i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+                                        PCI_DEVICE_ID_INTEL_810_DC100_1,
+                                                  NULL);
+                       if (i810_dev == NULL) {
+                               printk("agpgart: Detected an Intel i810 "
+                                      "DC100, but could not find the "
+                                      "secondary device.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+                       printk(KERN_INFO "agpgart: Detected an Intel i810 "
+                              "DC100 Chipset.\n");
+                       agp_bridge.type = INTEL_I810;
+                       agp_bridge.intel_i810_setup(i810_dev);
+                       return;
+
+               case PCI_DEVICE_ID_INTEL_810_E_0:
+                       i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+                                            PCI_DEVICE_ID_INTEL_810_E_1,
+                                                  NULL);
+                       if (i810_dev == NULL) {
+                               printk("agpgart: Detected an Intel i810 E"
+                                   ", but could not find the secondary "
+                                      "device.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+                       printk(KERN_INFO "agpgart: Detected an Intel i810 E "
+                              "Chipset.\n");
+                       agp_bridge.type = INTEL_I810;
+                       agp_bridge.intel_i810_setup(i810_dev);
+                       return;
+               default:
+                       break;
+               }
+       }
+#endif
+       /* find capndx */
+       pci_read_config_dword(dev, 0x04, &scratch);
+
+       if (!(scratch & 0x00100000)) {
+               agp_bridge.type = NOT_SUPPORTED;
+               return;
+       }
+       pci_read_config_byte(dev, 0x34, &cap_ptr);
+
+       if (cap_ptr != 0x00) {
+               do {
+                       pci_read_config_dword(dev, cap_ptr, &cap_id);
+
+                       if ((cap_id & 0xff) != 0x02)
+                               cap_ptr = (cap_id >> 8) & 0xff;
+               }
+               while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+       }
+       if (cap_ptr == 0x00) {
+               agp_bridge.type = NOT_SUPPORTED;
+               return;
+       }
+       agp_bridge.capndx = cap_ptr;
+
+       /* Fill in the mode register */
+       pci_read_config_dword(agp_bridge.dev,
+                             agp_bridge.capndx + 4,
+                             &agp_bridge.mode);
+
+       switch (dev->vendor) {
+#ifdef CONFIG_AGP_INTEL
+       case PCI_VENDOR_ID_INTEL:
+               switch (dev->device) {
+               case PCI_DEVICE_ID_INTEL_82443LX_0:
+                       agp_bridge.type = INTEL_LX;
+                       printk(KERN_INFO "agpgart: Detected an Intel 440LX"
+                              " Chipset.\n");
+                       agp_bridge.intel_generic_setup();
+                       return;
+
+               case PCI_DEVICE_ID_INTEL_82443BX_0:
+                       agp_bridge.type = INTEL_BX;
+                       printk(KERN_INFO "agpgart: Detected an Intel 440BX "
+                              "Chipset.\n");
+                       agp_bridge.intel_generic_setup();
+                       return;
+
+               case PCI_DEVICE_ID_INTEL_82443GX_0:
+                       agp_bridge.type = INTEL_GX;
+                       printk(KERN_INFO "agpgart: Detected an Intel 440GX "
+                              "Chipset.\n");
+                       agp_bridge.intel_generic_setup();
+                       return;
+
+               default:
+                       if (agp_try_unsupported != 0) {
+                               printk("agpgart: Trying generic intel "
+                                      "routines for device id: %x\n",
+                                      dev->device);
+                               agp_bridge.type = INTEL_GENERIC;
+                               agp_bridge.intel_generic_setup();
+                               return;
+                       } else {
+                               printk("agpgart: Unsupported intel chipset,"
+                                      " you might want to try "
+                                      "agp_try_unsupported=1.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+               }
+               break;
+#endif
+
+#ifdef CONFIG_AGP_VIA
+       case PCI_VENDOR_ID_VIA:
+               switch (dev->device) {
+               case PCI_DEVICE_ID_VIA_82C597_0:
+                       agp_bridge.type = VIA_VP3;
+                       printk(KERN_INFO "agpgart: Detected a VIA VP3 "
+                              "Chipset.\n");
+                       agp_bridge.via_generic_setup();
+                       return;
+
+               case PCI_DEVICE_ID_VIA_82C598_0:
+                       agp_bridge.type = VIA_MVP3;
+                       printk(KERN_INFO "agpgart: Detected a VIA MVP3 "
+                              "Chipset.\n");
+                       agp_bridge.via_generic_setup();
+                       return;
+
+               case PCI_DEVICE_ID_VIA_82C691_0:
+                       agp_bridge.type = VIA_APOLLO_PRO;
+                       printk(KERN_INFO "agpgart: Detected a VIA Apollo "
+                              "Pro Chipset.\n");
+                       agp_bridge.via_generic_setup();
+                       return;
+
+               default:
+                       if (agp_try_unsupported != 0) {
+                               printk("agpgart: Trying generic VIA routines"
+                                   " for device id: %x\n", dev->device);
+                               agp_bridge.type = VIA_GENERIC;
+                               agp_bridge.via_generic_setup();
+                               return;
+                       } else {
+                               printk("agpgart: Unsupported VIA chipset,"
+                                      " you might want to try "
+                                      "agp_try_unsupported=1.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+               }
+               break;
+#endif
+
+#ifdef CONFIG_AGP_SIS
+       case PCI_VENDOR_ID_SI:
+               switch (dev->device) {
+                       /* ToDo need to find out the
+                        * specific devices supported.
+                        */
+               default:
+                       if (agp_try_unsupported != 0) {
+                               printk("agpgart: Trying generic SiS routines"
+                                   " for device id: %x\n", dev->device);
+                               agp_bridge.type = SIS_GENERIC;
+                               agp_bridge.sis_generic_setup();
+                               return;
+                       } else {
+                               printk("agpgart: Unsupported SiS chipset, "
+                                      "you might want to try "
+                                      "agp_try_unsupported=1.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+               }
+               break;
+#endif
+
+#ifdef CONFIG_AGP_AMD
+       case PCI_VENDOR_ID_AMD:
+               switch (dev->device) {
+               case PCI_DEVICE_ID_AMD_IRONGATE_0:
+                       agp_bridge.type = AMD_IRONGATE;
+                       printk(KERN_INFO "agpgart: Detected an AMD Irongate"
+                              " Chipset.\n");
+                       agp_bridge.amd_irongate_setup();
+                       return;
+
+               default:
+                       if (agp_try_unsupported != 0) {
+                               printk("agpgart: Trying Amd irongate"
+                                      " routines for device id: %x\n",
+                                      dev->device);
+                               agp_bridge.type = AMD_GENERIC;
+                               agp_bridge.amd_irongate_setup();
+                               return;
+                       } else {
+                               printk("agpgart: Unsupported Amd chipset,"
+                                      " you might want to try "
+                                      "agp_try_unsupported=1.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+               }
+               break;
+#endif
+
+#ifdef CONFIG_AGP_ALI
+       case PCI_VENDOR_ID_AL:
+               switch (dev->device) {
+               case PCI_DEVICE_ID_AL_M1541_0:
+                       agp_bridge.type = ALI_M1541;
+                       printk(KERN_INFO "agpgart: Detected an ALi M1541"
+                              " Chipset\n");
+                       agp_bridge.ali_generic_setup();
+                       return;
+               default:
+                       if (agp_try_unsupported != 0) {
+                               printk("agpgart: Trying ALi generic routines"
+                                   " for device id: %x\n", dev->device);
+                               agp_bridge.type = ALI_GENERIC;
+                               agp_bridge.ali_generic_setup();
+                               return;
+                       } else {
+                               printk("agpgart: Unsupported ALi chipset,"
+                                      " you might want to type "
+                                      "agp_try_unsupported=1.\n");
+                               agp_bridge.type = NOT_SUPPORTED;
+                               return;
+                       }
+               }
+               break;
+#endif
+       default:
+               agp_bridge.type = NOT_SUPPORTED;
+               return;
+       }
+}
+
+/* One interpolation point for agp_find_max(): at 'mem' MB of main
+ * memory, allow up to 'agp' MB to be used for AGP memory. */
+struct agp_max_table {
+       int mem;
+       int agp;
+};
+
+/* Must stay sorted by .mem ascending; agp_find_max() interpolates
+ * linearly between adjacent entries. */
+static struct agp_max_table maxes_table[9] =
+{
+       {0, 0},
+       {32, 4},
+       {64, 28},
+       {128, 96},
+       {256, 204},
+       {512, 440},
+       {1024, 942},
+       {2048, 1920},
+       {4096, 3932}
+};
+
+/*
+ * Decide how much main memory may be used for AGP memory by linear
+ * interpolation in maxes_table.  Returns the limit in 4K pages.
+ *
+ * Reworked to use integer arithmetic only: the previous version used a
+ * 'float' local, which must not appear in kernel code (FPU state is not
+ * saved on kernel paths), and its interpolation factor was computed
+ * with integer division, truncating to 0 before the multiply.  It also
+ * indexed maxes_table[-1] when memory == 0, and 'result * 0x100000'
+ * overflowed int for the largest table entries.
+ */
+static int agp_find_max(void)
+{
+       int memory;
+       int index;
+       int result;
+
+       memory = virt_to_phys(high_memory) / 0x100000;
+
+       /* Start at 1 so that index - 1 below is always a valid entry. */
+       index = 1;
+       while ((memory > maxes_table[index].mem) && (index < 8))
+               index++;
+
+       /* Integer linear interpolation between the two bracketing
+        * entries; multiply before dividing to keep precision. */
+       result = maxes_table[index - 1].agp +
+           ((maxes_table[index].agp - maxes_table[index - 1].agp) *
+            (memory - maxes_table[index - 1].mem)) /
+           (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+       printk(KERN_INFO "agpgart: Maximum main memory to use "
+              "for agp memory: %dM\n", result);
+       /* MB -> 4K pages; 0x100000 / 4096 == 256, computed this way to
+        * avoid the intermediate overflow of result * 0x100000. */
+       result = result * (0x100000 / 4096);
+       return result;
+}
+
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 99
+
+/* Backend version reported to the frontend via agp_bridge.version. */
+static agp_version agp_current_version =
+{
+       AGPGART_VERSION_MAJOR,
+       AGPGART_VERSION_MINOR
+};
+
+/*
+ * Bring the backend up: reset agp_bridge, register the configured
+ * chipset setup routines, detect the host bridge, then allocate the
+ * scratch page (if the chipset needs one), build the GATT, allocate the
+ * key lists and configure the chipset.  Returns 0 on success or a
+ * negative errno.
+ *
+ * Fixes: "detrimine" typo in the aperture-size error message; the
+ * NOT_SUPPORTED check is now made before the scratch page is allocated,
+ * so no chipset callback is invoked for an unsupported bridge.
+ */
+static int agp_backend_initialize(void)
+{
+       int size_value;
+
+       memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
+       agp_bridge.type = NOT_SUPPORTED;
+#ifdef CONFIG_AGP_INTEL
+       agp_bridge.intel_generic_setup = intel_generic_setup;
+#endif
+#ifdef CONFIG_AGP_I810
+       agp_bridge.intel_i810_setup = intel_i810_setup;
+#endif
+#ifdef CONFIG_AGP_VIA
+       agp_bridge.via_generic_setup = via_generic_setup;
+#endif
+#ifdef CONFIG_AGP_SIS
+       agp_bridge.sis_generic_setup = sis_generic_setup;
+#endif
+#ifdef CONFIG_AGP_AMD
+       agp_bridge.amd_irongate_setup = amd_irongate_setup;
+#endif
+#ifdef CONFIG_AGP_ALI
+       agp_bridge.ali_generic_setup = ali_generic_setup;
+#endif
+       agp_bridge.max_memory_agp = agp_find_max();
+       agp_bridge.version = &agp_current_version;
+       agp_find_supported_device();
+
+       /* Bail out before touching any chipset callbacks. */
+       if (agp_bridge.type == NOT_SUPPORTED) {
+               printk("agpgart: no supported devices found.\n");
+               return -EINVAL;
+       }
+       if (agp_bridge.needs_scratch_page == TRUE) {
+               agp_bridge.scratch_page = agp_alloc_page();
+
+               if (agp_bridge.scratch_page == 0) {
+                       printk("agpgart: unable to get memory for "
+                              "scratch page.\n");
+                       return -ENOMEM;
+               }
+               /* Store the page as a masked bus address, ready to be
+                * written into GATT entries. */
+               agp_bridge.scratch_page =
+                   virt_to_phys((void *) agp_bridge.scratch_page);
+               agp_bridge.scratch_page =
+                   agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
+       }
+       /* NOTE(review): on the failure paths below the scratch page (if
+        * allocated) is never freed -- confirm and fix separately. */
+       size_value = agp_bridge.fetch_size();
+
+       if (size_value == 0) {
+               printk("agpgart: unable to determine aperture size.\n");
+               return -EINVAL;
+       }
+       if (agp_bridge.create_gatt_table()) {
+               printk("agpgart: unable to get memory for graphics "
+                      "translation table.\n");
+               return -ENOMEM;
+       }
+       agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
+
+       if (agp_bridge.key_list == NULL) {
+               printk("agpgart: error allocating memory for key lists.\n");
+               agp_bridge.free_gatt_table();
+               return -ENOMEM;
+       }
+       memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
+
+       if (agp_bridge.configure()) {
+               printk("agpgart: error configuring host chipset.\n");
+               agp_bridge.free_gatt_table();
+               vfree(agp_bridge.key_list);
+               return -EINVAL;
+       }
+       printk(KERN_INFO "agpgart: Physical address of the agp aperture:"
+              " 0x%lx\n", agp_bridge.gart_bus_addr);
+       printk(KERN_INFO "agpgart: Agp aperture is %dM in size.\n",
+              size_value);
+       return 0;
+}
+
+/*
+ * Tear down the backend: chipset-specific cleanup, free the GATT and
+ * key lists, and release the scratch page (after stripping the mask
+ * bits that mask_memory() OR'ed into its address).
+ */
+static void agp_backend_cleanup(void)
+{
+       agp_bridge.cleanup();
+       agp_bridge.free_gatt_table();
+       vfree(agp_bridge.key_list);
+
+       if (agp_bridge.needs_scratch_page == TRUE) {
+               agp_bridge.scratch_page &= ~(0x00000fff);
+               agp_destroy_page((unsigned long)
+                                phys_to_virt(agp_bridge.scratch_page));
+       }
+}
+
+extern int agp_frontend_initialize(void);
+extern void agp_frontend_cleanup(void);
+
+/*
+ * Module entry point: initialize the backend, then the frontend.
+ * The backend is torn down again if frontend initialization fails.
+ */
+static int __init agp_init(void)
+{
+       int ret_val;
+
+       printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
+              AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+       ret_val = agp_backend_initialize();
+
+       if (ret_val != 0) {
+               return ret_val;
+       }
+       ret_val = agp_frontend_initialize();
+
+       if (ret_val != 0) {
+               agp_backend_cleanup();
+               return ret_val;
+       }
+       return 0;
+}
+
+/* Module exit point: tear down frontend and backend, in reverse order
+ * of initialization. */
+static void __exit agp_cleanup(void)
+{
+       agp_frontend_cleanup();
+       agp_backend_cleanup();
+}
+
+module_init(agp_init);
+module_exit(agp_cleanup);
index 97d40b19474b8418a8317c960aaad1595875439a..54ad294e7dd92292b6104e6de5cbd6bdb64cfa1e 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * AGPGART module frontend version 0.99
  * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,7 +25,6 @@
  */
 
 #define __NO_VERSION__
-#include <linux/config.h>
 #include <linux/version.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -44,7 +43,6 @@
 #include <linux/agpgart.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
-#include <asm/system.h>
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/mman.h>
@@ -187,7 +185,8 @@ static int agp_create_segment(agp_client * client, agp_region * region)
        agp_segment *user_seg;
        int i;
 
-       seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), GFP_KERNEL);
+       seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count),
+                     GFP_KERNEL);
        if (seg == NULL) {
                kfree(region->seg_list);
                return -ENOMEM;
@@ -373,8 +372,8 @@ static void agp_remove_all_clients(agp_controller * controller)
                priv = agp_find_private(temp->pid);
 
                if (priv != NULL) {
-                       clear_bit(AGP_FF_IS_VALID, &(priv->access_flags));
-                       clear_bit(AGP_FF_IS_CLIENT, &(priv->access_flags));
+                       clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+                       clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
                }
                client = client->next;
                kfree(temp);
@@ -439,8 +438,8 @@ static void agp_controller_make_current(agp_controller * controller)
                priv = agp_find_private(clients->pid);
 
                if (priv != NULL) {
-                       set_bit(AGP_FF_IS_VALID, &(priv->access_flags));
-                       set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags));
+                       set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+                       set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
                }
                clients = clients->next;
        }
@@ -453,7 +452,7 @@ static void agp_controller_release_current(agp_controller * controller,
 {
        agp_client *clients;
 
-       clear_bit(AGP_FF_IS_VALID, &(controller_priv->access_flags));
+       clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
        clients = controller->clients;
 
        while (clients != NULL) {
@@ -462,7 +461,7 @@ static void agp_controller_release_current(agp_controller * controller,
                priv = agp_find_private(clients->pid);
 
                if (priv != NULL) {
-                       clear_bit(AGP_FF_IS_VALID, &(priv->access_flags));
+                       clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
                }
                clients = clients->next;
        }
@@ -610,7 +609,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
                AGP_UNLOCK();
                return -EPERM;
        }
-       if (!(test_bit(AGP_FF_IS_VALID, &(priv->access_flags)))) {
+       if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) {
                AGP_UNLOCK();
                return -EPERM;
        }
@@ -620,7 +619,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
        current_size = current_size * 0x100000;
        offset = vma->vm_pgoff << PAGE_SHIFT;
 
-       if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) {
+       if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
                if ((size + offset) > current_size) {
                        AGP_UNLOCK();
                        return -EINVAL;
@@ -631,11 +630,13 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
                        AGP_UNLOCK();
                        return -EPERM;
                }
-               if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) {
+               if (!agp_find_seg_in_client(client, offset,
+                                           size, vma->vm_page_prot)) {
                        AGP_UNLOCK();
                        return -EINVAL;
                }
-               if (remap_page_range(vma->vm_start, (kerninfo.aper_base + offset),
+               if (remap_page_range(vma->vm_start,
+                                    (kerninfo.aper_base + offset),
                                     size, vma->vm_page_prot)) {
                        AGP_UNLOCK();
                        return -EAGAIN;
@@ -643,7 +644,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
                AGP_UNLOCK();
                return 0;
        }
-       if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) {
+       if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
                if (size != current_size) {
                        AGP_UNLOCK();
                        return -EINVAL;
@@ -666,19 +667,20 @@ static int agp_release(struct inode *inode, struct file *file)
 
        AGP_LOCK();
 
-       if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) {
+       if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
                agp_controller *controller;
 
                controller = agp_find_controller_by_pid(priv->my_pid);
 
                if (controller != NULL) {
                        if (controller == agp_fe.current_controller) {
-                               agp_controller_release_current(controller, priv);
+                               agp_controller_release_current(controller,
+                                                              priv);
                        }
                        agp_remove_controller(controller);
                }
        }
-       if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) {
+       if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
                agp_remove_client(priv->my_pid);
        }
        agp_remove_file_private(priv);
@@ -707,18 +709,18 @@ static int agp_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        }
        memset(priv, 0, sizeof(agp_file_private));
-       set_bit(AGP_FF_ALLOW_CLIENT, &(priv->access_flags));
+       set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
        priv->my_pid = current->pid;
 
        if ((current->uid == 0) || (current->suid == 0)) {
                /* Root priv, can be controller */
-               set_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags));
+               set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
        }
        client = agp_find_client_by_pid(current->pid);
 
        if (client != NULL) {
-               set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags));
-               set_bit(AGP_FF_IS_VALID, &(priv->access_flags));
+               set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+               set_bit(AGP_FF_IS_VALID, &priv->access_flags);
        }
        file->private_data = (void *) priv;
        agp_insert_file_private(priv);
@@ -754,7 +756,8 @@ static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg)
 
        userinfo.version.major = kerninfo.version.major;
        userinfo.version.minor = kerninfo.version.minor;
-       userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16);
+       userinfo.bridge_id = kerninfo.device->vendor |
+           (kerninfo.device->device << 16);
        userinfo.agp_mode = kerninfo.mode;
        userinfo.aper_base = kerninfo.aper_base;
        userinfo.aper_size = kerninfo.aper_size;
@@ -770,7 +773,7 @@ static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg)
 static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg)
 {
        agp_controller *controller;
-       if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)))) {
+       if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) {
                return -EPERM;
        }
        if (agp_fe.current_controller != NULL) {
@@ -798,8 +801,8 @@ static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg)
                agp_controller_make_current(controller);
        }
 
-       set_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags));
-       set_bit(AGP_FF_IS_VALID, &(priv->access_flags));
+       set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+       set_bit(AGP_FF_IS_VALID, &priv->access_flags);
        return 0;
 }
 
@@ -837,8 +840,10 @@ static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg)
                client_priv = agp_find_private(reserve.pid);
 
                if (client_priv != NULL) {
-                       set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags));
-                       set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags));
+                       set_bit(AGP_FF_IS_CLIENT,
+                               &client_priv->access_flags);
+                       set_bit(AGP_FF_IS_VALID,
+                               &client_priv->access_flags);
                }
                if (client == NULL) {
                        /* client is already removed */
@@ -848,12 +853,14 @@ static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg)
        } else {
                agp_segment *segment;
 
-               segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), GFP_KERNEL);
+               segment = kmalloc((sizeof(agp_segment) * reserve.seg_count),
+                                 GFP_KERNEL);
 
                if (segment == NULL) {
                        return -ENOMEM;
                }
-               if (copy_from_user(segment, (void *) reserve.seg_list, GFP_KERNEL)) {
+               if (copy_from_user(segment, (void *) reserve.seg_list,
+                                  GFP_KERNEL)) {
                        kfree(segment);
                        return -EFAULT;
                }
@@ -870,8 +877,10 @@ static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg)
                        client_priv = agp_find_private(reserve.pid);
 
                        if (client_priv != NULL) {
-                               set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags));
-                               set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags));
+                               set_bit(AGP_FF_IS_CLIENT,
+                                       &client_priv->access_flags);
+                               set_bit(AGP_FF_IS_VALID,
+                                       &client_priv->access_flags);
                        }
                        return agp_create_segment(client, &reserve);
                } else {
@@ -972,10 +981,12 @@ static int agp_ioctl(struct inode *inode, struct file *file,
                return -EBUSY;
        }
        if (cmd != AGPIOC_ACQUIRE) {
-               if (!(test_bit(AGP_FF_IS_CONTROLLER, &(curr_priv->access_flags)))) {
+               if (!(test_bit(AGP_FF_IS_CONTROLLER,
+                              &curr_priv->access_flags))) {
                        return -EPERM;
                }
-               /* Use the original pid of the controller, in case it's threaded */
+               /* Use the original pid of the controller,
+                * in case it's threaded */
 
                if (agp_fe.current_controller->pid != curr_priv->my_pid) {
                        return -EBUSY;
index 5e9d098696f6f476cb546e58278078f2520fc934..fce2df7ec942078d46116e0356b8320e733ce753 100644 (file)
@@ -116,7 +116,6 @@ typedef struct wait_queue *wait_queue_head_t;
 #endif
 
                                /* Generic cmpxchg added in 2.3.x */
-#if CPU != 386
 #ifndef __HAVE_ARCH_CMPXCHG
                                /* Include this here so that driver can be
                                    used with older kernels. */
@@ -150,10 +149,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define cmpxchg(ptr,o,n)                                               \
   ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),             \
                                 (unsigned long)(n),sizeof(*(ptr))))
-#endif
-#else
-                               /* Compiling for a 386 proper... */
-#error DRI not supported on Intel 80386
 #endif
 
                                /* Macros to make printk easier */
@@ -468,6 +463,7 @@ typedef struct drm_device {
                                /* Misc. support (init.c) */
 extern int          drm_flags;
 extern void         drm_parse_options(char *s);
+extern int           drm_cpu_valid(void);
 
 
                                /* Device support (fops.c) */
index 47eacb833b9e4df8370a94d4dc59895274a6cd31..24b17356bdd451ab47e299ca5932b2aeb913fb63 100644 (file)
@@ -40,6 +40,7 @@ int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev)
        drm_file_t   *priv;
 
        if (filp->f_flags & O_EXCL)   return -EBUSY; /* No exclusive opens */
+       if (!drm_cpu_valid())         return -EINVAL;
 
        DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
 
index 340ba8f5a87ecca57ca65ba369b871207ac1ff12..e6b78395c120c167516544767813abc7fa56253b 100644 (file)
@@ -97,3 +97,13 @@ void drm_parse_options(char *s)
        }
 }
 
+/* drm_cpu_valid returns non-zero if the DRI will run on this CPU, and 0
+ * otherwise. */
+
+int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+       if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */
+#endif
+       return 1;
+}
index cb3f1e0dea6aa5e8c62e7d2e26d973f709ee28ed..5feeeef8028db5e7ed930c3dbeee48c07f7043ef 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * linux/drivers/char/synclink.c
  *
- * ==FILEDATE 19990901==
+ * ==FILEDATE 19991207==
  *
  * Device driver for Microgate SyncLink ISA and PCI
  * high speed multiprotocol serial adapters.
@@ -925,7 +925,7 @@ MODULE_PARM(maxframe,"1-" __MODULE_STRING(MAX_TOTAL_DEVICES) "i");
 #endif
 
 static char *driver_name = "SyncLink serial driver";
-static char *driver_version = "1.14";
+static char *driver_version = "1.15";
 
 static struct tty_driver serial_driver, callout_driver;
 static int serial_refcount;
@@ -6981,7 +6981,6 @@ BOOLEAN mgsl_register_test( struct mgsl_struct *info )
 
        spin_lock_irqsave(&info->irq_spinlock,flags);
        usc_reset(info);
-       spin_unlock_irqrestore(&info->irq_spinlock,flags);
 
        /* Verify the reset state of some registers. */
 
@@ -7015,7 +7014,6 @@ BOOLEAN mgsl_register_test( struct mgsl_struct *info )
                }
        }
 
-       spin_lock_irqsave(&info->irq_spinlock,flags);
        usc_reset(info);
        spin_unlock_irqrestore(&info->irq_spinlock,flags);
 
@@ -7035,7 +7033,6 @@ BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
 
        spin_lock_irqsave(&info->irq_spinlock,flags);
        usc_reset(info);
-       spin_unlock_irqrestore(&info->irq_spinlock,flags);
 
        /*
         * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 
@@ -7057,6 +7054,8 @@ BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
        usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
        usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
 
+       spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
        EndTime=100;
        while( EndTime-- && !info->irq_occurred ) {
                set_current_state(TASK_INTERRUPTIBLE);
@@ -7359,7 +7358,9 @@ BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
                }
        }
 
+       spin_lock_irqsave(&info->irq_spinlock,flags);
        usc_reset( info );
+       spin_unlock_irqrestore(&info->irq_spinlock,flags);
 
        /* restore current port options */
        memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
index 60412f5871ad6edbc8d049ab67da8855b1d9c114..f83ecbe9bf6f6e2cd94e7e455c1ba6d8260a1a16 100644 (file)
@@ -118,7 +118,7 @@ static int i2ob_timer_started = 0;
 
 static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int);
 static void i2ob_end_request(struct request *);
-static void i2ob_request(void);
+static void i2ob_request(request_queue_t * q);
 
 /*
  * Dump messages.
@@ -135,7 +135,6 @@ static void i2ob_dump_msg(struct i2ob_device *dev,u32 *msg,int size)
         printk(KERN_INFO "\n");
 }
 
-
 /*
  *     Get a message
  */
@@ -154,8 +153,8 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq,
 {
        struct i2o_controller *c = dev->controller;
        int tid = dev->tid;
-       u32 *msg;
-       u32 *mptr;
+       unsigned long msg;
+       unsigned long mptr;
        u64 offset;
        struct request *req = ireq->req;
        struct buffer_head *bh = req->bh;
@@ -167,22 +166,22 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq,
        /*
          * Build the message based on the request.
         */
-       __raw_writel(i2ob_context|(unit<<8), &msg[2]);
-       __raw_writel(ireq->num, &msg[3]);
-       __raw_writel(req->nr_sectors << 9, &msg[5]);
+       __raw_writel(i2ob_context|(unit<<8), msg+8);
+       __raw_writel(ireq->num, msg+12);
+       __raw_writel(req->nr_sectors << 9, msg+20);
        
        /* This can be optimised later - just want to be sure its right for
           starters */
        offset = ((u64)(req->sector+base)) << 9;
-       __raw_writel( offset & 0xFFFFFFFF, &msg[6]);
-       __raw_writel(offset>>32, &msg[7]);
+       __raw_writel( offset & 0xFFFFFFFF, msg+24);
+       __raw_writel(offset>>32, msg+28);
        mptr=msg+8;
        
        if(req->cmd == READ)
        {
-               __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, &msg[1]);
+               __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4);
                /* We don't yet do cache/readahead and other magic */
-               __raw_writel(1<<16, &msg[4]);
+               __raw_writel(1<<16, msg+16);
                while(bh!=NULL)
                {
                        /*
@@ -191,31 +190,33 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq,
                         *      sucky to read.
                         */
                        if(bh->b_reqnext)
-                               __raw_writel(0x10000000|(bh->b_size), mptr++);
+                               __raw_writel(0x10000000|(bh->b_size), mptr);
                        else
-                               __raw_writel(0xD0000000|(bh->b_size), mptr++);
+                               __raw_writel(0xD0000000|(bh->b_size), mptr);
        
-                       __raw_writel(virt_to_bus(bh->b_data), mptr++);
+                       __raw_writel(virt_to_bus(bh->b_data), mptr+4);
+                       mptr+=8;
                        count -= bh->b_size;
                        bh = bh->b_reqnext;
                }
        }
        else if(req->cmd == WRITE)
        {
-               __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, &msg[1]);
-               __raw_writel(1<<16, &msg[4]);
+               __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4);
+               __raw_writel(1<<16, msg+16);
                while(bh!=NULL)
                {
                        if(bh->b_reqnext)
-                               __raw_writel(0x14000000|(bh->b_size), mptr++);
+                               __raw_writel(0x14000000|(bh->b_size), mptr);
                        else
-                               __raw_writel(0xD4000000|(bh->b_size), mptr++);
+                               __raw_writel(0xD4000000|(bh->b_size), mptr);
                        count -= bh->b_size;
-                       __raw_writel(virt_to_bus(bh->b_data), mptr++);
+                       __raw_writel(virt_to_bus(bh->b_data), mptr+4);
+                       mptr+=8;
                        bh = bh->b_reqnext;
                }
        }
-       __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8, &msg[0]);
+       __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8, msg);
        
        if(req->current_nr_sectors > 8)
                printk("Gathered sectors %ld.\n", 
@@ -223,8 +224,7 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq,
                        
        if(count != 0)
        {
-               printk("Request count botched by %d.\n", count);
-               msg[5] -= count;
+               printk(KERN_ERR "Request count botched by %d.\n", count);
        }
 
        i2o_post_message(c,m);
@@ -399,7 +399,7 @@ static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, str
         */
         
        atomic_dec(&queue_depth);
-       i2ob_request();
+       i2ob_request(NULL);
        spin_unlock_irqrestore(&io_request_lock, flags);
 }
 
@@ -437,7 +437,7 @@ static void i2ob_timer_handler(unsigned long dummy)
        /* 
         * Restart any requests.
         */
-       i2ob_request();
+       i2ob_request(NULL);
 
        /* 
         * Free the lock.
@@ -453,7 +453,7 @@ static void i2ob_timer_handler(unsigned long dummy)
  *     we use it.
  */
 
-static void i2ob_request(void)
+static void i2ob_request(request_queue_t * q)
 {
        struct request *req;
        struct i2ob_request *ireq;
@@ -527,7 +527,6 @@ static void i2ob_request(void)
        }
 }
 
-
 /*
  *     SCSI-CAM for ioctl geometry mapping
  *     Duplicated with SCSI - this should be moved into somewhere common
@@ -1086,7 +1085,9 @@ int i2o_block_init(void)
        blk_size[MAJOR_NR] = i2ob_sizes;
        max_sectors[MAJOR_NR] = i2ob_max_sectors;
        
-       blk_dev[MAJOR_NR].request_fn = i2ob_request;
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), i2ob_request);
+       blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
+
        for (i = 0; i < MAX_I2OB << 4; i++) {
                i2ob_dev[i].refcnt = 0;
                i2ob_dev[i].flags = 0;
index 53da1d861c6566171a4b39eb9d37b3615d3c2c7a..00ae312fa9b1270ac9d624f1478f97da0bf3572c 100644 (file)
@@ -31,6 +31,8 @@
  *                                             new PCI BIOS interface.
  *     Alan Cox        <alan@redhat.com>:      Fixed the out of memory
  *                                             handling.
+ *      
+ *     Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
  *
  ********************************************************************/
 
@@ -468,9 +470,6 @@ extern int tlan_probe( struct net_device *dev )
 
        priv = (TLanPrivateInfo *) dev->priv;
 
-       dev->name = priv->devName;
-       strcpy( priv->devName, "    " );
-
        dev = init_etherdev( dev, sizeof(TLanPrivateInfo) );
 
        dev->base_addr = io_base;
@@ -489,7 +488,7 @@ extern int tlan_probe( struct net_device *dev )
        }
        priv->sa_int =     dev->mem_start & 0x02;
        priv->debug =      dev->mem_end;
-
+       spin_lock_init(&priv->lock);
 
        printk("TLAN %d.%d:  %s irq=%2d io=%04x, %s, Rev. %d\n",
                TLanVersionMajor,
index 7a49663d63510cb960487ae579c107af9e64c2e3..4ae1cf2789742bda4fae3affcca3c0b7b117a4a6 100644 (file)
  *
  ** This file is best viewed/edited with tabstop=4, colums>=132
  *
+ *  
+ *  Dec 10, 1999       Torben Mathiasen <torben.mathiasen@compaq.com>
+ *                     New Maintainer
+ *
  ********************************************************************/
 
 
index 85cdfb77448baf7ae993874c6ed5eeb442788856..64ce20902d37a49d6865fd1061b53f9eed56452d 100644 (file)
@@ -238,7 +238,7 @@ static void __init HWPrtChanID (__u32 pcid, short stride)
 {
        short i, j;
        for (i=0, j=0; i<24; i++, j+=stride)
-               printk("%1x", ((int)readb(pcid + j)) & 0x0f);
+               printk("%1x", ((int)isa_readb(pcid + j)) & 0x0f);
        printk("\n");
 }
 
@@ -267,14 +267,8 @@ int __init ibmtr_probe(struct net_device *dev)
                 */
                 
                if (ibmtr_probe1(dev, base_addr)) 
-               {
-#ifndef MODULE
-#ifndef PCMCIA
-                      tr_freedev(dev);
-#endif
-#endif
                       return -ENODEV;
-               else
+               else
                       return 0;
        }
         else if (base_addr != 0)   /* Don't probe at all. */
@@ -285,13 +279,7 @@ int __init ibmtr_probe(struct net_device *dev)
                int ioaddr = ibmtr_portlist[i];
                if (check_region(ioaddr, IBMTR_IO_EXTENT))
                        continue;
-                if (ibmtr_probe1(dev, ioaddr)) {
-#ifndef MODULE
-#ifndef PCMCIA
-                       tr_freedev(dev);
-#endif
-#endif
-               } else
+               if (!ibmtr_probe1(dev, ioaddr))
                        return 0;
         }
 
@@ -351,7 +339,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
         *      Suboptimize knowing first byte different
         */
 
-       ctemp = readb(cd_chanid) & 0x0f;
+       ctemp = isa_readb(cd_chanid) & 0x0f;
        if (ctemp != *tchanid) { /* NOT ISA card, try MCA */
                tchanid=mcchannelid;
                cardpresent=TR_MCA;
@@ -366,7 +354,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
                 */
                for (i=2,j=1; i<=46; i=i+2,j++) 
                {
-                       if ((readb(cd_chanid+i) & 0x0f) != tchanid[j]) {
+                       if ((isa_readb(cd_chanid+i) & 0x0f) != tchanid[j]) {
                                cardpresent=NOTOK;   /* match failed, not TR card */
                                break;
                        }
@@ -378,7 +366,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
         *      as it has different IRQ settings 
         */
 
-       if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio)==0x0e))
+       if (cardpresent == TR_ISA && (isa_readb(AIPFID + t_mmio)==0x0e))
                cardpresent=TR_ISAPNP;
 
        if (cardpresent == NOTOK) { /* "channel_id" did not match, report */
@@ -461,14 +449,14 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
                        if (intr==3)
                                irq=11;
                        timeout = jiffies + TR_SPIN_INTERVAL;
-                       while(!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN))
+                       while(!isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN))
                                if (time_after(jiffies, timeout)) {
                                        DPRINTK("Hardware timeout during initialization.\n");
                                        kfree_s(ti, sizeof(struct tok_info));
                                        return -ENODEV;
                                }
 
-                       ti->sram=((__u32)readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12);
+                       ti->sram=((__u32)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12);
                        ti->global_int_enable=PIOaddr+ADAPTINTREL;
                        ti->adapter_int_enable=PIOaddr+ADAPTINTREL;
                        break;
@@ -492,7 +480,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
        for (i=0; i<0x18; i=i+2) 
        {
                /* technical reference states to do this */
-               temp = readb(ti->mmio + AIP + i) & 0x0f;
+               temp = isa_readb(ti->mmio + AIP + i) & 0x0f;
 #if !TR_NEWFORMAT
                printk("%1X",ti->hw_address[j]=temp);
 #else
@@ -507,13 +495,13 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
 #endif
 
        /* get Adapter type:  'F' = Adapter/A, 'E' = 16/4 Adapter II,...*/
-       ti->adapter_type = readb(ti->mmio + AIPADAPTYPE);
+       ti->adapter_type = isa_readb(ti->mmio + AIPADAPTYPE);
 
        /* get Data Rate:  F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */
-       ti->data_rate = readb(ti->mmio + AIPDATARATE);
+       ti->data_rate = isa_readb(ti->mmio + AIPDATARATE);
 
        /* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */
-       ti->token_release = readb(ti->mmio + AIPEARLYTOKEN);
+       ti->token_release = isa_readb(ti->mmio + AIPEARLYTOKEN);
 
        /* How much shared RAM is on adapter ? */
 #ifdef PCMCIA
@@ -524,10 +512,10 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
 #endif
        /* We need to set or do a bunch of work here based on previous results.. */
        /* Support paging?  What sizes?:  F=no, E=16k, D=32k, C=16 & 32k */
-       ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE);
+       ti->shared_ram_paging = isa_readb(ti->mmio + AIPSHRAMPAGE);
 
         /* Available DHB  4Mb size:   F=2048, E=4096, D=4464 */
-       switch (readb(ti->mmio + AIP4MBDHB)) {
+       switch (isa_readb(ti->mmio + AIP4MBDHB)) {
        case 0xe : 
                ti->dhb_size4mb = 4096;
                break; 
@@ -540,7 +528,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
        }
 
        /* Available DHB 16Mb size:  F=2048, E=4096, D=8192, C=16384, B=17960 */
-       switch (readb(ti->mmio + AIP16MBDHB)) {
+       switch (isa_readb(ti->mmio + AIP16MBDHB)) {
        case 0xe : 
                ti->dhb_size16mb = 4096;
                break; 
@@ -576,7 +564,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
        /*
         *      determine how much of total RAM is mapped into PC space 
         */
-       ti->mapped_ram_size=1<<((((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4);
+       ti->mapped_ram_size=1<<((((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4);
        ti->page_mask=0;
        if (ti->shared_ram_paging == 0xf) { /* No paging in adapter */
                ti->mapped_ram_size = ti->avail_shared_ram;
@@ -635,7 +623,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr)
                static __u32 ram_bndry_mask[]={0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000};
                __u32 new_base, rrr_32, chk_base, rbm;
 
-               rrr_32 = ((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003;
+               rrr_32 = ((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003;
                rbm = ram_bndry_mask[rrr_32];
                new_base = (ibmtr_mem_base + (~rbm)) & rbm; /* up to boundary */
                chk_base = new_base + (ti->mapped_ram_size<<9);
@@ -765,11 +753,11 @@ static unsigned char __init get_sram_size(struct tok_info *adapt_info)
           'B' - 64KB less 512 bytes at top
           (WARNING ... must zero top bytes in INIT */
 
-       avail_sram_code=0xf-readb(adapt_info->mmio + AIPAVAILSHRAM);
+       avail_sram_code=0xf-isa_readb(adapt_info->mmio + AIPAVAILSHRAM);
        if (avail_sram_code)
                return size_code[avail_sram_code];
        else  /* for code 'F', must compute size from RRR(3,2) bits */
-               return 1<<((readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4);
+               return 1<<((isa_readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4);
 }
 
 static int __init trdev_init(struct net_device *dev)
@@ -816,20 +804,20 @@ static void tok_set_multicast_list(struct net_device *dev)
        }
        SET_PAGE(ti->srb);
        for (i=0; i<sizeof(struct srb_set_funct_addr); i++)
-               writeb(0, ti->srb+i);
+               isa_writeb(0, ti->srb+i);
 
-       writeb(DIR_SET_FUNC_ADDR, 
+       isa_writeb(DIR_SET_FUNC_ADDR, 
                ti->srb + offsetof(struct srb_set_funct_addr, command));
 
        DPRINTK("Setting functional address: ");
 
        for (i=0; i<4; i++)
        {
-               writeb(address[i], 
+               isa_writeb(address[i], 
                ti->srb + offsetof(struct srb_set_funct_addr, funct_address)+i);
                printk("%02X ", address[i]);
        }
-       writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+       isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
        printk("\n");
 }
 
@@ -838,7 +826,7 @@ static int tok_open(struct net_device *dev)
        struct tok_info *ti=(struct tok_info *)dev->priv;
 
        /* init the spinlock */
-       ti->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+       spin_lock_init(&ti->lock);
 
        if (ti->open_status==CLOSED) tok_init_card(dev);
 
@@ -862,17 +850,17 @@ static int tok_close(struct net_device *dev)
 
        struct tok_info *ti=(struct tok_info *) dev->priv;
 
-       writeb(DIR_CLOSE_ADAPTER,
+       isa_writeb(DIR_CLOSE_ADAPTER,
               ti->srb + offsetof(struct srb_close_adapter, command));
-       writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+       isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
 
        ti->open_status=CLOSED;
 
        sleep_on(&ti->wait_for_tok_int);
 
-       if (readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)))
+       if (isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)))
                DPRINTK("close adapter failed: %02X\n",
-                       (int)readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)));
+                       (int)isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)));
 
         dev->start = 0;
 #ifdef PCMCIA
@@ -899,7 +887,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 
        /* Disable interrupts till processing is finished */
        dev->interrupt=1;
-       writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
+       isa_writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
 
        /* Reset interrupt for ISA boards */
         if (ti->adapter_int_enable)
@@ -916,7 +904,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                    the extra levels of logic and call depth for the
                    original solution.   */
 
-               status=readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
+               status=isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
 #ifdef PCMCIA
                /* Check if the PCMCIA card was pulled. */
                if (status == 0xFF)
@@ -928,7 +916,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                        }
 
                /* Check ISRP EVEN too. */
-               if ( readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF)
+               if ( isa_readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF)
                {
                         DPRINTK("PCMCIA card removed.\n");
                         spin_unlock(&(ti->lock));
@@ -943,26 +931,26 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                        int i;
                        __u32 check_reason;
 
-                       check_reason=ti->mmio + ntohs(readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN));
+                       check_reason=ti->mmio + ntohs(isa_readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN));
 
                        DPRINTK("Adapter check interrupt\n");
                        DPRINTK("8 reason bytes follow: ");
                        for(i=0; i<8; i++, check_reason++)
-                               printk("%02X ", (int)readb(check_reason));
+                               printk("%02X ", (int)isa_readb(check_reason));
                        printk("\n");
 
-                       writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
-                       writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET  + ISRP_EVEN);
+                       isa_writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+                       isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET  + ISRP_EVEN);
                        dev->interrupt=0;
 
-               } else if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
+               } else if (isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
                                 & (TCR_INT | ERR_INT | ACCESS_INT)) {
 
                        DPRINTK("adapter error: ISRP_EVEN : %02x\n",
-                               (int)readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN));
-                       writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
+                               (int)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN));
+                       isa_writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
                               ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
-                       writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET  + ISRP_EVEN);
+                       isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET  + ISRP_EVEN);
                        dev->interrupt=0;
 
                } else if (status
@@ -971,12 +959,12 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 
                        if (status & SRB_RESP_INT) { /* SRB response */
 
-                               switch(readb(ti->srb)) { /* SRB command check */
+                               switch(isa_readb(ti->srb)) { /* SRB command check */
 
                                      case XMIT_DIR_FRAME: {
                                              unsigned char xmit_ret_code;
 
-                                             xmit_ret_code=readb(ti->srb + offsetof(struct srb_xmit, ret_code));
+                                             xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code));
                                              if (xmit_ret_code != 0xff) {
                                                      DPRINTK("error on xmit_dir_frame request: %02X\n",
                                                              xmit_ret_code);
@@ -993,7 +981,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                                      case XMIT_UI_FRAME: {
                                              unsigned char xmit_ret_code;
 
-                                             xmit_ret_code=readb(ti->srb + offsetof(struct srb_xmit, ret_code));
+                                             xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code));
                                              if (xmit_ret_code != 0xff) {
                                                      DPRINTK("error on xmit_ui_frame request: %02X\n",
                                                              xmit_ret_code);
@@ -1011,14 +999,14 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                                              unsigned char open_ret_code;
                                              __u16 open_error_code;
 
-                                             ti->srb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr)));
-                                             ti->ssb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr)));
-                                             ti->arb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr)));
-                                             ti->asb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr)));
+                                             ti->srb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr)));
+                                             ti->ssb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr)));
+                                             ti->arb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr)));
+                                             ti->asb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr)));
                                              ti->current_skb=NULL;
 
-                                             open_ret_code = readb(ti->init_srb +offsetof(struct srb_open_response, ret_code));
-                                             open_error_code = ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, error_code)));
+                                             open_ret_code = isa_readb(ti->init_srb +offsetof(struct srb_open_response, ret_code));
+                                             open_error_code = ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, error_code)));
 
                                              if (open_ret_code==7) {
 
@@ -1049,9 +1037,9 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 #else
                                                      DPRINTK("Adapter initialized and opened.\n");
 #endif
-                                                     writeb(~(SRB_RESP_INT),
+                                                     isa_writeb(~(SRB_RESP_INT),
                                                             ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
-                                                     writeb(~(CMD_IN_SRB),
+                                                     isa_writeb(~(CMD_IN_SRB),
                                                             ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
                                                      open_sap(EXTENDED_SAP,dev);
 
@@ -1073,13 +1061,13 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                                        break;
 
                                      case DLC_OPEN_SAP:
-                                       if (readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) {
+                                       if (isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) {
                                                DPRINTK("open_sap failed: ret_code = %02X,retrying\n",
-                                                       (int)readb(ti->srb+offsetof(struct dlc_open_sap, ret_code)));
+                                                       (int)isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code)));
                                                ibmtr_reset_timer(&(ti->tr_timer), dev);
                                        } else {
                                                ti->exsap_station_id=
-                                                       readw(ti->srb+offsetof(struct dlc_open_sap, station_id));
+                                                       isa_readw(ti->srb+offsetof(struct dlc_open_sap, station_id));
                                                ti->open_status=SUCCESS; /* TR adapter is now available */
                                                wake_up(&ti->wait_for_reset);
                                        }
@@ -1090,16 +1078,16 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                                      case DIR_SET_GRP_ADDR:
                                      case DIR_SET_FUNC_ADDR:
                                      case DLC_CLOSE_SAP:
-                                       if (readb(ti->srb+offsetof(struct srb_interrupt, ret_code)))
+                                       if (isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code)))
                                                DPRINTK("error on %02X: %02X\n",
-                                                       (int)readb(ti->srb+offsetof(struct srb_interrupt, command)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_interrupt, ret_code)));
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, command)),
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code)));
                                        break;
 
                                      case DIR_READ_LOG:
-                                       if (readb(ti->srb+offsetof(struct srb_read_log, ret_code)))
+                                       if (isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code)))
                                                DPRINTK("error on dir_read_log: %02X\n",
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log, ret_code)));
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code)));
                                        else
                                            if (IBMTR_DEBUG_MESSAGES) {
                                                DPRINTK(
@@ -1107,24 +1095,24 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                                                        "A/C errors %02X, Abort delimiters %02X, Lost frames %02X\n"
                                                        "Receive congestion count %02X, Frame copied errors %02X\n"
                                                        "Frequency errors %02X, Token errors %02X\n",
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                    line_errors)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                    internal_errors)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                    burst_errors)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)),
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                    abort_delimiters)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                    lost_frames)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                                    recv_congest_count)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                    frame_copied_errors)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                    frequency_errors)),
-                                                       (int)readb(ti->srb+offsetof(struct srb_read_log,
+                                                       (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
                                                                                                    token_errors)));
                                            }
                                        dev->tbusy=0;
@@ -1132,19 +1120,19 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 
                                      default:
                                        DPRINTK("Unknown command %02X encountered\n",
-                                               (int)readb(ti->srb));
+                                               (int)isa_readb(ti->srb));
 
                                } /* SRB command check */
 
-                               writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
-                               writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+                               isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
+                               isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
 
                          skip_reset:
                        } /* SRB response */
 
                        if (status & ASB_FREE_INT) { /* ASB response */
 
-                               switch(readb(ti->asb)) { /* ASB command check */
+                               switch(isa_readb(ti->asb)) { /* ASB command check */
 
                                      case REC_DATA:
                                      case XMIT_UI_FRAME:
@@ -1153,25 +1141,25 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 
                                      default:
                                        DPRINTK("unknown command in asb %02X\n",
-                                               (int)readb(ti->asb));
+                                               (int)isa_readb(ti->asb));
 
                                } /* ASB command check */
 
-                               if (readb(ti->asb+2)!=0xff) /* checks ret_code */
+                               if (isa_readb(ti->asb+2)!=0xff) /* checks ret_code */
                                    DPRINTK("ASB error %02X in cmd %02X\n",
-                                           (int)readb(ti->asb+2),(int)readb(ti->asb));
-                               writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+                                           (int)isa_readb(ti->asb+2),(int)isa_readb(ti->asb));
+                               isa_writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
 
                        } /* ASB response */
 
                        if (status & ARB_CMD_INT) { /* ARB response */
 
-                               switch (readb(ti->arb)) { /* ARB command check */
+                               switch (isa_readb(ti->arb)) { /* ARB command check */
 
                                      case DLC_STATUS:
                                        DPRINTK("DLC_STATUS new status: %02X on station %02X\n",
-                                               ntohs(readw(ti->arb + offsetof(struct arb_dlc_status, status))),
-                                               ntohs(readw(ti->arb
+                                               ntohs(isa_readw(ti->arb + offsetof(struct arb_dlc_status, status))),
+                                               ntohs(isa_readw(ti->arb
                                                                            +offsetof(struct arb_dlc_status, station_id))));
                                        break;
 
@@ -1182,7 +1170,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
                                      case RING_STAT_CHANGE: {
                                              unsigned short ring_status;
 
-                                             ring_status=ntohs(readw(ti->arb
+                                             ring_status=ntohs(isa_readw(ti->arb
                                                                      +offsetof(struct arb_ring_stat_change, ring_status)));
 
                                              if (ring_status & (SIGNAL_LOSS | LOBE_FAULT)) {
@@ -1209,46 +1197,46 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 
                                      default:
                                        DPRINTK("Unknown command %02X in arb\n",
-                                               (int)readb(ti->arb));
+                                               (int)isa_readb(ti->arb));
                                        break;
 
                                } /* ARB command check */
 
-                               writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
-                               writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+                               isa_writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+                               isa_writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
 
                        } /* ARB response */
 
                        if (status & SSB_RESP_INT) { /* SSB response */
                                unsigned char retcode;
-                               switch (readb(ti->ssb)) { /* SSB command check */
+                               switch (isa_readb(ti->ssb)) { /* SSB command check */
                                      
                                      case XMIT_DIR_FRAME:
                                      case XMIT_UI_FRAME:
-                                       retcode = readb(ti->ssb+2);
+                                       retcode = isa_readb(ti->ssb+2);
                                        if (retcode && (retcode != 0x22)) /* checks ret_code */
                                                DPRINTK("xmit ret_code: %02X xmit error code: %02X\n",
-                                                       (int)retcode, (int)readb(ti->ssb+6));
+                                                       (int)retcode, (int)isa_readb(ti->ssb+6));
                                        else ti->tr_stats.tx_packets++;
                                        break;
 
                                      case XMIT_XID_CMD:
-                                       DPRINTK("xmit xid ret_code: %02X\n", (int)readb(ti->ssb+2));
+                                       DPRINTK("xmit xid ret_code: %02X\n", (int)isa_readb(ti->ssb+2));
 
                                      default:
-                                       DPRINTK("Unknown command %02X in ssb\n", (int)readb(ti->ssb));
+                                       DPRINTK("Unknown command %02X in ssb\n", (int)isa_readb(ti->ssb));
 
                                } /* SSB command check */
 
-                               writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
-                               writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+                               isa_writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+                               isa_writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
 
                        } /* SSB response */
 
                }        /* SRB, ARB, ASB or SSB response */
 
                dev->interrupt=0;
-               writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+               isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
                break;
 
              case FIRST_INT:
@@ -1278,12 +1266,12 @@ static void initial_tok_int(struct net_device *dev)
 
        /* we assign the shared-ram address for ISA devices */
        if(!ti->sram) {
-               writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
+               isa_writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
                ti->sram=((__u32)ti->sram_base << 12);
        }
        ti->init_srb=ti->sram
-               +ntohs((unsigned short)readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN));
-       SET_PAGE(ntohs((unsigned short)readw(ti->mmio+ACA_OFFSET + WRBR_EVEN)));
+               +ntohs((unsigned short)isa_readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN));
+       SET_PAGE(ntohs((unsigned short)isa_readw(ti->mmio+ACA_OFFSET + WRBR_EVEN)));
 
        dev->mem_start = ti->sram;
        dev->mem_end = ti->sram + (ti->mapped_ram_size<<9) - 1;
@@ -1292,12 +1280,12 @@ static void initial_tok_int(struct net_device *dev)
        {
                int i;
                DPRINTK("init_srb(%p):", ti->init_srb);
-               for (i=0;i<17;i++) printk("%02X ", (int)readb(ti->init_srb+i));
+               for (i=0;i<17;i++) printk("%02X ", (int)isa_readb(ti->init_srb+i));
                printk("\n");
        }
 #endif
 
-       hw_encoded_addr = readw(ti->init_srb
+       hw_encoded_addr = isa_readw(ti->init_srb
                                + offsetof(struct srb_init_response, encoded_address));
 
 #if !TR_NEWFORMAT
@@ -1307,7 +1295,7 @@ static void initial_tok_int(struct net_device *dev)
 #endif
 
        encoded_addr=(ti->sram + ntohs(hw_encoded_addr));
-       ti->ring_speed = readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4;
+       ti->ring_speed = isa_readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4;
 #if !TR_NEWFORMAT
        DPRINTK("encoded addr (%04X,%04X,%08X): ", hw_encoded_addr,
                ntohs(hw_encoded_addr), encoded_addr);
@@ -1316,12 +1304,12 @@ static void initial_tok_int(struct net_device *dev)
                ti->ring_speed, ti->sram);
 #endif
 
-       ti->auto_ringspeedsave=readb(ti->init_srb
+       ti->auto_ringspeedsave=isa_readb(ti->init_srb
                                     +offsetof(struct srb_init_response, init_status_2)) & 0x4 ? TRUE : FALSE;
 
 #if !TR_NEWFORMAT
        for(i=0;i<TR_ALEN;i++) {
-               dev->dev_addr[i]=readb(encoded_addr + i);
+               dev->dev_addr[i]=isa_readb(encoded_addr + i);
                printk("%02X%s", dev->dev_addr[i], (i==TR_ALEN-1) ? "" : ":" );
        }
        printk("\n");
@@ -1346,10 +1334,10 @@ static int tok_init_card(struct net_device *dev)
 
 #ifdef ENABLE_PAGING
        if(ti->page_mask)
-               writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+               isa_writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
 #endif
 
-       writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
+       isa_writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
 
 #if !TR_NEWFORMAT
        DPRINTK("resetting card\n");
@@ -1364,7 +1352,7 @@ static int tok_init_card(struct net_device *dev)
 #endif
 
        ti->open_status=IN_PROGRESS;
-       writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+       isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
        return 0;
 }
 
@@ -1375,18 +1363,18 @@ static void open_sap(unsigned char type,struct net_device *dev)
 
        SET_PAGE(ti->srb);
        for (i=0; i<sizeof(struct dlc_open_sap); i++)
-               writeb(0, ti->srb+i);
+               isa_writeb(0, ti->srb+i);
 
-       writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command));
-       writew(htons(MAX_I_FIELD),
+       isa_writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command));
+       isa_writew(htons(MAX_I_FIELD),
               ti->srb + offsetof(struct dlc_open_sap, max_i_field));
-       writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY,
+       isa_writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY,
               ti->srb + offsetof(struct dlc_open_sap, sap_options));
-       writeb(SAP_OPEN_STATION_CNT,
+       isa_writeb(SAP_OPEN_STATION_CNT,
               ti->srb + offsetof(struct dlc_open_sap, station_count));
-       writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value));
+       isa_writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value));
 
-       writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+       isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
 
 }
 
@@ -1403,42 +1391,42 @@ void tok_open_adapter(unsigned long dev_addr)
        DPRINTK("now opening the board...\n");
 #endif
 
-       writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
-       writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
+       isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+       isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
 
        for (i=0; i<sizeof(struct dir_open_adapter); i++)
-               writeb(0, ti->init_srb+i);
+               isa_writeb(0, ti->init_srb+i);
 
-       writeb(DIR_OPEN_ADAPTER,
+       isa_writeb(DIR_OPEN_ADAPTER,
               ti->init_srb + offsetof(struct dir_open_adapter, command));
-       writew(htons(OPEN_PASS_BCON_MAC),
+       isa_writew(htons(OPEN_PASS_BCON_MAC),
               ti->init_srb + offsetof(struct dir_open_adapter, open_options));
        if (ti->ring_speed == 16) {
-               writew(htons(ti->dhb_size16mb),
+               isa_writew(htons(ti->dhb_size16mb),
                       ti->init_srb + offsetof(struct dir_open_adapter, dhb_length));
-               writew(htons(ti->rbuf_cnt16),
+               isa_writew(htons(ti->rbuf_cnt16),
                       ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf));
-               writew(htons(ti->rbuf_len16),
+               isa_writew(htons(ti->rbuf_len16),
                       ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len));
        } else {
-               writew(htons(ti->dhb_size4mb),
+               isa_writew(htons(ti->dhb_size4mb),
                       ti->init_srb + offsetof(struct dir_open_adapter, dhb_length));
-               writew(htons(ti->rbuf_cnt4),
+               isa_writew(htons(ti->rbuf_cnt4),
                       ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf));
-               writew(htons(ti->rbuf_len4),
+               isa_writew(htons(ti->rbuf_len4),
                       ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len));
        }
-       writeb(NUM_DHB, /* always 2 */ 
+       isa_writeb(NUM_DHB, /* always 2 */ 
               ti->init_srb + offsetof(struct dir_open_adapter, num_dhb));
-       writeb(DLC_MAX_SAP,
+       isa_writeb(DLC_MAX_SAP,
               ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sap));
-       writeb(DLC_MAX_STA,
+       isa_writeb(DLC_MAX_STA,
               ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sta));
 
        ti->srb=ti->init_srb; /* We use this one in the interrupt handler */
 
-       writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-       writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+       isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+       isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
 
 }
 
@@ -1452,7 +1440,7 @@ static void tr_tx(struct net_device *dev)
        int i;
        struct trllc    *llc;
 
-       if (readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF)
+       if (isa_readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF)
                DPRINTK("ASB not free !!!\n");
 
        /* in providing the transmit interrupts,
@@ -1461,7 +1449,7 @@ static void tr_tx(struct net_device *dev)
           to stuff with data.  Here we compute the
           effective address where we will place data.*/
        dhb=ti->sram
-               +ntohs(readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address)));
+               +ntohs(isa_readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address)));
        
        /* Figure out the size of the 802.5 header */
        if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */
@@ -1472,28 +1460,28 @@ static void tr_tx(struct net_device *dev)
 
        llc = (struct trllc *)(ti->current_skb->data + hdr_len);
 
-       xmit_command = readb(ti->srb + offsetof(struct srb_xmit, command));
+       xmit_command = isa_readb(ti->srb + offsetof(struct srb_xmit, command));
 
-       writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command));
-       writew(readb(ti->srb + offsetof(struct srb_xmit, station_id)),
+       isa_writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command));
+       isa_writew(isa_readb(ti->srb + offsetof(struct srb_xmit, station_id)),
               ti->asb + offsetof(struct asb_xmit_resp, station_id));
-       writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value));
-       writeb(readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)),
+       isa_writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value));
+       isa_writeb(isa_readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)),
               ti->asb + offsetof(struct asb_xmit_resp, cmd_corr));
-       writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code));
+       isa_writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code));
 
        if ((xmit_command==XMIT_XID_CMD) || (xmit_command==XMIT_TEST_CMD)) {
 
-               writew(htons(0x11),
+               isa_writew(htons(0x11),
                       ti->asb + offsetof(struct asb_xmit_resp, frame_length));
-               writeb(0x0e, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
-               writeb(AC, dhb);
-               writeb(LLC_FRAME, dhb+1);
+               isa_writeb(0x0e, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
+               isa_writeb(AC, dhb);
+               isa_writeb(LLC_FRAME, dhb+1);
 
-               for (i=0; i<TR_ALEN; i++) writeb((int)0x0FF, dhb+i+2);
-               for (i=0; i<TR_ALEN; i++) writeb(0, dhb+i+TR_ALEN+2);
+               for (i=0; i<TR_ALEN; i++) isa_writeb((int)0x0FF, dhb+i+2);
+               for (i=0; i<TR_ALEN; i++) isa_writeb(0, dhb+i+TR_ALEN+2);
 
-               writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+               isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
                return;
 
        }
@@ -1502,13 +1490,13 @@ static void tr_tx(struct net_device *dev)
         *      the token ring packet is copied from sk_buff to the adapter
         *      buffer identified in the command data received with the interrupt.
         */
-       writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
-       writew(htons(ti->current_skb->len),
+       isa_writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
+       isa_writew(htons(ti->current_skb->len),
               ti->asb + offsetof(struct asb_xmit_resp, frame_length));
 
-       memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len);
+       isa_memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len);
 
-       writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+       isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
        ti->tr_stats.tx_bytes+=ti->current_skb->len;
        dev->tbusy=0;
        dev_kfree_skb(ti->current_skb);
@@ -1531,19 +1519,19 @@ static void tr_rx(struct net_device *dev)
        struct iphdr *iph;
 
        rbuffer=(ti->sram
-                +ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2;
+                +ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2;
  
-       if(readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF)
+       if(isa_readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF)
                DPRINTK("ASB not free !!!\n");
 
-       writeb(REC_DATA,
+       isa_writeb(REC_DATA,
               ti->asb + offsetof(struct asb_rec, command));
-       writew(readw(ti->arb + offsetof(struct arb_rec_req, station_id)),
+       isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, station_id)),
               ti->asb + offsetof(struct asb_rec, station_id));
-       writew(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)),
+       isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)),
               ti->asb + offsetof(struct asb_rec, rec_buf_addr));
 
-       lan_hdr_len=readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len));
+       lan_hdr_len=isa_readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len));
        hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr);
        
        llc=(rbuffer + offsetof(struct rec_buf, data) + lan_hdr_len);
@@ -1552,28 +1540,28 @@ static void tr_rx(struct net_device *dev)
        DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n",
                (unsigned int)offsetof(struct rec_buf,data), (unsigned int)lan_hdr_len);
        DPRINTK("llc: %08X rec_buf_addr: %04X ti->sram: %p\n", llc,
-               ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))),
+               ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))),
                ti->sram);
        DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, "
                "ethertype: %04X\n",
-               (int)readb(llc + offsetof(struct trllc, dsap)),
-               (int)readb(llc + offsetof(struct trllc, ssap)),
-               (int)readb(llc + offsetof(struct trllc, llc)),
-               (int)readb(llc + offsetof(struct trllc, protid)),
-               (int)readb(llc + offsetof(struct trllc, protid)+1),
-               (int)readb(llc + offsetof(struct trllc, protid)+2),
-               (int)readw(llc + offsetof(struct trllc, ethertype)));
+               (int)isa_readb(llc + offsetof(struct trllc, dsap)),
+               (int)isa_readb(llc + offsetof(struct trllc, ssap)),
+               (int)isa_readb(llc + offsetof(struct trllc, llc)),
+               (int)isa_readb(llc + offsetof(struct trllc, protid)),
+               (int)isa_readb(llc + offsetof(struct trllc, protid)+1),
+               (int)isa_readb(llc + offsetof(struct trllc, protid)+2),
+               (int)isa_readw(llc + offsetof(struct trllc, ethertype)));
 #endif
-       if (readb(llc + offsetof(struct trllc, llc))!=UI_CMD) {
-               writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
+       if (isa_readb(llc + offsetof(struct trllc, llc))!=UI_CMD) {
+               isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
                ti->tr_stats.rx_dropped++;
-               writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+               isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
                return;
        }
 
-       length = ntohs(readw(ti->arb+offsetof(struct arb_rec_req, frame_len)));
-               if ((readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) &&
-                   (readb(llc + offsetof(struct trllc, ssap))==EXTENDED_SAP) &&
+       length = ntohs(isa_readw(ti->arb+offsetof(struct arb_rec_req, frame_len)));
+               if ((isa_readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) &&
+                   (isa_readb(llc + offsetof(struct trllc, ssap))==EXTENDED_SAP) &&
                (length>=hdr_len)) {
                        IPv4_p = 1;
                }
@@ -1588,20 +1576,20 @@ static void tr_rx(struct net_device *dev)
                        DPRINTK("Probably non-IP frame received.\n");
                        DPRINTK("ssap: %02X dsap: %02X saddr: %02X:%02X:%02X:%02X:%02X:%02X "
                                "daddr: %02X:%02X:%02X:%02X:%02X:%02X\n",
-                               (int)readb(llc + offsetof(struct trllc, ssap)),
-                               (int)readb(llc + offsetof(struct trllc, dsap)),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+1),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+2),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+3),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+4),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+5),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+1),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+2),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+3),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+4),
-                               (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+5));
+                               (int)isa_readb(llc + offsetof(struct trllc, ssap)),
+                               (int)isa_readb(llc + offsetof(struct trllc, dsap)),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+1),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+2),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+3),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+4),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+5),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+1),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+2),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+3),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+4),
+                               (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+5));
                }
 #endif
 
@@ -1610,8 +1598,8 @@ static void tr_rx(struct net_device *dev)
                if (!(skb=dev_alloc_skb(skb_size))) {
                        DPRINTK("out of memory. frame dropped.\n");
                        ti->tr_stats.rx_dropped++;
-                       writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
-                       writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+                       isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
+                       isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
                        return;
                }
 
@@ -1619,12 +1607,12 @@ static void tr_rx(struct net_device *dev)
        skb_reserve(skb, sizeof(struct trh_hdr)-lan_hdr_len+sizeof(struct trllc));
                skb->dev=dev;
                data=skb->data;
-       rbuffer_len=ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len)));
+       rbuffer_len=ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len)));
        rbufdata = rbuffer + offsetof(struct rec_buf,data);
 
        if (IPv4_p) {
                 /* Copy the headers without checksumming */
-               memcpy_fromio(data, rbufdata, hdr_len);
+               isa_memcpy_fromio(data, rbufdata, hdr_len);
 
                /* Watch for padded packets and bogons */
                iph=(struct iphdr*)(data + lan_hdr_len + sizeof(struct trllc));
@@ -1644,20 +1632,20 @@ static void tr_rx(struct net_device *dev)
                                                   length < rbuffer_len ? length : rbuffer_len,
                                                   chksum);
                else
-                       memcpy_fromio(data, rbufdata, rbuffer_len);
-               rbuffer = ntohs(readw(rbuffer));
+                       isa_memcpy_fromio(data, rbufdata, rbuffer_len);
+               rbuffer = ntohs(isa_readw(rbuffer));
                if (!rbuffer)
                        break;
                length -= rbuffer_len;
                data += rbuffer_len;
                rbuffer += ti->sram;
-               rbuffer_len = ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len)));
+               rbuffer_len = ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len)));
                rbufdata = rbuffer + offsetof(struct rec_buf, data);
        }
 
-               writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
+               isa_writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
 
-               writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+               isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
 
        ti->tr_stats.rx_bytes += skb->len;
                ti->tr_stats.rx_packets++;
@@ -1696,10 +1684,10 @@ static int tok_send_packet(struct sk_buff *skb, struct net_device *dev)
 
                /* Save skb; we'll need it when the adapter asks for the data */
                ti->current_skb=skb;
-               writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command));
-               writew(ti->exsap_station_id, ti->srb
+               isa_writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command));
+               isa_writew(ti->exsap_station_id, ti->srb
                       +offsetof(struct srb_xmit, station_id));
-               writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD));
+               isa_writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD));
                spin_unlock_irqrestore(&(ti->lock), flags);
 
                dev->trans_start=jiffies;
@@ -1721,9 +1709,9 @@ void ibmtr_readlog(struct net_device *dev) {
         ti=(struct tok_info *) dev->priv;
 
         ti->readlog_pending = 0;
-        writeb(DIR_READ_LOG, ti->srb);
-        writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-        writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+        isa_writeb(DIR_READ_LOG, ti->srb);
+        isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+        isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
         dev->tbusy=1; /* really srb busy... */
 }
 
index 3f3fa6aedd021aa74e95df6a075870fb5b4049da..769af277ab109993fc1b1836b526d1c916d868cb 100644 (file)
 #define ACA_RW 0x00
 
 #ifdef ENABLE_PAGING
-#define SET_PAGE(x) (writeb(((x>>8)&ti.page_mask), \
+#define SET_PAGE(x) (isa_writeb(((x>>8)&ti.page_mask), \
   ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN))
 #else
 #define SET_PAGE(x)
index ec85e07a8d7c2a3a6fe5c8ba2b5a979e5625947e..9018e897663010165e4ee9bd2655352bc2190159 100644 (file)
@@ -5,6 +5,7 @@
  *     David Mosberger-Tang, Martin Mares
  */
 
+#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
index 621045989c3c9c26ad464a91b4174c79eb47894b..5939548ba28ba20af913de123188c63bc0bccecd 100644 (file)
@@ -10,8 +10,12 @@ dep_tristate '  SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI
 
 comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
 
-bool '  Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+  bool 'Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES
+fi
 
+bool '  Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
+  
 bool '  Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
 bool '  SCSI logging facility' CONFIG_SCSI_LOGGING
 
index ed78f8cf9340cb6ef55d753f4467835cd45ff6b5..e6d51509ba73d09687f896aa85875b5fe399b788 100644 (file)
@@ -40,8 +40,8 @@ ifeq ($(CONFIG_SCSI),y)
     OX_OBJS  := scsi_syms.o
   endif
   L_OBJS += scsi_n_syms.o hosts.o scsi_ioctl.o constants.o scsicam.o
-  L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o
-  L_OBJS += scsi_proc.o
+  L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o 
+  L_OBJS += scsi_merge.o scsi_proc.o
 else
   ifeq ($(CONFIG_SCSI),m)
     MIX_OBJS += scsi_syms.o
@@ -721,10 +721,11 @@ megaraid.o: megaraid.c
        $(CC) $(CFLAGS) -c megaraid.c
 
 scsi_mod.o: $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o constants.o \
-               scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o scsi_queue.o
+               scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o \
+               scsi_queue.o scsi_lib.o scsi_merge.o
        $(LD) $(LD_RFLAG) -r -o $@ $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o \
-               constants.o scsicam.o scsi_proc.o               \
-               scsi_error.o scsi_obsolete.o scsi_queue.o \
+               constants.o scsicam.o scsi_proc.o scsi_merge.o     \
+               scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o
 
 sr_mod.o: sr.o sr_ioctl.o sr_vendor.o
        $(LD) $(LD_RFLAG) -r -o $@ sr.o sr_ioctl.o sr_vendor.o
index ef18e4f9aa1ec8765e5615188c71c15591c4d6a7..afcc4d1a5a4cf6f0ed9358c014371bd1850ad041 100644 (file)
@@ -1,5 +1,5 @@
-/* $Id: advansys.c,v 1.67 1999/11/18 20:13:15 bobf Exp bobf $ */
-#define ASC_VERSION "3.2K"    /* AdvanSys Driver Version */
+/* $Id: advansys.c,v 1.68 1999/11/19 01:57:47 bobf Exp bobf $ */
+#define ASC_VERSION "3.2L"    /* AdvanSys Driver Version */
 
 /*
  * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
          4. Increase Wide board scatter-gather list maximum length to
             255 when the driver is compiled into the kernel.
 
+     3.2L (11/18/99):
+         1. Fix bug in adv_get_sglist() that caused an assertion failure
+            at line 7475. The reqp->sgblkp pointer must be initialized
+            to NULL in adv_get_sglist().
+
   J. Known Problems/Fix List (XXX)
 
      1. Need to add memory mapping workaround. Test the memory mapping.
@@ -7471,8 +7476,8 @@ adv_get_sglist(asc_board_t *boardp, adv_req_t *reqp, Scsi_Cmnd *scp)
     slp = (struct scatterlist *) scp->request_buffer;
     sg_elem_cnt = scp->use_sg;
     prev_sg_block = NULL;
+    reqp->sgblkp = NULL;
 
-    ASC_ASSERT(reqp->sgblkp == NULL);
     do
     {
         /*
index 6f9b2e480e7a7864abe55c119310ed0afafebf12..e3d3f0cb27e6172941bf6f733047e7e35bf0cd04 100644 (file)
@@ -1292,7 +1292,9 @@ int aha1542_bus_reset(Scsi_Cmnd * SCpnt)
      * check for timeout, and if we are doing something like this
      * we are pretty desperate anyways.
      */
+    spin_unlock_irq(&io_request_lock);
     scsi_sleep(4*HZ);
+    spin_lock_irq(&io_request_lock);
 
     WAIT(STATUS(SCpnt->host->io_port), 
         STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
@@ -1359,7 +1361,9 @@ int aha1542_host_reset(Scsi_Cmnd * SCpnt)
      * check for timeout, and if we are doing something like this
      * we are pretty desperate anyways.
      */
+    spin_unlock_irq(&io_request_lock);
     scsi_sleep(4*HZ);
+    spin_lock_irq(&io_request_lock);
 
     WAIT(STATUS(SCpnt->host->io_port), 
         STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
index a8ff8703ac9705612e26a368791db979acd268f5..c0b3f3a62a3b4171c850a7a7015c88676d6706da 100644 (file)
@@ -36,47 +36,59 @@ void mydlyu(unsigned int);
  *   static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/atp870u.c,v 1.0 1997/05/07 15:22:00 root Exp root $";
  */
 
-static unsigned char admaxu = 1, host_idu[2], chip_veru[2], scam_on[2], global_map[2];
-static unsigned short int active_idu[2], wide_idu[2], sync_idu, ultra_map[2];
-static int workingu[2] = {0, 0};
+static unsigned char admaxu = 1;
+static unsigned short int sync_idu;
 
-static Scsi_Cmnd *querequ[2][qcnt], *curr_req[2][16];
-
-static unsigned char devspu[2][16] = {
-       {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-        0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20},
-       {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-        0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}
-};
-
-static unsigned char dirctu[2][16], last_cmd[2], in_snd[2], in_int[2];
-static unsigned char ata_cdbu[2][16];
-static unsigned int ioportu[2] = {0, 0};
 static unsigned int irqnumu[2] = {0, 0};
-static unsigned short int pciportu[2];
-static unsigned long prdaddru[2][16], tran_lenu[2][16], last_lenu[2][16];
-static unsigned char prd_tableu[2][16][1024];
-static unsigned char *prd_posu[2][16];
-static unsigned char quhdu[2], quendu[2];
 
-static unsigned char devtypeu[2][16] =
+struct atp_unit
 {
-       {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-       {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+       unsigned long ioport;
+       unsigned long irq;
+       unsigned long pciport;
+       unsigned char last_cmd;
+       unsigned char in_snd;
+       unsigned char in_int;
+       unsigned char quhdu;
+       unsigned char quendu;
+       unsigned char scam_on;
+       unsigned char global_map;
+       unsigned char chip_veru;
+       unsigned char host_idu;
+       int working;
+       unsigned short wide_idu;
+       unsigned short active_idu;
+       unsigned short ultra_map;
+       unsigned char ata_cdbu[16];
+       Scsi_Cmnd *querequ[qcnt];
+       struct atp_id
+       {
+               unsigned char dirctu;
+               unsigned char devspu;
+               unsigned char devtypeu;
+               unsigned long prdaddru;
+               unsigned long tran_lenu;
+               unsigned long last_lenu;
+               unsigned char *prd_posu;
+               unsigned char *prd_tableu;
+               Scsi_Cmnd *curr_req;
+       } id[16];
 };
 
 static struct Scsi_Host *atp_host[2] = {NULL, NULL};
+static struct atp_unit atp_unit[2];
 
 static void atp870u_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
 {
        unsigned long flags;
        unsigned short int tmpcip, id;
-       unsigned char i, j, h, tarid, lun;
+       unsigned char i, j, h, target_id, lun;
        unsigned char *prd;
        Scsi_Cmnd *workrequ;
        unsigned int workportu, tmport;
        unsigned long adrcntu, k;
        int errstus;
+       struct atp_unit *dev = dev_id;
 
        for (h = 0; h < 2; h++) {
                if (irq == irqnumu[h]) {
@@ -85,59 +97,84 @@ static void atp870u_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
        }
        return;
 irq_numok:
-       in_int[h] = 1;
-       workportu = ioportu[h];
+       dev->in_int = 1;
+       workportu = dev->ioport;
        tmport = workportu;
-
-       if (workingu[h] != 0) 
+       
+       if (dev->working != 0) 
        {
                tmport += 0x1f;
                j = inb(tmport);
 
-               tmpcip = pciportu[h];
+               tmpcip = dev->pciport;
                if ((inb(tmpcip) & 0x08) != 0) 
                {
                        tmpcip += 0x2;
-                       while ((inb(tmpcip) & 0x08) != 0);
+                       for (k=0; k < 1000; k++)
+                       {
+                               if ((inb(tmpcip) & 0x08) == 0)
+                               {
+                                       goto stop_dma;
+                               }
+                               if ((inb(tmpcip) & 0x01) == 0)
+                               {
+                                       goto stop_dma;
+                               }    
+                       }
                }
-               tmpcip = pciportu[h];
+stop_dma:
+               tmpcip = dev->pciport;
                outb(0x00, tmpcip);
                tmport -= 0x08;
 
                i = inb(tmport);
                if ((j & 0x40) == 0) 
                {
-                       if ((last_cmd[h] & 0x40) == 0) 
+                       if ((dev->last_cmd & 0x40) == 0) 
                        {
-                               last_cmd[h] = 0xff;
+                               dev->last_cmd = 0xff;
                        }
                }
-               else last_cmd[h] |= 0x40;
+               else dev->last_cmd |= 0x40;
 
                tmport -= 0x02;
-               tarid = inb(tmport);
+               target_id = inb(tmport);
                tmport += 0x02;
 
-               if ((tarid & 0x40) != 0) {
-                       tarid = (tarid & 0x07) | 0x08;
+               /*
+                *      Remap wide devices onto id numbers
+                */
+                
+               if ((target_id & 0x40) != 0) {
+                       target_id = (target_id & 0x07) | 0x08;
                } else {
-                       tarid &= 0x07;
+                       target_id &= 0x07;
                }
+               
                if (i == 0x85) 
                {
-                       if (wide_idu[h] != 0) 
+                       /*
+                        *      Flip wide
+                        */
+                       if (dev->wide_idu != 0) 
                        {
                                tmport = workportu + 0x1b;
                                j = inb(tmport) & 0x0e;
                                j |= 0x01;
                                outb(j, tmport);
                        }
-                       if (((quhdu[h] != quendu[h]) || (last_cmd[h] != 0xff)) &&
-                           (in_snd[h] == 0)) 
+                       /*
+                        *      Issue more commands
+                        */
+                       if (((dev->quhdu != dev->quendu) || (dev->last_cmd != 0xff)) &&
+                           (dev->in_snd == 0)) 
                        {
                                send_s870(h);
                        }
-                       in_int[h] = 0;
+                       /*
+                        *      Done
+                        */
+                       dev->in_int = 0;
                        return;
                }
                if (i == 0x21) 
@@ -147,15 +184,15 @@ irq_numok:
                        ((unsigned char *) &adrcntu)[2] = inb(tmport++);
                        ((unsigned char *) &adrcntu)[1] = inb(tmport++);
                        ((unsigned char *) &adrcntu)[0] = inb(tmport);
-                       k = last_lenu[h][tarid];
+                       k = dev->id[target_id].last_lenu;
                        k -= adrcntu;
-                       tran_lenu[h][tarid] = k;
-                       last_lenu[h][tarid] = adrcntu;
+                       dev->id[target_id].tran_lenu = k;
+                       dev->id[target_id].last_lenu = adrcntu;
                        tmport -= 0x04;
                        outb(0x41, tmport);
                        tmport += 0x08;
                        outb(0x08, tmport);
-                       in_int[h] = 0;
+                       dev->in_int = 0;
                        return;
                }
                if ((i == 0x80) || (i == 0x8f)) 
@@ -163,7 +200,7 @@ irq_numok:
                        lun = 0;
                        tmport -= 0x07;
                        j = inb(tmport);
-                       if (j == 0x44) {
+                       if (j == 0x44 || i==0x80) {
                                tmport += 0x0d;
                                lun = inb(tmport) & 0x07;
                        } else {
@@ -174,71 +211,80 @@ irq_numok:
                                        ((unsigned char *) &adrcntu)[2] = inb(tmport++);
                                        ((unsigned char *) &adrcntu)[1] = inb(tmport++);
                                        ((unsigned char *) &adrcntu)[0] = inb(tmport);
-                                       k = last_lenu[h][tarid];
+                                       k = dev->id[target_id].last_lenu;
                                        k -= adrcntu;
-                                       tran_lenu[h][tarid] = k;
-                                       last_lenu[h][tarid] = adrcntu;
+                                       dev->id[target_id].tran_lenu = k;
+                                       dev->id[target_id].last_lenu = adrcntu;
                                        tmport += 0x04;
                                        outb(0x08, tmport);
-                                       in_int[h] = 0;
+                                       dev->in_int = 0;
                                        return;
                                }
                                else 
                                {
                                        outb(0x46, tmport);
-                                       dirctu[h][tarid] = 0x00;
+                                       dev->id[target_id].dirctu = 0x00;
                                        tmport += 0x02;
                                        outb(0x00, tmport++);
                                        outb(0x00, tmport++);
                                        outb(0x00, tmport++);
                                        tmport += 0x03;
                                        outb(0x08, tmport);
-                                       in_int[h] = 0;
+                                       dev->in_int = 0;
                                        return;
                                }
                        }
                        tmport = workportu + 0x10;
                        outb(0x45, tmport);
                        tmport += 0x06;
-                       tarid = inb(tmport);
-                       if ((tarid & 0x10) != 0) 
+                       target_id = inb(tmport);
+                       /*
+                        *      Remap wide identifiers
+                        */
+                       if ((target_id & 0x10) != 0) 
                        {
-                               tarid = (tarid & 0x07) | 0x08;
+                               target_id = (target_id & 0x07) | 0x08;
                        } else {
-                               tarid &= 0x07;
+                               target_id &= 0x07;
                        }
-                       workrequ = curr_req[h][tarid];
+                       workrequ = dev->id[target_id].curr_req;
                        tmport = workportu + 0x0f;
                        outb(lun, tmport);
                        tmport += 0x02;
-                       outb(devspu[h][tarid], tmport++);
-                       adrcntu = tran_lenu[h][tarid];
-                       k = last_lenu[h][tarid];
+                       outb(dev->id[target_id].devspu, tmport++);
+                       adrcntu = dev->id[target_id].tran_lenu;
+                       k = dev->id[target_id].last_lenu;
                        outb(((unsigned char *) &k)[2], tmport++);
                        outb(((unsigned char *) &k)[1], tmport++);
                        outb(((unsigned char *) &k)[0], tmport++);
-                       j = tarid;
-                       if (tarid > 7) {
+                       /* Remap wide */
+                       j = target_id;
+                       if (target_id > 7) {
                                j = (j & 0x07) | 0x40;
                        }
-                       j |= dirctu[h][tarid];
+                       /* Add direction */
+                       j |= dev->id[target_id].dirctu;
                        outb(j, tmport++);
                        outb(0x80, tmport);
                        tmport = workportu + 0x1b;
                        j = inb(tmport) & 0x0e;
                        id = 1;
-                       id = id << tarid;
-                       if ((id & wide_idu[h]) != 0) {
+                       id = id << target_id;
+                       /*
+                        *      Is this a wide device
+                        */
+                       if ((id & dev->wide_idu) != 0) {
                                j |= 0x01;
                        }
                        outb(j, tmport);
-                       if (last_lenu[h][tarid] == 0) {
+                       
+                       if (dev->id[target_id].last_lenu == 0) {
                                tmport = workportu + 0x18;
                                outb(0x08, tmport);
-                               in_int[h] = 0;
+                               dev->in_int = 0;
                                return;
                        }
-                       prd = prd_posu[h][tarid];
+                       prd = dev->id[target_id].prd_posu;
                        while (adrcntu != 0) 
                        {
                                id = ((unsigned short int *) (prd))[2];
@@ -252,35 +298,44 @@ irq_numok:
                                            (k - adrcntu);
                                        ((unsigned long *) (prd))[0] += adrcntu;
                                        adrcntu = 0;
-                                       prd_posu[h][tarid] = prd;
+                                       dev->id[target_id].prd_posu = prd;
                                } else {
                                        adrcntu -= k;
-                                       prdaddru[h][tarid] += 0x08;
+                                       dev->id[target_id].prdaddru += 0x08;
                                        prd += 0x08;
                                        if (adrcntu == 0) {
-                                               prd_posu[h][tarid] = prd;
+                                               dev->id[target_id].prd_posu = prd;
                                        }
                                }
                        }
-                       tmpcip = pciportu[h] + 0x04;
-                       outl(prdaddru[h][tarid], tmpcip);
+                       tmpcip = dev->pciport + 0x04;
+                       outl(dev->id[target_id].prdaddru, tmpcip);
                        tmpcip -= 0x02;
                        outb(0x06, tmpcip);
                        outb(0x00, tmpcip);
                        tmpcip -= 0x02;
                        tmport = workportu + 0x18;
-                       if (dirctu[h][tarid] != 0) {
+                       /*
+                        *      Check transfer direction
+                        */
+                       if (dev->id[target_id].dirctu != 0) {
                                outb(0x08, tmport);
                                outb(0x01, tmpcip);
-                               in_int[h] = 0;
+                               dev->in_int = 0;
                                return;
                        }
                        outb(0x08, tmport);
                        outb(0x09, tmpcip);
-                       in_int[h] = 0;
+                       dev->in_int = 0;
                        return;
                }
-               workrequ = curr_req[h][tarid];
+               
+               /*
+                *      Current scsi request on this target
+                */
+                
+               workrequ = dev->id[target_id].curr_req;
+               
                if (i == 0x42) {
                        errstus = 0x02;
                        workrequ->result = errstus;
@@ -293,24 +348,36 @@ irq_numok:
                        errstus = inb(tmport);
                        workrequ->result = errstus;
 go_42:
+                       /*
+                        *      Complete the command
+                        */
                        spin_lock_irqsave(&io_request_lock, flags);
                        (*workrequ->scsi_done) (workrequ);
                        spin_unlock_irqrestore(&io_request_lock, flags);
 
-                       curr_req[h][tarid] = 0;
-                       workingu[h]--;
-                       if (wide_idu[h] != 0) {
+                       /*
+                        *      Clear it off the queue
+                        */
+                       dev->id[target_id].curr_req = 0;
+                       dev->working--;
+                       /*
+                        *      Take it back wide
+                        */
+                       if (dev->wide_idu != 0) {
                                tmport = workportu + 0x1b;
                                j = inb(tmport) & 0x0e;
                                j |= 0x01;
                                outb(j, tmport);
                        }
-                       if (((last_cmd[h] != 0xff) || (quhdu[h] != quendu[h])) &&
-                           (in_snd[h] == 0)) 
+                       /*
+                        *      If there is stuff to send and nothing going then send it
+                        */
+                       if (((dev->last_cmd != 0xff) || (dev->quhdu != dev->quendu)) &&
+                           (dev->in_snd == 0)) 
                        {
                                send_s870(h);
                        }
-                       in_int[h] = 0;
+                       dev->in_int = 0;
                        return;
                }
                if (i == 0x4f) {
@@ -319,23 +386,23 @@ go_42:
                i &= 0x0f;
                if (i == 0x09) {
                        tmpcip = tmpcip + 4;
-                       outl(prdaddru[h][tarid], tmpcip);
+                       outl(dev->id[target_id].prdaddru, tmpcip);
                        tmpcip = tmpcip - 2;
                        outb(0x06, tmpcip);
                        outb(0x00, tmpcip);
                        tmpcip = tmpcip - 2;
                        tmport = workportu + 0x10;
                        outb(0x41, tmport);
-                       dirctu[h][tarid] = 0x00;
+                       dev->id[target_id].dirctu = 0x00;
                        tmport += 0x08;
                        outb(0x08, tmport);
                        outb(0x09, tmpcip);
-                       in_int[h] = 0;
+                       dev->in_int = 0;
                        return;
                }
                if (i == 0x08) {
                        tmpcip = tmpcip + 4;
-                       outl(prdaddru[h][tarid], tmpcip);
+                       outl(dev->id[target_id].prdaddru, tmpcip);
                        tmpcip = tmpcip - 2;
                        outb(0x06, tmpcip);
                        outb(0x00, tmpcip);
@@ -344,11 +411,11 @@ go_42:
                        outb(0x41, tmport);
                        tmport += 0x05;
                        outb((unsigned char) (inb(tmport) | 0x20), tmport);
-                       dirctu[h][tarid] = 0x20;
+                       dev->id[target_id].dirctu = 0x20;
                        tmport += 0x03;
                        outb(0x08, tmport);
                        outb(0x01, tmpcip);
-                       in_int[h] = 0;
+                       dev->in_int = 0;
                        return;
                }
                tmport -= 0x07;
@@ -357,20 +424,20 @@ go_42:
                } else {
                        outb(0x46, tmport);
                }
-               dirctu[h][tarid] = 0x00;
+               dev->id[target_id].dirctu = 0x00;
                tmport += 0x02;
                outb(0x00, tmport++);
                outb(0x00, tmport++);
                outb(0x00, tmport++);
                tmport += 0x03;
                outb(0x08, tmport);
-               in_int[h] = 0;
+               dev->in_int = 0;
                return;
        } else {
                tmport = workportu + 0x17;
                inb(tmport);
-               workingu[h] = 0;
-               in_int[h] = 0;
+               dev->working = 0;
+               dev->in_int = 0;
                return;
        }
 }
@@ -381,6 +448,7 @@ int atp870u_queuecommand(Scsi_Cmnd * req_p, void (*done) (Scsi_Cmnd *))
        unsigned long flags;
        unsigned short int m;
        unsigned int tmport;
+       struct atp_unit *dev;
 
        for (h = 0; h <= admaxu; h++) {
                if (req_p->host == atp_host[h]) {
@@ -394,9 +462,15 @@ host_ok:
                done(req_p);
                return 0;
        }
+       dev = &atp_unit[h];
        m = 1;
        m = m << req_p->target;
-       if ((m & active_idu[h]) == 0) {
+
+       /*
+        *      Fake a timeout for missing targets
+        */
+        
+       if ((m & dev->active_idu) == 0) {
                req_p->result = 0x00040000;
                done(req_p);
                return 0;
@@ -404,30 +478,36 @@ host_ok:
        if (done) {
                req_p->scsi_done = done;
        } else {
-               printk("atp870u_queuecommand: done can't be NULL\n");
+               printk(KERN_WARNING "atp870u_queuecommand: done can't be NULL\n");
                req_p->result = 0;
                done(req_p);
                return 0;
        }
-       quendu[h]++;
-       if (quendu[h] >= qcnt) {
-               quendu[h] = 0;
+       /*
+        *      Count new command
+        */
+       dev->quendu++;
+       if (dev->quendu >= qcnt) {
+               dev->quendu = 0;
        }
+       /*
+        *      Check queue state
+        */
 wait_que_empty:
-       if (quhdu[h] == quendu[h]) {
+       if (dev->quhdu == dev->quendu) {
                goto wait_que_empty;
        }
        save_flags(flags);
        cli();
-       querequ[h][quendu[h]] = req_p;
-       if (quendu[h] == 0) {
+       dev->querequ[dev->quendu] = req_p;
+       if (dev->quendu == 0) {
                i = qcnt - 1;
        } else {
-               i = quendu[h] - 1;
+               i = dev->quendu - 1;
        }
-       tmport = ioportu[h] + 0x1c;
+       tmport = dev->ioport + 0x1c;
        restore_flags(flags);
-       if ((inb(tmport) == 0) && (in_int[h] == 0) && (in_snd[h] == 0)) {
+       if ((inb(tmport) == 0) && (dev->in_int == 0) && (dev->in_snd == 0)) {
                send_s870(h);
        }
        return 0;
@@ -447,44 +527,45 @@ void send_s870(unsigned char h)
        Scsi_Cmnd *workrequ;
        unsigned long flags;
        unsigned int i;
-       unsigned char j, tarid;
+       unsigned char j, target_id;
        unsigned char *prd;
        unsigned short int tmpcip, w;
        unsigned long l, bttl;
        unsigned int workportu;
        struct scatterlist *sgpnt;
+       struct atp_unit *dev = &atp_unit[h];
 
        save_flags(flags);
        cli();
-       if (in_snd[h] != 0) {
+       if (dev->in_snd != 0) {
                restore_flags(flags);
                return;
        }
-       in_snd[h] = 1;
-       if ((last_cmd[h] != 0xff) && ((last_cmd[h] & 0x40) != 0)) {
-               last_cmd[h] &= 0x0f;
-               workrequ = curr_req[h][last_cmd[h]];
+       dev->in_snd = 1;
+       if ((dev->last_cmd != 0xff) && ((dev->last_cmd & 0x40) != 0)) {
+               dev->last_cmd &= 0x0f;
+               workrequ = dev->id[dev->last_cmd].curr_req;
                goto cmd_subp;
        }
-       workingu[h]++;
-       j = quhdu[h];
-       quhdu[h]++;
-       if (quhdu[h] >= qcnt) {
-               quhdu[h] = 0;
+       dev->working++;
+       j = dev->quhdu;
+       dev->quhdu++;
+       if (dev->quhdu >= qcnt) {
+               dev->quhdu = 0;
        }
-       workrequ = querequ[h][quhdu[h]];
-       if (curr_req[h][workrequ->target] == 0) {
-               curr_req[h][workrequ->target] = workrequ;
-               last_cmd[h] = workrequ->target;
+       workrequ = dev->querequ[dev->quhdu];
+       if (dev->id[workrequ->target].curr_req == 0) {
+               dev->id[workrequ->target].curr_req = workrequ;
+               dev->last_cmd = workrequ->target;
                goto cmd_subp;
        }
-       quhdu[h] = j;
-       workingu[h]--;
-       in_snd[h] = 0;
+       dev->quhdu = j;
+       dev->working--;
+       dev->in_snd = 0;
        restore_flags(flags);
        return;
 cmd_subp:
-       workportu = ioportu[h];
+       workportu = dev->ioport;
        tmport = workportu + 0x1f;
        if ((inb(tmport) & 0xb0) != 0) {
                goto abortsnd;
@@ -494,43 +575,63 @@ cmd_subp:
                goto oktosend;
        }
 abortsnd:
-       last_cmd[h] |= 0x40;
-       in_snd[h] = 0;
+       dev->last_cmd |= 0x40;
+       dev->in_snd = 0;
        restore_flags(flags);
        return;
 oktosend:
-       memcpy(&ata_cdbu[h][0], &workrequ->cmnd[0], workrequ->cmd_len);
-       if (ata_cdbu[h][0] == 0x25) {
+       memcpy(&dev->ata_cdbu[0], &workrequ->cmnd[0], workrequ->cmd_len);
+       if (dev->ata_cdbu[0] == READ_CAPACITY) {
                if (workrequ->request_bufflen > 8) {
                        workrequ->request_bufflen = 0x08;
                }
        }
-       if (ata_cdbu[h][0] == 0x12) {
+       /*
+        *      Why limit this ????
+        */
+       if (dev->ata_cdbu[0] == INQUIRY) {
                if (workrequ->request_bufflen > 0x24) {
                        workrequ->request_bufflen = 0x24;
-                       ata_cdbu[h][4] = 0x24;
+                       dev->ata_cdbu[4] = 0x24;
                }
        }
+       
        tmport = workportu + 0x1b;
        j = inb(tmport) & 0x0e;
-       tarid = workrequ->target;
+       target_id = workrequ->target;
+       
+       /*
+        *      Wide ?
+        */
        w = 1;
-       w = w << tarid;
-       if ((w & wide_idu[h]) != 0) {
+       w = w << target_id;
+       if ((w & dev->wide_idu) != 0) {
                j |= 0x01;
-       }
+       }       
        outb(j, tmport);
+       
+       /*
+        *      Write the command
+        */
+        
        tmport = workportu;
        outb(workrequ->cmd_len, tmport++);
        outb(0x2c, tmport++);
        outb(0xcf, tmport++);
        for (i = 0; i < workrequ->cmd_len; i++) {
-               outb(ata_cdbu[h][i], tmport++);
+               outb(dev->ata_cdbu[i], tmport++);
        }
        tmport = workportu + 0x0f;
-       outb(0x00, tmport);
+       outb(workrequ->lun, tmport);
        tmport += 0x02;
-       outb(devspu[h][tarid], tmport++);
+       /*
+        *      Write the target
+        */
+       outb(dev->id[target_id].devspu, tmport++);
+       
+       /*
+        *      Figure out the transfer size
+        */
        if (workrequ->use_sg) 
        {
                l = 0;
@@ -546,38 +647,54 @@ oktosend:
        } else {
                l = workrequ->request_bufflen;
        }
+       /*
+        *      Write transfer size
+        */
        outb((unsigned char) (((unsigned char *) (&l))[2]), tmport++);
        outb((unsigned char) (((unsigned char *) (&l))[1]), tmport++);
        outb((unsigned char) (((unsigned char *) (&l))[0]), tmport++);
-       j = tarid;
-       last_lenu[h][j] = l;
-       tran_lenu[h][j] = 0;
+       j = target_id;
+       dev->id[j].last_lenu = l;
+       dev->id[j].tran_lenu = 0;
+       /*
+        *      Flip the wide bits
+        */
        if ((j & 0x08) != 0) {
                j = (j & 0x07) | 0x40;
        }
-       if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) ||
-           (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15)) {
+       /*
+        *      Check transfer direction
+        */
+       if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) ||
+           (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT)) {
                outb((unsigned char) (j | 0x20), tmport++);
        } else {
                outb(j, tmport++);
        }
+       outb((unsigned char)(inb(tmport) | 0x80),tmport);
        outb(0x80, tmport);
        tmport = workportu + 0x1c;
-       dirctu[h][tarid] = 0;
+       dev->id[target_id].dirctu = 0;
        if (l == 0) {
                if (inb(tmport) == 0) {
                        tmport = workportu + 0x18;
                        outb(0x08, tmport);
                } else {
-                       last_cmd[h] |= 0x40;
+                       dev->last_cmd |= 0x40;
                }
-               in_snd[h] = 0;
+               dev->in_snd = 0;
                restore_flags(flags);
                return;
        }
-       tmpcip = pciportu[h];
-       prd = &prd_tableu[h][tarid][0];
-       prd_posu[h][tarid] = prd;
+       tmpcip = dev->pciport;
+       prd = dev->id[target_id].prd_tableu;
+       dev->id[target_id].prd_posu = prd;
+       
+       /*
+        *      Now write the request list. Either as scatter/gather or as
+        *      a linear chain.
+        */
+        
        if (workrequ->use_sg) 
        {
                sgpnt = (struct scatterlist *) workrequ->request_buffer;
@@ -590,6 +707,9 @@ oktosend:
                }
                (unsigned short int) (((unsigned short int *) (prd))[i - 1]) = 0x8000;
        } else {
+               /*
+                *      For a linear request write a chain of blocks
+                */
                bttl = virt_to_bus(workrequ->request_buffer);
                l = workrequ->request_bufflen;
                i = 0;
@@ -606,24 +726,24 @@ oktosend:
                (unsigned long) (((unsigned long *) (prd))[i >> 1]) = bttl;
        }
        tmpcip = tmpcip + 4;
-       prdaddru[h][tarid] = virt_to_bus(&prd_tableu[h][tarid][0]);
-       outl(prdaddru[h][tarid], tmpcip);
+       dev->id[target_id].prdaddru = virt_to_bus(dev->id[target_id].prd_tableu);
+       outl(dev->id[target_id].prdaddru, tmpcip);
        tmpcip = tmpcip - 2;
        outb(0x06, tmpcip);
        outb(0x00, tmpcip);
        tmpcip = tmpcip - 2;
-       if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) ||
-           (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15)) 
+       if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) ||
+           (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT)) 
        {
-               dirctu[h][tarid] = 0x20;
+               dev->id[target_id].dirctu = 0x20;
                if (inb(tmport) == 0) {
                        tmport = workportu + 0x18;
                        outb(0x08, tmport);
                        outb(0x01, tmpcip);
                } else {
-                       last_cmd[h] |= 0x40;
+                       dev->last_cmd |= 0x40;
                }
-               in_snd[h] = 0;
+               dev->in_snd = 0;
                restore_flags(flags);
                return;
        }
@@ -633,9 +753,9 @@ oktosend:
                outb(0x08, tmport);
                outb(0x09, tmpcip);
        } else {
-               last_cmd[h] |= 0x40;
+               dev->last_cmd |= 0x40;
        }
-       in_snd[h] = 0;
+       dev->in_snd = 0;
        restore_flags(flags);
        return;
 
@@ -657,13 +777,13 @@ int atp870u_command(Scsi_Cmnd * SCpnt)
        return SCpnt->result;
 }
 
-unsigned char fun_scam(unsigned char host, unsigned short int *val)
+unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val)
 {
        unsigned int tmport;
        unsigned short int i, k;
        unsigned char j;
 
-       tmport = ioportu[host] + 0x1c;
+       tmport = dev->ioport + 0x1c;
        outw(*val, tmport);
 FUN_D7:
        for (i = 0; i < 10; i++) {      /* stable >= bus settle delay(400 ns)  */
@@ -706,32 +826,34 @@ void tscam(unsigned char host)
        unsigned long n;
        unsigned short int m, assignid_map, val;
        unsigned char mbuf[33], quintet[2];
-       static unsigned char g2q_tab[8] =
-       {0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27};
+       struct atp_unit *dev = &atp_unit[host];
+       static unsigned char g2q_tab[8] = {
+               0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27
+       };
 
 
        for (i = 0; i < 0x10; i++) {
                mydlyu(0xffff);
        }
 
-       tmport = ioportu[host] + 1;
+       tmport = dev->ioport + 1;
        outb(0x08, tmport++);
        outb(0x7f, tmport);
-       tmport = ioportu[host] + 0x11;
+       tmport = dev->ioport + 0x11;
        outb(0x20, tmport);
 
-       if ((scam_on[host] & 0x40) == 0) {
+       if ((dev->scam_on & 0x40) == 0) {
                return;
        }
        m = 1;
-       m <<= host_idu[host];
+       m <<= dev->host_idu;
        j = 16;
-       if (chip_veru[host] < 4) {
+       if (dev->chip_veru < 4) {
                m |= 0xff00;
                j = 8;
        }
        assignid_map = m;
-       tmport = ioportu[host] + 0x02;
+       tmport = dev->ioport + 0x02;
        outb(0x02, tmport++);   /* 2*2=4ms,3EH 2/32*3E=3.9ms */
        outb(0, tmport++);
        outb(0, tmport++);
@@ -746,7 +868,7 @@ void tscam(unsigned char host)
                if ((m & assignid_map) != 0) {
                        continue;
                }
-               tmport = ioportu[host] + 0x0f;
+               tmport = dev->ioport + 0x0f;
                outb(0, tmport++);
                tmport += 0x02;
                outb(0, tmport++);
@@ -758,14 +880,14 @@ void tscam(unsigned char host)
                        k = i;
                }
                outb(k, tmport++);
-               tmport = ioportu[host] + 0x1b;
-               if (chip_veru[host] == 4) {
+               tmport = dev->ioport + 0x1b;
+               if (dev->chip_veru == 4) {
                        outb((unsigned char) ((inb(tmport) & 0x0e) | 0x01), tmport);
                } else {
                        outb((unsigned char) (inb(tmport) & 0x0e), tmport);
                }
 wait_rdyok:
-               tmport = ioportu[host] + 0x18;
+               tmport = dev->ioport + 0x18;
                outb(0x09, tmport);
                tmport += 0x07;
 
@@ -776,22 +898,22 @@ wait_rdyok:
                        if ((k == 0x85) || (k == 0x42)) {
                                continue;
                        }
-                       tmport = ioportu[host] + 0x10;
+                       tmport = dev->ioport + 0x10;
                        outb(0x41, tmport);
                        goto wait_rdyok;
                }
                assignid_map |= m;
 
        }
-       tmport = ioportu[host] + 0x02;
+       tmport = dev->ioport + 0x02;
        outb(0x7f, tmport);
-       tmport = ioportu[host] + 0x1b;
+       tmport = dev->ioport + 0x1b;
        outb(0x02, tmport);
 
        outb(0, 0x80);
 
        val = 0x0080;           /* bsy  */
-       tmport = ioportu[host] + 0x1c;
+       tmport = dev->ioport + 0x1c;
        outw(val, tmport);
        val |= 0x0040;          /* sel  */
        outw(val, tmport);
@@ -836,7 +958,7 @@ TCM_SYNC:
        if ((inb(tmport) & 0x80) == 0x00) {     /* bsy ? */
                outw(0, tmport--);
                outb(0, tmport);
-               tmport = ioportu[host] + 0x15;
+               tmport = dev->ioport + 0x15;
                outb(0, tmport);
                tmport += 0x03;
                outb(0x09, tmport);
@@ -848,11 +970,11 @@ TCM_SYNC:
        }
        val &= 0x00ff;          /* synchronization  */
        val |= 0x3f00;
-       fun_scam(host, &val);
+       fun_scam(dev, &val);
        outb(3, 0x80);
        val &= 0x00ff;          /* isolation        */
        val |= 0x2000;
-       fun_scam(host, &val);
+       fun_scam(dev, &val);
        outb(4, 0x80);
        i = 8;
        j = 0;
@@ -863,7 +985,7 @@ TCM_ID:
        outb(5, 0x80);
        val &= 0x00ff;          /* get ID_STRING */
        val |= 0x2000;
-       k = fun_scam(host, &val);
+       k = fun_scam(dev, &val);
        if ((k & 0x03) == 0) {
                goto TCM_5;
        }
@@ -927,11 +1049,11 @@ G2Q_QUIN:                /* k=binID#,       */
        val &= 0x00ff;          /* AssignID 1stQuintet,AH=001xxxxx  */
        m = quintet[0] << 8;
        val |= m;
-       fun_scam(host, &val);
+       fun_scam(dev, &val);
        val &= 0x00ff;          /* AssignID 2ndQuintet,AH=001xxxxx */
        m = quintet[1] << 8;
        val |= m;
-       fun_scam(host, &val);
+       fun_scam(dev, &val);
 
        goto TCM_SYNC;
 
@@ -949,25 +1071,26 @@ void is870(unsigned long host, unsigned int wkport)
        static unsigned char synu[6] =  {0x80, 1, 3, 1, 0x0c, 0x0e};
        static unsigned char synw[6] =  {0x80, 1, 3, 1, 0x0c, 0x07};
        static unsigned char wide[6] =  {0x80, 1, 2, 3, 1, 0};
+       struct atp_unit *dev = &atp_unit[host];
 
        sync_idu = 0;
        tmport = wkport + 0x3a;
        outb((unsigned char) (inb(tmport) | 0x10), tmport);
 
        for (i = 0; i < 16; i++) {
-               if ((chip_veru[host] != 4) && (i > 7)) {
+               if ((dev->chip_veru != 4) && (i > 7)) {
                        break;
                }
                m = 1;
                m = m << i;
-               if ((m & active_idu[host]) != 0) {
+               if ((m & dev->active_idu) != 0) {
                        continue;
                }
-               if (i == host_idu[host]) {
-                       printk("         ID: %2d  Host Adapter\n", host_idu[host]);
+               if (i == dev->host_idu) {
+                       printk(KERN_INFO "         ID: %2d  Host Adapter\n", dev->host_idu);
                        continue;
                }
-               if (chip_veru[host] == 4) {
+               if (dev->chip_veru == 4) {
                        tmport = wkport + 0x1b;
                        j = (inb(tmport) & 0x0e) | 0x01;
                        outb(j, tmport);
@@ -984,7 +1107,7 @@ void is870(unsigned long host, unsigned int wkport)
                tmport += 0x06;
                outb(0, tmport);
                tmport += 0x02;
-               outb(devspu[host][i], tmport++);
+               outb(dev->id[i].devspu, tmport++);
                outb(0, tmport++);
                outb(satn[6], tmport++);
                outb(satn[7], tmport++);
@@ -1003,7 +1126,7 @@ void is870(unsigned long host, unsigned int wkport)
                        continue;
                }
                while (inb(tmport) != 0x8e);
-               active_idu[host] |= m;
+               dev->active_idu |= m;
 
                tmport = wkport + 0x10;
                outb(0x30, tmport);
@@ -1033,7 +1156,7 @@ sel_ok:
                tmport += 0x07;
                outb(0, tmport);
                tmport += 0x02;
-               outb(devspu[host][i], tmport++);
+               outb(dev->id[i].devspu, tmport++);
                outb(0, tmport++);
                outb(inqd[6], tmport++);
                outb(inqd[7], tmport++);
@@ -1046,7 +1169,7 @@ sel_ok:
                        continue;
                }
                while (inb(tmport) != 0x8e);
-               if (chip_veru[host] == 4) {
+               if (dev->chip_veru == 4) {
                        tmport = wkport + 0x1b;
                        j = inb(tmport) & 0x0e;
                        outb(j, tmport);
@@ -1087,16 +1210,16 @@ rd_inq_data:
                }
 inq_ok:
                mbuf[36] = 0;
-               printk("         ID: %2d  %s\n", i, &mbuf[8]);
-               devtypeu[host][i] = mbuf[0];
+               printk(KERN_INFO "         ID: %2d  %s\n", i, &mbuf[8]);
+               dev->id[i].devtypeu = mbuf[0];
                rmb = mbuf[1];
-               if (chip_veru[host] != 4) {
+               if (dev->chip_veru != 4) {
                        goto not_wide;
                }
                if ((mbuf[7] & 0x60) == 0) {
                        goto not_wide;
                }
-               if ((global_map[host] & 0x20) == 0) {
+               if ((dev->global_map & 0x20) == 0) {
                        goto not_wide;
                }
                tmport = wkport + 0x1b;
@@ -1112,7 +1235,7 @@ inq_ok:
                tmport += 0x06;
                outb(0, tmport);
                tmport += 0x02;
-               outb(devspu[host][i], tmport++);
+               outb(dev->id[i].devspu, tmport++);
                outb(0, tmport++);
                outb(satn[6], tmport++);
                outb(satn[7], tmport++);
@@ -1238,16 +1361,16 @@ widep_cmd:
                }
                m = 1;
                m = m << i;
-               wide_idu[host] |= m;
+               dev->wide_idu |= m;
 not_wide:
-               if ((devtypeu[host][i] == 0x00) || (devtypeu[host][i] == 0x07)) {
+               if ((dev->id[i].devtypeu == 0x00) || (dev->id[i].devtypeu == 0x07)) {
                        goto set_sync;
                }
                continue;
 set_sync:
                tmport = wkport + 0x1b;
                j = inb(tmport) & 0x0e;
-               if ((m & wide_idu[host]) != 0) {
+               if ((m & dev->wide_idu) != 0) {
                        j |= 0x01;
                }
                outb(j, tmport);
@@ -1261,7 +1384,7 @@ set_sync:
                tmport += 0x06;
                outb(0, tmport);
                tmport += 0x02;
-               outb(devspu[host][i], tmport++);
+               outb(dev->id[i].devspu, tmport++);
                outb(0, tmport++);
                outb(satn[6], tmport++);
                outb(satn[7], tmport++);
@@ -1289,10 +1412,10 @@ try_sync:
                                if (rmb != 0) {
                                        outb(synn[j++], tmport);
                                } else {
-                                       if ((m & wide_idu[host]) != 0) {
+                                       if ((m & dev->wide_idu) != 0) {
                                                outb(synw[j++], tmport);
                                        } else {
-                                               if ((m & ultra_map[host]) != 0) {
+                                               if ((m & dev->ultra_map) != 0) {
                                                        outb(synu[j++], tmport);
                                                } else {
                                                        outb(synn[j++], tmport);
@@ -1407,7 +1530,7 @@ tar_dcons:
                if (mbuf[4] > 0x0c) {
                        mbuf[4] = 0x0c;
                }
-               devspu[host][i] = mbuf[4];
+               dev->id[i].devspu = mbuf[4];
                if ((mbuf[3] < 0x0d) && (rmb == 0)) {
                        j = 0xa0;
                        goto set_syn_ok;
@@ -1426,7 +1549,7 @@ tar_dcons:
                }
                j = 0x60;
              set_syn_ok:
-               devspu[host][i] = (devspu[host][i] & 0x0f) | j;
+               dev->id[i].devspu = (dev->id[i].devspu & 0x0f) | j;
        }
        tmport = wkport + 0x3a;
        outb((unsigned char) (inb(tmport) & 0xef), tmport);
@@ -1439,124 +1562,124 @@ int atp870u_detect(Scsi_Host_Template * tpnt)
        unsigned long flags;
        unsigned int base_io, error, tmport;
        unsigned short index = 0;
-       unsigned char pci_bus[3], pci_device_fn[3], chip_ver[3], host_id;
+       struct pci_dev *pdev[3];
+       unsigned char chip_ver[3], host_id;
        struct Scsi_Host *shpnt = NULL;
+       int tmpcnt = 0;
        int count = 0;
-       static unsigned short devid[7] =
-       {0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0};
-       static struct pci_dev *pdev = NULL, *acard_pdev[3];
+       int result;
+       
+       static unsigned short devid[7] = {
+               0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0
+       };
 
-       printk("aec671x_detect: \n");
+       printk(KERN_INFO "aec671x_detect: \n");
        if (!pci_present()) {
-               printk("   NO BIOS32 SUPPORT.\n");
+               printk(KERN_INFO"   NO PCI SUPPORT.\n");
                return count;
        }
        tpnt->proc_name = "atp870u";
 
        for (h = 0; h < 2; h++) {
-               active_idu[h] = 0;
-               wide_idu[h] = 0;
-               host_idu[h] = 0x07;
-               quhdu[h] = 0;
-               quendu[h] = 0;
-               pci_bus[h] = 0;
-               pci_device_fn[h] = 0xff;
-               chip_ver[h] = 0;
-               last_cmd[h] = 0xff;
-               in_snd[h] = 0;
-               in_int[h] = 0;
+               struct atp_unit *dev = &atp_unit[h];
+               for(k=0;k<16;k++)
+               {
+                       dev->id[k].prd_tableu = kmalloc(1024, GFP_KERNEL);
+                       dev->id[k].devspu=0x20;
+                       dev->id[k].devtypeu = 0;
+                       dev->id[k].curr_req = NULL;
+               }
+               dev->active_idu = 0;
+               dev->wide_idu = 0;
+               dev->host_idu = 0x07;
+               dev->quhdu = 0;
+               dev->quendu = 0;
+               pdev[h]=NULL;
+               pdev[2]=NULL;
+               dev->chip_veru = 0;
+               dev->last_cmd = 0xff;
+               dev->in_snd = 0;
+               dev->in_int = 0;
                for (k = 0; k < qcnt; k++) {
-                       querequ[h][k] = 0;
+                       dev->querequ[k] = 0;
                }
                for (k = 0; k < 16; k++) {
-                       curr_req[h][k] = 0;
+                       dev->id[k].curr_req = 0;
                }
        }
        h = 0;
        while (devid[h] != 0) {
-               pdev = pci_find_device(0x1191, devid[h], pdev);
-               if (pdev == NULL) {
+               pdev[2] = pci_find_device(0x1191, devid[h], pdev[2]);
+               if (pdev[2] == NULL) {
                        h++;
                        index = 0;
                        continue;
                }
                chip_ver[2] = 0;
 
-               /* To avoid messing with the things below...  */
-               acard_pdev[2] = pdev;
-               pci_device_fn[2] = pdev->devfn;
-               pci_bus[2] = pdev->bus->number;
-
                if (devid[h] == 0x8002) {
-                       error = pci_read_config_byte(pdev, 0x08, &chip_ver[2]);
+                       error = pci_read_config_byte(pdev[2], 0x08, &chip_ver[2]);
                        if (chip_ver[2] < 2) {
                                goto nxt_devfn;
                        }
                }
-               if (devid[h] == 0x8010) {
+               if (devid[h] == 0x8010 || devid[h] == 0x8050) {
                        chip_ver[2] = 0x04;
                }
-               if (pci_device_fn[2] < pci_device_fn[0]) {
-                       acard_pdev[1] = acard_pdev[0];
-                       pci_bus[1] = pci_bus[0];
-                       pci_device_fn[1] = pci_device_fn[0];
-                       chip_ver[1] = chip_ver[0];
-                       acard_pdev[0] = acard_pdev[2];
-                       pci_bus[0] = pci_bus[2];
-                       pci_device_fn[0] = pci_device_fn[2];
-                       chip_ver[0] = chip_ver[2];
-               } else if (pci_device_fn[2] < pci_device_fn[1]) {
-                       acard_pdev[1] = acard_pdev[2];
-                       pci_bus[1] = pci_bus[2];
-                       pci_device_fn[1] = pci_device_fn[2];
-                       chip_ver[1] = chip_ver[2];
-               }
+               pdev[tmpcnt] = pdev[2];
+               chip_ver[tmpcnt] = chip_ver[2];
+               tmpcnt++;
              nxt_devfn:
                index++;
                if (index > 3) {
                        index = 0;
                        h++;
                }
+               if(tmpcnt>1)
+                       break;
        }
        for (h = 0; h < 2; h++) {
-               if (pci_device_fn[h] == 0xff) {
+               struct atp_unit *dev=&atp_unit[h];
+               if (pdev[h]==NULL) {
                        return count;
                }
-               pdev = acard_pdev[h];
-               pdev->devfn = pci_device_fn[h];
-               pdev->bus->number = pci_bus[h];
 
                /* Found an atp870u/w. */
-               error = pci_read_config_dword(pdev, 0x10, &base_io);
-               error += pci_read_config_byte(pdev, 0x3c, &irq);
-               error += pci_read_config_byte(pdev, 0x49, &host_id);
+               base_io = pdev[h]->resource[0].start;
+               irq = pdev[h]->irq;
+               error = pci_read_config_byte(pdev[h],0x49,&host_id);
 
                base_io &= 0xfffffff8;
-               printk("   ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d    IO:%x, IRQ:%d.\n"
+
+               if (check_region(base_io,0x40) != 0)
+               {  
+                       return 0;
+               }
+               printk(KERN_INFO "   ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d    IO:%x, IRQ:%d.\n"
                       ,h, base_io, irq);
-               ioportu[h] = base_io;
-               pciportu[h] = base_io + 0x20;
+               dev->ioport = base_io;
+               dev->pciport = base_io + 0x20;
                irqnumu[h] = irq;
                host_id &= 0x07;
-               host_idu[h] = host_id;
-               chip_veru[h] = chip_ver[h];
+               dev->host_idu = host_id;
+               dev->chip_veru = chip_ver[h];
 
                tmport = base_io + 0x22;
-               scam_on[h] = inb(tmport);
+               dev->scam_on = inb(tmport);
                tmport += 0x0b;
-               global_map[h] = inb(tmport++);
-               ultra_map[h] = inw(tmport);
-               if (ultra_map[h] == 0) {
-                       scam_on[h] = 0x00;
-                       global_map[h] = 0x20;
-                       ultra_map[h] = 0xffff;
+               dev->global_map = inb(tmport++);
+               dev->ultra_map = inw(tmport);
+               if (dev->ultra_map == 0) {
+                       dev->scam_on = 0x00;
+                       dev->global_map = 0x20;
+                       dev->ultra_map = 0xffff;
                }
                shpnt = scsi_register(tpnt, 4);
 
                save_flags(flags);
                cli();
-               if (request_irq(irq, atp870u_intr_handle, 0, "atp870u", NULL)) {
-                       printk("Unable to allocate IRQ for Acard controller.\n");
+               if (request_irq(irq, atp870u_intr_handle, SA_SHIRQ, "atp870u", dev)) {
+                       printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n");
                        goto unregister;
                }
                tmport = base_io + 0x3a;
@@ -1584,9 +1707,11 @@ int atp870u_detect(Scsi_Host_Template * tpnt)
                is870(h, base_io);
                tmport = base_io + 0x3a;
                outb((inb(tmport) & 0xef), tmport);
+               tmport++;
+               outb((inb(tmport) | 0x20),tmport);
 
                atp_host[h] = shpnt;
-               if (chip_ver[h] == 4) {
+               if (dev->chip_veru == 4) {
                        shpnt->max_id = 16;
                }
                shpnt->this_id = host_id;
@@ -1617,7 +1742,7 @@ int atp870u_abort(Scsi_Cmnd * SCpnt)
 {
        unsigned char h, j;
        unsigned int tmport;
-/*    printk(" atp870u_abort: \n");   */
+       struct atp_unit *dev;
        for (h = 0; h <= admaxu; h++) {
                if (SCpnt->host == atp_host[h]) {
                        goto find_adp;
@@ -1625,20 +1750,23 @@ int atp870u_abort(Scsi_Cmnd * SCpnt)
        }
        panic("Abort host not found !");
 find_adp:
-       printk(" workingu=%x last_cmd=%x ", workingu[h], last_cmd[h]);
-       printk(" quhdu=%x quendu=%x ", quhdu[h], quendu[h]);
-       tmport = ioportu[h];
+       dev=&atp_unit[h];
+       printk(KERN_DEBUG "working=%x last_cmd=%x ", dev->working, dev->last_cmd);
+       printk(" quhdu=%x quendu=%x ", dev->quhdu, dev->quendu);
+       tmport = dev->ioport;
        for (j = 0; j < 0x17; j++) {
                printk(" r%2x=%2x", j, inb(tmport++));
        }
        tmport += 0x05;
        printk(" r1c=%2x", inb(tmport));
        tmport += 0x03;
-       printk(" r1f=%2x in_snd=%2x ", inb(tmport), in_snd[h]);
+       printk(" r1f=%2x in_snd=%2x ", inb(tmport), dev->in_snd);
        tmport++;
        printk(" r20=%2x", inb(tmport));
        tmport += 0x02;
-       printk(" r22=%2x \n", inb(tmport));
+       printk(" r22=%2x", inb(tmport));
+       tmport += 0x18;
+       printk(" r3a=%2x \n",inb(tmport));
        return (SCSI_ABORT_SNOOZE);
 }
 
@@ -1648,7 +1776,6 @@ int atp870u_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
        /*
         * See if a bus reset was suggested.
         */
-/*     printk("atp870u_reset: \n");    */
        for (h = 0; h <= admaxu; h++) {
                if (SCpnt->host == atp_host[h]) {
                        goto find_host;
@@ -1658,9 +1785,9 @@ int atp870u_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
 find_host:
 /*      SCpnt->result = 0x00080000;
        SCpnt->scsi_done(SCpnt);
-       workingu[h]=0;
-       quhdu[h]=0;
-       quendu[h]=0;
+       dev->working=0;
+       dev->quhdu=0;
+       dev->quendu=0;
        return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET);  */
        return (SCSI_RESET_SNOOZE);
 }
@@ -1669,14 +1796,14 @@ const char *atp870u_info(struct Scsi_Host *notused)
 {
        static char buffer[128];
 
-       strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V1.0 ");
+       strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V2.0+ac ");
 
        return buffer;
 }
 
 int atp870u_set_info(char *buffer, int length, struct Scsi_Host *HBAptr)
 {
-       return (-ENOSYS);       /* Currently this is a no-op */
+       return -ENOSYS;         /* Currently this is a no-op */
 }
 
 #define BLS buffer + len + size
@@ -1714,7 +1841,7 @@ int atp870u_proc_info(char *buffer, char **start, off_t offset, int length,
        if (offset == 0) {
                memset(buff, 0, sizeof(buff));
        }
-       size += sprintf(BLS, "ACARD AEC-671X Driver Version: 1.0\n");
+       size += sprintf(BLS, "ACARD AEC-671X Driver Version: 2.0+ac\n");
        len += size;
        pos = begin + len;
        size = 0;
@@ -1727,7 +1854,7 @@ int atp870u_proc_info(char *buffer, char **start, off_t offset, int length,
        pos = begin + len;
        size = 0;
 
-      stop_output:
+stop_output:
        *start = buffer + (offset - begin);     /* Start of wanted data */
        len -= (offset - begin);        /* Start slop */
        if (len > length) {
@@ -1758,6 +1885,26 @@ int atp870u_biosparam(Scsi_Disk * disk, kdev_t dev, int *ip)
        return 0;
 }
 
+
+int atp870u_release (struct Scsi_Host *pshost)
+{
+       int h;
+       for (h = 0; h <= admaxu; h++) 
+       {
+               if (pshost == atp_host[h]) {
+                       int k;
+                       free_irq (pshost->irq, &atp_unit[h]);
+                       release_region (pshost->io_port, pshost->n_io_port);
+                       scsi_unregister(pshost);
+                       for(k=0;k<16;k++)
+                               kfree(atp_unit[h].id[k].prd_tableu);
+                       return 0;
+               }
+       }
+       panic("atp870u: bad scsi host passed.\n");
+               
+}
+
 #ifdef MODULE
 Scsi_Host_Template driver_template = ATP870U;
 
index 13639945a2a969ab09bb774e5f6d1ea019f66f8a..39644040a5688673847493ebb9ffda31891d1b15 100644 (file)
@@ -24,10 +24,11 @@ int atp870u_queuecommand(Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
 int atp870u_abort(Scsi_Cmnd *);
 int atp870u_reset(Scsi_Cmnd *, unsigned int);
 int atp870u_biosparam(Disk *, kdev_t, int *);
+int atp870u_release(struct Scsi_Host *);
 void send_s870(unsigned char);
 
-#define qcnt            32
-#define ATP870U_SCATTER 127
+#define qcnt           32
+#define ATP870U_SCATTER 128
 #define ATP870U_CMDLUN 1
 
 #ifndef NULL
@@ -38,31 +39,33 @@ extern const char *atp870u_info(struct Scsi_Host *);
 
 extern int atp870u_proc_info(char *, char **, off_t, int, int, int);
 
-#define ATP870U {                                              \
-       proc_name:                      "atp870u",              \
-       proc_info:                      atp870u_proc_info,      \
-       name:                           NULL,                   \
-       detect:                         atp870u_detect,         \
-       release:                        NULL,                   \
-       info:                           atp870u_info,           \
-       command:                        atp870u_command,        \
-        queuecommand:                  atp870u_queuecommand,   \
-        eh_strategy_handler:           NULL,                   \
-        eh_abort_handler:              NULL,                   \
-        eh_device_reset_handler:       NULL,                   \
-        eh_bus_reset_handler:          NULL,                   \
-        eh_host_reset_handler:         NULL,                   \
-       abort:                          atp870u_abort,          \
-       reset:                          atp870u_reset,          \
-       slave_attach:                   NULL,                   \
-       bios_param:                     atp870u_biosparam,      \
-       can_queue:                      qcnt,                   \
-       this_id:                        1,                      \
-       sg_tablesize:                   ATP870U_SCATTER,        \
-       cmd_per_lun:                    ATP870U_CMDLUN,         \
-       present:                        0,                      \
-       unchecked_isa_dma:              0,                      \
-       use_clustering:                 ENABLE_CLUSTERING,      \
-       use_new_eh_code:                0                       \
+#define ATP870U {                                              \
+       next: NULL,                                             \
+       module: NULL,                                           \
+       proc_info: atp870u_proc_info,                           \
+       name: NULL,                                             \
+       detect: atp870u_detect,                                 \
+       release: atp870u_release,                               \
+       info: atp870u_info,                                     \
+       command: atp870u_command,                               \
+       queuecommand: atp870u_queuecommand,                     \
+       eh_strategy_handler: NULL,                              \
+       eh_abort_handler: NULL,                                 \
+       eh_device_reset_handler: NULL,                          \
+       eh_bus_reset_handler: NULL,                             \
+       eh_host_reset_handler: NULL,                            \
+       abort: atp870u_abort,                                   \
+       reset: atp870u_reset,                                   \
+       slave_attach: NULL,                                     \
+       bios_param: atp870u_biosparam,                          \
+       can_queue: qcnt,         /* max simultaneous cmds      */\
+       this_id: 7,            /* scsi id of host adapter    */\
+       sg_tablesize: ATP870U_SCATTER,  /* max scatter-gather cmds    */\
+       cmd_per_lun: ATP870U_CMDLUN,    /* cmds per lun (linked cmds) */\
+       present: 0,             /* number of 7xxx's present   */\
+       unchecked_isa_dma: 0,   /* no memory DMA restrictions */\
+       use_clustering: ENABLE_CLUSTERING,                      \
+       use_new_eh_code: 0                                      \
 }
+
 #endif
index 4abd2241186a6ce765fb9ea3ec8dcb91f14ca63c..be0f0a5f2f6bbe72b1a568e82b7ba5fbb1af4c49 100644 (file)
@@ -1049,7 +1049,7 @@ static inline int port_detect \
       sh[j]->unchecked_isa_dma = FALSE;
    else {
       unsigned long flags;
-      sh[j]->wish_block = TRUE;
+//FIXME//      sh[j]->wish_block = TRUE;
       sh[j]->unchecked_isa_dma = TRUE;
       
       flags=claim_dma_lock();
index 2ca322c0e6889305e70b1866f65970f2dd8743ab..4e45fc849d9d84334d400ab20612e141ba8fb33f 100644 (file)
@@ -1297,7 +1297,7 @@ short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt,
     else
        hd->primary = TRUE;
     
-    sh->wish_block = FALSE;       
+//FIXME//    sh->wish_block = FALSE;      
     
     if (hd->bustype != IS_ISA) {
        sh->unchecked_isa_dma = FALSE;
index 6b427ddbc14c6ccef6691464b30f0635acbb8203..5a13b0e483ed2b3dc849ef31f1cd17884c6961d6 100644 (file)
@@ -16,6 +16,9 @@
  * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg
  * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl
  *
+ * Added ISAPNP support for DTC436 adapters,
+ * Thomas Sailer, sailer@ife.ee.ethz.ch
+ *
  * ALPHA RELEASE 1. 
  *
  * For more information, please consult 
 #include "sd.h"
 #include <linux/stat.h>
 #include <linux/init.h>
-#include<linux/ioport.h>
+#include <linux/ioport.h>
+#include <linux/isapnp.h>
 
 #define NCR_NOT_SET 0
 static int ncr_irq=NCR_NOT_SET;
@@ -280,6 +284,36 @@ int __init generic_NCR5380_detect(Scsi_Host_Template * tpnt){
     else if (dtc_3181e != NCR_NOT_SET)
         overrides[0].board=BOARD_DTC3181E;
 
+    if (!current_override && isapnp_present()) {
+           struct pci_dev *dev = NULL;
+           count = 0;
+           while ((dev = isapnp_find_dev(NULL, ISAPNP_VENDOR('D','T','C'), ISAPNP_FUNCTION(0x436e), dev))) {
+                   if (count >= NO_OVERRIDES)
+                           break;
+                   if (!dev->active && dev->prepare(dev) < 0) {
+                           printk(KERN_ERR "dtc436e probe: prepare failed\n");
+                           continue;
+                   }
+                   if (!(dev->resource[0].flags & IORESOURCE_IO))
+                           continue;
+                   if (!dev->active && dev->activate(dev) < 0) {
+                           printk(KERN_ERR "dtc436e probe: activate failed\n");
+                           continue;
+                   }
+                   if (dev->irq_resource[0].flags & IORESOURCE_IRQ)
+                           overrides[count].irq=dev->irq_resource[0].start;
+                   else
+                           overrides[count].irq=IRQ_NONE;
+                   if (dev->dma_resource[0].flags & IORESOURCE_DMA)
+                           overrides[count].dma=dev->dma_resource[0].start;
+                   else
+                           overrides[count].dma=DMA_NONE;
+                   overrides[count].NCR5380_map_name=(NCR5380_map_type)dev->resource[0].start;
+                   overrides[count].board=BOARD_DTC3181E;
+                   count++;
+           }
+    }
+
     tpnt->proc_name = "g_NCR5380";
 
     for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
index e8042a888f94600b42116f3ede1505d90233850b..6d0112abab73030830130795e81512471677874e 100644 (file)
@@ -934,10 +934,8 @@ static void gdth_do_cmd(Scsi_Cmnd *scp,gdth_cmd_str *gdtcmd,int timeout)
     scp->request.rq_status = RQ_SCSI_BUSY;
     scp->request.sem = &sem;
     scp->SCp.this_residual = IOCTL_PRI;
-    GDTH_LOCK_SCSI_DOCMD();
     scsi_do_cmd(scp, cmnd, gdtcmd, sizeof(gdth_cmd_str), 
                 gdth_scsi_done, timeout*HZ, 1);
-    GDTH_UNLOCK_SCSI_DOCMD();
     down(&sem);
 }
 
index 59a6bee98f9eb1dd75be81ddff8782ee27d2d9d0..5c9fdd941473b8ef2d5ccce29a85721ab693a1b7 100644 (file)
@@ -696,8 +696,6 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
     atomic_set(&retval->host_active,0);
     retval->host_busy = 0;
     retval->host_failed = 0;
-    retval->block = NULL;
-    retval->wish_block = 0;
     if(j > 0xffff) panic("Too many extra bytes requested\n");
     retval->extra_bytes = j;
     retval->loaded_as_module = scsi_loadable_module_flag;
@@ -723,11 +721,8 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
     retval->ehandler = NULL;    /* Initial value until the thing starts up. */
     retval->eh_notify   = NULL;    /* Who we notify when we exit. */
 
-    /*
-     * Initialize the fields used for mid-level queueing.
-     */
-    retval->pending_commands = NULL;
-    retval->host_busy = FALSE;
+
+    retval->host_blocked = FALSE;
 
 #ifdef DEBUG
     printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j);
@@ -783,6 +778,7 @@ static void launch_error_handler_thread(struct Scsi_Host * shpnt)
 
             kernel_thread((int (*)(void *))scsi_error_handler, 
                           (void *) shpnt, 0);
+
             /*
              * Now wait for the kernel error thread to initialize itself
              * as it might be needed when we scan the bus.
@@ -873,7 +869,6 @@ unsigned int __init scsi_init(void)
     printk ("scsi : %d host%s.\n", next_scsi_host,
            (next_scsi_host == 1) ? "" : "s");
     
-    scsi_make_blocked_list();
     
     /* Now attach the high level drivers */
 #ifdef CONFIG_BLK_DEV_SD
index fa6ac5f6c53aad8acf2f4baab40e8b39cf846a62..18a9bf45c87e182f20f75660d8ccbd6f72503b6a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  hosts.h Copyright (C) 1992 Drew Eckhardt
- *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *          Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
  *
  *  mid to low-level SCSI driver interface header
  *      Initial versions: Drew Eckhardt
@@ -8,7 +8,7 @@
  *
  *  <drew@colorado.edu>
  *
- *      Modified by Eric Youngdale eric@aib.com to
+ *      Modified by Eric Youngdale eric@andante.org to
  *      add scatter-gather, multiple outstanding request, and other
  *      enhancements.
  *
@@ -301,13 +301,7 @@ struct Scsi_Host
      */
     struct Scsi_Host      * next;
     Scsi_Device           * host_queue;
-    /*
-     * List of commands that have been rejected because either the host
-     * or the device was busy.  These need to be retried relatively quickly,
-     * but we need to hold onto it for a short period until the host/device
-     * is available.
-     */
-    Scsi_Cmnd             * pending_commands;
+
 
     struct task_struct    * ehandler;  /* Error recovery thread. */
     struct semaphore      * eh_wait;   /* The error recovery thread waits on
@@ -340,13 +334,6 @@ struct Scsi_Host
     unsigned int max_lun;
     unsigned int max_channel;
 
-    /*
-     * Pointer to a circularly linked list - this indicates the hosts
-     * that should be locked out of performing I/O while we have an active
-     * command on this host.
-     */
-    struct Scsi_Host * block;
-    unsigned wish_block:1;
 
     /* These parameters should be set by the detect routine */
     unsigned long base;
@@ -391,9 +378,14 @@ struct Scsi_Host
      * Host uses correct SCSI ordering not PC ordering. The bit is
      * set for the minority of drivers whose authors actually read the spec ;)
      */
-
     unsigned reverse_ordering:1;
-    
+
+    /*
+     * Indicates that one or more devices on this host were starved, and
+     * when the device becomes less busy that we need to feed them.
+     */
+    unsigned some_device_starved:1;
+   
     void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *);
 
     /*
@@ -412,7 +404,6 @@ extern Scsi_Host_Template * scsi_hosts;
 
 extern void build_proc_dir_entries(Scsi_Host_Template  *);
 
-
 /*
  *  scsi_init initializes the scsi hosts.
  */
@@ -456,6 +447,7 @@ struct Scsi_Device_Template
     void (*finish)(void);        /* Perform initialization after attachment */
     int (*attach)(Scsi_Device *); /* Attach devices to arrays */
     void (*detach)(Scsi_Device *);
+    int (*init_command)(Scsi_Cmnd *);     /* Used by new queueing code. */
 };
 
 extern struct Scsi_Device_Template sd_template;
index 1deec84e4ed2902b8c466151474f7156bdb5fe51..393cf909e4099ba90d8f7456ce4803d4a96ad08a 100644 (file)
@@ -747,7 +747,7 @@ static void internal_done (Scsi_Cmnd * cmd)
 static int device_inquiry(int host_index, int ldn)
 {
    int retries;
-   Scsi_Cmnd cmd;
+   Scsi_Cmnd *cmd;
    struct im_scb *scb;
    struct im_tsb *tsb;
    unsigned char *buf;
@@ -757,12 +757,18 @@ static int device_inquiry(int host_index, int ldn)
    buf = (unsigned char *)(&(ld(host_index)[ldn].buf));
    ld(host_index)[ldn].tsb.dev_status = 0; /* prepare stusblock */
 
+   cmd = kmalloc(sizeof(*cmd), GFP_KERNEL|GFP_DMA);
+   if(cmd==NULL)
+   {
+       printk(KERN_ERR "ibmmca: out of memory for inquiry.\n");
+       return 0;
+   }
    if (bypass_controller)
      { /* fill the commonly known field for device-inquiry SCSI cmnd */
-       cmd.cmd_len = 6;
-        memset (&(cmd.cmnd), 0x0, sizeof(char) * cmd.cmd_len);
-       cmd.cmnd[0] = INQUIRY; /* device inquiry */
-       cmd.cmnd[4] = 0xff; /* return buffer size = 255 */
+       cmd->cmd_len = 6;
+        memset (&(cmd->cmnd), 0x0, sizeof(char) * cmd->cmd_len);
+       cmd->cmnd[0] = INQUIRY; /* device inquiry */
+       cmd->cmnd[4] = 0xff; /* return buffer size = 255 */
      }
    for (retries = 0; retries < 3; retries++)
      {
@@ -770,8 +776,8 @@ static int device_inquiry(int host_index, int ldn)
          { /* bypass the hardware integrated command set */
             scb->command = IM_OTHER_SCSI_CMD_CMD;
             scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT;
-            scb->u1.scsi_cmd_length = cmd.cmd_len;
-            memcpy (scb->u2.scsi_command, &(cmd.cmnd), cmd.cmd_len);
+            scb->u1.scsi_cmd_length = cmd->cmd_len;
+            memcpy (scb->u2.scsi_command, &(cmd->cmnd), cmd->cmd_len);
             last_scsi_command(host_index)[ldn] = INQUIRY;
             last_scsi_type(host_index)[ldn] = IM_SCB;
          }
@@ -800,6 +806,7 @@ static int device_inquiry(int host_index, int ldn)
             return 1;
          }
      }
+   kfree(cmd);
 
    /*if all three retries failed, return "no device at this ldn" */
    if (retries >= 3)
@@ -1406,7 +1413,7 @@ static int ibmmca_getinfo (char *buf, int slot, void *dev)
      }
    else if (special == INTEGRATED_SCSI)
      { /* if the integrated subsystem has been found automatically: */
-       len += sprintf (buf + len, "Adapter cathegory: integrated\n");
+       len += sprintf (buf + len, "Adapter category: integrated\n");
        len += sprintf (buf + len, "Chip revision level: %d\n",
                        ((pos2 & 0xf0) >> 4));
        len += sprintf (buf + len, "Chip status: %s\n",
@@ -1417,7 +1424,7 @@ static int ibmmca_getinfo (char *buf, int slot, void *dev)
    else if ((special>=0)&&
           (special<(sizeof(subsys_list)/sizeof(struct subsys_list_struct))))
      { /* if the subsystem is a slot adapter */
-       len += sprintf (buf + len, "Adapter cathegory: slot-card\n");
+       len += sprintf (buf + len, "Adapter category: slot-card\n");
        len += sprintf (buf + len, "Chip revision level: %d\n",
                        ((pos2 & 0xf0) >> 4));
        len += sprintf (buf + len, "Chip status: %s\n",
@@ -1427,14 +1434,14 @@ static int ibmmca_getinfo (char *buf, int slot, void *dev)
      }
    else
      {
-       len += sprintf (buf + len, "Adapter cathegory: unknown\n");
+       len += sprintf (buf + len, "Adapter category: unknown\n");
      }
    /* common subsystem information to write to the slotn file */
    len += sprintf (buf + len, "Subsystem PUN: %d\n", shpnt->this_id);
    len += sprintf (buf + len, "I/O base address range: 0x%x-0x%x",
                   (unsigned int)(shpnt->io_port), 
                   (unsigned int)(shpnt->io_port+7));
-   /* Now make sure, the bufferlength is devideable by 4 to avoid
+   /* Now make sure, the bufferlength is divisible by 4 to avoid
     * paging problems of the buffer. */
    while ( len % sizeof( int ) != ( sizeof ( int ) - 1 ) )
      {
@@ -1483,7 +1490,7 @@ int ibmmca_detect (Scsi_Host_Template * scsi_template)
             ((struct ibmmca_hostdata *)shpnt->hostdata)->_pos3 = 0;
             ((struct ibmmca_hostdata *)shpnt->hostdata)->_special =
               FORCED_DETECTION;
-            mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter");
+            mca_set_adapter_name(MCA_INTEGSCSI, "forcibly detected SCSI Adapter");
             mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo,
                                    shpnt);
             mca_mark_as_used(MCA_INTEGSCSI);
index decbb8af72dcec5021311c96f482771f4d7cb18d..7dfc22fc1fe491be9173808ae78d85fec02d2478 100644 (file)
@@ -351,6 +351,7 @@ int inia100_detect(Scsi_Host_Template * tpnt)
                pHCB->pSRB_head = NULL;         /* Initial SRB save queue       */
                pHCB->pSRB_tail = NULL;         /* Initial SRB save queue       */
                pHCB->pSRB_lock = SPIN_LOCK_UNLOCKED; /* SRB save queue lock */
+               pHCB->BitAllocFlagLock = SPIN_LOCK_UNLOCKED;
                /* Get total memory needed for SCB */
                sz = orc_num_scb * sizeof(ORC_SCB);
                if ((pHCB->HCS_virScbArray = (PVOID) kmalloc(sz, GFP_ATOMIC | GFP_DMA)) == NULL) {
index 7c0b9e6b9651945991fca9bc27b16f201475222e..f9fa6074245203903f558a0c6de41674c0bf3226 100644 (file)
@@ -396,7 +396,7 @@ ips_detect(Scsi_Host_Template *SHT) {
       sh->cmd_per_lun = sh->hostt->cmd_per_lun;
       sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
       sh->use_clustering = sh->hostt->use_clustering;
-      sh->wish_block = FALSE;
+//FIXME//      sh->wish_block = FALSE;
 
       /* Store info in HA structure */
       ha->io_addr = io_addr;
index e5ca5c740fc10403a8fb11d6c60d25e6c6ddc125..209d88091715ad3aec87953990af76ab930196f9 100644 (file)
@@ -287,9 +287,9 @@ static mega_scb *pLastScb = NULL;
 static Scsi_Cmnd *qCompleted = NULL;
 
 #if SERDEBUG
-volatile static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
 #endif
-volatile static spinlock_t mega_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t mega_lock = SPIN_LOCK_UNLOCKED;
 
 #if SERDEBUG
 static char strbuf[MAX_SERBUF + 1];
index e556c07233c0262482a3e2d179d5afe1c2c6fbde..e70b2d75e5e6e7710e3f477743b1bde70e097de1 100644 (file)
@@ -132,7 +132,8 @@ typedef unsigned int FreeSectorBitmap;
 unsigned long scsi_pid = 0;
 Scsi_Cmnd *last_cmnd = NULL;
 /* Command groups 3 and 4 are reserved and should never be used.  */
-const unsigned char scsi_command_size[8] = {
+const unsigned char scsi_command_size[8] =
+{
        6, 10, 10, 12,
        12, 12, 10, 10
 };
@@ -278,9 +279,9 @@ static struct dev_info device_list[] =
        {"REGAL", "CDC-4X", "*", BLIST_MAX5LUN | BLIST_SINGLELUN},
        {"NAKAMICH", "MJ-4.8S", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
        {"NAKAMICH", "MJ-5.16S", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
-       {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
-       {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
-       {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
+    {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
+   {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
+   {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
        {"EMULEX", "MD21/S2     ESDI", "*", BLIST_SINGLELUN},
        {"CANON", "IPUBJD", "*", BLIST_SPARSELUN},
        {"nCipher", "Fastness Crypto", "*", BLIST_FORCELUN},
@@ -322,87 +323,6 @@ static int get_device_flags(unsigned char *response_data)
        return 0;
 }
 
-/*
- * Function:    scsi_make_blocked_list
- *
- * Purpose:     Build linked list of hosts that require blocking.
- *
- * Arguments:   None.
- *
- * Returns:     Nothing
- *
- * Notes:       Blocking is sort of a hack that is used to prevent more than one
- *              host adapter from being active at one time.  This is used in cases
- *              where the ISA bus becomes unreliable if you have more than one
- *              host adapter really pumping data through.
- *
- *              We spent a lot of time examining the problem, and I *believe* that
- *              the problem is bus related as opposed to being a driver bug.
- *
- *              The blocked list is used as part of the synchronization object
- *              that we use to ensure that only one host is active at one time.
- *              I (ERY) would like to make this go away someday, but this would
- *              require that we have a recursive mutex object.
- */
-
-void scsi_make_blocked_list(void)
-{
-       int block_count = 0, index;
-       struct Scsi_Host *sh[128], *shpnt;
-
-       /*
-        * Create a circular linked list from the scsi hosts which have
-        * the "wish_block" field in the Scsi_Host structure set.
-        * The blocked list should include all the scsi hosts using ISA DMA.
-        * In some systems, using two dma channels simultaneously causes
-        * unpredictable results.
-        * Among the scsi hosts in the blocked list, only one host at a time
-        * is allowed to have active commands queued. The transition from
-        * one active host to the next one is allowed only when host_busy == 0
-        * for the active host (which implies host_busy == 0 for all the hosts
-        * in the list). Moreover for block devices the transition to a new
-        * active host is allowed only when a request is completed, since a
-        * block device request can be divided into multiple scsi commands
-        * (when there are few sg lists or clustering is disabled).
-        *
-        * (DB, 4 Feb 1995)
-        */
-
-
-       host_active = NULL;
-
-       for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
-
-#if 0
-               /*
-                * Is this is a candidate for the blocked list?
-                * Useful to put into the blocked list all the hosts whose driver
-                * does not know about the host->block feature.
-                */
-               if (shpnt->unchecked_isa_dma)
-                       shpnt->wish_block = 1;
-#endif
-
-               if (shpnt->wish_block)
-                       sh[block_count++] = shpnt;
-       }
-
-       if (block_count == 1)
-               sh[0]->block = NULL;
-
-       else if (block_count > 1) {
-
-               for (index = 0; index < block_count - 1; index++) {
-                       sh[index]->block = sh[index + 1];
-                       printk("scsi%d : added to blocked host list.\n",
-                              sh[index]->host_no);
-               }
-
-               sh[block_count - 1]->block = sh[0];
-               printk("scsi%d : added to blocked host list.\n",
-                      sh[index]->host_no);
-       }
-}
 
 static void scan_scsis_done(Scsi_Cmnd * SCpnt)
 {
@@ -414,10 +334,11 @@ static void scan_scsis_done(Scsi_Cmnd * SCpnt)
                up(SCpnt->request.sem);
 }
 
+#ifdef MODULE
 MODULE_PARM(scsi_logging_level, "i");
 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
 
-#ifndef MODULE
+#else
 
 static int __init scsi_logging_setup(char *str)
 {
@@ -443,10 +364,12 @@ static int max_scsi_luns = 8;
 static int max_scsi_luns = 1;
 #endif
 
+#ifdef MODULE
+
 MODULE_PARM(max_scsi_luns, "i");
 MODULE_PARM_DESC(max_scsi_luns, "last scsi LUN (should be between 1 and 8)");
 
-#ifndef MODULE
+#else
 
 static int __init scsi_luns_setup(char *str)
 {
@@ -474,15 +397,12 @@ void scsi_wait_cmd (Scsi_Cmnd * SCpnt, const void *cmnd ,
                  void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
                  int timeout, int retries)
 {
-       unsigned long flags;
        DECLARE_MUTEX_LOCKED(sem);
        
        SCpnt->request.sem = &sem;
        SCpnt->request.rq_status = RQ_SCSI_BUSY;
-       spin_lock_irqsave(&io_request_lock, flags);
        scsi_do_cmd (SCpnt, (void *) cmnd,
                buffer, bufflen, done, timeout, retries);
-       spin_unlock_irqrestore(&io_request_lock, flags);
        down (&sem);
        SCpnt->request.sem = NULL;
 }
@@ -519,6 +439,16 @@ static void scan_scsis(struct Scsi_Host *shpnt,
                SDpnt = (Scsi_Device *) scsi_init_malloc(sizeof(Scsi_Device),
                                                         GFP_ATOMIC);
                if (SDpnt) {
+                       /*
+                        * Register the queue for the device.  All I/O requests will come
+                        * in through here.  We also need to register a pointer to
+                        * ourselves, since the queue handler won't know what device
+                        * the queue actually represents.   We could look it up, but it
+                        * is pointless work.
+                        */
+                       blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
+                       blk_queue_headactive(&SDpnt->request_queue, 0);
+                       SDpnt->request_queue.queuedata = (void *) SDpnt;
                        /* Make sure we have something that is valid for DMA purposes */
                        scsi_result = ((!shpnt->unchecked_isa_dma)
                                       ? &scsi_result0[0] : scsi_init_malloc(512, GFP_DMA));
@@ -536,6 +466,8 @@ static void scan_scsis(struct Scsi_Host *shpnt,
        SDpnt->host = shpnt;
        SDpnt->online = TRUE;
 
+       initialize_merge_fn(SDpnt);
+
        init_waitqueue_head(&SDpnt->device_wait);
 
        /*
@@ -581,7 +513,6 @@ static void scan_scsis(struct Scsi_Host *shpnt,
                                if (sdtpnt->init && sdtpnt->dev_noticed)
                                        (*sdtpnt->init) ();
 
-                       oldSDpnt->scsi_request_fn = NULL;
                        for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
                                if (sdtpnt->attach) {
                                        (*sdtpnt->attach) (oldSDpnt);
@@ -727,6 +658,7 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun,
        SDpnt->borken = 1;
        SDpnt->was_reset = 0;
        SDpnt->expecting_cc_ua = 0;
+       SDpnt->starved = 0;
 
        scsi_cmd[0] = TEST_UNIT_READY;
        scsi_cmd[1] = lun << 5;
@@ -947,6 +879,19 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun,
                printk("scsi: scan_scsis_single: Cannot malloc\n");
                return 0;
        }
+       /*
+        * Register the queue for the device.  All I/O requests will come
+        * in through here.  We also need to register a pointer to
+        * ourselves, since the queue handler won't know what device
+        * the queue actually represents.   We could look it up, but it
+        * is pointless work.
+        */
+       blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
+       blk_queue_headactive(&SDpnt->request_queue, 0);
+       SDpnt->request_queue.queuedata = (void *) SDpnt;
+       SDpnt->host = shpnt;
+       initialize_merge_fn(SDpnt);
+
        /*
         * And hook up our command block to the new device we will be testing
         * for.
@@ -1044,127 +989,6 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun,
  * of the calling code to ensure that this is the case.
  */
 
-Scsi_Cmnd *scsi_request_queueable(struct request * req, Scsi_Device * device)
-{
-       Scsi_Cmnd *SCpnt = NULL;
-       int tablesize;
-       Scsi_Cmnd *found = NULL;
-       struct buffer_head *bh, *bhp;
-
-       if (!device)
-               panic("No device passed to scsi_request_queueable().\n");
-
-       if (req && req->rq_status == RQ_INACTIVE)
-               panic("Inactive in scsi_request_queueable");
-
-       /*
-        * Look for a free command block.  If we have been instructed not to queue
-        * multiple commands to multi-lun devices, then check to see what else is
-        * going for this device first.
-        */
-
-       if (!device->single_lun) {
-               SCpnt = device->device_queue;
-               while (SCpnt) {
-                       if (SCpnt->request.rq_status == RQ_INACTIVE)
-                               break;
-                       SCpnt = SCpnt->next;
-               }
-       } else {
-               SCpnt = device->device_queue;
-               while (SCpnt) {
-                       if (SCpnt->channel == device->channel
-                           && SCpnt->target == device->id) {
-                               if (SCpnt->lun == device->lun) {
-                                       if (found == NULL
-                                           && SCpnt->request.rq_status == RQ_INACTIVE) {
-                                               found = SCpnt;
-                                       }
-                               }
-                               if (SCpnt->request.rq_status != RQ_INACTIVE) {
-                                       /*
-                                        * I think that we should really limit things to one
-                                        * outstanding command per device - this is what tends
-                                        * to trip up buggy firmware.
-                                        */
-                                       return NULL;
-                               }
-                       }
-                       SCpnt = SCpnt->next;
-               }
-               SCpnt = found;
-       }
-
-       if (!SCpnt)
-               return NULL;
-
-       if (SCSI_BLOCK(device, device->host))
-               return NULL;
-
-       if (req) {
-               memcpy(&SCpnt->request, req, sizeof(struct request));
-               tablesize = device->host->sg_tablesize;
-               bhp = bh = req->bh;
-               if (!tablesize)
-                       bh = NULL;
-               /* Take a quick look through the table to see how big it is.
-                * We already have our copy of req, so we can mess with that
-                * if we want to.
-                */
-               while (req->nr_sectors && bh) {
-                       bhp = bhp->b_reqnext;
-                       if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp))
-                               tablesize--;
-                       req->nr_sectors -= bh->b_size >> 9;
-                       req->sector += bh->b_size >> 9;
-                       if (!tablesize)
-                               break;
-                       bh = bhp;
-               }
-               if (req->nr_sectors && bh && bh->b_reqnext) {   /* Any leftovers? */
-                       SCpnt->request.bhtail = bh;
-                       req->bh = bh->b_reqnext;        /* Divide request */
-                       bh->b_reqnext = NULL;
-                       bh = req->bh;
-
-                       /* Now reset things so that req looks OK */
-                       SCpnt->request.nr_sectors -= req->nr_sectors;
-                       req->current_nr_sectors = bh->b_size >> 9;
-                       req->buffer = bh->b_data;
-                       SCpnt->request.sem = NULL;      /* Wait until whole thing done */
-               } else {
-                       req->rq_status = RQ_INACTIVE;
-                       wake_up(&wait_for_request);
-               }
-       } else {
-               SCpnt->request.rq_status = RQ_SCSI_BUSY;        /* Busy, but no request */
-               SCpnt->request.sem = NULL;      /* And no one is waiting for the device
-                                                * either */
-       }
-
-       atomic_inc(&SCpnt->host->host_active);
-       SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", SCpnt->target,
-                               atomic_read(&SCpnt->host->host_active)));
-       SCpnt->use_sg = 0;      /* Reset the scatter-gather flag */
-       SCpnt->old_use_sg = 0;
-       SCpnt->transfersize = 0;
-       SCpnt->resid = 0;
-       SCpnt->underflow = 0;
-       SCpnt->cmd_len = 0;
-
-       /*
-        *      Since not everyone seems to set the device info correctly
-        *      before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
-        */
-       
-       SCpnt->channel = device->channel;
-       SCpnt->lun = device->lun;
-       SCpnt->target = device->id;
-       SCpnt->state = SCSI_STATE_INITIALIZING;
-       SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
-
-       return SCpnt;
-}
 
 /* This function returns a structure pointer that will be valid for
  * the device.  The wait parameter tells us whether we should wait for
@@ -1176,168 +1000,158 @@ Scsi_Cmnd *scsi_request_queueable(struct request * req, Scsi_Device * device)
  * of the packets for each device
  */
 
-Scsi_Cmnd *scsi_allocate_device(struct request ** reqp, Scsi_Device * device,
-                               int wait)
-{
-       kdev_t dev;
-       struct request *req = NULL;
-       int tablesize;
-       struct buffer_head *bh, *bhp;
-       struct Scsi_Host *host;
-       Scsi_Cmnd *SCpnt = NULL;
-       Scsi_Cmnd *SCwait = NULL;
-       Scsi_Cmnd *found = NULL;
-
-       if (!device)
-               panic("No device passed to scsi_allocate_device().\n");
-
-       if (reqp)
-               req = *reqp;
+/*
+ * This lock protects the freelist for all devices on the system.
+ * We could make this finer grained by having a single lock per
+ * device if it is ever found that there is excessive contention
+ * on this lock.
+ */
+static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
 
-       /*
-        * See if this request has already been queued by an 
-        * interrupt routine 
-        */
-        
-       if (req) {
-               if (req->rq_status == RQ_INACTIVE)
-                       return NULL;
-               dev = req->rq_dev;
-       } else
-               dev = 0;        /* unused */
+/*
+ * Used for access to internal allocator used for DMA safe buffers.
+ */
+static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
 
-       host = device->host;
+/*
+ * Used to protect insertion into and removal from the queue of
+ * commands to be processed by the bottom half handler.
+ */
+static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
 
-       if (in_interrupt() && SCSI_BLOCK(device, host))
-               return NULL;
+/*
+ * Function:    scsi_allocate_device
+ *
+ * Purpose:     Allocate a command descriptor.
+ *
+ * Arguments:   device    - device for which we want a command descriptor
+ *              wait      - 1 if we should wait in the event that none
+ *                          are available.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Pointer to command descriptor.
+ *
+ * Notes:       Prior to the new queue code, this function was not SMP-safe.
+ */
 
+Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait)
+{
+       struct Scsi_Host *host;
+       Scsi_Cmnd *SCpnt = NULL;
+       Scsi_Device *SDpnt;
+       unsigned long flags;
+  
+       if (!device)
+               panic("No device passed to scsi_allocate_device().\n");
+  
+       host = device->host;
+  
+       spin_lock_irqsave(&device_request_lock, flags);
        while (1 == 1) {
-               if (!device->single_lun) {
-                       SCpnt = device->device_queue;
-                       while (SCpnt) {
-                               SCwait = SCpnt;
-                               if (SCpnt->request.rq_status == RQ_INACTIVE)
-                                       break;
-                               SCpnt = SCpnt->next;
-                       }
-               } else {
-                       SCpnt = device->device_queue;
-                       while (SCpnt) {
-                               if (SCpnt->channel == device->channel
-                                   && SCpnt->target == device->id) {
-                                       if (SCpnt->lun == device->lun) {
-                                               SCwait = SCpnt;
-                                               if (found == NULL
-                                                   && SCpnt->request.rq_status == RQ_INACTIVE) {
-                                                       found = SCpnt;
+               SCpnt = NULL;
+               if (!device->device_blocked) {
+                       if (device->single_lun) {
+                               /*
+                                * FIXME(eric) - this is not at all optimal.  Given that
+                                * single lun devices are rare and usually slow
+                                * (i.e. CD changers), this is good enough for now, but
+                                * we may want to come back and optimize this later.
+                                *
+                                * Scan through all of the devices attached to this
+                                * host, and see if any are active or not.  If so,
+                                * we need to defer this command.
+                                *
+                                * We really need a busy counter per device.  This would
+                                * allow us to more easily figure out whether we should
+                                * do anything here or not.
+                                */
+                               for (SDpnt = host->host_queue;
+                                    SDpnt;
+                                    SDpnt = SDpnt->next) {
+                                       /*
+                                        * Only look for other devices on the same bus
+                                        * with the same target ID.
+                                        */
+                                       if (SDpnt->channel != device->channel
+                                           || SDpnt->id != device->id
+                                           || SDpnt == device) {
+                                               continue;
+                                       }
+                                       for (SCpnt = SDpnt->device_queue;
+                                            SCpnt;
+                                            SCpnt = SCpnt->next) {
+                                               if (SCpnt->request.rq_status != RQ_INACTIVE) {
+                                                       break;
                                                }
                                        }
-                                       if (SCpnt->request.rq_status != RQ_INACTIVE) {
-                                               /*
-                                                * I think that we should really limit things to one
-                                                * outstanding command per device - this is what tends
-                                                * to trip up buggy firmware.
-                                                */
-                                               found = NULL;
+                                       if (SCpnt) {
                                                break;
                                        }
                                }
-                               SCpnt = SCpnt->next;
+                               if (SDpnt) {
+                                       /*
+                                        * Some other device in this cluster is busy.
+                                        * If asked to wait, we need to wait, otherwise
+                                        * return NULL.
+                                        */
+                                       SCpnt = NULL;
+                                       break;
+                               }
+                       }
+                       /*
+                        * Now we can check for a free command block for this device.
+                        */
+                       for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
+                               if (SCpnt->request.rq_status == RQ_INACTIVE)
+                                       break;
                        }
-                       SCpnt = found;
                }
-
-               /* See if this request has already been queued by an interrupt routine
+               /*
+                * If we couldn't find a free command block, and we have been
+                * asked to wait, then do so.
                 */
-               if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
-                       return NULL;
+               if (SCpnt) {
+                       break;
                }
-               if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) {        /* Might have changed */
-                       if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE) {
-                               DECLARE_WAITQUEUE(wait,current);
-                               add_wait_queue(&device->device_wait,&wait);
-                               current->state=TASK_UNINTERRUPTIBLE;
-                               spin_unlock(&io_request_lock);
-                               schedule();
-                               current->state=TASK_RUNNING;
-                               remove_wait_queue(&device->device_wait,&wait);
-                               spin_lock_irq(&io_request_lock);
-                       } else {
-                               if (!wait)
-                                       return NULL;
-                               if (!SCwait) {
-                                       printk("Attempt to allocate device channel %d,"
-                                              " target %d, lun %d\n", device->channel,
-                                              device->id, device->lun);
-                                       panic("No device found in scsi_allocate_device\n");
-                               }
-                       }
+               /*
+                * If we have been asked to wait for a free block, then
+                * wait here.
+                */
+               spin_unlock_irqrestore(&device_request_lock, flags);
+               if (wait) {
+                       /*
+                        * This should block until a device command block
+                        * becomes available.
+                        */
+                       sleep_on(&device->device_wait);
+                       spin_lock_irqsave(&device_request_lock, flags);
                } else {
-                       if (req) {
-                               memcpy(&SCpnt->request, req, sizeof(struct request));
-                               tablesize = device->host->sg_tablesize;
-                               bhp = bh = req->bh;
-                               if (!tablesize)
-                                       bh = NULL;
-                               /* Take a quick look through the table to see how big it is.
-                                * We already have our copy of req, so we can mess with that
-                                * if we want to.
-                                */
-                               while (req->nr_sectors && bh) {
-                                       bhp = bhp->b_reqnext;
-                                       if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp))
-                                               tablesize--;
-                                       req->nr_sectors -= bh->b_size >> 9;
-                                       req->sector += bh->b_size >> 9;
-                                       if (!tablesize)
-                                               break;
-                                       bh = bhp;
-                               }
-                               if (req->nr_sectors && bh && bh->b_reqnext) {   /* Any leftovers? */
-                                       SCpnt->request.bhtail = bh;
-                                       req->bh = bh->b_reqnext;        /* Divide request */
-                                       bh->b_reqnext = NULL;
-                                       bh = req->bh;
-                                       /* Now reset things so that req looks OK */
-                                       SCpnt->request.nr_sectors -= req->nr_sectors;
-                                       req->current_nr_sectors = bh->b_size >> 9;
-                                       req->buffer = bh->b_data;
-                                       SCpnt->request.sem = NULL;      /* Wait until whole thing done */
-                               } else {
-                                       req->rq_status = RQ_INACTIVE;
-                                       *reqp = req->next;
-                                       wake_up(&wait_for_request);
-                               }
-                       } else {
-                               SCpnt->request.rq_status = RQ_SCSI_BUSY;
-                               SCpnt->request.sem = NULL;      /* And no one is waiting for this
-                                                                * to complete */
-                       }
-                       atomic_inc(&SCpnt->host->host_active);
-                       SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
-                                                  SCpnt->target,
-                               atomic_read(&SCpnt->host->host_active)));
-                       break;
+                       return NULL;
                }
        }
 
+       SCpnt->request.rq_status = RQ_SCSI_BUSY;
+       SCpnt->request.sem = NULL;      /* And no one is waiting for this
+                                        * to complete */
+       atomic_inc(&SCpnt->host->host_active);
+
        SCpnt->use_sg = 0;      /* Reset the scatter-gather flag */
        SCpnt->old_use_sg = 0;
        SCpnt->transfersize = 0;        /* No default transfer size */
        SCpnt->cmd_len = 0;
-       SCpnt->resid = 0;
-       SCpnt->underflow = 0;   /* Do not flag underflow conditions */
 
-       /* Since not everyone seems to set the device info correctly
-        * before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
-        * FIXME(eric) This doesn't make any sense.
-        */
-       SCpnt->channel = device->channel;
-       SCpnt->lun = device->lun;
-       SCpnt->target = device->id;
+       SCpnt->underflow = 0;   /* Do not flag underflow conditions */
        SCpnt->state = SCSI_STATE_INITIALIZING;
        SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
 
+       spin_unlock_irqrestore(&device_request_lock, flags);
+
+       SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
+                                  SCpnt->target,
+                               atomic_read(&SCpnt->host->host_active)));
+
        return SCpnt;
 }
 
@@ -1354,6 +1168,9 @@ Scsi_Cmnd *scsi_allocate_device(struct request ** reqp, Scsi_Device * device,
  */
 void scsi_release_command(Scsi_Cmnd * SCpnt)
 {
+       unsigned long flags;
+       spin_lock_irqsave(&device_request_lock, flags);
+
        SCpnt->request.rq_status = RQ_INACTIVE;
        SCpnt->state = SCSI_STATE_UNUSED;
        SCpnt->owner = SCSI_OWNER_NOBODY;
@@ -1379,21 +1196,25 @@ void scsi_release_command(Scsi_Cmnd * SCpnt)
                             atomic_read(&SCpnt->host->eh_wait->count)));
                up(SCpnt->host->eh_wait);
        }
+       spin_unlock_irqrestore(&device_request_lock, flags);
 }
 
 /*
  * This is inline because we have stack problemes if we recurse to deeply.
  */
 
-inline int internal_cmnd(Scsi_Cmnd * SCpnt)
+int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
 {
 #ifdef DEBUG_DELAY
        unsigned long clock;
 #endif
        struct Scsi_Host *host;
        int rtn = 0;
+       unsigned long flags;
        unsigned long timeout;
 
+       ASSERT_LOCK(&io_request_lock, 0);
+
 #if DEBUG
        unsigned long *ret = 0;
 #ifdef __mips__
@@ -1427,11 +1248,9 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt)
                 * interrupt handler (assuming there is one irq-level per
                 * host).
                 */
-               spin_unlock_irq(&io_request_lock);
                while (--ticks_remaining >= 0)
                        mdelay(1 + 999 / HZ);
                host->resetting = 0;
-               spin_lock_irq(&io_request_lock);
        }
        if (host->hostt->use_new_eh_code) {
                scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
@@ -1444,7 +1263,7 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt)
         * We will use a queued command if possible, otherwise we will emulate the
         * queuing and calling of completion function ourselves.
         */
-       SCSI_LOG_MLQUEUE(3, printk("internal_cmnd (host = %d, channel = %d, target = %d, "
+       SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
               "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
        SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
                            SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
@@ -1460,35 +1279,42 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt)
                 * passes a meaningful return value.
                 */
                if (host->hostt->use_new_eh_code) {
+                        spin_lock_irqsave(&io_request_lock, flags);
                        rtn = host->hostt->queuecommand(SCpnt, scsi_done);
+                        spin_unlock_irqrestore(&io_request_lock, flags);
                        if (rtn != 0) {
+                               scsi_delete_timer(SCpnt);
                                scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
                        }
                } else {
+                        spin_lock_irqsave(&io_request_lock, flags);
                        host->hostt->queuecommand(SCpnt, scsi_old_done);
+                        spin_unlock_irqrestore(&io_request_lock, flags);
                }
        } else {
                int temp;
 
                SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n", host->hostt->command));
+                spin_lock_irqsave(&io_request_lock, flags);
                temp = host->hostt->command(SCpnt);
                SCpnt->result = temp;
 #ifdef DEBUG_DELAY
+                spin_unlock_irqrestore(&io_request_lock, flags);
                clock = jiffies + 4 * HZ;
-               spin_unlock_irq(&io_request_lock);
                while (time_before(jiffies, clock))
                        barrier();
-               spin_lock_irq(&io_request_lock);
                printk("done(host = %d, result = %04x) : routine at %p\n",
                       host->host_no, temp, host->hostt->command);
+                spin_lock_irqsave(&io_request_lock, flags);
 #endif
                if (host->hostt->use_new_eh_code) {
                        scsi_done(SCpnt);
                } else {
                        scsi_old_done(SCpnt);
                }
+                spin_unlock_irqrestore(&io_request_lock, flags);
        }
-       SCSI_LOG_MLQUEUE(3, printk("leaving internal_cmnd()\n"));
+       SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
        return rtn;
 }
 
@@ -1499,6 +1325,32 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt)
  * drivers go for the same host at the same time.
  */
 
+/*
+ * Function:    scsi_do_cmd
+ *
+ * Purpose:     Queue a SCSI command
+ *
+ * Arguments:   SCpnt     - command descriptor.
+ *              cmnd      - actual SCSI command to be performed.
+ *              buffer    - data buffer.
+ *              bufflen   - size of data buffer.
+ *              done      - completion function to be run.
+ *              timeout   - how long to let it run before timeout.
+ *              retries   - number of retries we allow.
+ *
+ * Lock status: With the new queueing code, this is SMP-safe, and no locks
+ *              need be held upon entry.   The old queueing code the lock was
+ *              assumed to be held upon entry.
+ *
+ * Returns:     Pointer to command descriptor.
+ *
+ * Notes:       Prior to the new queue code, this function was not SMP-safe.
+ *              Also, this function is now only used for queueing requests
+ *              for things like ioctls and character device requests - this
+ *              is because we essentially just inject a request into the
+ *              queue for the device. Normal block device handling manipulates
+ *              the queue directly.
+ */
 void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
              void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
                 int timeout, int retries)
@@ -1506,6 +1358,8 @@ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
        struct Scsi_Host *host = SCpnt->host;
        Scsi_Device *device = SCpnt->device;
 
+       ASSERT_LOCK(&io_request_lock, 0);
+
        SCpnt->owner = SCSI_OWNER_MIDLEVEL;
 
        SCSI_LOG_MLQUEUE(4,
@@ -1533,16 +1387,6 @@ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
         * ourselves.
         */
 
-       SCpnt->pid = scsi_pid++;
-
-       while (SCSI_BLOCK((Scsi_Device *) NULL, host)) {
-               spin_unlock(&io_request_lock);  /* FIXME!!! */
-               SCSI_SLEEP(&host->host_wait, SCSI_BLOCK((Scsi_Device *) NULL, host));
-               spin_lock_irq(&io_request_lock);        /* FIXME!!! */
-       }
-
-       if (host->block)
-               host_active = host;
 
        host->host_busy++;
        device->device_busy++;
@@ -1583,39 +1427,61 @@ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
        SCpnt->internal_timeout = NORMAL_TIMEOUT;
        SCpnt->abort_reason = 0;
        SCpnt->result = 0;
-       internal_cmnd(SCpnt);
+
+       /*
+        * At this point, we merely set up the command, stick it in the normal
+        * request queue, and return.  Eventually that request will come to the
+        * top of the list, and will be dispatched.
+        */
+       scsi_insert_special_cmd(SCpnt, 0);
 
        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
 }
 
-/* This function is the mid-level interrupt routine, which decides how
+/*
+ * This function is the mid-level interrupt routine, which decides how
  *  to handle error conditions.  Each invocation of this function must
  *  do one and *only* one of the following:
  *
  *      1) Insert command in BH queue.
  *      2) Activate error handler for host.
  *
- * FIXME(eric) - I am concerned about stack overflow (still).  An interrupt could
- * come while we are processing the bottom queue, which would cause another command
- * to be stuffed onto the bottom queue, and it would in turn be processed as that
- * interrupt handler is returning.  Given a sufficiently steady rate of returning
- * commands, this could cause the stack to overflow.  I am not sure what is the most
- * appropriate solution here - we should probably keep a depth count, and not process
- * any commands while we still have a bottom handler active higher in the stack.
+ * FIXME(eric) - I am concerned about stack overflow (still).  An
+ * interrupt could come while we are processing the bottom queue,
+ * which would cause another command to be stuffed onto the bottom
+ * queue, and it would in turn be processed as that interrupt handler
+ * is returning.  Given a sufficiently steady rate of returning
+ * commands, this could cause the stack to overflow.  I am not sure
+ * what is the most appropriate solution here - we should probably
+ * keep a depth count, and not process any commands while we still
+ * have a bottom handler active higher in the stack.
  *
- * There is currently code in the bottom half handler to monitor recursion in the bottom
- * handler and report if it ever happens.  If this becomes a problem, it won't be hard to
- * engineer something to deal with it so that only the outer layer ever does any real
- * processing.
+ * There is currently code in the bottom half handler to monitor
+ * recursion in the bottom handler and report if it ever happens.  If
+ * this becomes a problem, it won't be hard to engineer something to
+ * deal with it so that only the outer layer ever does any real
+ * processing.  
  */
 void scsi_done(Scsi_Cmnd * SCpnt)
 {
+       unsigned long flags;
+       int tstatus;
 
        /*
         * We don't have to worry about this one timing out any more.
         */
-       scsi_delete_timer(SCpnt);
+       tstatus = scsi_delete_timer(SCpnt);
 
+       /*
+        * If we are unable to remove the timer, it means that the command
+        * has already timed out.  In this case, we have no choice but to
+        * let the timeout function run, as we have no idea where in fact
+        * that function could really be.  It might be on another processor,
+        * etc, etc.
+        */
+       if (!tstatus) {
+               return;
+       }
        /* Set the serial numbers back to zero */
        SCpnt->serial_number = 0;
 
@@ -1631,6 +1497,8 @@ void scsi_done(Scsi_Cmnd * SCpnt)
                SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
                return;
        }
+       spin_lock_irqsave(&scsi_bhqueue_lock, flags);
+
        SCpnt->serial_number_at_timeout = 0;
        SCpnt->state = SCSI_STATE_BHQUEUE;
        SCpnt->owner = SCSI_OWNER_BH_HANDLER;
@@ -1646,6 +1514,10 @@ void scsi_done(Scsi_Cmnd * SCpnt)
         * We already have the io_request_lock here, since we are called from the
         * interrupt handler or the error handler. (DB)
         *
+        * This may be true at the moment, but I would like to wean all of the low
+        * level drivers away from using io_request_lock.   Technically they should
+        * all use their own locking.  I am adding a small spinlock to protect
+        * this datastructure to make it safe for that day.  (ERY)
         */
        if (!scsi_bh_queue_head) {
                scsi_bh_queue_head = SCpnt;
@@ -1655,6 +1527,7 @@ void scsi_done(Scsi_Cmnd * SCpnt)
                scsi_bh_queue_tail = SCpnt;
        }
 
+       spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
        /*
         * Mark the bottom half handler to be run.
         */
@@ -1676,6 +1549,13 @@ void scsi_done(Scsi_Cmnd * SCpnt)
  * race condition when scsi_done is called after a command has already
  * timed out but before the time out is processed by the error handler.
  * (DB)
+ *
+ * I believe I have corrected this.  We simply monitor the return status of
+ * del_timer() - if this comes back as 0, it means that the timer has fired
+ * and that a timeout is in progress.   I have modified scsi_done() such
+ * that in this instance the command is never inserted in the bottom
+ * half queue.  Thus the only time we hold the lock here is when
+ * we wish to atomically remove the contents of the queue.
  */
 void scsi_bottom_half_handler(void)
 {
@@ -1683,14 +1563,14 @@ void scsi_bottom_half_handler(void)
        Scsi_Cmnd *SCnext;
        unsigned long flags;
 
-       spin_lock_irqsave(&io_request_lock, flags);
 
        while (1 == 1) {
+               spin_lock_irqsave(&scsi_bhqueue_lock, flags);
                SCpnt = scsi_bh_queue_head;
                scsi_bh_queue_head = NULL;
+               spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
 
                if (SCpnt == NULL) {
-                       spin_unlock_irqrestore(&io_request_lock, flags);
                        return;
                }
                SCnext = SCpnt->bh_next;
@@ -1774,8 +1654,6 @@ void scsi_bottom_half_handler(void)
 
        }                       /* while(1==1) */
 
-       spin_unlock_irqrestore(&io_request_lock, flags);
-
 }
 
 /*
@@ -1796,9 +1674,7 @@ int scsi_retry_command(Scsi_Cmnd * SCpnt)
        SCpnt->request_bufflen = SCpnt->bufflen;
        SCpnt->use_sg = SCpnt->old_use_sg;
        SCpnt->cmd_len = SCpnt->old_cmd_len;
-       SCpnt->result = 0;
-       memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
-       return internal_cmnd(SCpnt);
+       return scsi_dispatch_cmd(SCpnt);
 }
 
 /*
@@ -1813,32 +1689,14 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
        struct Scsi_Host *host;
        Scsi_Device *device;
 
+       ASSERT_LOCK(&io_request_lock, 0);
+
        host = SCpnt->host;
        device = SCpnt->device;
 
        host->host_busy--;      /* Indicate that we are free */
        device->device_busy--;  /* Decrement device usage counter. */
 
-       if (host->block && host->host_busy == 0) {
-               host_active = NULL;
-
-               /* For block devices "wake_up" is done in end_scsi_request */
-               if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) {
-                       struct Scsi_Host *next;
-
-                       for (next = host->block; next != host; next = next->block)
-                               wake_up(&next->host_wait);
-               }
-       }
-       /*
-        * Now try and drain the mid-level queue if any commands have been
-        * inserted.  Check to see whether the queue even has anything in
-        * it first, as otherwise this is useless overhead.
-        */
-       if (SCpnt->host->pending_commands != NULL) {
-               scsi_mlqueue_finish(SCpnt->host, SCpnt->device);
-       }
-       wake_up(&host->host_wait);
 
        /*
         * If we have valid sense information, then some kind of recovery
@@ -1864,9 +1722,34 @@ static int scsi_register_host(Scsi_Host_Template *);
 static void scsi_unregister_host(Scsi_Host_Template *);
 #endif
 
+/*
+ * Function:    scsi_malloc
+ *
+ * Purpose:     Allocate memory from the DMA-safe pool.
+ *
+ * Arguments:   len       - amount of memory we need.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Pointer to memory block.
+ *
+ * Notes:       Prior to the new queue code, this function was not SMP-safe.
+ *              This function can only allocate in units of sectors
+ *              (i.e. 512 bytes).
+ *
+ *              We cannot use the normal system allocator because we need
+ *              to be able to guarantee that we can process a complete disk
+ *              I/O request without touching the system allocator.  Think
+ *              about it - if the system were heavily swapping, and tried to
+ *              write out a block of memory to disk, and the SCSI code needed
+ *              to allocate more memory in order to be able to write the
+ *              data to disk, you would wedge the system.
+ */
 void *scsi_malloc(unsigned int len)
 {
        unsigned int nbits, mask;
+       unsigned long flags;
+
        int i, j;
        if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
                return NULL;
@@ -1874,6 +1757,8 @@ void *scsi_malloc(unsigned int len)
        nbits = len >> 9;
        mask = (1 << nbits) - 1;
 
+       spin_lock_irqsave(&allocator_request_lock, flags);
+
        for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
                for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
                        if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
@@ -1883,15 +1768,37 @@ void *scsi_malloc(unsigned int len)
                                SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
                                printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
 #endif
+                               spin_unlock_irqrestore(&allocator_request_lock, flags);
                                return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
                        }
                }
+       spin_unlock_irqrestore(&allocator_request_lock, flags);
        return NULL;            /* Nope.  No more */
 }
 
+/*
+ * Function:    scsi_free
+ *
+ * Purpose:     Free memory into the DMA-safe pool.
+ *
+ * Arguments:   ptr       - data block we are freeing.
+ *              len       - size of block we are freeing.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       This function *must* only be used to free memory
+ *              allocated from scsi_malloc().
+ *
+ *              Prior to the new queue code, this function was not SMP-safe.
+ *              This function can only free memory in units of sectors
+ *              (i.e. 512 bytes).
+ */
 int scsi_free(void *obj, unsigned int len)
 {
        unsigned int page, sector, nbits, mask;
+       unsigned long flags;
 
 #ifdef DEBUG
        unsigned long ret = 0;
@@ -1905,6 +1812,8 @@ int scsi_free(void *obj, unsigned int len)
        SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
 #endif
 
+       spin_lock_irqsave(&allocator_request_lock, flags);
+
        for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
                unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
                if ((unsigned long) obj >= page_addr &&
@@ -1927,6 +1836,7 @@ int scsi_free(void *obj, unsigned int len)
                        }
                        scsi_dma_free_sectors += nbits;
                        dma_malloc_freelist[page] &= ~(mask << sector);
+                       spin_unlock_irqrestore(&allocator_request_lock, flags);
                        return 0;
                }
        }
@@ -1977,10 +1887,13 @@ void scsi_init_free(char *ptr, unsigned int size)
 
 void scsi_build_commandblocks(Scsi_Device * SDpnt)
 {
+       unsigned long flags;
        struct Scsi_Host *host = SDpnt->host;
        int j;
        Scsi_Cmnd *SCpnt;
 
+       spin_lock_irqsave(&device_request_lock, flags);
+
        if (SDpnt->queue_depth == 0)
                SDpnt->queue_depth = host->cmd_per_lun;
        SDpnt->device_queue = NULL;
@@ -2020,8 +1933,10 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt)
                       SDpnt->queue_depth, j);
                SDpnt->queue_depth = j;
                SDpnt->has_cmdblocks = (0 != j);
-       } else
+       } else {
                SDpnt->has_cmdblocks = 1;
+       }
+       spin_unlock_irqrestore(&device_request_lock, flags);
 }
 
 static ssize_t proc_scsi_gen_write(struct file * file, const char * buf,
@@ -2450,6 +2365,7 @@ static ssize_t proc_scsi_gen_write(struct file * file, const char * buf,
                        if (HBA_ptr->host_queue == scd) {
                                HBA_ptr->host_queue = scd->next;
                        }
+                       blk_cleanup_queue(&scd->request_queue);
                        scsi_init_free((char *) scd, sizeof(Scsi_Device));
                } else {
                        goto out;
@@ -2464,13 +2380,27 @@ out:
 #endif
 
 /*
- * Go through the device list and recompute the most appropriate size
- * for the dma pool.  Then grab more memory (as required).
+ * Function:    resize_dma_pool
+ *
+ * Purpose:     Ensure that the DMA pool is sufficiently large to be
+ *              able to guarantee that we can always process I/O requests
+ *              without calling the system allocator.
+ *
+ * Arguments:   None.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       Prior to the new queue code, this function was not SMP-safe.
+ *              Go through the device list and recompute the most appropriate
+ *              size for the dma pool.  Then grab more memory (as required).
  */
 static void resize_dma_pool(void)
 {
        int i, k;
        unsigned long size;
+       unsigned long flags;
        struct Scsi_Host *shpnt;
        struct Scsi_Host *host = NULL;
        Scsi_Device *SDpnt;
@@ -2480,6 +2410,8 @@ static void resize_dma_pool(void)
        unsigned char **new_dma_malloc_pages = NULL;
        int out_of_space = 0;
 
+       spin_lock_irqsave(&allocator_request_lock, flags);
+
        if (!scsi_hostlist) {
                /*
                 * Free up the DMA pool.
@@ -2499,6 +2431,7 @@ static void resize_dma_pool(void)
                dma_malloc_freelist = NULL;
                dma_sectors = 0;
                scsi_dma_free_sectors = 0;
+               spin_unlock_irqrestore(&allocator_request_lock, flags);
                return;
        }
        /* Next, check to see if we need to extend the DMA buffer pool */
@@ -2569,8 +2502,10 @@ static void resize_dma_pool(void)
        if (new_dma_sectors < dma_sectors)
                new_dma_sectors = dma_sectors;
 #endif
-       if (new_dma_sectors <= dma_sectors)
+       if (new_dma_sectors <= dma_sectors) {
+               spin_unlock_irqrestore(&allocator_request_lock, flags);
                return;         /* best to quit while we are in front */
+        }
 
        for (k = 0; k < 20; ++k) {      /* just in case */
                out_of_space = 0;
@@ -2621,6 +2556,7 @@ static void resize_dma_pool(void)
                        break;  /* found space ... */
        }                       /* end of for loop */
        if (out_of_space) {
+               spin_unlock_irqrestore(&allocator_request_lock, flags);
                scsi_need_isa_buffer = new_need_isa_buffer;     /* some useful info */
                printk("      WARNING, not enough memory, pool not expanded\n");
                return;
@@ -2645,6 +2581,8 @@ static void resize_dma_pool(void)
        dma_sectors = new_dma_sectors;
        scsi_need_isa_buffer = new_need_isa_buffer;
 
+       spin_unlock_irqrestore(&allocator_request_lock, flags);
+
 #ifdef DEBUG_INIT
        printk("resize_dma_pool: dma free sectors   = %d\n", scsi_dma_free_sectors);
        printk("resize_dma_pool: dma sectors        = %d\n", dma_sectors);
@@ -2747,8 +2685,6 @@ static int scsi_register_host(Scsi_Host_Template * tpnt)
                printk("scsi : %d host%s.\n", next_scsi_host,
                       (next_scsi_host == 1) ? "" : "s");
 
-               scsi_make_blocked_list();
-
                /* The next step is to call scan_scsis here.  This generates the
                 * Scsi_Devices entries
                 */
@@ -2961,6 +2897,7 @@ static void scsi_unregister_host(Scsi_Host_Template * tpnt)
                        }
                        SDpnt->has_cmdblocks = 0;
 
+                       blk_cleanup_queue(&SDpnt->request_queue);
                        /* Next free up the Scsi_Device structures for this host */
                        shpnt->host_queue = SDpnt->next;
                        scsi_init_free((char *) SDpnt, sizeof(Scsi_Device));
@@ -3016,7 +2953,6 @@ static void scsi_unregister_host(Scsi_Host_Template * tpnt)
               (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
 #endif
 
-       scsi_make_blocked_list();
 
        /* There were some hosts that were loaded at boot time, so we cannot
           do any more than this */
@@ -3249,12 +3185,11 @@ static void scsi_dump_status(int level)
        printk("Dump of scsi host parameters:\n");
        i = 0;
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
-               printk(" %d %d %d : %d %p\n",
+               printk(" %d %d %d : %d\n",
                       shpnt->host_failed,
                       shpnt->host_busy,
                       atomic_read(&shpnt->host_active),
-                      shpnt->host_blocked,
-                      shpnt->pending_commands);
+                      shpnt->host_blocked);
 
        }
 
@@ -3300,10 +3235,10 @@ static void scsi_dump_status(int level)
                        /* Now dump the request lists for each block device */
                        printk("Dump of pending block device requests\n");
                        for (i = 0; i < MAX_BLKDEV; i++) {
-                               if (blk_dev[i].current_request) {
+                               if (blk_dev[i].request_queue.current_request) {
                                        struct request *req;
                                        printk("%d: ", i);
-                                       req = blk_dev[i].current_request;
+                                       req = blk_dev[i].request_queue.current_request;
                                        while (req) {
                                                printk("(%s %d %ld %ld %ld) ",
                                                   kdevname(req->rq_dev),
@@ -3318,7 +3253,7 @@ static void scsi_dump_status(int level)
                        }
                }
        }
-       /* printk("wait_for_request = %p\n", &wait_for_request); */
+       printk("wait_for_request = %p\n", &wait_for_request);
 #endif /* CONFIG_SCSI_LOGGING */ /* } */
 }
 #endif                         /* CONFIG_PROC_FS */
index 3921cf8e23a616c8d54a459ac92aa7608a7070b7..eba0c14b72559084e3ff668e407fe60582c0fd3b 100644 (file)
@@ -1,13 +1,13 @@
 /*
  *  scsi.h Copyright (C) 1992 Drew Eckhardt 
- *         Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *         Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
  *  generic SCSI package header file by
  *      Initial versions: Drew Eckhardt
  *      Subsequent revisions: Eric Youngdale
  *
  *  <drew@colorado.edu>
  *
- *       Modified by Eric Youngdale eric@aib.com to
+ *       Modified by Eric Youngdale eric@andante.org to
  *       add scatter-gather, multiple outstanding request, and other
  *       enhancements.
  */
@@ -49,6 +49,21 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
 #define SCSI_TIMEOUT (2*HZ)
 #endif
 
+/*
+ * Used for debugging the new queueing code.  We want to make sure
+ * that the lock state is consistent with design.  Only do this in
+ * the user space simulator.
+ */
+#define ASSERT_LOCK(_LOCK, _COUNT)
+
+#if defined(__SMP__) && defined(CONFIG_USER_DEBUG)
+#undef ASSERT_LOCK
+#define ASSERT_LOCK(_LOCK,_COUNT)       \
+        { if( (_LOCK)->lock != _COUNT )   \
+                panic("Lock count inconsistent %s %d\n", __FILE__, __LINE__); \
+                                                                                       }
+#endif
+
 /*
  *  Use these to separate status msg and our bytes
  *
@@ -378,6 +393,18 @@ extern int scsi_sense_valid(Scsi_Cmnd *);
 extern int scsi_decide_disposition(Scsi_Cmnd * SCpnt);
 extern int scsi_block_when_processing_errors(Scsi_Device *);
 extern void scsi_sleep(int);
+extern int  scsi_partsize(struct buffer_head *bh, unsigned long capacity,
+                    unsigned int *cyls, unsigned int *hds,
+                    unsigned int *secs);
+
+/*
+ * Prototypes for functions in scsi_lib.c
+ */
+extern void initialize_merge_fn(Scsi_Device * SDpnt);
+extern void scsi_request_fn(request_queue_t * q);
+
+extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int);
+extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
 
 /*
  *  scsi_abort aborts the current command that is executing on host host.
@@ -386,17 +413,18 @@ extern void scsi_sleep(int);
  */
 
 extern void scsi_do_cmd(Scsi_Cmnd *, const void *cmnd,
-                        void *buffer, unsigned bufflen, 
-                        void (*done)(struct scsi_cmnd *),
-                        int timeout, int retries);
-
-extern void scsi_wait_cmd (Scsi_Cmnd *, const void *cmnd ,
                        void *buffer, unsigned bufflen,
                        void (*done) (struct scsi_cmnd *),
                        int timeout, int retries);
 
+extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd,
+                         void *buffer, unsigned bufflen,
+                         void (*done) (struct scsi_cmnd *),
+                         int timeout, int retries);
 
-extern Scsi_Cmnd *scsi_allocate_device(struct request **, Scsi_Device *, int);
+extern void scsi_request_fn(request_queue_t * q);
+
+extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int);
 
 extern Scsi_Cmnd *scsi_request_queueable(struct request *, Scsi_Device *);
 
@@ -428,9 +456,10 @@ struct scsi_device {
        wait_queue_head_t device_wait;  /* Used to wait if
                                           device is busy */
        struct Scsi_Host *host;
+       request_queue_t request_queue;
        volatile unsigned short device_busy;    /* commands actually active on low-level */
-       void (*scsi_request_fn) (void);         /* Used to jumpstart things after an 
-                                                  * ioctl */
+       int (*scsi_init_io_fn) (Scsi_Cmnd *);   /* Used to initialize
+                                                  new request */
        Scsi_Cmnd *device_queue;        /* queue of SCSI Command structures */
 
 /* public: */
@@ -438,6 +467,8 @@ struct scsi_device {
 
        unsigned int manufacturer;      /* Manufacturer of device, for using 
                                         * vendor-specific cmd's */
+       unsigned sector_size;   /* size in bytes */
+
        int attached;           /* # of high level drivers attached to 
                                 * this */
        int access_count;       /* Count of open channels/mounts */
@@ -475,6 +506,10 @@ struct scsi_device {
        unsigned expecting_cc_ua:1;     /* Expecting a CHECK_CONDITION/UNIT_ATTN
                                         * because we did a bus reset. */
        unsigned device_blocked:1;      /* Device returned QUEUE_FULL. */
+       unsigned ten:1;         /* support ten byte read / write */
+       unsigned remap:1;       /* support remapping  */
+       unsigned starved:1;     /* unable to process commands because
+                                  host busy */
 };
 
 
@@ -577,16 +612,16 @@ struct scsi_cmnd {
                                   reconnects.   Probably == sector
                                   size */
 
-       int     resid;          /* Number of bytes requested to be
+       int resid;              /* Number of bytes requested to be
                                   transferred less actual number
                                   transferred (0 if not supported) */
 
        struct request request; /* A copy of the command we are
                                   working on */
 
-       unsigned char sense_buffer[64];  /* obtained by REQUEST SENSE when
-                                           CHECK CONDITION is received on
-                                           original command (auto-sense) */
+       unsigned char sense_buffer[64];         /* obtained by REQUEST SENSE when
+                                                  CHECK CONDITION is received on
+                                                  original command (auto-sense) */
 
        unsigned flags;
 
@@ -630,6 +665,14 @@ struct scsi_cmnd {
        unsigned long pid;      /* Process ID, starts at 0 */
 };
 
+/*
+ *  Flag bits for the internal_timeout array
+ */
+#define NORMAL_TIMEOUT 0
+#define IN_ABORT  1
+#define IN_RESET  2
+#define IN_RESET2 4
+#define IN_RESET3 8
 
 /*
  * Definitions and prototypes used for scsi mid-level queue.
@@ -640,61 +683,16 @@ struct scsi_cmnd {
 extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason);
 extern int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device);
 
+extern Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate,
+                                  int sectors);
+
+extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
+                              int block_sectors);
+
 
 #if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR)
 #include "hosts.h"
 
-static Scsi_Cmnd *end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
-{
-       struct request *req;
-       struct buffer_head *bh;
-
-       req = &SCpnt->request;
-       req->errors = 0;
-       if (!uptodate) {
-               printk(DEVICE_NAME " I/O error: dev %s, sector %lu\n",
-                      kdevname(req->rq_dev), req->sector);
-       }
-       do {
-               if ((bh = req->bh) != NULL) {
-                       req->bh = bh->b_reqnext;
-                       req->nr_sectors -= bh->b_size >> 9;
-                       req->sector += bh->b_size >> 9;
-                       bh->b_reqnext = NULL;
-                       bh->b_end_io(bh, uptodate);
-                       sectors -= bh->b_size >> 9;
-                       if ((bh = req->bh) != NULL) {
-                               req->current_nr_sectors = bh->b_size >> 9;
-                               if (req->nr_sectors < req->current_nr_sectors) {
-                                       req->nr_sectors = req->current_nr_sectors;
-                                       printk("end_scsi_request: buffer-list destroyed\n");
-                               }
-                       }
-               }
-       } while (sectors && bh);
-       if (req->bh) {
-               req->buffer = bh->b_data;
-               return SCpnt;
-       }
-       DEVICE_OFF(req->rq_dev);
-       if (req->sem != NULL) {
-               up(req->sem);
-       }
-       add_blkdev_randomness(MAJOR(req->rq_dev));
-
-       if (SCpnt->host->block) {
-               struct Scsi_Host *next;
-
-               for (next = SCpnt->host->block; next != SCpnt->host;
-                    next = next->block)
-                       wake_up(&next->host_wait);
-       }
-       wake_up(&wait_for_request);
-       wake_up(&SCpnt->device->device_wait);
-       scsi_release_command(SCpnt);
-       return NULL;
-}
-
 
 /* This is just like INIT_REQUEST, but we need to be aware of the fact
  * that an interrupt may start another request, so we run this with interrupts
index 2b23854b3fce0c2a161067d60551896db9d27f3a..1e5eb00c1d9afa10f3704a57af9e2f5e25ddec8a 100644 (file)
 
 /* A few options that we want selected */
 
-#define NR_HOSTS_PRESENT 20
-#define NR_FAKE_DISKS   6
-#define N_HEAD          32
-#define N_SECTOR        64
-#define DISK_READONLY(TGT)      (1)
+#define NR_HOSTS_PRESENT 1
+#define NR_FAKE_DISKS   3
+#define N_HEAD          255
+#define N_SECTOR        63
+#define N_CYLINDER      524
+#define DISK_READONLY(TGT)      (0)
 #define DISK_REMOVEABLE(TGT)    (1)
+#define DEVICE_TYPE(TGT) (TGT == 2 ? TYPE_TAPE : TYPE_DISK);
 
 /* Do not attempt to use a timer to simulate a real disk with latency */
 /* Only use this in the actual kernel, not in the simulator. */
-/* #define IMMEDIATE */
+#define IMMEDIATE
 
 /* Skip some consistency checking.  Good for benchmarking */
 #define SPEEDY
@@ -58,11 +60,15 @@ static int NR_REAL = -1;
 #define START_PARTITION 4
 
 /* Time to wait before completing a command */
-#define DISK_SPEED     (HZ/10)   /* 100ms */
-#define CAPACITY (0x80000)
+#define DISK_SPEED     (HZ/10) /* 100ms */
+#define CAPACITY (N_HEAD * N_SECTOR * N_CYLINDER)
+#define SIZE(TGT) (TGT == 2 ? 2248 : 512)
 
 static int starts[] =
-{N_HEAD, N_HEAD * N_SECTOR, 50000, CAPACITY, 0};
+{N_SECTOR,
+ N_HEAD * N_SECTOR,            /* Single cylinder */
+ N_HEAD * N_SECTOR * 4,
+ CAPACITY, 0};
 static int npart = 0;
 
 #include "scsi_debug.h"
@@ -112,21 +118,25 @@ static int npart = 0;
 
 typedef void (*done_fct_t) (Scsi_Cmnd *);
 
-static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] = {NULL,};
+static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] =
+{NULL,};
 
 static void scsi_debug_intr_handle(unsigned long);
 
 static struct timer_list timeout[SCSI_DEBUG_MAILBOXES];
 
-Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES]  = {NULL,};
-static char SCrst[SCSI_DEBUG_MAILBOXES] = {0,};
+Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES] =
+{NULL,};
+static char SCrst[SCSI_DEBUG_MAILBOXES] =
+{0,};
 
 /*
  * Semaphore used to simulate bus lockups.
  */
 static int scsi_debug_lockup = 0;
 
-static char sense_buffer[128] = {0,};
+static char sense_buffer[128] =
+{0,};
 
 static void scsi_dump(Scsi_Cmnd * SCpnt, int flag)
 {
@@ -197,6 +207,14 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
        sgcount = 0;
        sgpnt = NULL;
 
+        /*
+         * The io_request_lock *must* be held at this point.
+         */
+        if( io_request_lock.lock == 0 )
+        {
+                printk("Warning - io_request_lock is not held in queuecommand\n");
+        }
+
        /*
         * If we are being notified of the mid-level reposessing a command due to timeout,
         * just return.
@@ -242,6 +260,10 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
                SCpnt->result = 0;
                done(SCpnt);
                return 0;
+       case START_STOP:
+               SCSI_LOG_LLQUEUE(3, printk("START_STOP\n"));
+               scsi_debug_errsts = 0;
+               break;
        case ALLOW_MEDIUM_REMOVAL:
                if (cmd[4]) {
                        SCSI_LOG_LLQUEUE(2, printk("Medium removal inhibited..."));
@@ -253,7 +275,7 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
        case INQUIRY:
                SCSI_LOG_LLQUEUE(3, printk("Inquiry...(%p %d)\n", buff, bufflen));
                memset(buff, 0, bufflen);
-               buff[0] = TYPE_DISK;
+               buff[0] = DEVICE_TYPE(target);
                buff[1] = DISK_REMOVEABLE(target) ? 0x80 : 0;   /* Removable disk */
                buff[2] = 1;
                buff[4] = 33 - 5;
@@ -277,7 +299,10 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
                buff[1] = (CAPACITY >> 16) & 0xff;
                buff[2] = (CAPACITY >> 8) & 0xff;
                buff[3] = CAPACITY & 0xff;
-               buff[6] = 2;    /* 512 byte sectors */
+               buff[4] = 0;
+               buff[5] = 0;
+               buff[6] = (SIZE(target) >> 8) & 0xff;   /* 512 byte sectors */
+               buff[7] = SIZE(target) & 0xff;
                scsi_debug_errsts = 0;
                break;
        case READ_10:
@@ -327,15 +352,23 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
                                p = (struct partition *) (buff + 0x1be);
                                i = 0;
                                while (starts[i + 1]) {
+                                       int start_cyl, end_cyl;
+
+                                       start_cyl = starts[i] / N_HEAD / N_SECTOR;
+                                       end_cyl = (starts[i + 1] - 1) / N_HEAD / N_SECTOR;
+                                       p->boot_ind = 0;
+
+                                       p->head = (i == 0 ? 1 : 0);
+                                       p->sector = 1 | ((start_cyl >> 8) << 6);
+                                       p->cyl = (start_cyl & 0xff);
+
+                                       p->end_head = N_HEAD - 1;
+                                       p->end_sector = N_SECTOR | ((end_cyl >> 8) << 6);
+                                       p->end_cyl = (end_cyl & 0xff);
+
                                        p->start_sect = starts[i];
                                        p->nr_sects = starts[i + 1] - starts[i];
                                        p->sys_ind = 0x81;      /* Linux partition */
-                                       p->head = (i == 0 ? 1 : 0);
-                                       p->sector = 1;
-                                       p->cyl = starts[i] / N_HEAD / N_SECTOR;
-                                       p->end_head = N_HEAD - 1;
-                                       p->end_sector = N_SECTOR;
-                                       p->end_cyl = starts[i + 1] / N_HEAD / N_SECTOR;
                                        p++;
                                        i++;
                                };
@@ -465,6 +498,8 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 #ifdef IMMEDIATE
        if (!scsi_debug_lockup) {
                SCpnt->result = scsi_debug_errsts;
+               SCint[i] = SCpnt;
+               do_done[i] = done;
                scsi_debug_intr_handle(i);      /* No timer - do this one right away */
        }
        restore_flags(flags);
@@ -490,24 +525,6 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
        return 0;
 }
 
-volatile static int internal_done_flag = 0;
-volatile static int internal_done_errcode = 0;
-static void internal_done(Scsi_Cmnd * SCpnt)
-{
-       internal_done_errcode = SCpnt->result;
-       ++internal_done_flag;
-}
-
-int scsi_debug_command(Scsi_Cmnd * SCpnt)
-{
-       DEB(printk("scsi_debug_command: ..calling scsi_debug_queuecommand\n"));
-       scsi_debug_queuecommand(SCpnt, internal_done);
-
-       while (!internal_done_flag);
-       internal_done_flag = 0;
-       return internal_done_errcode;
-}
-
 /* A "high" level interrupt handler.  This should be called once per jiffy
  * to simulate a regular scsi disk.  We use a timer to do this. */
 
@@ -589,7 +606,7 @@ int scsi_debug_biosparam(Disk * disk, kdev_t dev, int *info)
        int size = disk->capacity;
        info[0] = N_HEAD;
        info[1] = N_SECTOR;
-       info[2] = (size + 2047) >> 11;
+       info[2] = N_CYLINDER;
        if (info[2] >= 1024)
                info[2] = 1024;
        return 0;
@@ -684,6 +701,21 @@ int scsi_debug_proc_info(char *buffer, char **start, off_t offset,
        return (len);
 }
 
+#ifdef CONFIG_USER_DEBUG
+/*
+ * This is a hack for the user space emulator.  It allows us to
+ * "insert" arbitrary numbers of additional drivers.
+ */
+void *scsi_debug_get_handle(void)
+{
+       static Scsi_Host_Template driver_copy = SCSI_DEBUG;
+       void *rtn;
+       rtn = kmalloc(sizeof(driver_copy), GFP_ATOMIC);
+       memcpy(rtn, (void *) &driver_copy, sizeof(driver_copy));
+       return rtn;
+}
+#endif
+
 #ifdef MODULE
 /* Eventually this will go into an include file, but this will be later */
 Scsi_Host_Template driver_template = SCSI_DEBUG;
index 6f22616b7535cd389328bc71890cf57f78c4a1eb..357b3f5fc22cc07c12259d49e9b16ec393ed35d7 100644 (file)
@@ -27,16 +27,15 @@ int scsi_debug_proc_info(char *, char **, off_t, int, int, int);
 #define SCSI_DEBUG {proc_info:         scsi_debug_proc_info,   \
                    name:              "SCSI DEBUG",            \
                    detect:            scsi_debug_detect,       \
-                   command:           scsi_debug_command,      \
                    queuecommand:      scsi_debug_queuecommand, \
                    abort:             scsi_debug_abort,        \
                    reset:             scsi_debug_reset,        \
                    bios_param:        scsi_debug_biosparam,    \
                    can_queue:         SCSI_DEBUG_CANQUEUE,     \
                    this_id:           7,                       \
-                   sg_tablesize:      SG_ALL,                  \
+                   sg_tablesize:      16,                      \
                    cmd_per_lun:       3,                       \
-                   unchecked_isa_dma: 1,                       \
+                   unchecked_isa_dma: 0,                       \
                    use_clustering:    ENABLE_CLUSTERING,       \
                    use_new_eh_code:   1,                       \
 }
index 4663d694b1cf254e58a21ebc6459c50823e972cf..08deecf39f5929a138bb6f14dc6840618ffedeb2 100644 (file)
 #include "hosts.h"
 #include "constants.h"
 
-#ifdef MODULE
+/*
+ * We must always allow SHUTDOWN_SIGS.  Even if we are not a module,
+ * the host drivers that we are using may be loaded as modules, and
+ * when we unload these,  we need to ensure that the error handler thread
+ * can be shut down.
+ */
 #define SHUTDOWN_SIGS  (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
-#else
-#define SHUTDOWN_SIGS  (0UL)
-#endif
 
 #ifdef DEBUG
 #define SENSE_TIMEOUT SCSI_TIMEOUT
@@ -128,7 +130,9 @@ void scsi_add_timer(Scsi_Cmnd * SCset,
  *
  * Arguments:   SCset   - command that we are canceling timer for.
  *
- * Returns:     Amount of time remaining before command would have timed out.
+ * Returns:     1 if we were able to detach the timer.  0 if we
+ *              blew it, and the timer function has already started
+ *              to run.
  *
  * Notes:       This should be turned into an inline function.
  */
@@ -136,8 +140,7 @@ int scsi_delete_timer(Scsi_Cmnd * SCset)
 {
        int rtn;
 
-       rtn = jiffies - SCset->eh_timeout.expires;
-       del_timer(&SCset->eh_timeout);
+       rtn = del_timer(&SCset->eh_timeout);
 
        SCSI_LOG_ERROR_RECOVERY(5, printk("Clearing timer for command %p\n", SCset));
 
@@ -415,6 +418,7 @@ STATIC int scsi_request_sense(Scsi_Cmnd * SCpnt)
        {REQUEST_SENSE, 0, 0, 0, 255, 0};
        unsigned char scsi_result0[256], *scsi_result = NULL;
 
+       ASSERT_LOCK(&io_request_lock, 1);
 
        memcpy((void *) SCpnt->cmnd, (void *) generic_sense,
               sizeof(generic_sense));
@@ -563,10 +567,7 @@ void scsi_sleep(int timeout)
 
        add_timer(&timer);
 
-       spin_unlock_irq(&io_request_lock);
        down(&sem);
-       spin_lock_irq(&io_request_lock);
-
        del_timer(&timer);
 }
 
@@ -583,6 +584,8 @@ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * SCpnt, int timeout)
 {
        struct Scsi_Host *host;
 
+       ASSERT_LOCK(&io_request_lock, 1);
+
        host = SCpnt->host;
 
       retry:
@@ -811,7 +814,9 @@ STATIC int scsi_try_bus_reset(Scsi_Cmnd * SCpnt)
         * If we had a successful bus reset, mark the command blocks to expect
         * a condition code of unit attention.
         */
+       spin_unlock_irq(&io_request_lock);
        scsi_sleep(BUS_RESET_SETTLE_TIME);
+       spin_lock_irq(&io_request_lock);
        if (SCpnt->eh_state == SUCCESS) {
                Scsi_Device *SDloop;
                for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
@@ -854,7 +859,9 @@ STATIC int scsi_try_host_reset(Scsi_Cmnd * SCpnt)
         * If we had a successful host reset, mark the command blocks to expect
         * a condition code of unit attention.
         */
+       spin_unlock_irq(&io_request_lock);
        scsi_sleep(HOST_RESET_SETTLE_TIME);
+       spin_lock_irq(&io_request_lock);
        if (SCpnt->eh_state == SUCCESS) {
                Scsi_Device *SDloop;
                for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
@@ -1164,6 +1171,8 @@ STATIC int scsi_check_sense(Scsi_Cmnd * SCpnt)
  *
  * Arguments:   host  - host that we are restarting
  *
+ * Lock status: Assumed that locks are not held upon entry.
+ *
  * Returns:     Nothing
  *
  * Notes:       When we entered the error handler, we blocked all further
@@ -1172,6 +1181,9 @@ STATIC int scsi_check_sense(Scsi_Cmnd * SCpnt)
 STATIC void scsi_restart_operations(struct Scsi_Host *host)
 {
        Scsi_Device *SDpnt;
+       unsigned long flags;
+
+       ASSERT_LOCK(&io_request_lock, 0);
 
        /*
         * Next free up anything directly waiting upon the host.  This will be
@@ -1183,18 +1195,23 @@ STATIC void scsi_restart_operations(struct Scsi_Host *host)
        wake_up(&host->host_wait);
 
        /*
-        * Finally, block devices need an extra kick in the pants.  This is because
-        * the request queueing mechanism may have queued lots of pending requests
-        * and there won't be a process waiting in a place where we can simply wake
-        * it up.  Thus we simply go through and call the request function to goose
-        * the various top level drivers and get things moving again.
+        * Finally we need to re-initiate requests that may be pending.  We will
+        * have had everything blocked while error handling is taking place, and
+        * now that error recovery is done, we will need to ensure that these
+        * requests are started.
         */
+       spin_lock_irqsave(&io_request_lock, flags);
        for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
-               SCSI_LOG_ERROR_RECOVERY(5, printk("Calling request function to restart things...\n"));
-
-               if (SDpnt->scsi_request_fn != NULL)
-                       (*SDpnt->scsi_request_fn) ();
+               request_queue_t *q;
+               if ((host->can_queue > 0 && (host->host_busy >= host->can_queue))
+                   || (host->host_blocked)
+                   || (SDpnt->device_blocked)) {
+                       break;
+               }
+               q = &SDpnt->request_queue;
+               q->request_fn(q);
        }
+       spin_unlock_irqrestore(&io_request_lock, flags);
 }
 
 /*
@@ -1241,6 +1258,8 @@ STATIC int scsi_unjam_host(struct Scsi_Host *host)
        Scsi_Cmnd *SCdone;
        int timed_out;
 
+       ASSERT_LOCK(&io_request_lock, 1);
+
        SCdone = NULL;
 
        /*
@@ -1524,7 +1543,9 @@ STATIC int scsi_unjam_host(struct Scsi_Host *host)
                                                 * Due to the spinlock, we will never get out of this
                                                 * loop without a proper wait (DB)
                                                 */
+                                               spin_unlock_irq(&io_request_lock);
                                                scsi_sleep(1 * HZ);
+                                               spin_lock_irq(&io_request_lock);
 
                                                goto next_device;
                                        }
@@ -1617,7 +1638,9 @@ STATIC int scsi_unjam_host(struct Scsi_Host *host)
                                 * Due to the spinlock, we will never get out of this
                                 * loop without a proper wait. (DB)
                                 */
+                               spin_unlock_irq(&io_request_lock);
                                scsi_sleep(1 * HZ);
+                               spin_lock_irq(&io_request_lock);
 
                                goto next_device2;
                        }
@@ -1768,11 +1791,11 @@ void scsi_error_handler(void *data)
        lock_kernel();
 
        /*
-        *      Flush resources
+        *    Flush resources
         */
-        
+
        daemonize();
-       
+
        /*
         * Set the name of this process.
         */
@@ -1821,6 +1844,9 @@ void scsi_error_handler(void *data)
 
                host->eh_active = 0;
 
+               /* The spinlock is really needed up to this point. (DB) */
+               spin_unlock_irqrestore(&io_request_lock, flags);
+
                /*
                 * Note - if the above fails completely, the action is to take
                 * individual devices offline and flush the queue of any
@@ -1830,8 +1856,6 @@ void scsi_error_handler(void *data)
                 */
                scsi_restart_operations(host);
 
-               /* The spinlock is really needed up to this point. (DB) */
-               spin_unlock_irqrestore(&io_request_lock, flags);
        }
 
        SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler exiting\n"));
index 1671bd16cf38dac2e0d7a63f9a8c20c96e367ecd..3b471d52a6a3465a10959a08960522cae8dc5b25 100644 (file)
@@ -19,7 +19,7 @@
 #include <scsi/scsi_ioctl.h>
 
 #define NORMAL_RETRIES                 5
-#define NORMAL_TIMEOUT                 (10 * HZ)
+#define IOCTL_NORMAL_TIMEOUT                   (10 * HZ)
 #define FORMAT_UNIT_TIMEOUT            (2 * 60 * 60 * HZ)
 #define START_STOP_TIMEOUT             (60 * HZ)
 #define MOVE_MEDIUM_TIMEOUT            (5 * 60 * HZ)
@@ -69,7 +69,7 @@ static int ioctl_probe(struct Scsi_Host *host, void *buffer)
 /*
 
  * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
- * The NORMAL_TIMEOUT and NORMAL_RETRIES  variables are used.  
+ * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES  variables are used.  
  * 
  * dev is the SCSI device struct ptr, *(int *) arg is the length of the
  * input data, if any, not including the command string & counts, 
@@ -105,22 +105,18 @@ static void scsi_ioctl_done(Scsi_Cmnd * SCpnt)
 static int ioctl_internal_command(Scsi_Device * dev, char *cmd,
                                  int timeout, int retries)
 {
-       unsigned long flags;
        int result;
        Scsi_Cmnd *SCpnt;
        Scsi_Device *SDpnt;
 
-       spin_lock_irqsave(&io_request_lock, flags);
 
        SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", cmd[0]));
-       SCpnt = scsi_allocate_device(NULL, dev, 1);
+       SCpnt = scsi_allocate_device(dev, 1);
        {
                DECLARE_MUTEX_LOCKED(sem);
                SCpnt->request.sem = &sem;
                scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries);
-               spin_unlock_irqrestore(&io_request_lock, flags);
                down(&sem);
-               spin_lock_irqsave(&io_request_lock, flags);
                SCpnt->request.sem = NULL;
        }
 
@@ -167,11 +163,8 @@ static int ioctl_internal_command(Scsi_Device * dev, char *cmd,
        scsi_release_command(SCpnt);
        SCpnt = NULL;
 
-       if (!SDpnt->was_reset && SDpnt->scsi_request_fn)
-               (*SDpnt->scsi_request_fn) ();
 
        wake_up(&SDpnt->device_wait);
-       spin_unlock_irqrestore(&io_request_lock, flags);
        return result;
 }
 
@@ -183,34 +176,33 @@ static int ioctl_internal_command(Scsi_Device * dev, char *cmd,
  * The structure that we are passed should look like:
  *
  * struct sdata {
- *  unsigned int inlen;             [i] Length of data to be written to device 
+ *  unsigned int inlen;      [i] Length of data to be written to device 
  *  unsigned int outlen;     [i] Length of data to be read from device 
  *  unsigned char cmd[x];    [i] SCSI command (6 <= x <= 12).
- *                          [o] Data read from device starts here.
- *                          [o] On error, sense buffer starts here.
+ *                           [o] Data read from device starts here.
+ *                           [o] On error, sense buffer starts here.
  *  unsigned char wdata[y];  [i] Data written to device starts here.
  * };
  * Notes:
- *   - The SCSI command length is determined by examining the 1st byte
- *     of the given command. There is no way to override this.
- *   - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha).
- *   - The length (x + y) must be at least OMAX_SB_LEN bytes long to
- *     accomodate the sense buffer when an error occurs.
- *     The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
- *     old code will not be surprised.
- *   - If a Unix error occurs (e.g. ENOMEM) then the user will receive
- *     a negative return and the Unix error code in 'errno'. 
- *     If the SCSI command succeeds then 0 is returned.
- *     Positive numbers returned are the compacted SCSI error codes (4 
- *     bytes in one int) where the lowest byte is the SCSI status.
- *     See the drivers/scsi/scsi.h file for more information on this.
+ *   -  The SCSI command length is determined by examining the 1st byte
+ *      of the given command. There is no way to override this.
+ *   -  Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha).
+ *   -  The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ *      accomodate the sense buffer when an error occurs.
+ *      The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ *      old code will not be surprised.
+ *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ *      a negative return and the Unix error code in 'errno'. 
+ *      If the SCSI command succeeds then 0 is returned.
+ *      Positive numbers returned are the compacted SCSI error codes (4 
+ *      bytes in one int) where the lowest byte is the SCSI status.
+ *      See the drivers/scsi/scsi.h file for more information on this.
  *
  */
-#define OMAX_SB_LEN 16   /* Old sense buffer length */
+#define OMAX_SB_LEN 16         /* Old sense buffer length */
 
 int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
 {
-       unsigned long flags;
        char *buf;
        unsigned char cmd[12];
        char *cmd_in;
@@ -251,9 +243,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
                buf_needed = (buf_needed + 511) & ~511;
                if (buf_needed > MAX_BUF)
                        buf_needed = MAX_BUF;
-               spin_lock_irqsave(&io_request_lock, flags);
                buf = (char *) scsi_malloc(buf_needed);
-               spin_unlock_irqrestore(&io_request_lock, flags);
                if (!buf)
                        return -ENOMEM;
                memset(buf, 0, buf_needed);
@@ -299,23 +289,21 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
                retries = NORMAL_RETRIES;
                break;
        default:
-               timeout = NORMAL_TIMEOUT;
+               timeout = IOCTL_NORMAL_TIMEOUT;
                retries = NORMAL_RETRIES;
                break;
        }
 
 #ifndef DEBUG_NO_CMD
 
-       spin_lock_irqsave(&io_request_lock, flags);
 
-       SCpnt = scsi_allocate_device(NULL, dev, 1);
+       SCpnt = scsi_allocate_device(dev, 1);
 
        {
                DECLARE_MUTEX_LOCKED(sem);
                SCpnt->request.sem = &sem;
                scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done,
                            timeout, retries);
-               spin_unlock_irqrestore(&io_request_lock, flags);
                down(&sem);
                SCpnt->request.sem = NULL;
        }
@@ -339,7 +327,6 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
        }
        result = SCpnt->result;
 
-       spin_lock_irqsave(&io_request_lock, flags);
 
        wake_up(&SCpnt->device->device_wait);
        SDpnt = SCpnt->device;
@@ -349,10 +336,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
        if (buf)
                scsi_free(buf, buf_needed);
 
-       if (SDpnt->scsi_request_fn)
-               (*SDpnt->scsi_request_fn) ();
 
-       spin_unlock_irqrestore(&io_request_lock, flags);
        return result;
 #else
        {
@@ -445,7 +429,7 @@ int scsi_ioctl(Scsi_Device * dev, int cmd, void *arg)
                scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
                scsi_cmd[4] = SCSI_REMOVAL_PREVENT;
                return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
-                                        NORMAL_TIMEOUT, NORMAL_RETRIES);
+                                  IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
                break;
        case SCSI_IOCTL_DOORUNLOCK:
                if (!dev->removable || !dev->lockable)
@@ -455,14 +439,14 @@ int scsi_ioctl(Scsi_Device * dev, int cmd, void *arg)
                scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
                scsi_cmd[4] = SCSI_REMOVAL_ALLOW;
                return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
-                                        NORMAL_TIMEOUT, NORMAL_RETRIES);
+                                  IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
        case SCSI_IOCTL_TEST_UNIT_READY:
                scsi_cmd[0] = TEST_UNIT_READY;
                scsi_cmd[1] = dev->lun << 5;
                scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
                scsi_cmd[4] = 0;
                return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
-                                        NORMAL_TIMEOUT, NORMAL_RETRIES);
+                                  IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
                break;
        case SCSI_IOCTL_START_UNIT:
                scsi_cmd[0] = START_STOP;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
new file mode 100644 (file)
index 0000000..f53383d
--- /dev/null
@@ -0,0 +1,781 @@
+/*
+ *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
+ *
+ *  SCSI queueing library.
+ *      Initial versions: Eric Youngdale (eric@andante.org).
+ *                        Based upon conversations with large numbers
+ *                        of people at Linux Expo.
+ */
+
+/*
+ * The fundamental purpose of this file is to contain a library of utility
+ * routines that can be used by low-level drivers.   Ultimately the idea
+ * is that there should be a sufficiently rich number of functions that it
+ * would be possible for a driver author to fashion a queueing function for
+ * a low-level driver if they wished.   Note however that this file also
+ * contains the "default" versions of these functions, as we don't want to
+ * go through and retrofit queueing functions into all 30 some-odd drivers.
+ */
+
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+
+
+#define __KERNEL_SYSCALLS__
+
+#include <linux/unistd.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include <scsi/scsi_ioctl.h>
+
+/*
+ * This entire source file deals with the new queueing code.
+ */
+
+/*
+ * Function:    scsi_insert_special_cmd()
+ *
+ * Purpose:     Insert pre-formed command into request queue.
+ *
+ * Arguments:   SCpnt   - command that is ready to be queued.
+ *              at_head - boolean.  True if we should insert at head
+ *                        of queue, false if we should insert at tail.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       This function is called from character device and from
+ *              ioctl types of functions where the caller knows exactly
+ *              what SCSI command needs to be issued.   The idea is that
+ *              we merely inject the command into the queue (at the head
+ *              for now), and then call the queue request function to actually
+ *              process it.
+ */
+int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
+{
+       unsigned long flags;
+       request_queue_t *q;
+
+       ASSERT_LOCK(&io_request_lock, 0);
+
+       /*
+        * The SCpnt already contains a request structure - we will doctor the
+        * thing up with the appropriate values and use that in the actual
+        * request queue.
+        */
+       q = &SCpnt->device->request_queue;
+       SCpnt->request.cmd = SPECIAL;
+       SCpnt->request.special = (void *) SCpnt;
+
+       /*
+        * For the moment, we insert at the head of the queue.   This may turn
+        * out to be a bad idea, but we will see about that when we get there.
+        */
+       spin_lock_irqsave(&io_request_lock, flags);
+
+       if (at_head) {
+               SCpnt->request.next = q->current_request;
+               q->current_request = &SCpnt->request;
+       } else {
+               /*
+                * FIXME(eric) - we always insert at the tail of the list.  Otherwise
+                * ioctl commands would always take precedence over normal I/O.
+                */
+               SCpnt->request.next = NULL;
+               if (q->current_request == NULL) {
+                       q->current_request = &SCpnt->request;
+               } else {
+                       struct request *req;
+
+                       for (req = q->current_request; req; req = req->next) {
+                               if (req->next == NULL) {
+                                       req->next = &SCpnt->request;
+                               }
+                       }
+               }
+       }
+
+       /*
+        * Now hit the requeue function for the queue.   If the host is already
+        * busy, so be it - we have nothing special to do.   If the host can queue
+        * it, then send it off.
+        */
+       q->request_fn(q);
+       spin_unlock_irqrestore(&io_request_lock, flags);
+       return 0;
+}
+
+/*
+ * Function:    scsi_init_cmd_errh()
+ *
+ * Purpose:     Initialize SCpnt fields related to error handling.
+ *
+ * Arguments:   SCpnt   - command that is ready to be queued.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       This function has the job of initializing a number of
+ *              fields related to error handling.   Typically this will
+ *              be called once for each command, as required.
+ */
+int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
+{
+       ASSERT_LOCK(&io_request_lock, 0);
+
+       SCpnt->owner = SCSI_OWNER_MIDLEVEL;
+       SCpnt->reset_chain = NULL;
+       SCpnt->serial_number = 0;
+       SCpnt->serial_number_at_timeout = 0;
+       SCpnt->flags = 0;
+       SCpnt->retries = 0;
+
+       SCpnt->abort_reason = 0;
+
+       memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
+
+       if (SCpnt->cmd_len == 0)
+               SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+
+       /*
+        * We need saved copies of a number of fields - this is because
+        * error handling may need to overwrite these with different values
+        * to run different commands, and once error handling is complete,
+        * we will need to restore these values prior to running the actual
+        * command.
+        */
+       SCpnt->old_use_sg = SCpnt->use_sg;
+       SCpnt->old_cmd_len = SCpnt->cmd_len;
+       memcpy((void *) SCpnt->data_cmnd,
+              (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));
+       SCpnt->buffer = SCpnt->request_buffer;
+       SCpnt->bufflen = SCpnt->request_bufflen;
+
+       SCpnt->reset_chain = NULL;
+
+       SCpnt->internal_timeout = NORMAL_TIMEOUT;
+       SCpnt->abort_reason = 0;
+
+       return 1;
+}
+
+/*
+ * Function:    scsi_queue_next_request()
+ *
+ * Purpose:     Handle post-processing of completed commands.
+ *
+ * Arguments:   SCpnt   - command that may need to be requeued.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       After command completion, there may be blocks left
+ *              over which weren't finished by the previous command
+ *              this can be for a number of reasons - the main one is
+ *              that a medium error occurred, and the sectors after
+ *              the bad block need to be re-read.
+ *
+ *              If SCpnt is NULL, it means that the previous command
+ *              was completely finished, and we should simply start
+ *              a new command, if possible.
+ */
+void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
+{
+       int all_clear;
+       unsigned long flags;
+       Scsi_Device *SDpnt;
+       struct Scsi_Host *SHpnt;
+
+       ASSERT_LOCK(&io_request_lock, 0);
+
+       spin_lock_irqsave(&io_request_lock, flags);
+       if (SCpnt != NULL) {
+
+               /*
+                * For some reason, we are not done with this request.
+                * This happens for I/O errors in the middle of the request,
+                * in which case we need to request the blocks that come after
+                * the bad sector.
+                */
+               SCpnt->request.next = q->current_request;
+               q->current_request = &SCpnt->request;
+               SCpnt->request.special = (void *) SCpnt;
+       }
+       /*
+        * Just hit the requeue function for the queue.
+        * FIXME - if this queue is empty, check to see if we might need to
+        * start requests for other devices attached to the same host.
+        */
+       q->request_fn(q);
+
+       /*
+        * Now see whether there are other devices on the bus which
+        * might be starved.  If so, hit the request function.  If we
+        * don't find any, then it is safe to reset the flag.  If we
+        * find any device that it is starved, it isn't safe to reset the
+        * flag as the queue function releases the lock and thus some
+        * other device might have become starved along the way.
+        */
+       SDpnt = (Scsi_Device *) q->queuedata;
+       SHpnt = SDpnt->host;
+       all_clear = 1;
+       if (SHpnt->some_device_starved) {
+               for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
+                       request_queue_t *q;
+                       if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
+                           || (SHpnt->host_blocked)) {
+                               break;
+                       }
+                       if (SDpnt->device_blocked || !SDpnt->starved) {
+                               continue;
+                       }
+                       q = &SDpnt->request_queue;
+                       q->request_fn(q);
+                       all_clear = 0;
+               }
+               if (SDpnt == NULL && all_clear) {
+                       SHpnt->some_device_starved = 0;
+               }
+       }
+       spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+/*
+ * Function:    scsi_end_request()
+ *
+ * Purpose:     Post-processing of completed commands called from interrupt
+ *              handler.
+ *
+ * Arguments:   SCpnt    - command that is complete.
+ *              uptodate - 1 if I/O indicates success, 0 for I/O error.
+ *              sectors  - number of sectors we want to mark.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       This is called for block device requests in order to
+ *              mark some number of sectors as complete.
+ */
+Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
+{
+       struct request *req;
+       struct buffer_head *bh;
+
+       ASSERT_LOCK(&io_request_lock, 0);
+
+       req = &SCpnt->request;
+       req->errors = 0;
+       if (!uptodate) {
+               printk(" I/O error: dev %s, sector %lu\n",
+                      kdevname(req->rq_dev), req->sector);
+       }
+       do {
+               if ((bh = req->bh) != NULL) {
+                       req->bh = bh->b_reqnext;
+                       req->nr_sectors -= bh->b_size >> 9;
+                       req->sector += bh->b_size >> 9;
+                       bh->b_reqnext = NULL;
+                       sectors -= bh->b_size >> 9;
+                       bh->b_end_io(bh, uptodate);
+                       if ((bh = req->bh) != NULL) {
+                               req->current_nr_sectors = bh->b_size >> 9;
+                               if (req->nr_sectors < req->current_nr_sectors) {
+                                       req->nr_sectors = req->current_nr_sectors;
+                                       printk("scsi_end_request: buffer-list destroyed\n");
+                               }
+                       }
+               }
+       } while (sectors && bh);
+
+       /*
+        * If there are blocks left over at the end, set up the command
+        * to queue the remainder of them.
+        */
+       if (req->bh) {
+               req->buffer = bh->b_data;
+               return SCpnt;
+       }
+       /*
+        * This request is done.  If there is someone blocked waiting for this
+        * request, wake them up.  Typically used to wake up processes trying
+        * to swap a page into memory.
+        */
+       if (req->sem != NULL) {
+               up(req->sem);
+       }
+       add_blkdev_randomness(MAJOR(req->rq_dev));
+       scsi_release_command(SCpnt);
+       return NULL;
+}
+
+/*
+ * Function:    scsi_io_completion()
+ *
+ * Purpose:     Completion processing for block device I/O requests.
+ *
+ * Arguments:   SCpnt   - command that is finished.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       This function is matched in terms of capabilities to
+ *              the function that created the scatter-gather list.
+ *              In other words, if there are no bounce buffers
+ *              (the normal case for most drivers), we don't need
+ *              the logic to deal with cleaning up afterwards.
+ */
+void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
+                       int block_sectors)
+{
+       int result = SCpnt->result;
+       int this_count = SCpnt->bufflen >> 9;
+       request_queue_t *q = &SCpnt->device->request_queue;
+
+       ASSERT_LOCK(&io_request_lock, 0);
+
+       /*
+        * Free up any indirection buffers we allocated for DMA purposes. 
+        * For the case of a READ, we need to copy the data out of the
+        * bounce buffer and into the real buffer.
+        */
+       if (SCpnt->use_sg) {
+               struct scatterlist *sgpnt;
+               int i;
+
+               sgpnt = (struct scatterlist *) SCpnt->buffer;
+
+               for (i = 0; i < SCpnt->use_sg; i++) {
+                       if (sgpnt[i].alt_address) {
+                               if (SCpnt->request.cmd == READ) {
+                                       memcpy(sgpnt[i].alt_address, 
+                                              sgpnt[i].address,
+                                              sgpnt[i].length);
+                               }
+                               scsi_free(sgpnt[i].address, sgpnt[i].length);
+                       }
+               }
+               scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+       } else {
+               if (SCpnt->buffer != SCpnt->request.buffer) {
+                       if (SCpnt->request.cmd == READ) {
+                               memcpy(SCpnt->request.buffer, SCpnt->buffer,
+                                      SCpnt->bufflen);
+                       }
+                       scsi_free(SCpnt->buffer, SCpnt->bufflen);
+               }
+       }
+       /*
+        * Next deal with any sectors which we were able to correctly
+        * handle.
+        */
+       if (good_sectors > 0) {
+               SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n",
+                                             SCpnt->request.nr_sectors,
+                                             good_sectors));
+               SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg));
+
+               SCpnt->request.errors = 0;
+               /*
+                * If multiple sectors are requested in one buffer, then
+                * they will have been finished off by the first command.
+                * If not, then we have a multi-buffer command.
+                */
+               SCpnt = scsi_end_request(SCpnt, 1, good_sectors);
+
+               /*
+                * If the command completed without error, then either finish off the
+                * rest of the command, or start a new one.
+                */
+               if (result == 0) {
+                       scsi_queue_next_request(q, SCpnt);
+                       return;
+               }
+       }
+       /*
+        * Now, if we were good little boys and girls, Santa left us a request
+        * sense buffer.  We can extract information from this, so we
+        * can choose a block to remap, etc.
+        */
+       if (driver_byte(result) != 0) {
+               if (suggestion(result) == SUGGEST_REMAP) {
+#ifdef REMAP
+                       /*
+                        * Not yet implemented.  A read will fail after being remapped,
+                        * a write will call the strategy routine again.
+                        */
+                       if (SCpnt->device->remap) {
+                               result = 0;
+                       }
+#endif
+               }
+               if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70
+                   && (SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+                       if (SCpnt->device->removable) {
+                               /* detected disc change.  set a bit and quietly refuse
+                                * further access.
+                                */
+                               SCpnt->device->changed = 1;
+                               SCpnt = scsi_end_request(SCpnt, 0, this_count);
+                               scsi_queue_next_request(q, SCpnt);
+                               return;
+                       } else {
+                               /*
+                                * Must have been a power glitch, or a bus reset.
+                                * Could not have been a media change, so we just retry
+                                * the request and see what happens.
+                                */
+                               scsi_queue_next_request(q, SCpnt);
+                               return;
+                       }
+               }
+               /* If we had an ILLEGAL REQUEST returned, then we may have
+                * performed an unsupported command.  The only thing this should be
+                * would be a ten byte read where only a six byte read was supported.
+                * Also, on a system where READ CAPACITY failed, we may have read
+                * past the end of the disk.
+                */
+
+               switch (SCpnt->sense_buffer[2]) {
+               case ILLEGAL_REQUEST:
+                       if (SCpnt->device->ten) {
+                               SCpnt->device->ten = 0;
+                               scsi_queue_next_request(q, SCpnt);
+                               result = 0;
+                       } else {
+                               SCpnt = scsi_end_request(SCpnt, 0, this_count);
+                               scsi_queue_next_request(q, SCpnt);
+                               return;
+                       }
+                       break;
+               case NOT_READY:
+                       printk(KERN_INFO "Device %x not ready.\n",
+                              SCpnt->request.rq_dev);
+                       SCpnt = scsi_end_request(SCpnt, 0, this_count);
+                       scsi_queue_next_request(q, SCpnt);
+                       return;
+                       break;
+               case MEDIUM_ERROR:
+               case VOLUME_OVERFLOW:
+                       printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
+                              SCpnt->host->host_no, (int) SCpnt->channel,
+                              (int) SCpnt->target, (int) SCpnt->lun);
+                       print_command(SCpnt->cmnd);
+                       print_sense("sd", SCpnt);
+                       SCpnt = scsi_end_request(SCpnt, 0, block_sectors);
+                       scsi_queue_next_request(q, SCpnt);
+                       return;
+               default:
+                       break;
+               }
+       }                       /* driver byte != 0 */
+       if (result) {
+               printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
+                      SCpnt->device->host->host_no,
+                      SCpnt->device->channel,
+                      SCpnt->device->id,
+                      SCpnt->device->lun, result);
+
+               if (driver_byte(result) & DRIVER_SENSE)
+                       print_sense("sd", SCpnt);
+               SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+               scsi_queue_next_request(q, SCpnt);
+               return;
+       }
+}
+
+/*
+ * Function:    scsi_get_request_dev()
+ *
+ * Purpose:     Find the upper-level driver that is responsible for this
+ *              request
+ *
+ * Arguments:   request   - I/O request we are preparing to queue.
+ *
+ * Lock status: No locks assumed to be held, but as it happens the
+ *              io_request_lock is held when this is called.
+ *
+ * Returns:     Pointer to the Scsi_Device_Template of the block driver
+ *              registered for this request's major number, or NULL if
+ *              no registered driver claims it.
+ *
+ * Notes:       The requests in the request queue may have originated
+ *              from any block device driver.  We need to find out which
+ *              one so that we can later form the appropriate command.
+ */
+struct Scsi_Device_Template *scsi_get_request_dev(struct request *req)
+{
+       struct Scsi_Device_Template *spnt;
+       kdev_t dev = req->rq_dev;
+       int major = MAJOR(dev);
+
+       ASSERT_LOCK(&io_request_lock, 1);
+
+       for (spnt = scsi_devicelist; spnt; spnt = spnt->next) {
+               /*
+                * Search for a block device driver that supports this
+                * major.
+                */
+               if (spnt->blk && spnt->major == major) {
+                       return spnt;
+               }
+       }
+       return NULL;
+}
+
+/*
+ * Function:    scsi_request_fn()
+ *
+ * Purpose:     Generic version of request function for SCSI hosts.
+ *
+ * Arguments:   q       - Pointer to actual queue.
+ *
+ * Returns:     Nothing
+ *
+ * Lock status: IO request lock assumed to be held when called.
+ *
+ * Notes:       The theory is that this function is something which individual
+ *              drivers could also supply if they wished to.   The problem
+ *              is that we have 30 some odd low-level drivers in the kernel
+ *              tree already, and it would be most difficult to retrofit
+ *              this crap into all of them.   Thus this function has the job
+ *              of acting as a generic queue manager for all of those existing
+ *              drivers.
+ */
+void scsi_request_fn(request_queue_t * q)
+{
+       struct request *req;
+       Scsi_Cmnd *SCpnt;
+       Scsi_Device *SDpnt;
+       struct Scsi_Host *SHpnt;
+       struct Scsi_Device_Template *STpnt;
+
+       ASSERT_LOCK(&io_request_lock, 1);
+
+       /* The per-device queue carries its Scsi_Device in queuedata. */
+       SDpnt = (Scsi_Device *) q->queuedata;
+       if (!SDpnt) {
+               panic("Missing device");
+       }
+       SHpnt = SDpnt->host;
+
+       /*
+        * If the host for this device is in error recovery mode, don't
+        * do anything at all here.  When the host leaves error recovery
+        * mode, it will automatically restart things and start queueing
+        * commands again.  Same goes if the queue is actually plugged,
+        * if the device itself is blocked, or if the host is fully
+        * occupied.
+        */
+       if (SHpnt->in_recovery
+           || q->plugged) {
+               return;
+       }
+       /*
+        * To start with, we keep looping until the queue is empty, or until
+        * the host is no longer able to accept any more requests.
+        */
+       while (1 == 1) {
+               /*
+                * If the host cannot accept another request, then quit.
+                */
+               if (SDpnt->device_blocked) {
+                       break;
+               }
+               if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
+                   || (SHpnt->host_blocked)) {
+                       /*
+                        * If we are unable to process any commands at all for this
+                        * device, then we consider it to be starved.  What this means
+                        * is that there are no outstanding commands for this device
+                        * and hence we need a little help getting it started again
+                        * once the host isn't quite so busy.
+                        */
+                       if (SDpnt->device_busy == 0) {
+                               SDpnt->starved = 1;
+                               SHpnt->some_device_starved = 1;
+                       }
+                       break;
+               } else {
+                       SDpnt->starved = 0;
+               }
+               /*
+                * Loop through all of the requests in this queue, and find
+                * one that is queueable.
+                */
+               req = q->current_request;
+
+               /*
+                * If we couldn't find a request that could be queued, then we
+                * can also quit.
+                */
+               if (!req) {
+                       break;
+               }
+               /*
+                * Find the actual device driver associated with this command.
+                * The SPECIAL requests are things like character device or
+                * ioctls, which did not originate from ll_rw_blk.
+                */
+               if (req->special != NULL) {
+                       STpnt = NULL;
+                       SCpnt = (Scsi_Cmnd *) req->special;
+               } else {
+                       STpnt = scsi_get_request_dev(req);
+                       if (!STpnt) {
+                               panic("Unable to find device associated with request");
+                       }
+                       /*
+                        * Now try and find a command block that we can use.
+                        */
+                       SCpnt = scsi_allocate_device(SDpnt, FALSE);
+                       /*
+                        * If so, we are ready to do something.  Bump the count
+                        * while the queue is locked and then break out of the loop.
+                        * Otherwise loop around and try another request.
+                        */
+                       if (!SCpnt) {
+                               break;
+                       }
+                       SHpnt->host_busy++;
+                       SDpnt->device_busy++;
+               }
+
+               /*
+                * FIXME(eric)
+                * I am not sure where the best place to do this is.  We need
+                * to hook in a place where we are likely to come if in user
+                * space.   Technically the error handling thread should be
+                * doing this crap, but the error handler isn't used by
+                * most hosts.
+                */
+               if (SDpnt->was_reset) {
+                       /*
+                        * We need to relock the door, but we might
+                        * be in an interrupt handler.  Only do this
+                        * from user space, since we do not want to
+                        * sleep from an interrupt.
+                        *
+                        * NOTE(review): if we already bumped host_busy /
+                        * device_busy for a freshly allocated command above,
+                        * this "continue" does not unwind those counts --
+                        * verify against scsi_allocate_device() semantics.
+                        */
+                       if (SDpnt->removable && !in_interrupt()) {
+                               spin_unlock_irq(&io_request_lock);
+                               scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
+                               SDpnt->was_reset = 0;
+                               spin_lock_irq(&io_request_lock);
+                               continue;
+                       }
+                       SDpnt->was_reset = 0;
+               }
+               /*
+                * Finally, before we release the lock, we copy the
+                * request to the command block, and remove the
+                * request from the request list.   Note that we always
+                * operate on the queue head - there is absolutely no
+                * reason to search the list, because all of the commands
+                * in this queue are for the same device.
+                */
+               q->current_request = req->next;
+
+               if (req->special == NULL) {
+                       memcpy(&SCpnt->request, req, sizeof(struct request));
+
+                       /*
+                        * We have copied the data out of the request block - it is now in
+                        * a field in SCpnt.  Release the request block.
+                        */
+                       req->next = NULL;
+                       req->rq_status = RQ_INACTIVE;
+                       wake_up(&wait_for_request);
+               }
+               /*
+                * Now it is finally safe to release the lock.  We are not going
+                * to noodle the request list until this request has been queued
+                * and we loop back to queue another.
+                */
+               spin_unlock_irq(&io_request_lock);
+
+               /*
+                * NOTE(review): req is still dereferenced below even though,
+                * in the non-special case, it was just marked RQ_INACTIVE,
+                * wait_for_request was woken, and the lock was dropped.  This
+                * relies on the request block not being reused yet -- confirm.
+                */
+               if (req->special == NULL) {
+                       /*
+                        * This will do a couple of things:
+                        *  1) Fill in the actual SCSI command.
+                        *  2) Fill in any other upper-level specific fields (timeout).
+                        *
+                        * If this returns 0, it means that the request failed (reading
+                        * past end of disk, reading offline device, etc).   This won't
+                        * actually talk to the device, but some kinds of consistency
+                        * checking may cause the request to be rejected immediately.
+                        */
+                       if (STpnt == NULL) {
+                               STpnt = scsi_get_request_dev(req);
+                       }
+                       /* 
+                        * This sets up the scatter-gather table (allocating if
+                        * required).  Hosts that need bounce buffers will also
+                        * get those allocated here.  
+                        */
+                       if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+                               continue;
+                       }
+                       /*
+                        * Initialize the actual SCSI command for this request.
+                        */
+                       if (!STpnt->init_command(SCpnt)) {
+                               continue;
+                       }
+               }
+               /*
+                * Finally, initialize any error handling parameters, and set up
+                * the timers for timeouts.
+                */
+               scsi_init_cmd_errh(SCpnt);
+
+               /*
+                * Dispatch the command to the low-level driver.
+                */
+               scsi_dispatch_cmd(SCpnt);
+
+               /*
+                * Now we need to grab the lock again.  We are about to mess with
+                * the request queue and try to find another command.
+                */
+               spin_lock_irq(&io_request_lock);
+       }
+
+       /*
+        * If this is a single-lun device, and we are currently finished
+        * with this device, then see if we need to get another device
+        * started.
+        */
+       if (SDpnt->single_lun
+           && q->current_request == NULL
+           && SDpnt->device_busy == 0) {
+               /* NOTE: this inner q intentionally shadows the parameter;
+                * the outer queue is not referenced below this point. */
+               request_queue_t *q;
+
+               for (SDpnt = SHpnt->host_queue;
+                    SDpnt;
+                    SDpnt = SDpnt->next) {
+                       if (((SHpnt->can_queue > 0)
+                            && (SHpnt->host_busy >= SHpnt->can_queue))
+                           || (SHpnt->host_blocked)
+                           || (SDpnt->device_blocked)) {
+                               break;
+                       }
+                       q = &SDpnt->request_queue;
+                       q->request_fn(q);
+               }
+       }
+}
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
new file mode 100644 (file)
index 0000000..e31d1a7
--- /dev/null
@@ -0,0 +1,770 @@
+/*
+ *  scsi_merge.c Copyright (C) 1999 Eric Youngdale
+ *
+ *  SCSI queueing library.
+ *      Initial versions: Eric Youngdale (eric@andante.org).
+ *                        Based upon conversations with large numbers
+ *                        of people at Linux Expo.
+ */
+
+/*
+ * This file contains queue management functions that are used by SCSI.
+ * Typically this is used for several purposes.   First, we need to ensure
+ * that commands do not grow so large that they cannot be handled all at
+ * once by a host adapter.   The various flavors of merge functions included
+ * here serve this purpose.
+ *
+ * Note that it would be quite trivial to allow the low-level driver the
+ * flexibility to define it's own queue handling functions.  For the time
+ * being, the hooks are not present.   Right now we are just using the
+ * data in the host template as an indicator of how we should be handling
+ * queues, and we select routines that are optimized for that purpose.
+ *
+ * Some hosts do not impose any restrictions on the size of a request.
+ * In such cases none of the merge functions in this file are called,
+ * and we allow ll_rw_blk to merge requests in the default manner.
+ * This isn't guaranteed to be optimal, but it should be pretty darned
+ * good.   If someone comes up with ideas of better ways of managing queues
+ * to improve on the default behavior, then certainly fit it into this
+ * scheme in whatever manner makes the most sense.   Please note that
+ * since each device has it's own queue, we have considerable flexibility
+ * in queue management.
+ */
+
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+
+
+#define __KERNEL_SYSCALLS__
+
+#include <linux/unistd.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include <scsi/scsi_ioctl.h>
+
+#ifdef CONFIG_SCSI_DEBUG_QUEUES
+/*
+ * Enable a bunch of additional consistency checking.   Turn this off
+ * if you are benchmarking.
+ */
+
+/*
+ * Dump the state of a request whose cached segment count disagrees with
+ * a freshly computed one, then panic.  Never actually returns, despite
+ * the int return type.
+ */
+static int dump_stats(struct request *req,
+                     int use_clustering,
+                     int dma_host,
+                     int segments)
+{
+       struct buffer_head *bh;
+
+       /*
+        * Dump the information that we have.  We know we have an
+        * inconsistency.
+        */
+       printk("nr_segments is %lx\n", req->nr_segments);
+       printk("counted segments is %x\n", segments);
+       printk("Flags %d %d\n", use_clustering, dma_host);
+       /* NOTE(review): loop condition tests b_reqnext, so the final
+        * buffer head of the request is never printed -- confirm intended. */
+       for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) 
+       {
+               printk("Segment 0x%p, blocks %d, addr 0x%lx\n",
+                      bh,
+                      bh->b_size >> 9,
+                      virt_to_phys(bh->b_data - 1));
+       }
+       panic("Ththththaats all folks.  Too dangerous to continue.\n");
+}
+
+
+/*
+ * Simple sanity check that we will use for the first go around
+ * in order to ensure that we are doing the counting correctly.
+ * This can be removed for optimization.
+ */
+#define SANITY_CHECK(req, _CLUSTER, _DMA)                              \
+    if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA) )    \
+    {                                                                  \
+       __label__ here;                                                 \
+here:                                                                  \
+       printk("Incorrect segment count at 0x%p", &&here);              \
+       dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA)); \
+    }
+#else
+#define SANITY_CHECK(req, _CLUSTER, _DMA)
+#endif
+
+/*
+ * FIXME(eric) - the original disk code disabled clustering for MOD
+ * devices.  I have no idea why we thought this was a good idea - my
+ * guess is that it was an attempt to limit the size of requests to MOD
+ * devices.
+ */
+/* True when the host asks for clustering and the device is not a MOD drive. */
+#define CLUSTERABLE_DEVICE(SH,SD) (SH->use_clustering && \
+                                  SD->type != TYPE_MOD)
+
+/*
+ * This entire source file deals with the new queueing code.
+ */
+
+/*
+ * Function:    __count_segments()
+ *
+ * Purpose:     Prototype for queue merge function.
+ *
+ * Arguments:   q       - Queue for which we are merging request.
+ *              req     - request into which we wish to merge.
+ *              use_clustering - 1 if this host wishes to use clustering
+ *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ *                      expose all of the address lines, so that DMA cannot
+ *                      be done from an arbitrary address).
+ *
+ * Returns:     Count of the number of SG segments for the request.
+ *
+ * Lock status: 
+ *
+ * Notes:       This is only used for diagnostic purposes.
+ */
+__inline static int __count_segments(struct request *req,
+                                    int use_clustering,
+                                    int dma_host)
+{
+       int ret = 1;
+       struct buffer_head *bh;
+
+       for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) {
+               if (use_clustering) {
+                       /* 
+                        * See if we can do this without creating another
+                        * scatter-gather segment.  In the event that this is a
+                        * DMA capable host, make sure that a segment doesn't span
+                        * the DMA threshold boundary.  
+                        */
+                       if (dma_host &&
+                           virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+                               /*
+                                * NOTE(review): this increments ret here AND
+                                * then falls through to the unconditional
+                                * ret++ below, counting this boundary twice
+                                * -- confirm whether that is intended.
+                                */
+                               ret++;
+                       } else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) {
+                               /*
+                                * This one is OK.  Let it go.
+                                */
+                               continue;
+                       }
+                       ret++;
+               } else {
+                       ret++;
+               }
+       }
+       return ret;
+}
+
+/*
+ * Function:    __scsi_merge_fn()
+ *
+ * Purpose:     Prototype for queue merge function.
+ *
+ * Arguments:   q       - Queue for which we are merging request.
+ *              req     - request into which we wish to merge.
+ *              bh      - Block which we may wish to merge into request
+ *              use_clustering - 1 if this host wishes to use clustering
+ *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ *                      expose all of the address lines, so that DMA cannot
+ *                      be done from an arbitrary address).
+ *
+ * Returns:     1 if it is OK to merge the block into the request.  0
+ *              if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes:       Some drivers have limited scatter-gather table sizes, and
+ *              thus they cannot queue an infinitely large command.  This
+ *              function is called from ll_rw_blk before it attempts to merge
+ *              a new block into a request to make sure that the request will
+ *              not become too large.
+ *
+ *              This function is not designed to be directly called.  Instead
+ *              it should be referenced from other functions where the
+ *              use_clustering and dma_host parameters should be integer
+ *              constants.  The compiler should thus be able to properly
+ *              optimize the code, eliminating stuff that is irrelevant.
+ *              It is more maintainable to do this way with a single function
+ *              than to have 4 separate functions all doing roughly the
+ *              same thing.
+ */
+__inline static int __scsi_merge_fn(request_queue_t * q,
+                                   struct request *req,
+                                   struct buffer_head *bh,
+                                   int use_clustering,
+                                   int dma_host)
+{
+       unsigned int sector, count;
+       Scsi_Device *SDpnt;
+       struct Scsi_Host *SHpnt;
+
+       SDpnt = (Scsi_Device *) q->queuedata;
+       SHpnt = SDpnt->host;
+
+       /* Size of the candidate buffer in 512-byte sectors. */
+       count = bh->b_size >> 9;
+       sector = bh->b_rsector;
+
+       /*
+        * We come in here in one of two cases.   The first is that we
+        * are checking to see if we can add the buffer to the end of the
+        * request, the other is to see if we should add the request to the
+        * start.
+        */
+       if (req->sector + req->nr_sectors == sector) {
+               if (use_clustering) {
+                       /* 
+                        * See if we can do this without creating another
+                        * scatter-gather segment.  In the event that this is a
+                        * DMA capable host, make sure that a segment doesn't span
+                        * the DMA threshold boundary.  
+                        */
+                       if (dma_host &&
+                           virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+                               goto new_segment;
+                       }
+                       if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
+                               /*
+                                * This one is OK.  Let it go.
+                                */
+                               return 1;
+                       }
+               }
+               goto new_segment;
+       } else if (req->sector - count == sector) {
+               if (use_clustering) {
+                       /* 
+                        * See if we can do this without creating another
+                        * scatter-gather segment.  In the event that this is a
+                        * DMA capable host, make sure that a segment doesn't span
+                        * the DMA threshold boundary. 
+                        */
+                       if (dma_host &&
+                           virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+                               goto new_segment;
+                       }
+                       if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
+                               /*
+                                * This one is OK.  Let it go.
+                                */
+                               return 1;
+                       }
+               }
+               goto new_segment;
+       } else {
+               /* ll_rw_blk should only ask about adjacent blocks. */
+               panic("Attempt to merge sector that doesn't belong");
+       }
+      new_segment:
+       if (req->nr_segments < SHpnt->sg_tablesize) {
+               /*
+                * This will form the start of a new segment.  Bump the 
+                * counter.
+                */
+               req->nr_segments++;
+               return 1;
+       } else {
+               return 0;
+       }
+}
+
+/*
+ * Function:    scsi_merge_fn_()
+ *
+ * Purpose:     queue merge function.
+ *
+ * Arguments:   q       - Queue for which we are merging request.
+ *              req     - request into which we wish to merge.
+ *              bh      - Block which we may wish to merge into request
+ *
+ * Returns:     1 if it is OK to merge the block into the request.  0
+ *              if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes:       Optimized for different cases depending upon whether
+ *              ISA DMA is in use and whether clustering should be used.
+ */
+#define MERGEFCT(_FUNCTION, _CLUSTER, _DMA)            \
+static int _FUNCTION(request_queue_t * q,              \
+              struct request * req,                    \
+              struct buffer_head * bh)                 \
+{                                                      \
+    int ret;                                           \
+    SANITY_CHECK(req, _CLUSTER, _DMA);                 \
+    ret =  __scsi_merge_fn(q, req, bh, _CLUSTER, _DMA); \
+    return ret;                                                \
+}
+
+/* One stub per (clustering, ISA-DMA) combination; suffix encodes the flags. */
+MERGEFCT(scsi_merge_fn_, 0, 0)
+MERGEFCT(scsi_merge_fn_d, 0, 1)
+MERGEFCT(scsi_merge_fn_c, 1, 0)
+MERGEFCT(scsi_merge_fn_dc, 1, 1)
+/*
+ * Function:    __scsi_merge_requests_fn()
+ *
+ * Purpose:     Prototype for queue merge function.
+ *
+ * Arguments:   q       - Queue for which we are merging request.
+ *              req     - request into which we wish to merge.
+ *              next    - 2nd request that we might want to combine with req
+ *              use_clustering - 1 if this host wishes to use clustering
+ *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ *                      expose all of the address lines, so that DMA cannot
+ *                      be done from an arbitrary address).
+ *
+ * Returns:     1 if it is OK to merge the two requests.  0
+ *              if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes:       Some drivers have limited scatter-gather table sizes, and
+ *              thus they cannot queue an infinitely large command.  This
+ *              function is called from ll_rw_blk before it attempts to merge
+ *              a new block into a request to make sure that the request will
+ *              not become too large.
+ *
+ *              This function is not designed to be directly called.  Instead
+ *              it should be referenced from other functions where the
+ *              use_clustering and dma_host parameters should be integer
+ *              constants.  The compiler should thus be able to properly
+ *              optimize the code, eliminating stuff that is irrelevant.
+ *              It is more maintainable to do this way with a single function
+ *              than to have 4 separate functions all doing roughly the
+ *              same thing.
+ */
+__inline static int __scsi_merge_requests_fn(request_queue_t * q,
+                                            struct request *req,
+                                            struct request *next,
+                                            int use_clustering,
+                                            int dma_host)
+{
+       Scsi_Device *SDpnt;
+       struct Scsi_Host *SHpnt;
+
+       SDpnt = (Scsi_Device *) q->queuedata;
+       SHpnt = SDpnt->host;
+
+       /*
+        * If the two requests together are too large (even assuming that we
+        * can merge the boundary requests into one segment), then don't
+        * allow the merge.
+        */
+       if (req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) {
+               return 0;
+       }
+       /*
+        * The main question is whether the two segments at the boundaries
+        * would be considered one or two.
+        */
+       if (use_clustering) {
+               /* 
+                * See if we can do this without creating another
+                * scatter-gather segment.  In the event that this is a
+                * DMA capable host, make sure that a segment doesn't span
+                * the DMA threshold boundary.  
+                */
+               if (dma_host &&
+                   virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+                       goto dont_combine;
+               }
+               if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) {
+                       /*
+                        * This one is OK.  Let it go.
+                        */
+                       req->nr_segments += next->nr_segments - 1;
+                       return 1;
+               }
+       }
+      dont_combine:
+       /*
+        * We know that the two requests at the boundary should not be combined.
+        * Make sure we can fix something that is the sum of the two.
+        * A slightly stricter test than we had above.
+        */
+       if (req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
+               return 0;
+       } else {
+               /*
+                * This will form the start of a new segment.  Bump the 
+                * counter.
+                */
+               req->nr_segments += next->nr_segments;
+               return 1;
+       }
+}
+
+/*
+ * Function:    scsi_merge_requests_fn_()
+ *
+ * Purpose:     queue merge function.
+ *
+ * Arguments:   q       - Queue for which we are merging request.
+ *              req     - request into which we wish to merge.
+ *              next    - 2nd request that we might want to combine with req
+ *
+ * Returns:     1 if it is OK to merge the two requests.  0
+ *              if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes:       Optimized for different cases depending upon whether
+ *              ISA DMA is in use and whether clustering should be used.
+ */
+#define MERGEREQFCT(_FUNCTION, _CLUSTER, _DMA)         \
+static int _FUNCTION(request_queue_t * q,              \
+                    struct request * req,              \
+                    struct request * next)             \
+{                                                      \
+    int ret;                                           \
+    SANITY_CHECK(req, _CLUSTER, _DMA);                 \
+    ret =  __scsi_merge_requests_fn(q, req, next, _CLUSTER, _DMA); \
+    return ret;                                                \
+}
+
+/* One stub per (clustering, ISA-DMA) combination; suffix encodes the flags. */
+MERGEREQFCT(scsi_merge_requests_fn_, 0, 0)
+MERGEREQFCT(scsi_merge_requests_fn_d, 0, 1)
+MERGEREQFCT(scsi_merge_requests_fn_c, 1, 0)
+MERGEREQFCT(scsi_merge_requests_fn_dc, 1, 1)
+/*
+ * Function:    __init_io()
+ *
+ * Purpose:     Prototype for io initialize function.
+ *
+ * Arguments:   SCpnt   - Command descriptor we wish to initialize
+ *              sg_count_valid  - 1 if the sg count in the req is valid.
+ *              use_clustering - 1 if this host wishes to use clustering
+ *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ *                      expose all of the address lines, so that DMA cannot
+ *                      be done from an arbitrary address).
+ *
+ * Returns:     1 on success.
+ *
+ * Lock status: 
+ *
+ * Notes:       Only the SCpnt argument should be a non-constant variable.
+ *              This function is designed in such a way that it will be
+ *              invoked from a series of small stubs, each of which would
+ *              be optimized for specific circumstances.
+ *
+ *              The advantage of this is that hosts that don't do DMA
+ *              get versions of the function that essentially don't have
+ *              any of the DMA code.  Same goes for clustering - in the
+ *              case of hosts with no need for clustering, there is no point
+ *              in a whole bunch of overhead.
+ *
+ *              Finally, in the event that a host has set can_queue to SG_ALL
+ *              implying that there is no limit to the length of a scatter
+ *              gather list, the sg count in the request won't be valid
+ *              (mainly because we don't need queue management functions
+ *              which keep the tally uptodate.
+ */
+__inline static int __init_io(Scsi_Cmnd * SCpnt,
+                             int sg_count_valid,
+                             int use_clustering,
+                             int dma_host)
+{
+       struct buffer_head *bh;
+       struct buffer_head *bhprev;
+       char *buff;
+       int count;
+       int i;
+       struct request *req;
+       struct scatterlist *sgpnt;
+       int this_count;
+
+       /*
+        * FIXME(eric) - don't inline this - it doesn't depend on the
+        * integer flags.   Come to think of it, I don't think this is even
+        * needed any more.  Need to play with it and see if we hit the
+        * panic.  If not, then don't bother.
+        */
+       if (!SCpnt->request.bh) {
+               /* 
+                * Case of page request (i.e. raw device), or unlinked buffer 
+                * Typically used for swapping, but this isn't how we do
+                * swapping any more.
+                */
+               panic("I believe this is dead code.  If we hit this, I was wrong");
+#if 0
+               SCpnt->request_bufflen = SCpnt->request.nr_sectors << 9;
+               SCpnt->request_buffer = SCpnt->request.buffer;
+               SCpnt->use_sg = 0;
+               /*
+                * FIXME(eric) - need to handle DMA here.
+                */
+#endif
+               return 1;
+       }
+       req = &SCpnt->request;
+       /*
+        * First we need to know how many scatter gather segments are needed.
+        */
+       if (!sg_count_valid) {
+               count = __count_segments(req, use_clustering, dma_host);
+       } else {
+               count = req->nr_segments;
+       }
+
+       /*
+        * If the dma pool is nearly empty, then queue a minimal request
+        * with a single segment.  Typically this will satisfy a single
+        * buffer.
+        */
+       if (dma_host && scsi_dma_free_sectors <= 10) {
+               this_count = SCpnt->request.current_nr_sectors;
+               goto single_segment;
+       }
+       /*
+        * Don't bother with scatter-gather if there is only one segment.
+        */
+       if (count == 1) {
+               this_count = SCpnt->request.nr_sectors;
+               goto single_segment;
+       }
+       SCpnt->use_sg = count;
+
+       /* 
+        * Allocate the actual scatter-gather table itself.
+        * scsi_malloc can only allocate in chunks of 512 bytes 
+        */
+       SCpnt->sglist_len = (SCpnt->use_sg
+                            * sizeof(struct scatterlist) + 511) & ~511;
+
+       sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);
+
+       /*
+        * Now fill the scatter-gather table.
+        */
+       if (!sgpnt) {
+               /*
+                * If we cannot allocate the scatter-gather table, then
+                * simply write the first buffer all by itself.
+                */
+               printk("Warning - running *really* short on DMA buffers\n");
+               this_count = SCpnt->request.current_nr_sectors;
+               goto single_segment;
+       }
+       /* 
+        * Next, walk the list, and fill in the addresses and sizes of
+        * each segment.
+        */
+       memset(sgpnt, 0, SCpnt->sglist_len);
+       SCpnt->request_buffer = (char *) sgpnt;
+       SCpnt->request_bufflen = 0;
+       bhprev = NULL;
+
+       for (count = 0, bh = SCpnt->request.bh;
+            bh; bh = bh->b_reqnext) {
+               if (use_clustering && bhprev != NULL) {
+                       if (dma_host &&
+                           virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) {
+                               /* Nothing - fall through */
+                       } else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
+                               /*
+                                * This one is OK.  Let it go.
+                                */
+                               sgpnt[count - 1].length += bh->b_size;
+                               if (!dma_host) {
+                                       SCpnt->request_bufflen += bh->b_size;
+                               }
+                               bhprev = bh;
+                               continue;
+                       }
+               }
+               count++;
+               sgpnt[count - 1].address = bh->b_data;
+               sgpnt[count - 1].length += bh->b_size;
+               if (!dma_host) {
+                       SCpnt->request_bufflen += bh->b_size;
+               }
+               bhprev = bh;
+       }
+
+       /*
+        * Verify that the count is correct.
+        */
+       if (count != SCpnt->use_sg) {
+               panic("Incorrect sg segment count");
+       }
+       if (!dma_host) {
+               return 1;
+       }
+       /*
+        * Now allocate bounce buffers, if needed.
+        */
+       SCpnt->request_bufflen = 0;
+       for (i = 0; i < count; i++) {
+               SCpnt->request_bufflen += sgpnt[i].length;
+               if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
+                   ISA_DMA_THRESHOLD && !sgpnt[count].alt_address) {
+                       sgpnt[i].alt_address = sgpnt[i].address;
+                       sgpnt[i].address =
+                           (char *) scsi_malloc(sgpnt[i].length);
+                       /*
+                        * If we cannot allocate memory for this DMA bounce
+                        * buffer, then queue just what we have done so far.
+                        */
+                       if (sgpnt[i].address == NULL) {
+                               printk("Warning - running low on DMA memory\n");
+                               SCpnt->request_bufflen -= sgpnt[i].length;
+                               SCpnt->use_sg = i;
+                               if (i == 0) {
+                                       panic("DMA pool exhausted");
+                               }
+                               break;
+                       }
+                       if (SCpnt->request.cmd == WRITE) {
+                               memcpy(sgpnt[i].address, sgpnt[i].alt_address,
+                                      sgpnt[i].length);
+                       }
+               }
+       }
+       return 1;
+
+      single_segment:
+       /*
+        * Come here if for any reason we choose to do this as a single
+        * segment.  Possibly the entire request, or possibly a small
+        * chunk of the entire request.
+        */
+       bh = SCpnt->request.bh;
+       buff = SCpnt->request.buffer;
+
+       if (dma_host) {
+               /*
+                * Allocate a DMA bounce buffer.  If the allocation fails, fall
+                * back and allocate a really small one - enough to satisfy
+                * the first buffer.
+                */
+               if (virt_to_phys(SCpnt->request.bh->b_data)
+                   + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) {
+                       buff = (char *) scsi_malloc(this_count << 9);
+                       if (!buff) {
+                               printk("Warning - running low on DMA memory\n");
+                               this_count = SCpnt->request.current_nr_sectors;
+                               buff = (char *) scsi_malloc(this_count << 9);
+                               if (!buff) {
+                                       panic("Unable to allocate DMA buffer\n");
+                               }
+                       }
+                       if (SCpnt->request.cmd == WRITE)
+                               memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9);
+               }
+       }
+       SCpnt->request_bufflen = this_count << 9;
+       SCpnt->request_buffer = buff;
+       SCpnt->use_sg = 0;
+       return 1;
+}
+
+#define INITIO(_FUNCTION, _VALID, _CLUSTER, _DMA)      \
+static int _FUNCTION(Scsi_Cmnd * SCpnt)                        \
+{                                                      \
+    return __init_io(SCpnt, _VALID, _CLUSTER, _DMA);   \
+}
+
+/*
+ * ll_rw_blk.c now keeps track of the number of segments in
+ * a request.  Thus we don't have to do it any more here.
+ * We always force "_VALID" to 1.  Eventually clean this up
+ * and get rid of the extra argument.
+ */
+#if 0
+/* Old definitions */
+INITIO(scsi_init_io_, 0, 0, 0)
+INITIO(scsi_init_io_d, 0, 0, 1)
+INITIO(scsi_init_io_c, 0, 1, 0)
+INITIO(scsi_init_io_dc, 0, 1, 1)
+
+/* Newer redundant definitions. */
+INITIO(scsi_init_io_, 1, 0, 0)
+INITIO(scsi_init_io_d, 1, 0, 1)
+INITIO(scsi_init_io_c, 1, 1, 0)
+INITIO(scsi_init_io_dc, 1, 1, 1)
+#endif
+
+INITIO(scsi_init_io_v, 1, 0, 0)
+INITIO(scsi_init_io_vd, 1, 0, 1)
+INITIO(scsi_init_io_vc, 1, 1, 0)
+INITIO(scsi_init_io_vdc, 1, 1, 1)
+/*
+ * Function:    initialize_merge_fn()
+ *
+ * Purpose:     Initialize merge function for a host
+ *
+ * Arguments:   SHpnt   - Host descriptor.
+ *
+ * Returns:     Nothing.
+ *
+ * Lock status: 
+ *
+ * Notes:
+ */
+void initialize_merge_fn(Scsi_Device * SDpnt)
+{
+       request_queue_t *q;
+       struct Scsi_Host *SHpnt;
+       SHpnt = SDpnt->host;
+
+       q = &SDpnt->request_queue;
+
+       /*
+        * If the host has already selected a merge manager, then don't
+        * pick a new one.
+        */
+       if (q->merge_fn != NULL) {
+               return;
+       }
+       /*
+        * If this host has an unlimited tablesize, then don't bother with a
+        * merge manager.  The whole point of the operation is to make sure
+        * that requests don't grow too large, and this host isn't picky.
+        */
+       if (SHpnt->sg_tablesize == SG_ALL) {
+               if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+                       SDpnt->scsi_init_io_fn = scsi_init_io_v;
+               } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+                       SDpnt->scsi_init_io_fn = scsi_init_io_vd;
+               } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+                       SDpnt->scsi_init_io_fn = scsi_init_io_vc;
+               } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+                       SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
+               }
+               return;
+       }
+       /*
+        * Now pick out the correct function.
+        */
+       if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+               q->merge_fn = scsi_merge_fn_;
+               q->merge_requests_fn = scsi_merge_requests_fn_;
+               SDpnt->scsi_init_io_fn = scsi_init_io_v;
+       } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+               q->merge_fn = scsi_merge_fn_d;
+               q->merge_requests_fn = scsi_merge_requests_fn_d;
+               SDpnt->scsi_init_io_fn = scsi_init_io_vd;
+       } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+               q->merge_fn = scsi_merge_fn_c;
+               q->merge_requests_fn = scsi_merge_requests_fn_c;
+               SDpnt->scsi_init_io_fn = scsi_init_io_vc;
+       } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+               q->merge_fn = scsi_merge_fn_dc;
+               q->merge_requests_fn = scsi_merge_requests_fn_dc;
+               SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
+       }
+}
index b431c3849197d1f5e5cb2e9e0a824e96a21b9258..ccfb0b34f8dc368bc97da63f60d6f34031311f07 100644 (file)
@@ -13,7 +13,7 @@
  *      Tommy Thorn <tthorn>
  *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
  *
- *  Modified by Eric Youngdale eric@aib.com to
+ *  Modified by Eric Youngdale eric@andante.org to
  *  add scatter-gather, multiple outstanding request, and other
  *  enhancements.
  *
@@ -84,13 +84,15 @@ static int scsi_reset(Scsi_Cmnd *, unsigned int);
 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
 int update_timeout(Scsi_Cmnd *, int);
 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
-extern void internal_cmnd(Scsi_Cmnd * SCpnt);
+
+extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
 
 extern volatile struct Scsi_Host *host_active;
 #define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \
                          || (HOST->can_queue && HOST->host_busy >= HOST->can_queue))
 
-static unsigned char generic_sense[6] = {REQUEST_SENSE, 0, 0, 0, 255, 0};
+static unsigned char generic_sense[6] =
+{REQUEST_SENSE, 0, 0, 0, 255, 0};
 
 /*
  *  This is the number  of clock ticks we should wait before we time out
@@ -232,7 +234,13 @@ static void scsi_request_sense(Scsi_Cmnd * SCpnt)
        SCpnt->use_sg = 0;
        SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
        SCpnt->result = 0;
-       internal_cmnd(SCpnt);
+        /*
+         * Ugly, ugly.  The newer interfaces all assume that the lock
+         * isn't held.  Mustn't disappoint, or we deadlock the system.
+         */
+        spin_unlock_irq(&io_request_lock);
+       scsi_dispatch_cmd(SCpnt);
+        spin_lock_irq(&io_request_lock);
 }
 
 
@@ -443,7 +451,7 @@ void scsi_old_done(Scsi_Cmnd * SCpnt)
                                                               __LINE__);
                                                }
                                        }
-                                       /* end WAS_SENSE */ 
+                                       /* end WAS_SENSE */
                                        else {
 #ifdef DEBUG
                                                printk("COMMAND COMPLETE message returned, "
@@ -628,7 +636,14 @@ void scsi_old_done(Scsi_Cmnd * SCpnt)
                        SCpnt->use_sg = SCpnt->old_use_sg;
                        SCpnt->cmd_len = SCpnt->old_cmd_len;
                        SCpnt->result = 0;
-                       internal_cmnd(SCpnt);
+                        /*
+                         * Ugly, ugly.  The newer interfaces all
+                         * assume that the lock isn't held.  Mustn't
+                         * disappoint, or we deadlock the system.  
+                         */
+                        spin_unlock_irq(&io_request_lock);
+                       scsi_dispatch_cmd(SCpnt);
+                        spin_lock_irq(&io_request_lock);
                }
                break;
        default:
@@ -641,22 +656,18 @@ void scsi_old_done(Scsi_Cmnd * SCpnt)
 #endif
                host->host_busy--;      /* Indicate that we are free */
 
-               if (host->block && host->host_busy == 0) {
-                       host_active = NULL;
-
-                       /* For block devices "wake_up" is done in end_scsi_request */
-                       if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) {
-                               struct Scsi_Host *next;
-
-                               for (next = host->block; next != host; next = next->block)
-                                       wake_up(&next->host_wait);
-                       }
-               }
-               wake_up(&host->host_wait);
                SCpnt->result = result | ((exit & 0xff) << 24);
                SCpnt->use_sg = SCpnt->old_use_sg;
                SCpnt->cmd_len = SCpnt->old_cmd_len;
+                /*
+                 * The upper layers assume the lock isn't held.  We mustn't
+                 * disappoint them.  When the new error handling code is in
+                 * use, the upper code is run from a bottom half handler, so
+                 * it isn't an issue.
+                 */
+                spin_unlock_irq(&io_request_lock);
                SCpnt->done(SCpnt);
+                spin_lock_irq(&io_request_lock);
        }
 #undef CMD_FINISHED
 #undef REDO
@@ -925,8 +936,7 @@ static int scsi_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
                                if (host->last_reset - jiffies > 20UL * HZ)
                                        host->last_reset = jiffies;
                        } else {
-                               if (!host->block)
-                                       host->host_busy++;
+                               host->host_busy++;
                                host->last_reset = jiffies;
                                host->resetting = 1;
                                SCpnt->flags |= (WAS_RESET | IS_RESETTING);
@@ -939,8 +949,7 @@ static int scsi_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
                                if (time_before(host->last_reset, jiffies) ||
                                    (time_after(host->last_reset, jiffies + 20 * HZ)))
                                        host->last_reset = jiffies;
-                               if (!host->block)
-                                       host->host_busy--;
+                               host->host_busy--;
                        }
                        if (reset_flags & SCSI_RESET_SYNCHRONOUS)
                                SCpnt->flags &= ~SYNC_RESET;
index 95c1cac69598bf4af40c35a49bf98676a8fcacc4..1c64977e9da7caed121cd24cf719990c417adf25 100644 (file)
 
 static const char RCSid[] = "$Header: /mnt/ide/home/eric/CVSROOT/linux/drivers/scsi/scsi_queue.c,v 1.1 1997/10/21 11:16:38 eric Exp $";
 
-/*
- * Lock used to prevent more than one process from frobbing the list at the
- * same time.  FIXME(eric) - there should be separate spinlocks for each host.
- * This will reduce contention.
- */
-
-spinlock_t scsi_mlqueue_lock = SPIN_LOCK_UNLOCKED;
-spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED;
 
 /*
  * Function:    scsi_mlqueue_insert()
@@ -73,6 +65,8 @@ spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED;
  * Arguments:   cmd    - command that we are adding to queue.
  *              reason - why we are inserting command to queue.
  *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
  * Returns:     Nothing.
  *
  * Notes:       We do this for one of two cases.  Either the host is busy
@@ -84,8 +78,6 @@ spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED;
  */
 int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
 {
-       Scsi_Cmnd *cpnt;
-       unsigned long flags;
        struct Scsi_Host *host;
 
        SCSI_LOG_MLQUEUE(1, printk("Inserting command %p into mlqueue\n", cmd));
@@ -126,12 +118,12 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
                 * If a host is inactive and cannot queue any commands, I don't see
                 * how things could possibly work anyways.
                 */
-               if (cmd->device->device_busy == 0) {
+               if (cmd->device->device_blocked == 0) {
                        if (scsi_retry_command(cmd) == 0) {
                                return 0;
                        }
                }
-               cmd->device->device_busy = TRUE;
+               cmd->device->device_blocked = TRUE;
                cmd->device_wait = TRUE;
        }
 
@@ -143,142 +135,9 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
        cmd->bh_next = NULL;
 
        /*
-        * As a performance enhancement, look to see whether the list is
-        * empty.  If it is, then we can just atomicly insert the command
-        * in the list and return without locking.
+        * Insert this command at the head of the queue for it's device.
+        * It will go before all other commands that are already in the queue.
         */
-       if (host->pending_commands == NULL) {
-               cpnt = xchg(&host->pending_commands, cmd);
-               if (cpnt == NULL) {
-                       return 0;
-               }
-               /*
-                * Rats.  Something slipped in while we were exchanging.
-                * Swap it back and fall through to do it the hard way.
-                */
-               cmd = xchg(&host->pending_commands, cpnt);
-
-       }
-       /*
-        * Next append the command to the list of pending commands.
-        */
-       spin_lock_irqsave(&scsi_mlqueue_lock, flags);
-       for (cpnt = host->pending_commands; cpnt && cpnt->bh_next;
-            cpnt = cpnt->bh_next) {
-               continue;
-       }
-       if (cpnt != NULL) {
-               cpnt->bh_next = cmd;
-       } else {
-               host->pending_commands = cmd;
-       }
-
-       spin_unlock_irqrestore(&scsi_mlqueue_lock, flags);
-       return 0;
-}
-
-/*
- * Function:    scsi_mlqueue_finish()
- *
- * Purpose:     Try and queue commands from the midlevel queue.
- *
- * Arguments:   host    - host that just finished a command.
- *              device  - device that just finished a command.
- *
- * Returns:     Nothing.
- *
- * Notes:       This could be called either from an interrupt context or a
- *              normal process context.
- */
-int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device)
-{
-       Scsi_Cmnd *cpnt;
-       unsigned long flags;
-       Scsi_Cmnd *next;
-       Scsi_Cmnd *prev;
-       int reason = 0;
-       int rtn;
-
-       SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish starting\n"));
-       /*
-        * First, clear the flag for the host/device.  We will then start
-        * pushing commands through until either something else blocks, or
-        * the queue is empty.
-        */
-       if (host->host_blocked) {
-               reason = SCSI_MLQUEUE_HOST_BUSY;
-               host->host_blocked = FALSE;
-       }
-       if (device->device_busy) {
-               reason = SCSI_MLQUEUE_DEVICE_BUSY;
-               device->device_busy = FALSE;
-       }
-       /*
-        * Walk the list of commands to see if there is anything we can
-        * queue.  This probably needs to be optimized for performance at
-        * some point.
-        */
-       prev = NULL;
-       spin_lock_irqsave(&scsi_mlqueue_remove_lock, flags);
-       for (cpnt = host->pending_commands; cpnt; cpnt = next) {
-               next = cpnt->bh_next;
-               /*
-                * First, see if this command is suitable for being retried now.
-                */
-               if (reason == SCSI_MLQUEUE_HOST_BUSY) {
-                       /*
-                        * The host was busy, but isn't any more.  Thus we may be
-                        * able to queue the command now, but we were waiting for
-                        * the device, then we should keep waiting.  Similarily, if
-                        * the device is now busy, we should also keep waiting.
-                        */
-                       if ((cpnt->host_wait == FALSE)
-                           || (device->device_busy == TRUE)) {
-                               prev = cpnt;
-                               continue;
-                       }
-               }
-               if (reason == SCSI_MLQUEUE_DEVICE_BUSY) {
-                       /*
-                        * The device was busy, but isn't any more.  Thus we may be
-                        * able to queue the command now, but we were waiting for
-                        * the host, then we should keep waiting.  Similarily, if
-                        * the host is now busy, we should also keep waiting.
-                        */
-                       if ((cpnt->device_wait == FALSE)
-                           || (host->host_blocked == TRUE)) {
-                               prev = cpnt;
-                               continue;
-                       }
-               }
-               /*
-                * First, remove the command from the list.
-                */
-               if (prev == NULL) {
-                       host->pending_commands = next;
-               } else {
-                       prev->bh_next = next;
-               }
-               cpnt->bh_next = NULL;
-
-               rtn = scsi_retry_command(cpnt);
-
-               /*
-                * If we got a non-zero return value, it means that the host rejected
-                * the command.  The internal_cmnd function will have added the
-                * command back to the end of the list, so we don't have anything
-                * more to do here except return.
-                */
-               if (rtn) {
-                       spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags);
-                       SCSI_LOG_MLQUEUE(1, printk("Unable to remove command %p from mlqueue\n", cpnt));
-                       goto finish;
-               }
-               SCSI_LOG_MLQUEUE(1, printk("Removed command %p from mlqueue\n", cpnt));
-       }
-
-       spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags);
-finish:
-       SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish returning\n"));
+       scsi_insert_special_cmd(cmd, 1);
        return 0;
 }
index d3cb4a1031dcaabb8d64a3be2937aa5fa2f87e93..3c94212fd71cd49ba96627096796861eecf6a654 100644 (file)
@@ -33,8 +33,8 @@
  * modules.
  */
 
-extern void print_command (unsigned char *command);
-extern void print_sense(const char * devclass, Scsi_Cmnd * SCpnt);
+extern void print_command(unsigned char *command);
+extern void print_sense(const char *devclass, Scsi_Cmnd * SCpnt);
 
 extern const char *const scsi_device_types[];
 
@@ -60,13 +60,12 @@ EXPORT_SYMBOL(print_status);
 EXPORT_SYMBOL(scsi_dma_free_sectors);
 EXPORT_SYMBOL(kernel_scsi_ioctl);
 EXPORT_SYMBOL(scsi_need_isa_buffer);
-EXPORT_SYMBOL(scsi_request_queueable);
 EXPORT_SYMBOL(scsi_release_command);
 EXPORT_SYMBOL(print_Scsi_Cmnd);
 EXPORT_SYMBOL(scsi_block_when_processing_errors);
 EXPORT_SYMBOL(scsi_mark_host_reset);
 EXPORT_SYMBOL(scsi_ioctl_send_command);
-#if defined(CONFIG_SCSI_LOGGING) /* { */
+#if defined(CONFIG_SCSI_LOGGING)       /* { */
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
@@ -75,6 +74,9 @@ EXPORT_SYMBOL(scsi_sleep);
 EXPORT_SYMBOL(proc_print_scsidevice);
 EXPORT_SYMBOL(proc_scsi);
 
+EXPORT_SYMBOL(scsi_io_completion);
+EXPORT_SYMBOL(scsi_end_request);
+
 /*
  * These are here only while I debug the rest of the scsi stuff.
  */
@@ -83,5 +85,4 @@ EXPORT_SYMBOL(scsi_hosts);
 EXPORT_SYMBOL(scsi_devicelist);
 EXPORT_SYMBOL(scsi_device_types);
 
-
-#endif /* CONFIG_MODULES */
+#endif                         /* CONFIG_MODULES */
index 240453144db60d0a3fc0b17fbca8d405b1e41b6b..ce9e28a41f1b3e84f5f8885e6c3d84ecb03aafa6 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *      sd.c Copyright (C) 1992 Drew Eckhardt
- *           Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
  *
  *      Linux scsi disk driver
  *              Initial versions: Drew Eckhardt
@@ -8,11 +8,11 @@
  *
  *      <drew@colorado.edu>
  *
- *       Modified by Eric Youngdale ericy@cais.com to
+ *       Modified by Eric Youngdale ericy@andante.org to
  *       add scatter-gather, multiple outstanding request, and other
  *       enhancements.
  *
- *       Modified by Eric Youngdale eric@aib.com to support loadable
+ *       Modified by Eric Youngdale eric@andante.org to support loadable
  *       low-level scsi drivers.
  *
  *       Modified by Jirka Hanika geo@ff.cuni.cz to support more
@@ -96,13 +96,15 @@ static int fop_revalidate_scsidisk(kdev_t);
 
 static int sd_init_onedisk(int);
 
-static void requeue_sd_request(Scsi_Cmnd * SCpnt);
 
 static int sd_init(void);
 static void sd_finish(void);
 static int sd_attach(Scsi_Device *);
 static int sd_detect(Scsi_Device *);
 static void sd_detach(Scsi_Device *);
+static void rw_intr(Scsi_Cmnd * SCpnt);
+
+static int sd_init_command(Scsi_Cmnd *);
 
 static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
 {
@@ -196,12 +198,170 @@ static void sd_devname(unsigned int disknum, char *buffer)
 }
 
 struct Scsi_Device_Template sd_template = {
-       NULL, "disk", "sd", NULL, TYPE_DISK,
-       SCSI_DISK0_MAJOR, 0, 0, 0, 1,
-       sd_detect, sd_init,
-       sd_finish, sd_attach, sd_detach
+       name:"disk",
+       tag:"sd",
+       scsi_type:TYPE_DISK,
+       major:SCSI_DISK0_MAJOR,
+       blk:1,
+       detect:sd_detect,
+       init:sd_init,
+       finish:sd_finish,
+       attach:sd_attach,
+       detach:sd_detach,
+       init_command:sd_init_command,
 };
 
+static request_queue_t *sd_find_queue(kdev_t dev)
+{
+       Scsi_Disk *dpnt;
+       int target;
+       target = DEVICE_NR(dev);
+
+       dpnt = &rscsi_disks[target];
+       if (!dpnt)
+               return NULL;    /* No such device */
+       return &dpnt->device->request_queue;
+}
+
+static int sd_init_command(Scsi_Cmnd * SCpnt)
+{
+       int dev, devm, block, this_count;
+       Scsi_Disk *dpnt;
+       char nbuff[6];
+
+       devm = MINOR(SCpnt->request.rq_dev);
+       dev = DEVICE_NR(SCpnt->request.rq_dev);
+
+       block = SCpnt->request.sector;
+       this_count = SCpnt->request_bufflen >> 9;
+
+       SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block));
+
+       dpnt = &rscsi_disks[dev];
+       if (devm >= (sd_template.dev_max << 4) ||
+           !dpnt ||
+           !dpnt->device->online ||
+           block + SCpnt->request.nr_sectors > sd[devm].nr_sects) {
+               SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
+               SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+               SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
+               return 0;
+       }
+       block += sd[devm].start_sect;
+       if (dpnt->device->changed) {
+               /*
+                * quietly refuse to do anything to a changed disc until the changed
+                * bit has been reset
+                */
+               /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+               SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+               return 0;
+       }
+       SCSI_LOG_HLQUEUE(2, sd_devname(devm, nbuff));
+       SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n",
+                                  nbuff, dev, block));
+
+       /*
+        * If we have a 1K hardware sectorsize, prevent access to single
+        * 512 byte sectors.  In theory we could handle this - in fact
+        * the scsi cdrom driver must be able to handle this because
+        * we typically use 1K blocksizes, and cdroms typically have
+        * 2K hardware sectorsizes.  Of course, things are simpler
+        * with the cdrom, since it is read-only.  For performance
+        * reasons, the filesystems should be able to handle this
+        * and not force the scsi disk driver to use bounce buffers
+        * for this.
+        */
+       if (dpnt->device->sector_size == 1024) {
+               if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+                       printk("sd.c:Bad block number requested");
+                       SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+                       return 0;
+               } else {
+                       block = block >> 1;
+                       this_count = this_count >> 1;
+               }
+       }
+       if (dpnt->device->sector_size == 2048) {
+               if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
+                       printk("sd.c:Bad block number requested");
+                       SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+                       return 0;
+               } else {
+                       block = block >> 2;
+                       this_count = this_count >> 2;
+               }
+       }
+       switch (SCpnt->request.cmd) {
+       case WRITE:
+               if (!dpnt->device->writeable) {
+                       SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+                       return 0;
+               }
+               SCpnt->cmnd[0] = WRITE_6;
+               break;
+       case READ:
+               SCpnt->cmnd[0] = READ_6;
+               break;
+       default:
+               panic("Unknown sd command %d\n", SCpnt->request.cmd);
+       }
+
+       SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
+                                  nbuff,
+                  (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
+                                this_count, SCpnt->request.nr_sectors));
+
+       SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0;
+
+       if (((this_count > 0xff) || (block > 0x1fffff)) && SCpnt->device->ten) {
+               if (this_count > 0xffff)
+                       this_count = 0xffff;
+
+               SCpnt->cmnd[0] += READ_10 - READ_6;
+               SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+               SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
+               SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
+               SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+               SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
+               SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
+               SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+       } else {
+               if (this_count > 0xff)
+                       this_count = 0xff;
+
+               SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+               SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
+               SCpnt->cmnd[3] = (unsigned char) block & 0xff;
+               SCpnt->cmnd[4] = (unsigned char) this_count;
+               SCpnt->cmnd[5] = 0;
+       }
+
+       /*
+        * We shouldn't disconnect in the middle of a sector, so with a dumb
+        * host adapter, it's safe to assume that we can at least transfer
+        * this many bytes between each connect / disconnect.
+        */
+       SCpnt->transfersize = dpnt->device->sector_size;
+       SCpnt->underflow = this_count << 9;
+
+       SCpnt->allowed = MAX_RETRIES;
+       SCpnt->timeout_per_command = (SCpnt->device->type == TYPE_DISK ?
+                                     SD_TIMEOUT : SD_MOD_TIMEOUT);
+
+       /*
+        * This is the completion routine we use.  This is matched in terms
+        * of capability to this function.
+        */
+       SCpnt->done = rw_intr;
+
+       /*
+        * This indicates that the command is ready from our end to be
+        * queued.
+        */
+       return 1;
+}
+
 static int sd_open(struct inode *inode, struct file *filp)
 {
        int target;
@@ -359,7 +519,7 @@ static void rw_intr(Scsi_Cmnd * SCpnt)
        int good_sectors = (result == 0 ? this_count : 0);
        int block_sectors = 1;
 
-       sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff);
+       SCSI_LOG_HLCOMPLETE(1, sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff));
 
        SCSI_LOG_HLCOMPLETE(1, printk("%s : rw_intr(%d, %x [%x %x])\n", nbuff,
                                      SCpnt->host->host_no,
@@ -380,202 +540,37 @@ static void rw_intr(Scsi_Cmnd * SCpnt)
                (SCpnt->sense_buffer[4] << 16) |
                (SCpnt->sense_buffer[5] << 8) |
                SCpnt->sense_buffer[6];
-               int sector_size =
-               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size;
                if (SCpnt->request.bh != NULL)
                        block_sectors = SCpnt->request.bh->b_size >> 9;
-               if (sector_size == 1024) {
+               switch (SCpnt->device->sector_size) {
+               case 1024:
                        error_sector <<= 1;
                        if (block_sectors < 2)
                                block_sectors = 2;
-               } else if (sector_size == 2048) {
+                       break;
+               case 2048:
                        error_sector <<= 2;
                        if (block_sectors < 4)
                                block_sectors = 4;
-               } else if (sector_size == 256)
+                       break;
+               case 256:
                        error_sector >>= 1;
-               error_sector -= sd[SD_PARTITION(SCpnt->request.rq_dev)].start_sect;
+                       break;
+               default:
+                       break;
+               }
+               error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect;
                error_sector &= ~(block_sectors - 1);
                good_sectors = error_sector - SCpnt->request.sector;
                if (good_sectors < 0 || good_sectors >= this_count)
                        good_sectors = 0;
        }
        /*
-        * First case : we assume that the command succeeded.  One of two things
-        * will happen here.  Either we will be finished, or there will be more
-        * sectors that we were unable to read last time.
+        * This calls the generic completion function, now that we know
+        * how many actual sectors finished, and how many sectors we need
+        * to say have failed.
         */
-
-       if (good_sectors > 0) {
-
-               SCSI_LOG_HLCOMPLETE(1, printk("%s : %ld sectors remain.\n", nbuff,
-                                             SCpnt->request.nr_sectors));
-               SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg));
-
-               if (SCpnt->use_sg) {
-                       struct scatterlist *sgpnt;
-                       int i;
-                       sgpnt = (struct scatterlist *) SCpnt->buffer;
-                       for (i = 0; i < SCpnt->use_sg; i++) {
-
-#if 0
-                               SCSI_LOG_HLCOMPLETE(3, printk(":%p %p %d\n", sgpnt[i].alt_address, sgpnt[i].address,
-                                                      sgpnt[i].length));
-#endif
-
-                               if (sgpnt[i].alt_address) {
-                                       if (SCpnt->request.cmd == READ)
-                                               memcpy(sgpnt[i].alt_address, sgpnt[i].address,
-                                                      sgpnt[i].length);
-                                       scsi_free(sgpnt[i].address, sgpnt[i].length);
-                               }
-                       }
-
-                       /* Free list of scatter-gather pointers */
-                       scsi_free(SCpnt->buffer, SCpnt->sglist_len);
-               } else {
-                       if (SCpnt->buffer != SCpnt->request.buffer) {
-                               SCSI_LOG_HLCOMPLETE(3, printk("nosg: %p %p %d\n",
-                                   SCpnt->request.buffer, SCpnt->buffer,
-                                                       SCpnt->bufflen));
-
-                               if (SCpnt->request.cmd == READ)
-                                       memcpy(SCpnt->request.buffer, SCpnt->buffer,
-                                              SCpnt->bufflen);
-                               scsi_free(SCpnt->buffer, SCpnt->bufflen);
-                       }
-               }
-               /*
-                * If multiple sectors are requested in one buffer, then
-                * they will have been finished off by the first command.
-                * If not, then we have a multi-buffer command.
-                */
-               if (SCpnt->request.nr_sectors > this_count) {
-                       SCpnt->request.errors = 0;
-
-                       if (!SCpnt->request.bh) {
-                               SCSI_LOG_HLCOMPLETE(2, printk("%s : handling page request, no buffer\n",
-                                                             nbuff));
-
-                               /*
-                                * The SCpnt->request.nr_sectors field is always done in
-                                * 512 byte sectors, even if this really isn't the case.
-                                */
-                               panic("sd.c: linked page request (%lx %x)",
-                                     SCpnt->request.sector, this_count);
-                       }
-               }
-               SCpnt = end_scsi_request(SCpnt, 1, good_sectors);
-               if (result == 0) {
-                       requeue_sd_request(SCpnt);
-                       return;
-               }
-       }
-       if (good_sectors == 0) {
-
-               /* Free up any indirection buffers we allocated for DMA purposes. */
-               if (SCpnt->use_sg) {
-                       struct scatterlist *sgpnt;
-                       int i;
-                       sgpnt = (struct scatterlist *) SCpnt->buffer;
-                       for (i = 0; i < SCpnt->use_sg; i++) {
-                               SCSI_LOG_HLCOMPLETE(3, printk("err: %p %p %d\n",
-                                   SCpnt->request.buffer, SCpnt->buffer,
-                                                       SCpnt->bufflen));
-                               if (sgpnt[i].alt_address) {
-                                       scsi_free(sgpnt[i].address, sgpnt[i].length);
-                               }
-                       }
-                       scsi_free(SCpnt->buffer, SCpnt->sglist_len);    /* Free list of scatter-gather pointers */
-               } else {
-                       SCSI_LOG_HLCOMPLETE(2, printk("nosgerr: %p %p %d\n",
-                                   SCpnt->request.buffer, SCpnt->buffer,
-                                                     SCpnt->bufflen));
-                       if (SCpnt->buffer != SCpnt->request.buffer)
-                               scsi_free(SCpnt->buffer, SCpnt->bufflen);
-               }
-       }
-       /*
-        * Now, if we were good little boys and girls, Santa left us a request
-        * sense buffer.  We can extract information from this, so we
-        * can choose a block to remap, etc.
-        */
-
-       if (driver_byte(result) != 0) {
-               if (suggestion(result) == SUGGEST_REMAP) {
-#ifdef REMAP
-                       /*
-                        * Not yet implemented.  A read will fail after being remapped,
-                        * a write will call the strategy routine again.
-                        */
-                       if rscsi_disks
-                               [DEVICE_NR(SCpnt->request.rq_dev)].remap
-                       {
-                               result = 0;
-                       }
-#endif
-               }
-               if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
-                       if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
-                               if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
-                                       /* detected disc change.  set a bit and quietly refuse
-                                        * further access.
-                                        */
-                                       rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
-                                       SCpnt = end_scsi_request(SCpnt, 0, this_count);
-                                       requeue_sd_request(SCpnt);
-                                       return;
-                               } else {
-                                       /*
-                                        * Must have been a power glitch, or a bus reset.
-                                        * Could not have been a media change, so we just retry
-                                        * the request and see what happens.
-                                        */
-                                       requeue_sd_request(SCpnt);
-                                       return;
-                               }
-                       }
-               }
-               /* If we had an ILLEGAL REQUEST returned, then we may have
-                * performed an unsupported command.  The only thing this should be
-                * would be a ten byte read where only a six byte read was supported.
-                * Also, on a system where READ CAPACITY failed, we have have read
-                * past the end of the disk.
-                */
-
-               if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
-                       if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
-                               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
-                               requeue_sd_request(SCpnt);
-                               result = 0;
-                       } else {
-                               /* ???? */
-                       }
-               }
-               if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
-                       printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ",
-                              SCpnt->host->host_no, (int) SCpnt->channel,
-                              (int) SCpnt->target, (int) SCpnt->lun);
-                       print_command(SCpnt->cmnd);
-                       print_sense("sd", SCpnt);
-                       SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
-                       requeue_sd_request(SCpnt);
-                       return;
-               }
-       }                       /* driver byte != 0 */
-       if (result) {
-               printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
-                      rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
-                      rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
-               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
-                      rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
-
-               if (driver_byte(result) & DRIVER_SENSE)
-                       print_sense("sd", SCpnt);
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
-               requeue_sd_request(SCpnt);
-               return;
-       }
+       scsi_io_completion(SCpnt, good_sectors, block_sectors);
 }
 /*
  * requeue_sd_request() is the request handler function for the sd driver.
@@ -583,532 +578,6 @@ static void rw_intr(Scsi_Cmnd * SCpnt)
  * them to SCSI commands.
  */
 
-static void do_sd_request(void)
-{
-       Scsi_Cmnd *SCpnt = NULL;
-       Scsi_Device *SDev;
-       struct request *req = NULL;
-       int flag = 0;
-
-       while (1 == 1) {
-               if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
-                       return;
-               }
-               INIT_SCSI_REQUEST;
-               SDev = rscsi_disks[CURRENT_DEV].device;
-
-               /*
-                * If the host for this device is in error recovery mode, don't
-                * do anything at all here.  When the host leaves error recovery
-                * mode, it will automatically restart things and start queueing
-                * commands again.
-                */
-               if (SDev->host->in_recovery) {
-                       return;
-               }
-               /*
-                * I am not sure where the best place to do this is.  We need
-                * to hook in a place where we are likely to come if in user
-                * space.
-                */
-               if (SDev->was_reset) {
-                       /*
-                        * We need to relock the door, but we might
-                        * be in an interrupt handler.  Only do this
-                        * from user space, since we do not want to
-                        * sleep from an interrupt.  FIXME(eric) - do this
-                        * from the kernel error handling thred.
-                        */
-                       if (SDev->removable && !in_interrupt()) {
-                               spin_unlock_irq(&io_request_lock);      /* FIXME!!!! */
-                               scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
-                               /* scsi_ioctl may allow CURRENT to change, so start over. */
-                               SDev->was_reset = 0;
-                               spin_lock_irq(&io_request_lock);        /* FIXME!!!! */
-                               continue;
-                       }
-                       SDev->was_reset = 0;
-               }
-               /* We have to be careful here. scsi_allocate_device will get a free pointer,
-                * but there is no guarantee that it is queueable.  In normal usage,
-                * we want to call this, because other types of devices may have the
-                * host all tied up, and we want to make sure that we have at least
-                * one request pending for this type of device. We can also come
-                * through here while servicing an interrupt, because of the need to
-                * start another command. If we call scsi_allocate_device more than once,
-                * then the system can wedge if the command is not queueable. The
-                * scsi_request_queueable function is safe because it checks to make sure
-                * that the host is able to take another command before it returns
-                * a pointer.
-                */
-
-               if (flag++ == 0)
-                       SCpnt = scsi_allocate_device(&CURRENT,
-                                    rscsi_disks[CURRENT_DEV].device, 0);
-               else
-                       SCpnt = NULL;
-
-               /*
-                * The following restore_flags leads to latency problems.  FIXME.
-                * Using a "sti()" gets rid of the latency problems but causes
-                * race conditions and crashes.
-                */
-
-               /* This is a performance enhancement. We dig down into the request
-                * list and try to find a queueable request (i.e. device not busy,
-                * and host able to accept another command. If we find one, then we
-                * queue it. This can make a big difference on systems with more than
-                * one disk drive.  We want to have the interrupts off when monkeying
-                * with the request list, because otherwise the kernel might try to
-                * slip in a request in between somewhere.
-                *
-                * FIXME(eric) - this doesn't belong at this level.  The device code in
-                * ll_rw_blk.c should know how to dig down into the device queue to
-                * figure out what it can deal with, and what it can't.  Consider
-                * possibility of pulling entire queue down into scsi layer.
-                */
-               if (!SCpnt && sd_template.nr_dev > 1) {
-                       struct request *req1;
-                       req1 = NULL;
-                       req = CURRENT;
-                       while (req) {
-                               SCpnt = scsi_request_queueable(req,
-                                                              rscsi_disks[DEVICE_NR(req->rq_dev)].device);
-                               if (SCpnt)
-                                       break;
-                               req1 = req;
-                               req = req->next;
-                       }
-                       if (SCpnt && req->rq_status == RQ_INACTIVE) {
-                               if (req == CURRENT)
-                                       CURRENT = CURRENT->next;
-                               else
-                                       req1->next = req->next;
-                       }
-               }
-               if (!SCpnt)
-                       return; /* Could not find anything to do */
-
-               /* Queue command */
-               requeue_sd_request(SCpnt);
-       }                       /* While */
-}
-
-static void requeue_sd_request(Scsi_Cmnd * SCpnt)
-{
-       int dev, devm, block, this_count;
-       unsigned char cmd[10];
-       char nbuff[6];
-       int bounce_size, contiguous;
-       int max_sg;
-       struct buffer_head *bh, *bhp;
-       char *buff, *bounce_buffer;
-
-repeat:
-
-       if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
-               do_sd_request();
-               return;
-       }
-       devm = SD_PARTITION(SCpnt->request.rq_dev);
-       dev = DEVICE_NR(SCpnt->request.rq_dev);
-
-       block = SCpnt->request.sector;
-       this_count = 0;
-
-       SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block));
-
-       if (devm >= (sd_template.dev_max << 4) ||
-           !rscsi_disks[dev].device ||
-           !rscsi_disks[dev].device->online ||
-           block + SCpnt->request.nr_sectors > sd[devm].nr_sects) {
-               SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-               SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
-               goto repeat;
-       }
-       block += sd[devm].start_sect;
-
-       if (rscsi_disks[dev].device->changed) {
-               /*
-                * quietly refuse to do anything to a changed disc until the changed
-                * bit has been reset
-                */
-               /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-               goto repeat;
-       }
-       sd_devname(devm >> 4, nbuff);
-       SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n",
-                                  nbuff, dev, block));
-
-       /*
-        * If we have a 1K hardware sectorsize, prevent access to single
-        * 512 byte sectors.  In theory we could handle this - in fact
-        * the scsi cdrom driver must be able to handle this because
-        * we typically use 1K blocksizes, and cdroms typically have
-        * 2K hardware sectorsizes.  Of course, things are simpler
-        * with the cdrom, since it is read-only.  For performance
-        * reasons, the filesystems should be able to handle this
-        * and not force the scsi disk driver to use bounce buffers
-        * for this.
-        */
-       if (rscsi_disks[dev].sector_size == 1024)
-               if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
-                       printk("sd.c:Bad block number/count requested");
-                       SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-                       goto repeat;
-               }
-       if (rscsi_disks[dev].sector_size == 2048)
-               if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
-                       printk("sd.c:Bad block number/count requested");
-                       SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-                       goto repeat;
-               }
-       if (rscsi_disks[dev].sector_size == 4096)
-               if ((block & 7) || (SCpnt->request.nr_sectors & 7)) {
-                       printk("sd.cBad block number/count requested");
-                       SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-                       goto repeat;
-               }
-       switch (SCpnt->request.cmd) {
-       case WRITE:
-               if (!rscsi_disks[dev].device->writeable) {
-                       SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-                       goto repeat;
-               }
-               cmd[0] = WRITE_6;
-               break;
-       case READ:
-               cmd[0] = READ_6;
-               break;
-       default:
-               panic("Unknown sd command %d\n", SCpnt->request.cmd);
-       }
-
-       SCpnt->this_count = 0;
-
-       /* If the host adapter can deal with very large scatter-gather
-        * requests, it is a waste of time to cluster
-        */
-       contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 : 1);
-       bounce_buffer = NULL;
-       bounce_size = (SCpnt->request.nr_sectors << 9);
-
-       /* First see if we need a bounce buffer for this request. If we do, make
-        * sure that we can allocate a buffer. Do not waste space by allocating
-        * a bounce buffer if we are straddling the 16Mb line
-        */
-       if (contiguous && SCpnt->request.bh &&
-           virt_to_phys(SCpnt->request.bh->b_data)
-           + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
-           && SCpnt->host->unchecked_isa_dma) {
-               if (virt_to_phys(SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
-                       bounce_buffer = (char *) scsi_malloc(bounce_size);
-               if (!bounce_buffer)
-                       contiguous = 0;
-       }
-       if (contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
-               for (bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
-                    bhp = bhp->b_reqnext) {
-                       if (!CONTIGUOUS_BUFFERS(bh, bhp)) {
-                               if (bounce_buffer)
-                                       scsi_free(bounce_buffer, bounce_size);
-                               contiguous = 0;
-                               break;
-                       }
-               }
-       if (!SCpnt->request.bh || contiguous) {
-
-               /* case of page request (i.e. raw device), or unlinked buffer */
-               this_count = SCpnt->request.nr_sectors;
-               buff = SCpnt->request.buffer;
-               SCpnt->use_sg = 0;
-
-       } else if (SCpnt->host->sg_tablesize == 0 ||
-                (scsi_need_isa_buffer && scsi_dma_free_sectors <= 10)) {
-
-               /* Case of host adapter that cannot scatter-gather.  We also
-                * come here if we are running low on DMA buffer memory.  We set
-                * a threshold higher than that we would need for this request so
-                * we leave room for other requests.  Even though we would not need
-                * it all, we need to be conservative, because if we run low enough
-                * we have no choice but to panic.
-                */
-               if (SCpnt->host->sg_tablesize != 0 &&
-                   scsi_need_isa_buffer &&
-                   scsi_dma_free_sectors <= 10)
-                       printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
-
-               this_count = SCpnt->request.current_nr_sectors;
-               buff = SCpnt->request.buffer;
-               SCpnt->use_sg = 0;
-
-       } else {
-
-               /* Scatter-gather capable host adapter */
-               struct scatterlist *sgpnt;
-               int count, this_count_max;
-               int counted;
-
-               bh = SCpnt->request.bh;
-               this_count = 0;
-               this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
-               count = 0;
-               bhp = NULL;
-               while (bh) {
-                       if ((this_count + (bh->b_size >> 9)) > this_count_max)
-                               break;
-                       if (!bhp || !CONTIGUOUS_BUFFERS(bhp, bh) ||
-                           !CLUSTERABLE_DEVICE(SCpnt) ||
-                           (SCpnt->host->unchecked_isa_dma &&
-                            virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD)) {
-                               if (count < SCpnt->host->sg_tablesize)
-                                       count++;
-                               else
-                                       break;
-                       }
-                       this_count += (bh->b_size >> 9);
-                       bhp = bh;
-                       bh = bh->b_reqnext;
-               }
-#if 0
-               if (SCpnt->host->unchecked_isa_dma &&
-                   virt_to_phys(SCpnt->request.bh->b_data - 1) == ISA_DMA_THRESHOLD)
-                       count--;
-#endif
-               SCpnt->use_sg = count;  /* Number of chains */
-               /* scsi_malloc can only allocate in chunks of 512 bytes */
-               count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
-
-               SCpnt->sglist_len = count;
-               max_sg = count / sizeof(struct scatterlist);
-               if (SCpnt->host->sg_tablesize < max_sg)
-                       max_sg = SCpnt->host->sg_tablesize;
-               sgpnt = (struct scatterlist *) scsi_malloc(count);
-               if (!sgpnt) {
-                       printk("Warning - running *really* short on DMA buffers\n");
-                       SCpnt->use_sg = 0;      /* No memory left - bail out */
-                       this_count = SCpnt->request.current_nr_sectors;
-                       buff = SCpnt->request.buffer;
-               } else {
-                       memset(sgpnt, 0, count);        /* Zero so it is easy to fill, but only
-                                                        * if memory is available
-                                                        */
-                       buff = (char *) sgpnt;
-                       counted = 0;
-                       for (count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
-                            count < SCpnt->use_sg && bh;
-                            count++, bh = bhp) {
-
-                               bhp = bh->b_reqnext;
-
-                               if (!sgpnt[count].address)
-                                       sgpnt[count].address = bh->b_data;
-                               sgpnt[count].length += bh->b_size;
-                               counted += bh->b_size >> 9;
-
-                               if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 >
-                                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
-                                   !sgpnt[count].alt_address) {
-                                       sgpnt[count].alt_address = sgpnt[count].address;
-                                       /* We try to avoid exhausting the DMA pool, since it is
-                                        * easier to control usage here. In other places we might
-                                        * have a more pressing need, and we would be screwed if
-                                        * we ran out */
-                                       if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
-                                               sgpnt[count].address = NULL;
-                                       } else {
-                                               sgpnt[count].address =
-                                                   (char *) scsi_malloc(sgpnt[count].length);
-                                       }
-                                       /* If we start running low on DMA buffers, we abort the
-                                        * scatter-gather operation, and free all of the memory
-                                        * we have allocated.  We want to ensure that all scsi
-                                        * operations are able to do at least a non-scatter/gather
-                                        * operation */
-                                       if (sgpnt[count].address == NULL) {     /* Out of dma memory */
-#if 0
-                                               printk("Warning: Running low on SCSI DMA buffers");
-                                               /* Try switching back to a non s-g operation. */
-                                               while (--count >= 0) {
-                                                       if (sgpnt[count].alt_address)
-                                                               scsi_free(sgpnt[count].address,
-                                                                         sgpnt[count].length);
-                                               }
-                                               this_count = SCpnt->request.current_nr_sectors;
-                                               buff = SCpnt->request.buffer;
-                                               SCpnt->use_sg = 0;
-                                               scsi_free(sgpnt, SCpnt->sglist_len);
-#endif
-                                               SCpnt->use_sg = count;
-                                               this_count = counted -= bh->b_size >> 9;
-                                               break;
-                                       }
-                               }
-                               /* Only cluster buffers if we know that we can supply DMA
-                                * buffers large enough to satisfy the request. Do not cluster
-                                * a new request if this would mean that we suddenly need to
-                                * start using DMA bounce buffers */
-                               if (bhp && CONTIGUOUS_BUFFERS(bh, bhp)
-                                   && CLUSTERABLE_DEVICE(SCpnt)) {
-                                       char *tmp;
-
-                                       if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length +
-                                           bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
-                                           (SCpnt->host->unchecked_isa_dma) &&
-                                           !sgpnt[count].alt_address)
-                                               continue;
-
-                                       if (!sgpnt[count].alt_address) {
-                                               count--;
-                                               continue;
-                                       }
-                                       if (scsi_dma_free_sectors > 10)
-                                               tmp = (char *) scsi_malloc(sgpnt[count].length
-                                                         + bhp->b_size);
-                                       else {
-                                               tmp = NULL;
-                                               max_sg = SCpnt->use_sg;
-                                       }
-                                       if (tmp) {
-                                               scsi_free(sgpnt[count].address, sgpnt[count].length);
-                                               sgpnt[count].address = tmp;
-                                               count--;
-                                               continue;
-                                       }
-                                       /* If we are allowed another sg chain, then increment
-                                        * counter so we can insert it.  Otherwise we will end
-                                        up truncating */
-
-                                       if (SCpnt->use_sg < max_sg)
-                                               SCpnt->use_sg++;
-                               }       /* contiguous buffers */
-                       }       /* for loop */
-
-                       /* This is actually how many we are going to transfer */
-                       this_count = counted;
-
-                       if (count < SCpnt->use_sg || SCpnt->use_sg
-                           > SCpnt->host->sg_tablesize) {
-                               bh = SCpnt->request.bh;
-                               printk("Use sg, count %d %x %d\n",
-                                      SCpnt->use_sg, count, scsi_dma_free_sectors);
-                               printk("maxsg = %x, counted = %d this_count = %d\n",
-                                      max_sg, counted, this_count);
-                               while (bh) {
-                                       printk("[%p %x] ", bh->b_data, bh->b_size);
-                                       bh = bh->b_reqnext;
-                               }
-                               if (SCpnt->use_sg < 16)
-                                       for (count = 0; count < SCpnt->use_sg; count++)
-                                               printk("{%d:%p %p %d}  ", count,
-                                                   sgpnt[count].address,
-                                               sgpnt[count].alt_address,
-                                                   sgpnt[count].length);
-                               panic("Ooops");
-                       }
-                       if (SCpnt->request.cmd == WRITE)
-                               for (count = 0; count < SCpnt->use_sg; count++)
-                                       if (sgpnt[count].alt_address)
-                                               memcpy(sgpnt[count].address, sgpnt[count].alt_address,
-                                                   sgpnt[count].length);
-               }               /* Able to malloc sgpnt */
-       }                       /* Host adapter capable of scatter-gather */
-
-       /* Now handle the possibility of DMA to addresses > 16Mb */
-
-       if (SCpnt->use_sg == 0) {
-               if (virt_to_phys(buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
-                   (SCpnt->host->unchecked_isa_dma)) {
-                       if (bounce_buffer)
-                               buff = bounce_buffer;
-                       else
-                               buff = (char *) scsi_malloc(this_count << 9);
-                       if (buff == NULL) {     /* Try backing off a bit if we are low on mem */
-                               this_count = SCpnt->request.current_nr_sectors;
-                               buff = (char *) scsi_malloc(this_count << 9);
-                               if (!buff)
-                                       panic("Ran out of DMA buffers.");
-                       }
-                       if (SCpnt->request.cmd == WRITE)
-                               memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9);
-               }
-       }
-       SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
-                                  nbuff,
-                  (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
-                                this_count, SCpnt->request.nr_sectors));
-
-       cmd[1] = (SCpnt->lun << 5) & 0xe0;
-
-       if (rscsi_disks[dev].sector_size == 4096) {
-               if (block & 7)
-                       panic("sd.c:Bad block number requested");
-               if (this_count & 7)
-                       panic("sd.c:Bad block number requested");
-               block = block >> 3;
-               this_count = block >> 3;
-       }
-       if (rscsi_disks[dev].sector_size == 2048) {
-               if (block & 3)
-                       panic("sd.c:Bad block number requested");
-               if (this_count & 3)
-                       panic("sd.c:Bad block number requested");
-               block = block >> 2;
-               this_count = this_count >> 2;
-       }
-       if (rscsi_disks[dev].sector_size == 1024) {
-               if (block & 1)
-                       panic("sd.c:Bad block number requested");
-               if (this_count & 1)
-                       panic("sd.c:Bad block number requested");
-               block = block >> 1;
-               this_count = this_count >> 1;
-       }
-       if (rscsi_disks[dev].sector_size == 256) {
-               block = block << 1;
-               this_count = this_count << 1;
-       }
-       if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten) {
-               if (this_count > 0xffff)
-                       this_count = 0xffff;
-
-               cmd[0] += READ_10 - READ_6;
-               cmd[2] = (unsigned char) (block >> 24) & 0xff;
-               cmd[3] = (unsigned char) (block >> 16) & 0xff;
-               cmd[4] = (unsigned char) (block >> 8) & 0xff;
-               cmd[5] = (unsigned char) block & 0xff;
-               cmd[6] = cmd[9] = 0;
-               cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
-               cmd[8] = (unsigned char) this_count & 0xff;
-       } else {
-               if (this_count > 0xff)
-                       this_count = 0xff;
-
-               cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
-               cmd[2] = (unsigned char) ((block >> 8) & 0xff);
-               cmd[3] = (unsigned char) block & 0xff;
-               cmd[4] = (unsigned char) this_count;
-               cmd[5] = 0;
-       }
-
-       /*
-        * We shouldn't disconnect in the middle of a sector, so with a dumb
-        * host adapter, it's safe to assume that we can at least transfer
-        * this many bytes between each connect / disconnect.
-        */
-
-       SCpnt->transfersize = rscsi_disks[dev].sector_size;
-       SCpnt->underflow = this_count << 9;
-       SCpnt->cmd_len = 0;
-       scsi_do_cmd(SCpnt, (void *) cmd, buff,
-                   this_count * rscsi_disks[dev].sector_size,
-                   rw_intr,
-                   (SCpnt->device->type == TYPE_DISK ?
-                    SD_TIMEOUT : SD_MOD_TIMEOUT),
-                   MAX_RETRIES);
-}
 
 static int check_scsidisk_media_change(kdev_t full_dev)
 {
@@ -1173,19 +642,17 @@ static int check_scsidisk_media_change(kdev_t full_dev)
        return retval;
 }
 
-static void sd_wait_cmd (Scsi_Cmnd * SCpnt, const void *cmnd ,
-                 void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
-                 int timeout, int retries)
+static void sd_wait_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
+             void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
+                       int timeout, int retries)
 {
        DECLARE_MUTEX_LOCKED(sem);
-       
+
        SCpnt->request.sem = &sem;
        SCpnt->request.rq_status = RQ_SCSI_BUSY;
-       scsi_do_cmd (SCpnt, (void *) cmnd,
-               buffer, bufflen, done, timeout, retries);
-       spin_unlock_irq(&io_request_lock);
-       down (&sem);
-       spin_lock_irq(&io_request_lock);
+       scsi_do_cmd(SCpnt, (void *) cmnd,
+                   buffer, bufflen, done, timeout, retries);
+       down(&sem);
        SCpnt->request.sem = NULL;
 }
 
@@ -1207,6 +674,7 @@ static int sd_init_onedisk(int i)
        unsigned char *buffer;
        unsigned long spintime_value = 0;
        int the_result, retries, spintime;
+       int sector_size;
        Scsi_Cmnd *SCpnt;
 
        /*
@@ -1221,14 +689,13 @@ static int sd_init_onedisk(int i)
        if (rscsi_disks[i].device->online == FALSE) {
                return i;
        }
-       spin_lock_irq(&io_request_lock);
-
        /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
         * considered a fatal error, and many devices report such an error
         * just after a scsi bus reset.
         */
 
-       SCpnt = scsi_allocate_device(NULL, rscsi_disks[i].device, 1);
+       SCpnt = scsi_allocate_device(rscsi_disks[i].device, 1);
+
        buffer = (unsigned char *) scsi_malloc(512);
 
        spintime = 0;
@@ -1237,7 +704,7 @@ static int sd_init_onedisk(int i)
        /* Spinup needs to be done for module loads too. */
        do {
                retries = 0;
-               
+
                while (retries < 3) {
                        cmd[0] = TEST_UNIT_READY;
                        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
@@ -1259,11 +726,9 @@ static int sd_init_onedisk(int i)
                /* Look for non-removable devices that return NOT_READY.
                 * Issue command to spin up drive for these cases. */
                if (the_result && !rscsi_disks[i].device->removable &&
-                   SCpnt->sense_buffer[2] == NOT_READY) 
-               {
+                   SCpnt->sense_buffer[2] == NOT_READY) {
                        unsigned long time1;
-                       if (!spintime) 
-                       {
+                       if (!spintime) {
                                printk("%s: Spinning up disk...", nbuff);
                                cmd[0] = START_STOP;
                                cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
@@ -1275,19 +740,15 @@ static int sd_init_onedisk(int i)
                                SCpnt->sense_buffer[2] = 0;
 
                                sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
-                                       512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
+                                           512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
                        }
-
                        spintime = 1;
                        spintime_value = jiffies;
                        time1 = jiffies + HZ;
-                       spin_unlock_irq(&io_request_lock);
-                       while(time_before(jiffies, time1)); /* Wait 1 second for next try */
+                       while (time_before(jiffies, time1));    /* Wait 1 second for next try */
                        printk(".");
-                       spin_lock_irq(&io_request_lock);
                }
-       } while(the_result && spintime && time_after(spintime_value+100*HZ, jiffies));
-
+       } while (the_result && spintime && time_after(spintime_value + 100 * HZ, jiffies));
        if (spintime) {
                if (the_result)
                        printk("not responding...\n");
@@ -1305,7 +766,7 @@ static int sd_init_onedisk(int i)
                SCpnt->sense_buffer[2] = 0;
 
                sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
-                       8, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
+                           8, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
 
                the_result = SCpnt->result;
                retries--;
@@ -1344,7 +805,7 @@ static int sd_init_onedisk(int i)
                printk("%s : block size assumed to be 512 bytes, disk size 1GB.  \n",
                       nbuff);
                rscsi_disks[i].capacity = 0x1fffff;
-               rscsi_disks[i].sector_size = 512;
+               sector_size = 512;
 
                /* Set dirty bit for removable devices if not ready - sometimes drives
                 * will not report this properly. */
@@ -1363,38 +824,29 @@ static int sd_init_onedisk(int i)
                                               (buffer[2] << 8) |
                                               buffer[3]);
 
-               rscsi_disks[i].sector_size = (buffer[4] << 24) |
+               sector_size = (buffer[4] << 24) |
                    (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
 
-               if (rscsi_disks[i].sector_size == 0) {
-                       rscsi_disks[i].sector_size = 512;
+               if (sector_size == 0) {
+                       sector_size = 512;
                        printk("%s : sector size 0 reported, assuming 512.\n", nbuff);
                }
-               if (rscsi_disks[i].sector_size != 512 &&
-                   rscsi_disks[i].sector_size != 1024 &&
-                   rscsi_disks[i].sector_size != 2048 &&
-                   rscsi_disks[i].sector_size != 4096 &&
-                   rscsi_disks[i].sector_size != 256) {
+               if (sector_size != 512 &&
+                   sector_size != 1024 &&
+                   sector_size != 2048 &&
+                   sector_size != 4096 &&
+                   sector_size != 256) {
                        printk("%s : unsupported sector size %d.\n",
-                              nbuff, rscsi_disks[i].sector_size);
-                       if (rscsi_disks[i].device->removable) {
-                               rscsi_disks[i].capacity = 0;
-                       } else {
-                               printk("scsi : deleting disk entry.\n");
-                               sd_detach(rscsi_disks[i].device);
-                               rscsi_disks[i].device = NULL;
-
-                               /* Wake up a process waiting for device */
-                               wake_up(&SCpnt->device->device_wait);
-                               scsi_release_command(SCpnt);
-                               SCpnt = NULL;
-                               scsi_free(buffer, 512);
-                               spin_unlock_irq(&io_request_lock);
-
-                               return i;
-                       }
+                              nbuff, sector_size);
+                       /*
+                        * The user might want to re-format the drive with
+                        * a supported sectorsize.  Once this happens, it
+                        * would be relatively trivial to set the thing up.
+                        * For this reason, we leave the thing in the table.
+                        */
+                       rscsi_disks[i].capacity = 0;
                }
-               if (rscsi_disks[i].sector_size == 2048) {
+               if (sector_size == 2048) {
                        int m;
 
                        /*
@@ -1414,7 +866,7 @@ static int sd_init_onedisk(int i)
                         */
                        int m, mb;
                        int sz_quot, sz_rem;
-                       int hard_sector = rscsi_disks[i].sector_size;
+                       int hard_sector = sector_size;
                        /* There are 16 minors allocated for each major device */
                        for (m = i << 4; m < ((i + 1) << 4); m++) {
                                sd_hardsizes[m] = hard_sector;
@@ -1429,13 +881,13 @@ static int sd_init_onedisk(int i)
                             nbuff, hard_sector, rscsi_disks[i].capacity,
                               mb, sz_quot, sz_rem);
                }
-               if (rscsi_disks[i].sector_size == 4096)
+               if (sector_size == 4096)
                        rscsi_disks[i].capacity <<= 3;
-               if (rscsi_disks[i].sector_size == 2048)
+               if (sector_size == 2048)
                        rscsi_disks[i].capacity <<= 2;  /* Change into 512 byte sectors */
-               if (rscsi_disks[i].sector_size == 1024)
+               if (sector_size == 1024)
                        rscsi_disks[i].capacity <<= 1;  /* Change into 512 byte sectors */
-               if (rscsi_disks[i].sector_size == 256)
+               if (sector_size == 256)
                        rscsi_disks[i].capacity >>= 1;  /* Change into 512 byte sectors */
        }
 
@@ -1465,7 +917,7 @@ static int sd_init_onedisk(int i)
 
                /* same code as READCAPA !! */
                sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
-                               512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
+                           512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
 
                the_result = SCpnt->result;
 
@@ -1479,15 +931,15 @@ static int sd_init_onedisk(int i)
                }
 
        }                       /* check for write protect */
+       SCpnt->device->ten = 1;
+       SCpnt->device->remap = 1;
+       SCpnt->device->sector_size = sector_size;
        /* Wake up a process waiting for device */
        wake_up(&SCpnt->device->device_wait);
        scsi_release_command(SCpnt);
        SCpnt = NULL;
 
-       rscsi_disks[i].ten = 1;
-       rscsi_disks[i].remap = 1;
        scsi_free(buffer, 512);
-       spin_unlock_irq(&io_request_lock);
        return i;
 }
 
@@ -1572,23 +1024,14 @@ static int sd_init()
        return 0;
 }
 
-/*
- * sd_get_queue() returns the queue which corresponds to a given device.
- */
-static struct request **sd_get_queue(kdev_t dev)
-{
-       return &blk_dev[MAJOR_NR].current_request;
-}
+
 static void sd_finish()
 {
        struct gendisk *gendisk;
        int i;
 
        for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) {
-               /* FIXME: After 2.2 we should implement multiple sd queues */
-               blk_dev[SD_MAJOR(i)].request_fn = DEVICE_REQUEST;
-               if (i)
-                       blk_dev[SD_MAJOR(i)].queue = sd_get_queue;
+               blk_dev[SD_MAJOR(i)].queue = sd_find_queue;
        }
        for (gendisk = gendisk_head; gendisk != NULL; gendisk = gendisk->next)
                if (gendisk == sd_gendisks)
@@ -1658,7 +1101,6 @@ static int sd_attach(Scsi_Device * SDp)
        if (i >= sd_template.dev_max)
                panic("scsi_devices corrupt (sd)");
 
-       SDp->scsi_request_fn = do_sd_request;
        rscsi_disks[i].device = SDp;
        rscsi_disks[i].has_part_table = 0;
        sd_template.nr_dev++;
@@ -1713,7 +1155,7 @@ int revalidate_scsidisk(kdev_t dev, int maxusage)
                 * to make sure that everything remains consistent.
                 */
                sd_blocksizes[index] = 1024;
-               if (rscsi_disks[target].sector_size == 2048)
+               if (rscsi_disks[target].device->sector_size == 2048)
                        sd_blocksizes[index] = 2048;
                else
                        sd_blocksizes[index] = 1024;
@@ -1824,7 +1266,7 @@ void cleanup_module(void)
 
        }
        for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) {
-               blk_dev[SD_MAJOR(i)].request_fn = NULL;
+               blk_cleanup_queue(BLK_DEFAULT_QUEUE(SD_MAJOR(i)));
                blk_size[SD_MAJOR(i)] = NULL;
                hardsect_size[SD_MAJOR(i)] = NULL;
                read_ahead[SD_MAJOR(i)] = 0;
index f893b446f028273b153a37953938311e56c9e94f..9bbfbeb509a40e627802c108f4e0c114a8d8b68a 100644 (file)
@@ -5,7 +5,7 @@
  *
  *      <drew@colorado.edu>
  *
- *       Modified by Eric Youngdale eric@aib.com to
+ *       Modified by Eric Youngdale eric@andante.org to
  *       add scatter-gather, multiple outstanding request, and other
  *       enhancements.
  */
@@ -27,14 +27,11 @@ extern struct hd_struct *sd;
 
 typedef struct scsi_disk {
        unsigned capacity;      /* size in blocks */
-       unsigned sector_size;   /* size in bytes */
        Scsi_Device *device;
        unsigned char ready;    /* flag ready for FLOPTICAL */
        unsigned char write_prot;       /* flag write_protect for rmvable dev */
        unsigned char sector_bit_size;  /* sector_size = 2 to the  bit size power */
        unsigned char sector_bit_shift;         /* power of 2 sectors per FS block */
-       unsigned ten:1;         /* support ten byte read / write */
-       unsigned remap:1;       /* support remapping  */
        unsigned has_part_table:1;      /* has partition table */
 } Scsi_Disk;
 
index 4a2258cb53b1ad5a730af5726d26d469de88f692..97a911ddb0195c33549e29e877e8863ba3045273 100644 (file)
@@ -355,7 +355,6 @@ static ssize_t sg_read(struct file * filp, char * buf,
 static ssize_t sg_write(struct file * filp, const char * buf, 
                         size_t count, loff_t *ppos)
 {
-    unsigned long         flags;
     int                   mxsize, cmd_size, k;
     unsigned char         cmnd[MAX_COMMAND_SIZE];
     int                   input_size;
@@ -432,8 +431,9 @@ static ssize_t sg_write(struct file * filp, const char * buf,
         return k;    /* probably out of space --> ENOMEM */
     }
 /*  SCSI_LOG_TIMEOUT(7, printk("sg_write: allocating device\n")); */
-    if (! (SCpnt = scsi_allocate_device(NULL, sdp->device, 
-                                        !(filp->f_flags & O_NONBLOCK)))) {
+    if (! (SCpnt = scsi_allocate_device(sdp->device, 
+                                        !(filp->f_flags & O_NONBLOCK)))) 
+    {
         sg_finish_rem_req(srp, NULL, 0);
         return -EAGAIN;   /* No available command blocks at the moment */
     }
@@ -448,7 +448,6 @@ static ssize_t sg_write(struct file * filp, const char * buf,
     cmnd[1]= (cmnd[1] & 0x1f) | (sdp->device->lun << 5);
 
 /*  SCSI_LOG_TIMEOUT(7, printk("sg_write: do cmd\n")); */
-    spin_lock_irqsave(&io_request_lock, flags);
     SCpnt->use_sg = srp->data.use_sg;
     SCpnt->sglist_len = srp->data.sglist_len;
     SCpnt->bufflen = srp->data.bufflen;
@@ -467,7 +466,6 @@ static ssize_t sg_write(struct file * filp, const char * buf,
                 (void *)SCpnt->buffer, mxsize,
                 sg_command_done, sfp->timeout, SG_DEFAULT_RETRIES);
     /* 'mxsize' overwrites SCpnt->bufflen, hence need for b_malloc_len */
-    spin_unlock_irqrestore(&io_request_lock, flags);
 /*  SCSI_LOG_TIMEOUT(6, printk("sg_write: sent scsi cmd to mid-level\n")); */
     return count;
 }
@@ -1116,7 +1114,9 @@ static void sg_shorten_timeout(Scsi_Cmnd * scpnt)
         scsi_add_timer(scpnt, scpnt->timeout_per_command,
                        scsi_old_times_out);
 #else
+    spin_unlock_irq(&io_request_lock);
     scsi_sleep(HZ); /* just sleep 1 second and hope ... */
+    spin_lock_irq(&io_request_lock);
 #endif
 }
 
index f7ad10693371d8b5ac26ee0655b53ce9cde7178b..d482a729478a456300c48b3ae00453cd1ffb4fb9 100644 (file)
@@ -1,17 +1,17 @@
 /*
  *  sr.c Copyright (C) 1992 David Giller
- *           Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
  *
  *  adapted from:
  *      sd.c Copyright (C) 1992 Drew Eckhardt
  *      Linux scsi disk driver by
  *              Drew Eckhardt <drew@colorado.edu>
  *
- *      Modified by Eric Youngdale ericy@cais.com to
+ *      Modified by Eric Youngdale ericy@andante.org to
  *      add scatter-gather, multiple outstanding request, and other
  *      enhancements.
  *
- *          Modified by Eric Youngdale eric@aib.com to support loadable
+ *          Modified by Eric Youngdale eric@andante.org to support loadable
  *          low-level scsi drivers.
  *
  *       Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to
@@ -60,17 +60,28 @@ static int sr_attach(Scsi_Device *);
 static int sr_detect(Scsi_Device *);
 static void sr_detach(Scsi_Device *);
 
-struct Scsi_Device_Template sr_template = {
-       NULL, "cdrom", "sr", NULL, TYPE_ROM,
-       SCSI_CDROM_MAJOR, 0, 0, 0, 1,
-       sr_detect, sr_init,
-       sr_finish, sr_attach, sr_detach
+static int sr_init_command(Scsi_Cmnd *);
+
+struct Scsi_Device_Template sr_template =
+{
+       name:"cdrom",
+       tag:"sr",
+       scsi_type:TYPE_ROM,
+       major:SCSI_CDROM_MAJOR,
+       blk:1,
+       detect:sr_detect,
+       init:sr_init,
+       finish:sr_finish,
+       attach:sr_attach,
+       detach:sr_detach,
+       init_command:sr_init_command
 };
 
 Scsi_CD *scsi_CDs = NULL;
 static int *sr_sizes = NULL;
 
 static int *sr_blocksizes = NULL;
+static int *sr_hardsizes = NULL;
 
 static int sr_open(struct cdrom_device_info *, int);
 void get_sectorsize(int);
@@ -82,7 +93,7 @@ static int sr_packet(struct cdrom_device_info *, struct cdrom_generic_command *)
 
 static void sr_release(struct cdrom_device_info *cdi)
 {
-       if (scsi_CDs[MINOR(cdi->dev)].sector_size > 2048)
+       if (scsi_CDs[MINOR(cdi->dev)].device->sector_size > 2048)
                sr_set_blocklength(MINOR(cdi->dev), 2048);
        sync_dev(cdi->dev);
        scsi_CDs[MINOR(cdi->dev)].device->access_count--;
@@ -108,7 +119,7 @@ static struct cdrom_device_ops sr_dops =
        sr_audio_ioctl,         /* audio ioctl */
        sr_dev_ioctl,           /* device-specific ioctl */
        CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED |
-       CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED |
+      CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED |
        CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS |
        CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM |
        CDC_GENERIC_PACKET,
@@ -165,7 +176,7 @@ int sr_media_change(struct cdrom_device_info *cdi, int slot)
                 */
                scsi_CDs[MINOR(cdi->dev)].needs_sector_size = 1;
 
-               scsi_CDs[MINOR(cdi->dev)].sector_size = 2048;
+               scsi_CDs[MINOR(cdi->dev)].device->sector_size = 2048;
        }
        return retval;
 }
@@ -178,7 +189,7 @@ int sr_media_change(struct cdrom_device_info *cdi, int slot)
 static void rw_intr(Scsi_Cmnd * SCpnt)
 {
        int result = SCpnt->result;
-       int this_count = SCpnt->this_count;
+       int this_count = SCpnt->bufflen >> 9;
        int good_sectors = (result == 0 ? this_count : 0);
        int block_sectors = 0;
 
@@ -191,6 +202,7 @@ static void rw_intr(Scsi_Cmnd * SCpnt)
           avoid unnecessary additional work such as memcpy's that could be avoided.
         */
 
+
        if (driver_byte(result) != 0 &&         /* An error occurred */
            SCpnt->sense_buffer[0] == 0xF0 &&   /* Sense data is valid */
            (SCpnt->sense_buffer[2] == MEDIUM_ERROR ||
@@ -205,177 +217,178 @@ static void rw_intr(Scsi_Cmnd * SCpnt)
                        block_sectors = SCpnt->request.bh->b_size >> 9;
                if (block_sectors < 4)
                        block_sectors = 4;
-               if (scsi_CDs[device_nr].sector_size == 2048)
+               if (scsi_CDs[device_nr].device->sector_size == 2048)
                        error_sector <<= 2;
                error_sector &= ~(block_sectors - 1);
                good_sectors = error_sector - SCpnt->request.sector;
                if (good_sectors < 0 || good_sectors >= this_count)
                        good_sectors = 0;
                /*
-                  The SCSI specification allows for the value returned by READ
-                  CAPACITY to be up to 75 2K sectors past the last readable
-                  block.  Therefore, if we hit a medium error within the last
-                  75 2K sectors, we decrease the saved size value.
+                * The SCSI specification allows for the value returned by READ
+                * CAPACITY to be up to 75 2K sectors past the last readable
+                * block.  Therefore, if we hit a medium error within the last
+                * 75 2K sectors, we decrease the saved size value.
                 */
                if ((error_sector >> 1) < sr_sizes[device_nr] &&
                    scsi_CDs[device_nr].capacity - error_sector < 4 * 75)
                        sr_sizes[device_nr] = error_sector >> 1;
        }
-       if (good_sectors > 0) { /* Some sectors were read successfully. */
-               if (SCpnt->use_sg == 0) {
-                       if (SCpnt->buffer != SCpnt->request.buffer) {
-                               int offset;
-                               offset = (SCpnt->request.sector % 4) << 9;
-                               memcpy((char *) SCpnt->request.buffer,
-                                      (char *) SCpnt->buffer + offset,
-                                      good_sectors << 9);
-                               /* Even though we are not using scatter-gather, we look
-                                * ahead and see if there is a linked request for the
-                                * other half of this buffer.  If there is, then satisfy
-                                * it. */
-                               if ((offset == 0) && good_sectors == 2 &&
-                                   SCpnt->request.nr_sectors > good_sectors &&
-                                   SCpnt->request.bh &&
-                                   SCpnt->request.bh->b_reqnext &&
-                                   SCpnt->request.bh->b_reqnext->b_size == 1024) {
-                                       memcpy((char *) SCpnt->request.bh->b_reqnext->b_data,
-                                          (char *) SCpnt->buffer + 1024,
-                                              1024);
-                                       good_sectors += 2;
-                               };
-
-                               scsi_free(SCpnt->buffer, 2048);
-                       }
-               } else {
-                       struct scatterlist *sgpnt;
-                       int i;
-                       sgpnt = (struct scatterlist *) SCpnt->buffer;
-                       for (i = 0; i < SCpnt->use_sg; i++) {
-                               if (sgpnt[i].alt_address) {
-                                       if (sgpnt[i].alt_address != sgpnt[i].address) {
-                                               memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
-                                       };
-                                       scsi_free(sgpnt[i].address, sgpnt[i].length);
-                               };
-                       };
-                       scsi_free(SCpnt->buffer, SCpnt->sglist_len);    /* Free list of scatter-gather pointers */
-                       if (SCpnt->request.sector % 4)
-                               good_sectors -= 2;
-                       /* See   if there is a padding record at the end that needs to be removed */
-                       if (good_sectors > SCpnt->request.nr_sectors)
-                               good_sectors -= 2;
-               };
+       /*
+        * This calls the generic completion function, now that we know
+        * how many actual sectors finished, and how many sectors we need
+        * to say have failed.
+        */
+       scsi_io_completion(SCpnt, good_sectors, block_sectors);
+}
 
-#ifdef DEBUG
-               printk("(%x %x %x) ", SCpnt->request.bh, SCpnt->request.nr_sectors,
-                      good_sectors);
-#endif
-               if (SCpnt->request.nr_sectors > this_count) {
-                       SCpnt->request.errors = 0;
-                       if (!SCpnt->request.bh)
-                               panic("sr.c: linked page request (%lx %x)",
-                                     SCpnt->request.sector, this_count);
-               }
-               SCpnt = end_scsi_request(SCpnt, 1, good_sectors);       /* All done */
-               if (result == 0) {
-                       requeue_sr_request(SCpnt);
-                       return;
-               }
+
+static request_queue_t *sr_find_queue(kdev_t dev)
+{
+       if (MINOR(dev) >= sr_template.dev_max
+           || !scsi_CDs[MINOR(dev)].device)
+               return NULL;    /* No such device */
+       return &scsi_CDs[MINOR(dev)].device->request_queue;
+}
+
+static int sr_init_command(Scsi_Cmnd * SCpnt)
+{
+       int dev, devm, block, this_count;
+
+       devm = MINOR(SCpnt->request.rq_dev);
+       dev = DEVICE_NR(SCpnt->request.rq_dev);
+
+       block = SCpnt->request.sector;
+       this_count = SCpnt->request_bufflen >> 9;
+
+       if (!SCpnt->request.bh) {
+               /*
+                * Umm, yeah, right.   Swapping to a cdrom.  Nice try.
+                */
+               SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+               return 0;
        }
-       if (good_sectors == 0) {
-               /* We only come through here if no sectors were read successfully. */
-
-               /* Free up any indirection buffers we allocated for DMA purposes. */
-               if (SCpnt->use_sg) {
-                       struct scatterlist *sgpnt;
-                       int i;
-                       sgpnt = (struct scatterlist *) SCpnt->buffer;
-                       for (i = 0; i < SCpnt->use_sg; i++) {
-                               if (sgpnt[i].alt_address) {
-                                       scsi_free(sgpnt[i].address, sgpnt[i].length);
-                               }
-                       }
-                       scsi_free(SCpnt->buffer, SCpnt->sglist_len);    /* Free list of scatter-gather pointers */
+       SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %d, block = %d\n", devm, block));
+
+       if (dev >= sr_template.nr_dev ||
+           !scsi_CDs[dev].device ||
+           !scsi_CDs[dev].device->online) {
+               SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
+               SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+               SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
+               return 0;
+       }
+       if (scsi_CDs[dev].device->changed) {
+               /*
+                * quietly refuse to do anything to a changed disc until the changed
+                * bit has been reset
+                */
+               /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+               SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+               return 0;
+       }
+       /*
+        * we do lazy blocksize switching (when reading XA sectors,
+        * see CDROMREADMODE2 ioctl) 
+        */
+       if (scsi_CDs[dev].device->sector_size > 2048) {
+               if (!in_interrupt())
+                       sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048);
+               else
+                       printk("sr: can't switch blocksize: in interrupt\n");
+       }
+       if (SCpnt->request.cmd == WRITE) {
+               SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+               return 0;
+       }
+       if (scsi_CDs[dev].device->sector_size == 1024) {
+               if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+                       printk("sr.c:Bad 1K block number requested (%d %ld)",
+                               block, SCpnt->request.nr_sectors);
+                       SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+                       return 0;
                } else {
-                       if (SCpnt->buffer != SCpnt->request.buffer)
-                               scsi_free(SCpnt->buffer, SCpnt->bufflen);
+                       block = block >> 1;
+                       this_count = this_count >> 1;
                }
-
        }
-       if (driver_byte(result) != 0) {
-               if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
-                       if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
-                               /* detected disc change.  set a bit and quietly refuse
-                                * further access.    */
-
-                               scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
-                               SCpnt = end_scsi_request(SCpnt, 0, this_count);
-                               requeue_sr_request(SCpnt);
-                               return;
-                       }
-               }
-               if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
-                       printk("sr%d: CD-ROM error: ",
-                              DEVICE_NR(SCpnt->request.rq_dev));
-                       print_sense("sr", SCpnt);
-                       printk("command was: ");
-                       print_command(SCpnt->cmnd);
-                       if (scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
-                               scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
-                               requeue_sr_request(SCpnt);
-                               result = 0;
-                               return;
-                       } else {
-                               SCpnt = end_scsi_request(SCpnt, 0, this_count);
-                               requeue_sr_request(SCpnt);      /* Do next request */
-                               return;
-                       }
-
-               }
-               if (SCpnt->sense_buffer[2] == NOT_READY) {
-                       printk(KERN_INFO "sr%d: CD-ROM not ready.  Make sure you have a disc in the drive.\n",
-                              DEVICE_NR(SCpnt->request.rq_dev));
-                       SCpnt = end_scsi_request(SCpnt, 0, this_count);
-                       requeue_sr_request(SCpnt);      /* Do next request */
-                       return;
-               }
-               if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
-                       printk("scsi%d: MEDIUM ERROR on "
-                              "channel %d, id %d, lun %d, CDB: ",
-                              SCpnt->host->host_no, (int) SCpnt->channel,
-                              (int) SCpnt->target, (int) SCpnt->lun);
-                       print_command(SCpnt->cmnd);
-                       print_sense("sr", SCpnt);
-                       SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
-                       requeue_sr_request(SCpnt);
-                       return;
+       if (scsi_CDs[dev].device->sector_size == 2048) {
+               if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
+                       printk("sr.c:Bad 2K block number requested (%d %ld)",
+                               block, SCpnt->request.nr_sectors);
+                       SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+                       return 0;
+               } else {
+                       block = block >> 2;
+                       this_count = this_count >> 2;
                }
-               if (SCpnt->sense_buffer[2] == VOLUME_OVERFLOW) {
-                       printk("scsi%d: VOLUME OVERFLOW on "
-                              "channel %d, id %d, lun %d, CDB: ",
-                              SCpnt->host->host_no, (int) SCpnt->channel,
-                              (int) SCpnt->target, (int) SCpnt->lun);
-                       print_command(SCpnt->cmnd);
-                       print_sense("sr", SCpnt);
-                       SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
-                       requeue_sr_request(SCpnt);
-                       return;
+       }
+       switch (SCpnt->request.cmd) {
+       case WRITE:
+               if (!scsi_CDs[dev].device->writeable) {
+                       SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+                       return 0;
                }
+               SCpnt->cmnd[0] = WRITE_6;
+               break;
+       case READ:
+               SCpnt->cmnd[0] = READ_6;
+               break;
+       default:
+               panic("Unknown sd command %d\n", SCpnt->request.cmd);
        }
-       /* We only get this far if we have an error we have not recognized */
-       if (result) {
-               printk("SCSI CD error : host %d id %d lun %d return code = %03x\n",
-                      scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
-                  scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
-                 scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->lun,
-                      result);
-
-               if (status_byte(result) == CHECK_CONDITION)
-                       print_sense("sr", SCpnt);
-
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
-               requeue_sr_request(SCpnt);
+
+       SCSI_LOG_HLQUEUE(2, printk("sr%d : %s %d/%ld 512 byte blocks.\n",
+                                   devm,
+                  (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
+                                this_count, SCpnt->request.nr_sectors));
+
+       SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0;
+
+       if (((this_count > 0xff) || (block > 0x1fffff)) && SCpnt->device->ten) {
+               if (this_count > 0xffff)
+                       this_count = 0xffff;
+
+               SCpnt->cmnd[0] += READ_10 - READ_6;
+               SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+               SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
+               SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
+               SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+               SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
+               SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
+               SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+       } else {
+               if (this_count > 0xff)
+                       this_count = 0xff;
+
+               SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+               SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
+               SCpnt->cmnd[3] = (unsigned char) block & 0xff;
+               SCpnt->cmnd[4] = (unsigned char) this_count;
+               SCpnt->cmnd[5] = 0;
        }
+
+       /*
+        * We shouldn't disconnect in the middle of a sector, so with a dumb
+        * host adapter, it's safe to assume that we can at least transfer
+        * this many bytes between each connect / disconnect.
+        */
+       SCpnt->transfersize = scsi_CDs[dev].device->sector_size;
+       SCpnt->underflow = this_count << 9;
+
+       SCpnt->allowed = MAX_RETRIES;
+       SCpnt->timeout_per_command = SR_TIMEOUT;
+
+       /*
+        * This is the completion routine we use.  This is matched in terms
+        * of capability to this function.
+        */
+       SCpnt->done = rw_intr;
+
+       /*
+        * This indicates that the command is ready from our end to be
+        * queued.
+        */
+       return 1;
 }
 
 static int sr_open(struct cdrom_device_info *cdi, int purpose)
@@ -416,390 +429,6 @@ static int sr_open(struct cdrom_device_info *cdi, int purpose)
  * translate them to SCSI commands.
  */
 
-static void do_sr_request(void)
-{
-       Scsi_Cmnd *SCpnt = NULL;
-       struct request *req = NULL;
-       Scsi_Device *SDev;
-       int flag = 0;
-
-       while (1 == 1) {
-               if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
-                       return;
-               };
-
-               INIT_SCSI_REQUEST;
-
-               SDev = scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device;
-
-               /*
-                * If the host for this device is in error recovery mode, don't
-                * do anything at all here.  When the host leaves error recovery
-                * mode, it will automatically restart things and start queueing
-                * commands again.
-                */
-               if (SDev->host->in_recovery) {
-                       return;
-               }
-               /*
-                * I am not sure where the best place to do this is.  We need
-                * to hook in a place where we are likely to come if in user
-                * space.
-                */
-               if (SDev->was_reset) {
-                       /*
-                        * We need to relock the door, but we might
-                        * be in an interrupt handler.  Only do this
-                        * from user space, since we do not want to
-                        * sleep from an interrupt.
-                        */
-                       if (SDev->removable && !in_interrupt()) {
-                               spin_unlock_irq(&io_request_lock);      /* FIXME!!!! */
-                               scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
-                               spin_lock_irq(&io_request_lock);        /* FIXME!!!! */
-                               /* scsi_ioctl may allow CURRENT to change, so start over. */
-                               SDev->was_reset = 0;
-                               continue;
-                       }
-                       SDev->was_reset = 0;
-               }
-               /* we do lazy blocksize switching (when reading XA sectors,
-                * see CDROMREADMODE2 ioctl) */
-               if (scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].sector_size > 2048) {
-                       if (!in_interrupt())
-                               sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048);
-#if 1
-                       else
-                               printk("sr: can't switch blocksize: in interrupt\n");
-#endif
-               }
-               if (flag++ == 0)
-                       SCpnt = scsi_allocate_device(&CURRENT,
-                        scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device, 0);
-               else
-                       SCpnt = NULL;
-
-               /* This is a performance enhancement.  We dig down into the request list and
-                * try to find a queueable request (i.e. device not busy, and host able to
-                * accept another command.  If we find one, then we queue it. This can
-                * make a big difference on systems with more than one disk drive.  We want
-                * to have the interrupts off when monkeying with the request list, because
-                * otherwise the kernel might try to slip in a request in between somewhere. */
-
-               if (!SCpnt && sr_template.nr_dev > 1) {
-                       struct request *req1;
-                       req1 = NULL;
-                       req = CURRENT;
-                       while (req) {
-                               SCpnt = scsi_request_queueable(req,
-                               scsi_CDs[DEVICE_NR(req->rq_dev)].device);
-                               if (SCpnt)
-                                       break;
-                               req1 = req;
-                               req = req->next;
-                       }
-                       if (SCpnt && req->rq_status == RQ_INACTIVE) {
-                               if (req == CURRENT)
-                                       CURRENT = CURRENT->next;
-                               else
-                                       req1->next = req->next;
-                       }
-               }
-               if (!SCpnt)
-                       return; /* Could not find anything to do */
-
-               wake_up(&wait_for_request);
-
-               /* Queue command */
-               requeue_sr_request(SCpnt);
-       }                       /* While */
-}
-
-void requeue_sr_request(Scsi_Cmnd * SCpnt)
-{
-       unsigned int dev, block, realcount;
-       unsigned char cmd[10], *buffer, tries;
-       int this_count, start, end_rec;
-
-       tries = 2;
-
-repeat:
-       if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
-               do_sr_request();
-               return;
-       }
-       dev = MINOR(SCpnt->request.rq_dev);
-       block = SCpnt->request.sector;
-       buffer = NULL;
-       this_count = 0;
-
-       if (dev >= sr_template.nr_dev) {
-               /* printk("CD-ROM request error: invalid device.\n");                   */
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-               tries = 2;
-               goto repeat;
-       }
-       if (!scsi_CDs[dev].use) {
-               /* printk("CD-ROM request error: device marked not in use.\n");         */
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-               tries = 2;
-               goto repeat;
-       }
-       if (!scsi_CDs[dev].device->online) {
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-               tries = 2;
-               goto repeat;
-       }
-       if (scsi_CDs[dev].device->changed) {
-               /*
-                * quietly refuse to do anything to a changed disc
-                * until the changed bit has been reset
-                */
-               /* printk("CD-ROM has been changed.  Prohibiting further I/O.\n");      */
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-               tries = 2;
-               goto repeat;
-       }
-       switch (SCpnt->request.cmd) {
-       case WRITE:
-               SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
-               goto repeat;
-               break;
-       case READ:
-               cmd[0] = READ_6;
-               break;
-       default:
-               panic("Unknown sr command %d\n", SCpnt->request.cmd);
-       }
-
-       cmd[1] = (SCpnt->lun << 5) & 0xe0;
-
-       /*
-        * Now do the grungy work of figuring out which sectors we need, and
-        * where in memory we are going to put them.
-        *
-        * The variables we need are:
-        *
-        * this_count= number of 512 byte sectors being read
-        * block     = starting cdrom sector to read.
-        * realcount = # of cdrom sectors to read
-        *
-        * The major difference between a scsi disk and a scsi cdrom
-        * is that we will always use scatter-gather if we can, because we can
-        * work around the fact that the buffer cache has a block size of 1024,
-        * and we have 2048 byte sectors.  This code should work for buffers that
-        * are any multiple of 512 bytes long.
-        */
-
-       SCpnt->use_sg = 0;
-
-       if (SCpnt->host->sg_tablesize > 0 &&
-           (!scsi_need_isa_buffer ||
-            scsi_dma_free_sectors >= 10)) {
-               struct buffer_head *bh;
-               struct scatterlist *sgpnt;
-               int count, this_count_max;
-               bh = SCpnt->request.bh;
-               this_count = 0;
-               count = 0;
-               this_count_max = (scsi_CDs[dev].ten ? 0xffff : 0xff) << 4;
-               /* Calculate how many links we can use.  First see if we need
-                * a padding record at the start */
-               this_count = SCpnt->request.sector % 4;
-               if (this_count)
-                       count++;
-               while (bh && count < SCpnt->host->sg_tablesize) {
-                       if ((this_count + (bh->b_size >> 9)) > this_count_max)
-                               break;
-                       this_count += (bh->b_size >> 9);
-                       count++;
-                       bh = bh->b_reqnext;
-               };
-               /* Fix up in case of an odd record at the end */
-               end_rec = 0;
-               if (this_count % 4) {
-                       if (count < SCpnt->host->sg_tablesize) {
-                               count++;
-                               end_rec = (4 - (this_count % 4)) << 9;
-                               this_count += 4 - (this_count % 4);
-                       } else {
-                               count--;
-                               this_count -= (this_count % 4);
-                       };
-               };
-               SCpnt->use_sg = count;  /* Number of chains */
-               /* scsi_malloc can only allocate in chunks of 512 bytes */
-               count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
-
-               SCpnt->sglist_len = count;
-               sgpnt = (struct scatterlist *) scsi_malloc(count);
-               if (!sgpnt) {
-                       printk("Warning - running *really* short on DMA buffers\n");
-                       SCpnt->use_sg = 0;      /* No memory left - bail out */
-               } else {
-                       buffer = (unsigned char *) sgpnt;
-                       count = 0;
-                       bh = SCpnt->request.bh;
-                       if (SCpnt->request.sector % 4) {
-                               sgpnt[count].length = (SCpnt->request.sector % 4) << 9;
-                               sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
-                               if (!sgpnt[count].address)
-                                       panic("SCSI DMA pool exhausted.");
-                               sgpnt[count].alt_address = sgpnt[count].address;        /* Flag to delete
-                                                                                          if needed */
-                               count++;
-                       };
-                       for (bh = SCpnt->request.bh; count < SCpnt->use_sg;
-                            count++, bh = bh->b_reqnext) {
-                               if (bh) {       /* Need a placeholder at the end of the record? */
-                                       sgpnt[count].address = bh->b_data;
-                                       sgpnt[count].length = bh->b_size;
-                                       sgpnt[count].alt_address = NULL;
-                               } else {
-                                       sgpnt[count].address = (char *) scsi_malloc(end_rec);
-                                       if (!sgpnt[count].address)
-                                               panic("SCSI DMA pool exhausted.");
-                                       sgpnt[count].length = end_rec;
-                                       sgpnt[count].alt_address = sgpnt[count].address;
-                                       if (count + 1 != SCpnt->use_sg)
-                                               panic("Bad sr request list");
-                                       break;
-                               };
-                               if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 >
-                                   ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
-                                       sgpnt[count].alt_address = sgpnt[count].address;
-                                       /* We try to avoid exhausting the DMA pool, since it is easier
-                                        * to control usage here.  In other places we might have a more
-                                        * pressing need, and we would be screwed if we ran out */
-                                       if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 5) {
-                                               sgpnt[count].address = NULL;
-                                       } else {
-                                               sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
-                                       };
-                                       /* If we start running low on DMA buffers, we abort the scatter-gather
-                                        * operation, and free all of the memory we have allocated.  We want to
-                                        * ensure that all scsi operations are able to do at least a non-scatter/gather
-                                        * operation */
-                                       if (sgpnt[count].address == NULL) {     /* Out of dma memory */
-                                               printk("Warning: Running low on SCSI DMA buffers\n");
-                                               /* Try switching back to a non scatter-gather operation. */
-                                               while (--count >= 0) {
-                                                       if (sgpnt[count].alt_address)
-                                                               scsi_free(sgpnt[count].address, sgpnt[count].length);
-                                               };
-                                               SCpnt->use_sg = 0;
-                                               scsi_free(buffer, SCpnt->sglist_len);
-                                               break;
-                                       };      /* if address == NULL */
-                               };      /* if need DMA fixup */
-                       };      /* for loop to fill list */
-#ifdef DEBUG
-                       printk("SR: %d %d %d %d %d *** ", SCpnt->use_sg, SCpnt->request.sector,
-                              this_count,
-                              SCpnt->request.current_nr_sectors,
-                              SCpnt->request.nr_sectors);
-                       for (count = 0; count < SCpnt->use_sg; count++)
-                               printk("SGlist: %d %x %x %x\n", count,
-                                      sgpnt[count].address,
-                                      sgpnt[count].alt_address,
-                                      sgpnt[count].length);
-#endif
-               };              /* Able to allocate scatter-gather list */
-       };
-
-       if (SCpnt->use_sg == 0) {
-               /* We cannot use scatter-gather.  Do this the old fashion way */
-               if (!SCpnt->request.bh)
-                       this_count = SCpnt->request.nr_sectors;
-               else
-                       this_count = (SCpnt->request.bh->b_size >> 9);
-
-               start = block % 4;
-               if (start) {
-                       this_count = ((this_count > 4 - start) ?
-                                     (4 - start) : (this_count));
-                       buffer = (unsigned char *) scsi_malloc(2048);
-               } else if (this_count < 4) {
-                       buffer = (unsigned char *) scsi_malloc(2048);
-               } else {
-                       this_count -= this_count % 4;
-                       buffer = (unsigned char *) SCpnt->request.buffer;
-                       if (virt_to_phys(buffer) + (this_count << 9) > ISA_DMA_THRESHOLD &&
-                           SCpnt->host->unchecked_isa_dma)
-                               buffer = (unsigned char *) scsi_malloc(this_count << 9);
-               }
-       };
-
-       if (scsi_CDs[dev].sector_size == 2048)
-               block = block >> 2;     /* These are the sectors that the cdrom uses */
-       else
-               block = block & 0xfffffffc;
-
-       realcount = (this_count + 3) / 4;
-
-       if (scsi_CDs[dev].sector_size == 512)
-               realcount = realcount << 2;
-
-       /*
-        * Note: The scsi standard says that READ_6 is *optional*, while
-        * READ_10 is mandatory.   Thus there is no point in using
-        * READ_6.
-        */
-       if (scsi_CDs[dev].ten) {
-               if (realcount > 0xffff) {
-                       realcount = 0xffff;
-                       this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
-               }
-               cmd[0] += READ_10 - READ_6;
-               cmd[2] = (unsigned char) (block >> 24) & 0xff;
-               cmd[3] = (unsigned char) (block >> 16) & 0xff;
-               cmd[4] = (unsigned char) (block >> 8) & 0xff;
-               cmd[5] = (unsigned char) block & 0xff;
-               cmd[6] = cmd[9] = 0;
-               cmd[7] = (unsigned char) (realcount >> 8) & 0xff;
-               cmd[8] = (unsigned char) realcount & 0xff;
-       } else {
-               if (realcount > 0xff) {
-                       realcount = 0xff;
-                       this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
-               }
-               cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
-               cmd[2] = (unsigned char) ((block >> 8) & 0xff);
-               cmd[3] = (unsigned char) block & 0xff;
-               cmd[4] = (unsigned char) realcount;
-               cmd[5] = 0;
-       }
-
-#ifdef DEBUG
-       {
-               int i;
-               printk("ReadCD: %d %d %d %d\n", block, realcount, buffer, this_count);
-               printk("Use sg: %d\n", SCpnt->use_sg);
-               printk("Dumping command: ");
-               for (i = 0; i < 12; i++)
-                       printk("%2.2x ", cmd[i]);
-               printk("\n");
-       };
-#endif
-
-       /* Some dumb host adapters can speed transfers by knowing the
-        * minimum transfersize in advance.
-        *
-        * We shouldn't disconnect in the middle of a sector, but the cdrom
-        * sector size can be larger than the size of a buffer and the
-        * transfer may be split to the size of a buffer.  So it's safe to
-        * assume that we can at least transfer the minimum of the buffer
-        * size (1024) and the sector size between each connect / disconnect.
-        */
-
-       SCpnt->transfersize = (scsi_CDs[dev].sector_size > 1024) ?
-           1024 : scsi_CDs[dev].sector_size;
-
-       SCpnt->this_count = this_count;
-       scsi_do_cmd(SCpnt, (void *) cmd, buffer,
-                   realcount * scsi_CDs[dev].sector_size,
-                   rw_intr, SR_TIMEOUT, MAX_RETRIES);
-}
 
 static int sr_detect(Scsi_Device * SDp)
 {
@@ -833,7 +462,7 @@ static int sr_attach(Scsi_Device * SDp)
        if (i >= sr_template.dev_max)
                panic("scsi_devices corrupt (sr)");
 
-       SDp->scsi_request_fn = do_sr_request;
+
        scsi_CDs[i].device = SDp;
 
        sr_template.nr_dev++;
@@ -860,12 +489,13 @@ void get_sectorsize(int i)
        unsigned char cmd[10];
        unsigned char *buffer;
        int the_result, retries;
+       int sector_size;
        Scsi_Cmnd *SCpnt;
 
-       spin_lock_irq(&io_request_lock);
        buffer = (unsigned char *) scsi_malloc(512);
-       SCpnt = scsi_allocate_device(NULL, scsi_CDs[i].device, 1);
-       spin_unlock_irq(&io_request_lock);
+
+
+       SCpnt = scsi_allocate_device(scsi_CDs[i].device, 1);
 
        retries = 3;
        do {
@@ -879,8 +509,8 @@ void get_sectorsize(int i)
 
                /* Do the command and wait.. */
 
-               scsi_wait_cmd (SCpnt, (void *) cmd, (void *) buffer,
-                       512, sr_init_done,  SR_TIMEOUT,  MAX_RETRIES);
+               scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
+                             512, sr_init_done, SR_TIMEOUT, MAX_RETRIES);
 
                the_result = SCpnt->result;
                retries--;
@@ -894,7 +524,7 @@ void get_sectorsize(int i)
 
        if (the_result) {
                scsi_CDs[i].capacity = 0x1fffff;
-               scsi_CDs[i].sector_size = 2048;         /* A guess, just in case */
+               sector_size = 2048;     /* A guess, just in case */
                scsi_CDs[i].needs_sector_size = 1;
        } else {
 #if 0
@@ -905,9 +535,9 @@ void get_sectorsize(int i)
                                                    (buffer[1] << 16) |
                                                    (buffer[2] << 8) |
                                                    buffer[3]);
-               scsi_CDs[i].sector_size = (buffer[4] << 24) |
+               sector_size = (buffer[4] << 24) |
                    (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
-               switch (scsi_CDs[i].sector_size) {
+               switch (sector_size) {
                        /*
                         * HP 4020i CD-Recorder reports 2340 byte sectors
                         * Philips CD-Writers report 2352 byte sectors
@@ -917,7 +547,7 @@ void get_sectorsize(int i)
                case 0:
                case 2340:
                case 2352:
-                       scsi_CDs[i].sector_size = 2048;
+                       sector_size = 2048;
                        /* fall through */
                case 2048:
                        scsi_CDs[i].capacity *= 4;
@@ -926,11 +556,13 @@ void get_sectorsize(int i)
                        break;
                default:
                        printk("sr%d: unsupported sector size %d.\n",
-                              i, scsi_CDs[i].sector_size);
+                              i, sector_size);
                        scsi_CDs[i].capacity = 0;
                        scsi_CDs[i].needs_sector_size = 1;
                }
 
+               scsi_CDs[i].device->sector_size = sector_size;
+
                /*
                 * Add this so that we have the ability to correctly gauge
                 * what the device is capable of.
@@ -959,9 +591,7 @@ void get_capabilities(int i)
                ""
        };
 
-       spin_lock_irq(&io_request_lock);
        buffer = (unsigned char *) scsi_malloc(512);
-       spin_unlock_irq(&io_request_lock);
        cmd[0] = MODE_SENSE;
        cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0;
        cmd[2] = 0x2a;
@@ -1008,19 +638,19 @@ void get_capabilities(int i)
        if ((buffer[n + 3] & 0x1) == 0)
                /* can't write CD-R media */
                scsi_CDs[i].cdi.mask |= CDC_CD_R;
-       if ((buffer[n+6] & 0x8) == 0)
+       if ((buffer[n + 6] & 0x8) == 0)
                /* can't eject */
                scsi_CDs[i].cdi.mask |= CDC_OPEN_TRAY;
 
-       if ((buffer[n+6] >> 5) == mechtype_individual_changer ||
-           (buffer[n+6] >> 5) == mechtype_cartridge_changer)
-               scsi_CDs[i].cdi.capacity = 
-                       cdrom_number_of_slots(&(scsi_CDs[i].cdi));
+       if ((buffer[n + 6] >> 5) == mechtype_individual_changer ||
+           (buffer[n + 6] >> 5) == mechtype_cartridge_changer)
+               scsi_CDs[i].cdi.capacity =
+                   cdrom_number_of_slots(&(scsi_CDs[i].cdi));
        if (scsi_CDs[i].cdi.capacity <= 1)
-                /* not a changer */
+               /* not a changer */
                scsi_CDs[i].cdi.mask |= CDC_SELECT_DISC;
        /*else    I don't think it can close its tray
-               scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
+          scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
 
 
        scsi_free(buffer, 512);
@@ -1039,24 +669,21 @@ static int sr_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command
        int stat;
 
        /* get the device */
-       SCpnt = scsi_allocate_device(NULL, device, 1);
+       SCpnt = scsi_allocate_device(device, 1);
        if (SCpnt == NULL)
                return -ENODEV; /* this just doesn't seem right /axboe */
 
        /* use buffer for ISA DMA */
        buflen = (cgc->buflen + 511) & ~511;
        if (cgc->buffer && SCpnt->host->unchecked_isa_dma &&
-          (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) {
-               spin_lock_irq(&io_request_lock);
+           (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) {
                buffer = scsi_malloc(buflen);
-               spin_unlock_irq(&io_request_lock);
                if (buffer == NULL) {
                        printk("sr: SCSI DMA pool exhausted.");
                        return -ENOMEM;
                }
                memcpy(buffer, cgc->buffer, cgc->buflen);
        }
-
        /* set the LUN */
        cgc->cmd[1] |= device->lun << 5;
 
@@ -1065,8 +692,8 @@ static int sr_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command
        /* scsi_do_cmd sets the command length */
        SCpnt->cmd_len = 0;
 
-       scsi_wait_cmd (SCpnt, (void *)cgc->cmd, (void *)buffer, cgc->buflen,
-               sr_init_done, SR_TIMEOUT, MAX_RETRIES);
+       scsi_wait_cmd(SCpnt, (void *) cgc->cmd, (void *) buffer, cgc->buflen,
+                     sr_init_done, SR_TIMEOUT, MAX_RETRIES);
 
        stat = SCpnt->result;
 
@@ -1080,7 +707,6 @@ static int sr_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command
                memcpy(cgc->buffer, buffer, cgc->buflen);
                scsi_free(buffer, buflen);
        }
-
        return stat;
 }
 
@@ -1113,12 +739,18 @@ static int sr_init()
        sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
                                                 sizeof(int), GFP_ATOMIC);
 
+       sr_hardsizes = (int *) scsi_init_malloc(sr_template.dev_max *
+                                                sizeof(int), GFP_ATOMIC);
        /*
         * These are good guesses for the time being.
         */
        for (i = 0; i < sr_template.dev_max; i++)
+        {
                sr_blocksizes[i] = 2048;
+               sr_hardsizes[i] = 2048;
+        }
        blksize_size[MAJOR_NR] = sr_blocksizes;
+        hardsect_size[MAJOR_NR] = sr_hardsizes;
        return 0;
 }
 
@@ -1127,7 +759,7 @@ void sr_finish()
        int i;
        char name[6];
 
-       blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+       blk_dev[MAJOR_NR].queue = sr_find_queue;
        blk_size[MAJOR_NR] = sr_sizes;
 
        for (i = 0; i < sr_template.nr_dev; ++i) {
@@ -1136,7 +768,7 @@ void sr_finish()
                if (scsi_CDs[i].capacity)
                        continue;
                scsi_CDs[i].capacity = 0x1fffff;
-               scsi_CDs[i].sector_size = 2048;         /* A guess, just in case */
+               scsi_CDs[i].device->sector_size = 2048;         /* A guess, just in case */
                scsi_CDs[i].needs_sector_size = 1;
                scsi_CDs[i].device->changed = 1;        /* force recheck CD type */
 #if 0
@@ -1145,8 +777,9 @@ void sr_finish()
                printk("Scd sectorsize = %d bytes.\n", scsi_CDs[i].sector_size);
 #endif
                scsi_CDs[i].use = 1;
-               scsi_CDs[i].ten = 1;
-               scsi_CDs[i].remap = 1;
+
+               scsi_CDs[i].device->ten = 1;
+               scsi_CDs[i].device->remap = 1;
                scsi_CDs[i].readcd_known = 0;
                scsi_CDs[i].readcd_cdda = 0;
                sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
@@ -1234,9 +867,12 @@ void cleanup_module(void)
 
                scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int));
                sr_blocksizes = NULL;
+               scsi_init_free((char *) sr_hardsizes, sr_template.dev_max * sizeof(int));
+               sr_hardsizes = NULL;
        }
        blksize_size[MAJOR_NR] = NULL;
-       blk_dev[MAJOR_NR].request_fn = NULL;
+        hardsect_size[MAJOR_NR] = sr_hardsizes;
+       blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
        blk_size[MAJOR_NR] = NULL;
        read_ahead[MAJOR_NR] = 0;
 
index defb1d3589256eddb2dea6fcf2c1b21da144eb5d..e4aad11ab1aac6d327d9c47c9ea9190522c739f6 100644 (file)
@@ -9,7 +9,7 @@
  *
  *      <drew@colorado.edu>
  *
- *       Modified by Eric Youngdale eric@aib.com to
+ *       Modified by Eric Youngdale eric@andante.org to
  *       add scatter-gather, multiple outstanding request, and other
  *       enhancements.
  */
 
 typedef struct {
        unsigned capacity;      /* size in blocks                       */
-       unsigned sector_size;   /* size in bytes                        */
        Scsi_Device *device;
        unsigned int vendor;    /* vendor code, see sr_vendor.c         */
        unsigned long ms_offset;        /* for reading multisession-CD's        */
        unsigned char sector_bit_size;  /* sector size = 2^sector_bit_size      */
        unsigned char sector_bit_shift;         /* sectors/FS block = 2^sector_bit_shift */
        unsigned needs_sector_size:1;   /* needs to get sector size */
-       unsigned ten:1;         /* support ten byte commands            */
-       unsigned remap:1;       /* support remapping                    */
        unsigned use:1;         /* is this device still supportable     */
        unsigned xa_flag:1;     /* CD has XA sectors ? */
        unsigned readcd_known:1;        /* drive supports READ_CD (0xbe) */
index 50d1a19b365cce751f4d30250fdc04038a1a4849..df963da1f97d6010ddda065deea042f0d42448c5 100644 (file)
@@ -16,7 +16,7 @@
 #include "sr.h"
 
 #if 0
-# define DEBUG
+#define DEBUG
 #endif
 
 /* The sr_is_xa() seems to trigger firmware bugs with some drives :-(
@@ -32,134 +32,121 @@ extern void get_sectorsize(int);
 
 static void sr_ioctl_done(Scsi_Cmnd * SCpnt)
 {
-    struct request * req;
-    
-    req = &SCpnt->request;
-    req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
-
-    if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) {
-       memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen);
-       scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511);
-       SCpnt->buffer = req->buffer;
-    } 
-    
-    if (req->sem != NULL) {
-       up(req->sem);
-    }
+       struct request *req;
+
+       req = &SCpnt->request;
+       req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */
+
+       if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) {
+               memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen);
+               scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511);
+               SCpnt->buffer = req->buffer;
+       }
+       if (req->sem != NULL) {
+               up(req->sem);
+       }
 }
 
 /* We do our own retries because we want to know what the specific
    error code is.  Normally the UNIT_ATTENTION code will automatically
    clear after one error */
 
-int sr_do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength, int quiet)
+int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet)
 {
-    Scsi_Cmnd * SCpnt;
-    Scsi_Device * SDev;
-    int result, err = 0, retries = 0;
-    unsigned long flags;
-    char * bounce_buffer;
-
-    spin_lock_irqsave(&io_request_lock, flags);
-    SDev  = scsi_CDs[target].device;
-    SCpnt = scsi_allocate_device(NULL, scsi_CDs[target].device, 1);
-    spin_unlock_irqrestore(&io_request_lock, flags);
-
-    /* use ISA DMA buffer if necessary */
-    SCpnt->request.buffer=buffer;
-    if (buffer && SCpnt->host->unchecked_isa_dma &&
-       (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
-       bounce_buffer = (char *)scsi_malloc((buflength + 511) & ~511);
-       if (bounce_buffer == NULL) {
-               printk("SCSI DMA pool exhausted.");
-               return -ENOMEM;
-       }
-       memcpy(bounce_buffer, (char *)buffer, buflength);
-       buffer = bounce_buffer;
-    }
-
-retry:
-    if( !scsi_block_when_processing_errors(SDev) )
-        return -ENODEV;
-
-    scsi_wait_cmd(SCpnt, (void *)sr_cmd, (void *)buffer, buflength,
-                 sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES);
-
-    result = SCpnt->result;
-    
-    /* Minimal error checking.  Ignore cases we know about, and report the rest. */
-    if(driver_byte(result) != 0) {
-       switch(SCpnt->sense_buffer[2] & 0xf) {
-       case UNIT_ATTENTION:
-           scsi_CDs[target].device->changed = 1;
-           if (!quiet)
-               printk(KERN_INFO "sr%d: disc change detected.\n", target);
-           if (retries++ < 10)
-               goto retry;
-           err = -ENOMEDIUM;
-           break;
-       case NOT_READY: /* This happens if there is no disc in drive */
-            if (SCpnt->sense_buffer[12] == 0x04 &&
-                SCpnt->sense_buffer[13] == 0x01) {
-                /* sense: Logical unit is in process of becoming ready */
-                if (!quiet)
-                    printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target);
-               if (retries++ < 10) {
-                   /* sleep 2 sec and try again */
-                   /*
-                    * The spinlock is silly - we should really lock more of this
-                    * function, but the minimal locking required to not lock up
-                    * is around this - scsi_sleep() assumes we hold the spinlock.
-                    */
-                   spin_lock_irqsave(&io_request_lock, flags);
-                   scsi_sleep(2*HZ);
-                   spin_unlock_irqrestore(&io_request_lock, flags);
-                    goto retry;
-               } else {
-                   /* 20 secs are enough? */
-                   err = -ENOMEDIUM;
-                   break;
+       Scsi_Cmnd *SCpnt;
+       Scsi_Device *SDev;
+       int result, err = 0, retries = 0;
+       unsigned long flags;
+       char *bounce_buffer;
+
+       SDev = scsi_CDs[target].device;
+       SCpnt = scsi_allocate_device(scsi_CDs[target].device, 1);
+
+       /* use ISA DMA buffer if necessary */
+       SCpnt->request.buffer = buffer;
+       if (buffer && SCpnt->host->unchecked_isa_dma &&
+           (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
+               bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511);
+               if (bounce_buffer == NULL) {
+                       printk("SCSI DMA pool exhausted.");
+                       return -ENOMEM;
                }
-            }
-           if (!quiet)
-               printk(KERN_INFO "sr%d: CDROM not ready.  Make sure there is a disc in the drive.\n",target);
+               memcpy(bounce_buffer, (char *) buffer, buflength);
+               buffer = bounce_buffer;
+       }
+      retry:
+       if (!scsi_block_when_processing_errors(SDev))
+               return -ENODEV;
+
+
+       scsi_wait_cmd(SCpnt, (void *) sr_cmd, (void *) buffer, buflength,
+                     sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES);
+
+       result = SCpnt->result;
+
+       /* Minimal error checking.  Ignore cases we know about, and report the rest. */
+       if (driver_byte(result) != 0) {
+               switch (SCpnt->sense_buffer[2] & 0xf) {
+               case UNIT_ATTENTION:
+                       scsi_CDs[target].device->changed = 1;
+                       if (!quiet)
+                               printk(KERN_INFO "sr%d: disc change detected.\n", target);
+                       if (retries++ < 10)
+                               goto retry;
+                       err = -ENOMEDIUM;
+                       break;
+               case NOT_READY: /* This happens if there is no disc in drive */
+                       if (SCpnt->sense_buffer[12] == 0x04 &&
+                           SCpnt->sense_buffer[13] == 0x01) {
+                               /* sense: Logical unit is in process of becoming ready */
+                               if (!quiet)
+                                       printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target);
+                               if (retries++ < 10) {
+                                       /* sleep 2 sec and try again */
+                                       scsi_sleep(2 * HZ);
+                                       goto retry;
+                               } else {
+                                       /* 20 secs are enough? */
+                                       err = -ENOMEDIUM;
+                                       break;
+                               }
+                       }
+                       if (!quiet)
+                               printk(KERN_INFO "sr%d: CDROM not ready.  Make sure there is a disc in the drive.\n", target);
 #ifdef DEBUG
-            print_sense("sr", SCpnt);
+                       print_sense("sr", SCpnt);
 #endif
-            err = -ENOMEDIUM;
-           break;
-       case ILLEGAL_REQUEST:
-            if (!quiet)
-               printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL "
-                      "REQUEST.\n", target);
-            if (SCpnt->sense_buffer[12] == 0x20 &&
-                SCpnt->sense_buffer[13] == 0x00) {
-                /* sense: Invalid command operation code */
-                err = -EDRIVE_CANT_DO_THIS;
-            } else {
-                err = -EINVAL;
-            }
+                       err = -ENOMEDIUM;
+                       break;
+               case ILLEGAL_REQUEST:
+                       if (!quiet)
+                               printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL "
+                                      "REQUEST.\n", target);
+                       if (SCpnt->sense_buffer[12] == 0x20 &&
+                           SCpnt->sense_buffer[13] == 0x00) {
+                               /* sense: Invalid command operation code */
+                               err = -EDRIVE_CANT_DO_THIS;
+                       } else {
+                               err = -EINVAL;
+                       }
 #ifdef DEBUG
-           print_command(sr_cmd);
-            print_sense("sr", SCpnt);
+                       print_command(sr_cmd);
+                       print_sense("sr", SCpnt);
 #endif
-           break;
-       default:
-           printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target);
-           print_command(sr_cmd);
-           print_sense("sr", SCpnt);
-            err = -EIO;
+                       break;
+               default:
+                       printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target);
+                       print_command(sr_cmd);
+                       print_sense("sr", SCpnt);
+                       err = -EIO;
+               }
        }
-    }
-   
-    spin_lock_irqsave(&io_request_lock, flags);
-    result = SCpnt->result;
-    /* Wake up a process waiting for device*/
-    wake_up(&SCpnt->device->device_wait);
-    scsi_release_command(SCpnt);
-    SCpnt = NULL;
-    spin_unlock_irqrestore(&io_request_lock, flags);
-    return err;
+       result = SCpnt->result;
+       /* Wake up a process waiting for device */
+       wake_up(&SCpnt->device->device_wait);
+       scsi_release_command(SCpnt);
+       SCpnt = NULL;
+       return err;
 }
 
 /* ---------------------------------------------------------------------- */
@@ -167,95 +154,94 @@ retry:
 
 static int test_unit_ready(int minor)
 {
-       u_char  sr_cmd[10];
+       u_char sr_cmd[10];
 
-        sr_cmd[0] = GPCMD_TEST_UNIT_READY;
-        sr_cmd[1] = ((scsi_CDs[minor].device -> lun) << 5);
-        sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
-        return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1);
+       sr_cmd[0] = GPCMD_TEST_UNIT_READY;
+       sr_cmd[1] = ((scsi_CDs[minor].device->lun) << 5);
+       sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+       return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1);
 }
 
 int sr_tray_move(struct cdrom_device_info *cdi, int pos)
 {
-        u_char  sr_cmd[10];
-
-        sr_cmd[0] = GPCMD_START_STOP_UNIT;
-        sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device -> lun) << 5);
-        sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
-        sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */;
-       
-        return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0);
+       u_char sr_cmd[10];
+
+       sr_cmd[0] = GPCMD_START_STOP_UNIT;
+       sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5);
+       sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+       sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
+
+       return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0);
 }
 
 int sr_lock_door(struct cdrom_device_info *cdi, int lock)
 {
-        return scsi_ioctl (scsi_CDs[MINOR(cdi->dev)].device,
-                           lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK,
-                           0);
+       return scsi_ioctl(scsi_CDs[MINOR(cdi->dev)].device,
+                     lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK,
+                         0);
 }
 
 int sr_drive_status(struct cdrom_device_info *cdi, int slot)
 {
-        if (CDSL_CURRENT != slot) {
-                /* we have no changer support */
-                return -EINVAL;
-        }
-
-        if (0 == test_unit_ready(MINOR(cdi->dev)))
-            return CDS_DISC_OK;
+       if (CDSL_CURRENT != slot) {
+               /* we have no changer support */
+               return -EINVAL;
+       }
+       if (0 == test_unit_ready(MINOR(cdi->dev)))
+               return CDS_DISC_OK;
 
-        return CDS_TRAY_OPEN;
+       return CDS_TRAY_OPEN;
 }
 
 int sr_disk_status(struct cdrom_device_info *cdi)
 {
-       struct cdrom_tochdr    toc_h;
-       struct cdrom_tocentry  toc_e;
-        int                    i,rc,have_datatracks = 0;
-
-        /* look for data tracks */
-        if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h)))
-                return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO;
-
-        for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) {
-                toc_e.cdte_track  = i;
-                toc_e.cdte_format = CDROM_LBA;
-                if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e))
-                        return CDS_NO_INFO;
-                if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) {
-                        have_datatracks = 1;
-                        break;
-                }
-        }
-        if (!have_datatracks)
-            return CDS_AUDIO;
-
-        if (scsi_CDs[MINOR(cdi->dev)].xa_flag)
-            return CDS_XA_2_1;
-        else
-            return CDS_DATA_1;
+       struct cdrom_tochdr toc_h;
+       struct cdrom_tocentry toc_e;
+       int i, rc, have_datatracks = 0;
+
+       /* look for data tracks */
+       if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h)))
+               return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO;
+
+       for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) {
+               toc_e.cdte_track = i;
+               toc_e.cdte_format = CDROM_LBA;
+               if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e))
+                       return CDS_NO_INFO;
+               if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) {
+                       have_datatracks = 1;
+                       break;
+               }
+       }
+       if (!have_datatracks)
+               return CDS_AUDIO;
+
+       if (scsi_CDs[MINOR(cdi->dev)].xa_flag)
+               return CDS_XA_2_1;
+       else
+               return CDS_DATA_1;
 }
 
 int sr_get_last_session(struct cdrom_device_info *cdi,
-                        struct cdrom_multisession* ms_info)
+                       struct cdrom_multisession *ms_info)
 {
-        ms_info->addr.lba=scsi_CDs[MINOR(cdi->dev)].ms_offset;
-        ms_info->xa_flag=scsi_CDs[MINOR(cdi->dev)].xa_flag ||
-            (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0);
+       ms_info->addr.lba = scsi_CDs[MINOR(cdi->dev)].ms_offset;
+       ms_info->xa_flag = scsi_CDs[MINOR(cdi->dev)].xa_flag ||
+           (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0);
 
        return 0;
 }
 
-int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn)
+int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
 {
-        u_char  sr_cmd[10];
+       u_char sr_cmd[10];
        char buffer[32];
-        int result;
-               
+       int result;
+
        sr_cmd[0] = GPCMD_READ_SUBCHANNEL;
        sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5);
-       sr_cmd[2] = 0x40;    /* I do want the subchannel info */
-       sr_cmd[3] = 0x02;    /* Give me medium catalog number info */
+       sr_cmd[2] = 0x40;       /* I do want the subchannel info */
+       sr_cmd[3] = 0x02;       /* Give me medium catalog number info */
        sr_cmd[4] = sr_cmd[5] = 0;
        sr_cmd[6] = 0;
        sr_cmd[7] = 0;
@@ -263,9 +249,9 @@ int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn)
        sr_cmd[9] = 0;
 
        result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0);
-       
-       memcpy (mcn->medium_catalog_number, buffer + 9, 13);
-        mcn->medium_catalog_number[13] = 0;
+
+       memcpy(mcn->medium_catalog_number, buffer + 9, 13);
+       mcn->medium_catalog_number[13] = 0;
 
        return result;
 }
@@ -273,26 +259,26 @@ int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn)
 int sr_reset(struct cdrom_device_info *cdi)
 {
        invalidate_buffers(cdi->dev);
-       return 0;        
+       return 0;
 }
 
 int sr_select_speed(struct cdrom_device_info *cdi, int speed)
 {
-        u_char  sr_cmd[12];
-
-        if (speed == 0)
-            speed = 0xffff; /* set to max */
-        else
-            speed *= 177;   /* Nx to kbyte/s */
-        
-       memset(sr_cmd,0,12);
-       sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */
+       u_char sr_cmd[12];
+
+       if (speed == 0)
+               speed = 0xffff; /* set to max */
+       else
+               speed *= 177;   /* Nx to kbyte/s */
+
+       memset(sr_cmd, 0, 12);
+       sr_cmd[0] = GPCMD_SET_SPEED;    /* SET CD SPEED */
        sr_cmd[1] = (scsi_CDs[MINOR(cdi->dev)].device->lun) << 5;
-       sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
-       sr_cmd[3] =  speed       & 0xff; /* LSB */
+       sr_cmd[2] = (speed >> 8) & 0xff;        /* MSB for speed (in kbytes/sec) */
+       sr_cmd[3] = speed & 0xff;       /* LSB */
 
-        if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0))
-            return -EIO;
+       if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0))
+               return -EIO;
        return 0;
 }
 
@@ -302,73 +288,72 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed)
 /* only cdromreadtochdr and cdromreadtocentry are left - for use with the  */
 /* sr_disk_status interface for the generic cdrom driver.                  */
 
-int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, voidarg)
+int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
 {
-    u_char  sr_cmd[10];    
-    int result, target  = MINOR(cdi->dev);
-    unsigned char buffer[32];
-    
-    switch (cmd) 
-    {
-    case CDROMREADTOCHDR:
-    {
-       struct cdrom_tochdr* tochdr = (struct cdrom_tochdr*)arg;
-       
-       sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
-       sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
-       sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
-       sr_cmd[6] = 0;
-       sr_cmd[7] = 0;              /* MSB of length (12) */
-       sr_cmd[8] = 12;             /* LSB of length */
-       sr_cmd[9] = 0;
-       
-       result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1);
-       
-       tochdr->cdth_trk0 = buffer[2];
-       tochdr->cdth_trk1 = buffer[3];
-       
-        break;
-    }
-       
-    case CDROMREADTOCENTRY:
-    {
-       struct cdrom_tocentry* tocentry = (struct cdrom_tocentry*)arg;
-       
-       sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
-       sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) |
-          (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
-       sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
-       sr_cmd[6] = tocentry->cdte_track;
-       sr_cmd[7] = 0;             /* MSB of length (12)  */
-       sr_cmd[8] = 12;            /* LSB of length */
-       sr_cmd[9] = 0;
-       
-       result = sr_do_ioctl (target, sr_cmd, buffer, 12, 0);
-       
-        tocentry->cdte_ctrl = buffer[5] & 0xf; 
-        tocentry->cdte_adr = buffer[5] >> 4;
-        tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
-       if (tocentry->cdte_format == CDROM_MSF) {
-           tocentry->cdte_addr.msf.minute = buffer[9];
-           tocentry->cdte_addr.msf.second = buffer[10];
-           tocentry->cdte_addr.msf.frame = buffer[11];
-       } else
-           tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
-                                       + buffer[10]) << 8) + buffer[11];
-       
-        break;
-    }
-
-    default:
-        return -EINVAL;
-    }
+       u_char sr_cmd[10];
+       int result, target = MINOR(cdi->dev);
+       unsigned char buffer[32];
+
+       switch (cmd) {
+       case CDROMREADTOCHDR:
+               {
+                       struct cdrom_tochdr *tochdr = (struct cdrom_tochdr *) arg;
+
+                       sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+                       sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
+                       sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+                       sr_cmd[6] = 0;
+                       sr_cmd[7] = 0;  /* MSB of length (12) */
+                       sr_cmd[8] = 12;         /* LSB of length */
+                       sr_cmd[9] = 0;
+
+                       result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1);
+
+                       tochdr->cdth_trk0 = buffer[2];
+                       tochdr->cdth_trk1 = buffer[3];
+
+                       break;
+               }
+
+       case CDROMREADTOCENTRY:
+               {
+                       struct cdrom_tocentry *tocentry = (struct cdrom_tocentry *) arg;
+
+                       sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+                       sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) |
+                           (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
+                       sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+                       sr_cmd[6] = tocentry->cdte_track;
+                       sr_cmd[7] = 0;  /* MSB of length (12)  */
+                       sr_cmd[8] = 12;         /* LSB of length */
+                       sr_cmd[9] = 0;
+
+                       result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0);
+
+                       tocentry->cdte_ctrl = buffer[5] & 0xf;
+                       tocentry->cdte_adr = buffer[5] >> 4;
+                       tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
+                       if (tocentry->cdte_format == CDROM_MSF) {
+                               tocentry->cdte_addr.msf.minute = buffer[9];
+                               tocentry->cdte_addr.msf.second = buffer[10];
+                               tocentry->cdte_addr.msf.frame = buffer[11];
+                       } else
+                               tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+                                       + buffer[10]) << 8) + buffer[11];
+
+                       break;
+               }
+
+       default:
+               return -EINVAL;
+       }
 
 #if 0
-    if (result)
-        printk("DEBUG: sr_audio: result for ioctl %x: %x\n",cmd,result);
+       if (result)
+               printk("DEBUG: sr_audio: result for ioctl %x: %x\n", cmd, result);
 #endif
-    
-    return result;
+
+       return result;
 }
 
 /* -----------------------------------------------------------------------
@@ -385,73 +370,78 @@ int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void* arg)
  * blksize: 2048 | 2336 | 2340 | 2352
  */
 
-int
-sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize)
+int sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize)
 {
-    unsigned char  cmd[12];
+       unsigned char cmd[12];
 
 #ifdef DEBUG
-    printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n",
-           minor,lba,format,blksize);
+       printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n",
+              minor, lba, format, blksize);
 #endif
 
-    memset(cmd,0,12);
-    cmd[0] = GPCMD_READ_CD; /* READ_CD */
-    cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2);
-    cmd[2] = (unsigned char)(lba >> 24) & 0xff;
-    cmd[3] = (unsigned char)(lba >> 16) & 0xff;
-    cmd[4] = (unsigned char)(lba >>  8) & 0xff;
-    cmd[5] = (unsigned char) lba        & 0xff;
-    cmd[8] = 1;
-    switch (blksize) {
-    case 2336: cmd[9] = 0x58; break;
-    case 2340: cmd[9] = 0x78; break;
-    case 2352: cmd[9] = 0xf8; break;
-    default:   cmd[9] = 0x10; break;
-    }
-    return sr_do_ioctl(minor, cmd, dest, blksize, 0);
+       memset(cmd, 0, 12);
+       cmd[0] = GPCMD_READ_CD; /* READ_CD */
+       cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2);
+       cmd[2] = (unsigned char) (lba >> 24) & 0xff;
+       cmd[3] = (unsigned char) (lba >> 16) & 0xff;
+       cmd[4] = (unsigned char) (lba >> 8) & 0xff;
+       cmd[5] = (unsigned char) lba & 0xff;
+       cmd[8] = 1;
+       switch (blksize) {
+       case 2336:
+               cmd[9] = 0x58;
+               break;
+       case 2340:
+               cmd[9] = 0x78;
+               break;
+       case 2352:
+               cmd[9] = 0xf8;
+               break;
+       default:
+               cmd[9] = 0x10;
+               break;
+       }
+       return sr_do_ioctl(minor, cmd, dest, blksize, 0);
 }
 
 /*
  * read sectors with blocksizes other than 2048
  */
 
-int
-sr_read_sector(int minor, int lba, int blksize, unsigned char *dest)
+int sr_read_sector(int minor, int lba, int blksize, unsigned char *dest)
 {
-    unsigned char   cmd[12];    /* the scsi-command */
-    int             rc;
-
-    /* we try the READ CD command first... */
-    if (scsi_CDs[minor].readcd_known) {
-        rc = sr_read_cd(minor, dest, lba, 0, blksize);
-        if (-EDRIVE_CANT_DO_THIS != rc)
-            return rc;
-        scsi_CDs[minor].readcd_known = 0;
-        printk("CDROM does'nt support READ CD (0xbe) command\n");
-        /* fall & retry the other way */
-    }
-
-    /* ... if this fails, we switch the blocksize using MODE SELECT */
-    if (blksize != scsi_CDs[minor].sector_size)
-        if (0 != (rc = sr_set_blocklength(minor, blksize)))
-            return rc;
-
+       unsigned char cmd[12];  /* the scsi-command */
+       int rc;
+
+       /* we try the READ CD command first... */
+       if (scsi_CDs[minor].readcd_known) {
+               rc = sr_read_cd(minor, dest, lba, 0, blksize);
+               if (-EDRIVE_CANT_DO_THIS != rc)
+                       return rc;
+               scsi_CDs[minor].readcd_known = 0;
+               printk("CDROM does'nt support READ CD (0xbe) command\n");
+               /* fall & retry the other way */
+       }
+       /* ... if this fails, we switch the blocksize using MODE SELECT */
+       if (blksize != scsi_CDs[minor].device->sector_size) {
+               if (0 != (rc = sr_set_blocklength(minor, blksize)))
+                       return rc;
+       }
 #ifdef DEBUG
-    printk("sr%d: sr_read_sector lba=%d blksize=%d\n",minor,lba,blksize);
+       printk("sr%d: sr_read_sector lba=%d blksize=%d\n", minor, lba, blksize);
 #endif
-    
-    memset(cmd,0,12);
-    cmd[0] = GPCMD_READ_10;
-    cmd[1] = (scsi_CDs[minor].device->lun << 5);
-    cmd[2] = (unsigned char)(lba >> 24) & 0xff;
-    cmd[3] = (unsigned char)(lba >> 16) & 0xff;
-    cmd[4] = (unsigned char)(lba >>  8) & 0xff;
-    cmd[5] = (unsigned char) lba        & 0xff;
-    cmd[8] = 1;
-    rc = sr_do_ioctl(minor, cmd, dest, blksize, 0);
-    
-    return rc;
+
+       memset(cmd, 0, 12);
+       cmd[0] = GPCMD_READ_10;
+       cmd[1] = (scsi_CDs[minor].device->lun << 5);
+       cmd[2] = (unsigned char) (lba >> 24) & 0xff;
+       cmd[3] = (unsigned char) (lba >> 16) & 0xff;
+       cmd[4] = (unsigned char) (lba >> 8) & 0xff;
+       cmd[5] = (unsigned char) lba & 0xff;
+       cmd[8] = 1;
+       rc = sr_do_ioctl(minor, cmd, dest, blksize, 0);
+
+       return rc;
 }
 
 /*
@@ -459,55 +449,50 @@ sr_read_sector(int minor, int lba, int blksize, unsigned char *dest)
  * ret: 1 == mode2 (XA), 0 == mode1, <0 == error 
  */
 
-int
-sr_is_xa(int minor)
+int sr_is_xa(int minor)
 {
-    unsigned char *raw_sector;
-    int is_xa;
-    unsigned long flags;
-    
-    if (!xa_test)
-        return 0;
-   
-    spin_lock_irqsave(&io_request_lock, flags);
-    raw_sector = (unsigned char *) scsi_malloc(2048+512);
-    spin_unlock_irqrestore(&io_request_lock, flags);
-    if (!raw_sector) return -ENOMEM;
-    if (0 == sr_read_sector(minor,scsi_CDs[minor].ms_offset+16,
-                            CD_FRAMESIZE_RAW1,raw_sector)) {
-        is_xa = (raw_sector[3] == 0x02) ? 1 : 0;
-    } else {
-        /* read a raw sector failed for some reason. */
-        is_xa = -1;
-    }
-    spin_lock_irqsave(&io_request_lock, flags);
-    scsi_free(raw_sector, 2048+512);
-    spin_unlock_irqrestore(&io_request_lock, flags);
+       unsigned char *raw_sector;
+       int is_xa;
+
+       if (!xa_test)
+               return 0;
+
+       raw_sector = (unsigned char *) scsi_malloc(2048 + 512);
+       if (!raw_sector)
+               return -ENOMEM;
+       if (0 == sr_read_sector(minor, scsi_CDs[minor].ms_offset + 16,
+                               CD_FRAMESIZE_RAW1, raw_sector)) {
+               is_xa = (raw_sector[3] == 0x02) ? 1 : 0;
+       } else {
+               /* read a raw sector failed for some reason. */
+               is_xa = -1;
+       }
+       scsi_free(raw_sector, 2048 + 512);
 #ifdef DEBUG
-    printk("sr%d: sr_is_xa: %d\n",minor,is_xa);
+       printk("sr%d: sr_is_xa: %d\n", minor, is_xa);
 #endif
-    return is_xa;
+       return is_xa;
 }
 
 int sr_dev_ioctl(struct cdrom_device_info *cdi,
-                 unsigned int cmd, unsigned long arg)
+                unsigned int cmd, unsigned long arg)
 {
-    int target;
-    
-    target = MINOR(cdi->dev);
-    
-    switch (cmd) {
-    case BLKROSET:
-    case BLKROGET:
-    case BLKRASET:
-    case BLKRAGET:
-    case BLKFLSBUF:
-    case BLKSSZGET:
-       return blk_ioctl(cdi->dev, cmd, arg);
-
-    default:
-       return scsi_ioctl(scsi_CDs[target].device,cmd,(void *) arg);
-    }
+       int target;
+
+       target = MINOR(cdi->dev);
+
+       switch (cmd) {
+       case BLKROSET:
+       case BLKROGET:
+       case BLKRASET:
+       case BLKRAGET:
+       case BLKFLSBUF:
+       case BLKSSZGET:
+               return blk_ioctl(cdi->dev, cmd, arg);
+
+       default:
+               return scsi_ioctl(scsi_CDs[target].device, cmd, (void *) arg);
+       }
 }
 
 /*
index 0240197dbb2aa14bc26db3294ff3f38794ce34e0..56f4f004de187990990159ae6a4c6af80ff43bd2 100644 (file)
@@ -1,5 +1,5 @@
 /* -*-linux-c-*-
- *
+
  * vendor-specific code for SCSI CD-ROM's goes here.
  *
  * This is needed becauce most of the new features (multisession and
  *   - TOSHIBA: Detection and support of multisession CD's.
  *              Some XA-Sector tweaking, required for older drives.
  *
- *   - SONY:   Detection and support of multisession CD's.
+ *   - SONY:    Detection and support of multisession CD's.
  *              added by Thomas Quinot <thomas@cuivre.freenix.fr>
  *
  *   - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to
  *              work with SONY (SCSI3 now)  code.
  *
- *   - HP:     Much like SONY, but a little different... (Thomas)
+ *   - HP:      Much like SONY, but a little different... (Thomas)
  *              HP-Writers only ??? Maybe other CD-Writers work with this too ?
- *             HP 6020 writers now supported.
+ *              HP 6020 writers now supported.
  */
 
 #include <linux/config.h>
 #include "sr.h"
 
 #if 0
-# define DEBUG
+#define DEBUG
 #endif
 
 /* here are some constants to sort the vendors into groups */
 
-#define VENDOR_SCSI3           1   /* default: scsi-3 mmc */
+#define VENDOR_SCSI3           1       /* default: scsi-3 mmc */
 
 #define VENDOR_NEC             2
 #define VENDOR_TOSHIBA         3
-#define VENDOR_WRITER          4   /* pre-scsi3 writers */
+#define VENDOR_WRITER          4       /* pre-scsi3 writers */
 
 #define VENDOR_ID (scsi_CDs[minor].vendor)
 
@@ -66,7 +66,7 @@ void sr_vendor_init(int minor)
        VENDOR_ID = VENDOR_SCSI3;
 #else
        char *vendor = scsi_CDs[minor].device->vendor;
-       char *model  = scsi_CDs[minor].device->model;
+       char *model = scsi_CDs[minor].device->model;
 
        /* default */
        VENDOR_ID = VENDOR_SCSI3;
@@ -77,24 +77,24 @@ void sr_vendor_init(int minor)
        if (scsi_CDs[minor].device->type == TYPE_WORM) {
                VENDOR_ID = VENDOR_WRITER;
 
-       } else if (!strncmp (vendor, "NEC", 3)) {
+       } else if (!strncmp(vendor, "NEC", 3)) {
                VENDOR_ID = VENDOR_NEC;
-               if (!strncmp (model,"CD-ROM DRIVE:25", 15)  ||
-                   !strncmp (model,"CD-ROM DRIVE:36", 15)  ||
-                   !strncmp (model,"CD-ROM DRIVE:83", 15)  ||
-                   !strncmp (model,"CD-ROM DRIVE:84 ",16)
+               if (!strncmp(model, "CD-ROM DRIVE:25", 15) ||
+                   !strncmp(model, "CD-ROM DRIVE:36", 15) ||
+                   !strncmp(model, "CD-ROM DRIVE:83", 15) ||
+                   !strncmp(model, "CD-ROM DRIVE:84 ", 16)
 #if 0
-                       /* my NEC 3x returns the read-raw data if a read-raw
-                          is followed by a read for the same sector - aeb */
-                   || !strncmp (model,"CD-ROM DRIVE:500",16)
+               /* my NEC 3x returns the read-raw data if a read-raw
+                  is followed by a read for the same sector - aeb */
+                   || !strncmp(model, "CD-ROM DRIVE:500", 16)
 #endif
-                  )
+                   )
                        /* these can't handle multisession, may hang */
                        scsi_CDs[minor].cdi.mask |= CDC_MULTI_SESSION;
 
-       } else if (!strncmp (vendor, "TOSHIBA", 7)) {
+       } else if (!strncmp(vendor, "TOSHIBA", 7)) {
                VENDOR_ID = VENDOR_TOSHIBA;
-               
+
        }
 #endif
 }
@@ -105,10 +105,10 @@ void sr_vendor_init(int minor)
 
 int sr_set_blocklength(int minor, int blocklength)
 {
-       unsigned char           *buffer;    /* the buffer for the ioctl */
-       unsigned char           cmd[12];    /* the scsi-command */
-       struct ccs_modesel_head *modesel;
-       int                     rc,density = 0;
+       unsigned char *buffer;  /* the buffer for the ioctl */
+       unsigned char cmd[12];  /* the scsi-command */
+       struct ccs_modesel_head *modesel;
+       int rc, density = 0;
 
 #ifdef CONFIG_BLK_DEV_SR_VENDOR
        if (VENDOR_ID == VENDOR_TOSHIBA)
@@ -116,27 +116,29 @@ int sr_set_blocklength(int minor, int blocklength)
 #endif
 
        buffer = (unsigned char *) scsi_malloc(512);
-       if (!buffer) return -ENOMEM;
+       if (!buffer)
+               return -ENOMEM;
 
 #ifdef DEBUG
-       printk("sr%d: MODE SELECT 0x%x/%d\n",minor,density,blocklength);
+       printk("sr%d: MODE SELECT 0x%x/%d\n", minor, density, blocklength);
 #endif
-       memset(cmd,0,12);
+       memset(cmd, 0, 12);
        cmd[0] = MODE_SELECT;
        cmd[1] = (scsi_CDs[minor].device->lun << 5) | (1 << 4);
        cmd[4] = 12;
-       modesel = (struct ccs_modesel_head*)buffer;
-       memset(modesel,0,sizeof(*modesel));
+       modesel = (struct ccs_modesel_head *) buffer;
+       memset(modesel, 0, sizeof(*modesel));
        modesel->block_desc_length = 0x08;
-       modesel->density           = density;
-       modesel->block_length_med  = (blocklength >> 8 ) & 0xff;
-       modesel->block_length_lo   =  blocklength        & 0xff;
-       if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0)))
-               scsi_CDs[minor].sector_size = blocklength;
+       modesel->density = density;
+       modesel->block_length_med = (blocklength >> 8) & 0xff;
+       modesel->block_length_lo = blocklength & 0xff;
+       if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0))) {
+               scsi_CDs[minor].device->sector_size = blocklength;
+       }
 #ifdef DEBUG
        else
                printk("sr%d: switching blocklength to %d bytes failed\n",
-                      minor,blocklength);
+                      minor, blocklength);
 #endif
        scsi_free(buffer, 512);
        return rc;
@@ -149,28 +151,27 @@ int sr_set_blocklength(int minor, int blocklength)
 
 int sr_cd_check(struct cdrom_device_info *cdi)
 {
-       unsigned long   sector;
-       unsigned char   *buffer;     /* the buffer for the ioctl */
-       unsigned char   cmd[12];     /* the scsi-command */
-       int             rc,no_multi,minor;
+       unsigned long sector;
+       unsigned char *buffer;  /* the buffer for the ioctl */
+       unsigned char cmd[12];  /* the scsi-command */
+       int rc, no_multi, minor;
 
        minor = MINOR(cdi->dev);
        if (scsi_CDs[minor].cdi.mask & CDC_MULTI_SESSION)
                return 0;
-       
-       spin_lock_irq(&io_request_lock);
+
        buffer = (unsigned char *) scsi_malloc(512);
-       spin_unlock_irq(&io_request_lock);
-       if(!buffer) return -ENOMEM;
-       
-       sector   = 0;         /* the multisession sector offset goes here  */
-       no_multi = 0;         /* flag: the drive can't handle multisession */
-       rc       = 0;
-    
-       switch(VENDOR_ID) {
-       
+       if (!buffer)
+               return -ENOMEM;
+
+       sector = 0;             /* the multisession sector offset goes here  */
+       no_multi = 0;           /* flag: the drive can't handle multisession */
+       rc = 0;
+
+       switch (VENDOR_ID) {
+
        case VENDOR_SCSI3:
-               memset(cmd,0,12);
+               memset(cmd, 0, 12);
                cmd[0] = READ_TOC;
                cmd[1] = (scsi_CDs[minor].device->lun << 5);
                cmd[8] = 12;
@@ -180,70 +181,70 @@ int sr_cd_check(struct cdrom_device_info *cdi)
                        break;
                if ((buffer[0] << 8) + buffer[1] < 0x0a) {
                        printk(KERN_INFO "sr%d: Hmm, seems the drive "
-                              "doesn't support multisession CD's\n",minor);
+                          "doesn't support multisession CD's\n", minor);
                        no_multi = 1;
                        break;
                }
                sector = buffer[11] + (buffer[10] << 8) +
-                       (buffer[9] << 16) + (buffer[8] << 24);
+                   (buffer[9] << 16) + (buffer[8] << 24);
                if (buffer[6] <= 1) {
                        /* ignore sector offsets from first track */
                        sector = 0;
                }
                break;
-               
+
 #ifdef CONFIG_BLK_DEV_SR_VENDOR
-       case VENDOR_NEC: {
-               unsigned long min,sec,frame;
-               memset(cmd,0,12);
-               cmd[0] = 0xde;
-               cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03;
-               cmd[2] = 0xb0;
-               rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1);
-               if (rc != 0)
-                       break;
-               if (buffer[14] != 0 && buffer[14] != 0xb0) {
-                       printk(KERN_INFO "sr%d: Hmm, seems the cdrom "
-                              "doesn't support multisession CD's\n",minor);
-                       no_multi = 1;
+       case VENDOR_NEC:{
+                       unsigned long min, sec, frame;
+                       memset(cmd, 0, 12);
+                       cmd[0] = 0xde;
+                       cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03;
+                       cmd[2] = 0xb0;
+                       rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1);
+                       if (rc != 0)
+                               break;
+                       if (buffer[14] != 0 && buffer[14] != 0xb0) {
+                               printk(KERN_INFO "sr%d: Hmm, seems the cdrom "
+                                      "doesn't support multisession CD's\n", minor);
+                               no_multi = 1;
+                               break;
+                       }
+                       min = BCD_TO_BIN(buffer[15]);
+                       sec = BCD_TO_BIN(buffer[16]);
+                       frame = BCD_TO_BIN(buffer[17]);
+                       sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
                        break;
                }
-               min    = BCD_TO_BIN(buffer[15]);
-               sec    = BCD_TO_BIN(buffer[16]);
-               frame  = BCD_TO_BIN(buffer[17]);
-               sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
-               break;
-       }
 
-       case VENDOR_TOSHIBA: {
-               unsigned long min,sec,frame;
+       case VENDOR_TOSHIBA:{
+                       unsigned long min, sec, frame;
 
-               /* we request some disc information (is it a XA-CD ?,
-                * where starts the last session ?) */
-               memset(cmd,0,12);
-               cmd[0] = 0xc7;
-               cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3;
-               rc = sr_do_ioctl(minor, cmd, buffer, 4, 1);
-               if (rc == -EINVAL) {
-                       printk(KERN_INFO "sr%d: Hmm, seems the drive "
-                              "doesn't support multisession CD's\n",minor);
-                       no_multi = 1;
+                       /* we request some disc information (is it a XA-CD ?,
+                        * where starts the last session ?) */
+                       memset(cmd, 0, 12);
+                       cmd[0] = 0xc7;
+                       cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3;
+                       rc = sr_do_ioctl(minor, cmd, buffer, 4, 1);
+                       if (rc == -EINVAL) {
+                               printk(KERN_INFO "sr%d: Hmm, seems the drive "
+                                      "doesn't support multisession CD's\n", minor);
+                               no_multi = 1;
+                               break;
+                       }
+                       if (rc != 0)
+                               break;
+                       min = BCD_TO_BIN(buffer[1]);
+                       sec = BCD_TO_BIN(buffer[2]);
+                       frame = BCD_TO_BIN(buffer[3]);
+                       sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
+                       if (sector)
+                               sector -= CD_MSF_OFFSET;
+                       sr_set_blocklength(minor, 2048);
                        break;
                }
-               if (rc != 0)
-                       break;
-               min    = BCD_TO_BIN(buffer[1]);
-               sec    = BCD_TO_BIN(buffer[2]);
-               frame  = BCD_TO_BIN(buffer[3]);
-               sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
-               if (sector)
-                       sector -= CD_MSF_OFFSET;
-               sr_set_blocklength(minor,2048);
-               break;
-       }
 
        case VENDOR_WRITER:
-               memset(cmd,0,12);
+               memset(cmd, 0, 12);
                cmd[0] = READ_TOC;
                cmd[1] = (scsi_CDs[minor].device->lun << 5);
                cmd[8] = 0x04;
@@ -253,31 +254,29 @@ int sr_cd_check(struct cdrom_device_info *cdi)
                        break;
                }
                if ((rc = buffer[2]) == 0) {
-                       printk (KERN_WARNING
-                               "sr%d: No finished session\n",minor);
+                       printk(KERN_WARNING
+                              "sr%d: No finished session\n", minor);
                        break;
                }
-
-               cmd[0] = READ_TOC; /* Read TOC */
+               cmd[0] = READ_TOC;      /* Read TOC */
                cmd[1] = (scsi_CDs[minor].device->lun << 5);
-               cmd[6] = rc & 0x7f;  /* number of last session */
+               cmd[6] = rc & 0x7f;     /* number of last session */
                cmd[8] = 0x0c;
                cmd[9] = 0x40;
-               rc = sr_do_ioctl(minor, cmd, buffer, 12, 1);    
+               rc = sr_do_ioctl(minor, cmd, buffer, 12, 1);
                if (rc != 0) {
                        break;
                }
-
                sector = buffer[11] + (buffer[10] << 8) +
-                       (buffer[9] << 16) + (buffer[8] << 24);
+                   (buffer[9] << 16) + (buffer[8] << 24);
                break;
-#endif /* CONFIG_BLK_DEV_SR_VENDOR */
+#endif                         /* CONFIG_BLK_DEV_SR_VENDOR */
 
        default:
                /* should not happen */
                printk(KERN_WARNING
-                      "sr%d: unknown vendor code (%i), not initialized ?\n",
-                      minor,VENDOR_ID);
+                  "sr%d: unknown vendor code (%i), not initialized ?\n",
+                      minor, VENDOR_ID);
                sector = 0;
                no_multi = 1;
                break;
@@ -286,16 +285,17 @@ int sr_cd_check(struct cdrom_device_info *cdi)
        scsi_CDs[minor].xa_flag = 0;
        if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(minor))
                scsi_CDs[minor].xa_flag = 1;
-       
-       if (2048 != scsi_CDs[minor].sector_size)
-               sr_set_blocklength(minor,2048);
+
+       if (2048 != scsi_CDs[minor].device->sector_size) {
+               sr_set_blocklength(minor, 2048);
+       }
        if (no_multi)
                cdi->mask |= CDC_MULTI_SESSION;
 
 #ifdef DEBUG
        if (sector)
                printk(KERN_DEBUG "sr%d: multisession offset=%lu\n",
-                      minor,sector);
+                      minor, sector);
 #endif
        scsi_free(buffer, 512);
        return rc;
index 057ed9e03332ad98ce86b1c20d930da4881198e1..dde365229aa7f6f1292c19f809f2a3ea794cb5c6 100644 (file)
@@ -286,15 +286,13 @@ static Scsi_Cmnd *
  st_do_scsi(Scsi_Cmnd * SCpnt, Scsi_Tape * STp, unsigned char *cmd, int bytes,
            int timeout, int retries, int do_wait)
 {
-       unsigned long flags;
        unsigned char *bp;
 
-       spin_lock_irqsave(&io_request_lock, flags);
        if (SCpnt == NULL)
-               if ((SCpnt = scsi_allocate_device(NULL, STp->device, 1)) == NULL) {
+               SCpnt = scsi_allocate_device(STp->device, 1);
+               if (SCpnt == NULL) {
                        printk(KERN_ERR "st%d: Can't get SCSI request.\n",
                                TAPE_NR(STp->devt));
-                       spin_unlock_irqrestore(&io_request_lock, flags);
                        return NULL;
                }
 
@@ -315,7 +313,6 @@ static Scsi_Cmnd *
 
        scsi_do_cmd(SCpnt, (void *) cmd, bp, bytes,
                    st_sleep_done, timeout, retries);
-       spin_unlock_irqrestore(&io_request_lock, flags);
 
        if (do_wait) {
                down(SCpnt->request.sem);
index 91f511dea0b91711b987e822cbecc4e01a253acf..6f37bc11e3b0a7f43787a2dc249f3176ceaed509 100644 (file)
@@ -826,7 +826,7 @@ static inline int port_detect \
       }
    else {
       unsigned long flags;
-      sh[j]->wish_block = TRUE;
+//FIXME//      sh[j]->wish_block = TRUE;
       sh[j]->unchecked_isa_dma = TRUE;
       
       flags=claim_dma_lock();
index 46a1d6acefae7c42cd8f62189627fe61d649c079..5afbf1e3e3070c9554cfa72b8c51ead604fae1c2 100644 (file)
@@ -233,38 +233,42 @@ int init_module(void)
        if (mad16 == 0 && trix == 0 && pas2 == 0 && support == 0)
        {
 #ifdef CONFIG_ISAPNP                   
-               if (sb_probe_isapnp(&config, &config_mpu)<0)
+               if (isapnp == 1 && sb_probe_isapnp(&config, &config_mpu)<0)
                {
                        printk(KERN_ERR "sb_card: No ISAPnP cards found\n");
                        return -EINVAL;
                }
+               else
+               {
+#endif                 
+                       if (io == -1 || dma == -1 || irq == -1)
+                       {
+                               printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n");
+                               return -EINVAL;
+                       }
+                       config.io_base = io;
+                       config.irq = irq;
+                       config.dma = dma;
+                       config.dma2 = dma16;
+                       config.card_subtype = type;
+#ifdef CONFIG_ISAPNP
+               }
 #endif
-       } 
-       if (io == -1 || dma == -1 || irq == -1)
-       {
-               printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n");
-               return -EINVAL;
-       }
-       config.io_base = io;
-       config.irq = irq;
-       config.dma = dma;
-       config.dma2 = dma16;
-       config.card_subtype = type;
-
-       if (!probe_sb(&config))
-               return -ENODEV;
-       attach_sb_card(&config);
+               if (!probe_sb(&config))
+                       return -ENODEV;
+               attach_sb_card(&config);
        
-       if(config.slots[0]==-1)
-               return -ENODEV;
+               if(config.slots[0]==-1)
+                       return -ENODEV;
 #ifdef CONFIG_MIDI
-       if (isapnp == 0) 
-         config_mpu.io_base = mpu_io;
-       if (probe_sbmpu(&config_mpu))
-               sbmpu = 1;
-       if (sbmpu)
-               attach_sbmpu(&config_mpu);
+               if (isapnp == 0) 
+                       config_mpu.io_base = mpu_io;
+               if (probe_sbmpu(&config_mpu))
+                       sbmpu = 1;
+               if (sbmpu)
+                       attach_sbmpu(&config_mpu);
 #endif
+       }
        SOUND_LOCK;
        return 0;
 }
index ff14807a0909b7111fe0df0261f632e2f797bdc8..7cf44a5ec58847b368a515851d98e83f7aca236b 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
+#include <video/fbcon.h>
 
 static int currcon = 0;
 
index b8b7784f03dafe4810a8af5970539d2eab7baf82..150e1c95b044b1ba7bfd8bfa694df9db401ab42e 100644 (file)
@@ -86,10 +86,9 @@ if [ "$CONFIG_INET" = "y" ]; then
    fi
    tristate 'NFS server support' CONFIG_NFSD
    if [ "$CONFIG_NFSD" != "n" ]; then
-      bool '  Emulate SUN NFS server' CONFIG_NFSD_SUN
-   fi
-   if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
-      bool '  Provide NFSv3 server support (EXPERIMENTAL)' CONFIG_NFSD_V3
+      if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+        bool '  Provide NFSv3 server support (EXPERIMENTAL)' CONFIG_NFSD_V3
+      fi
    fi
 
    if [ "$CONFIG_NFS_FS" = "y" -o "$CONFIG_NFSD" = "y" ]; then
index be59d3e65eb7a8f2cbc9a4b8a977823e5a02e86a..129d5917f5d201d8bfd00722a16045c528811788 100644 (file)
  *
  * ------------------------------------------------------------------------- */
 
-#include <linux/string.h>
-#include <linux/sched.h>
 #include "autofs_i.h"
 
 static int autofs_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
-       struct autofs_symlink *sl;
-       int len;
-
-       sl = (struct autofs_symlink *)dentry->d_inode->u.generic_ip;
-       len = sl->len;
-       if (len > buflen) len = buflen;
-       copy_to_user(buffer, sl->data, len);
-       return len;
+       char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
+       return vfs_readlink(dentry, buffer, buflen, s);
 }
 
-static struct dentry * autofs_follow_link(struct dentry *dentry,
-                                       struct dentry *base,
-                                       unsigned int follow)
+static struct dentry *autofs_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
 {
-       struct autofs_symlink *sl;
-
-       sl = (struct autofs_symlink *)dentry->d_inode->u.generic_ip;
-       return lookup_dentry(sl->data, base, follow);
+       char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
+       return vfs_follow_link(dentry, base, flags, s);
 }
 
 struct inode_operations autofs_symlink_inode_operations = {
-       NULL,                   /* file operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       autofs_readlink,        /* readlink */
-       autofs_follow_link,     /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
+       readlink:       autofs_readlink,
+       follow_link:    autofs_follow_link
 };
index bf3da4a4d8368898456eeedd01517b9385231c1d..6bf84ca27fb5d0d14446e5b5901106a32d79c0cd 100644 (file)
@@ -1378,11 +1378,10 @@ out:
        return err;
 }
 
-int block_write_range(struct dentry *dentry, struct page *page,
+int block_write_zero_range(struct inode *inode, struct page *page,
                unsigned zerofrom, unsigned from, unsigned to,
                const char * buf)
 {
-       struct inode *inode = dentry->d_inode;
        unsigned zeroto = 0, block_start, block_end;
        unsigned long block;
        int err = 0, partial = 0, need_balance_dirty = 0;
@@ -1504,7 +1503,7 @@ out:
 
 int block_write_partial_page(struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf)
 {
-       struct dentry *dentry = file->f_dentry;
+       struct inode *inode = file->f_dentry->d_inode;
        int err;
 
        if (!PageLocked(page))
@@ -1514,7 +1513,7 @@ int block_write_partial_page(struct file *file, struct page *page, unsigned long
        if (bytes+offset < 0 || bytes+offset > PAGE_SIZE)
                BUG();
 
-       err = block_write_range(dentry, page, offset,offset,offset+bytes, buf);
+       err = block_write_range(inode, page, offset, bytes, buf);
        return err ? err : bytes;
 }
 
@@ -1525,8 +1524,7 @@ int block_write_partial_page(struct file *file, struct page *page, unsigned long
 
 int block_write_cont_page(struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf)
 {
-       struct dentry *dentry = file->f_dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file->f_dentry->d_inode;
        int err;
        unsigned zerofrom = offset;
 
@@ -1535,7 +1533,8 @@ int block_write_cont_page(struct file *file, struct page *page, unsigned long of
        else if (page->index == (inode->i_size >> PAGE_CACHE_SHIFT) &&
                 offset > (inode->i_size & ~PAGE_CACHE_MASK))
                zerofrom = inode->i_size & ~PAGE_CACHE_MASK;
-       err = block_write_range(dentry, page, zerofrom,offset,offset+bytes,buf);
+       err = block_write_zero_range(inode, page, zerofrom,offset,offset+bytes,
+                                       buf);
        return err ? err : bytes;
 }
 
@@ -1829,9 +1828,8 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size)
  * mark_buffer_uptodate() functions propagate buffer state into the
  * page struct once IO has completed.
  */
-int block_read_full_page(struct dentry * dentry, struct page * page)
+static inline int __block_read_full_page(struct inode *inode, struct page *page)
 {
-       struct inode *inode = dentry->d_inode;
        unsigned long iblock;
        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
        unsigned int blocksize, blocks;
@@ -1890,6 +1888,47 @@ int block_read_full_page(struct dentry * dentry, struct page * page)
        return 0;
 }
 
+int block_read_full_page(struct dentry *dentry, struct page *page)
+{
+       return __block_read_full_page(dentry->d_inode, page);
+}
+
+int block_symlink(struct inode *inode, const char *symname, int len)
+{
+       struct page *page = grab_cache_page(&inode->i_data, 0);
+       mm_segment_t fs;
+       int err = -ENOMEM;
+
+       if (!page)
+               goto fail;
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       err = block_write_range(inode, page, 0, len-1, symname);
+       set_fs(fs);
+       inode->i_size = len-1;
+       if (err)
+               goto fail_write;
+       /*
+        * Notice that we are _not_ going to block here - end of page is
+        * unmapped, so this will only try to map the rest of page, see
+        * that it is unmapped (typically even will not look into inode -
+        * ->i_size will be enough for everything) and zero it out.
+        * OTOH it's obviously correct and should make the page up-to-date.
+        */
+       err = __block_read_full_page(inode, page);
+       wait_on_page(page);
+       page_cache_release(page);
+       if (err < 0)
+               goto fail;
+       mark_inode_dirty(inode);
+       return 0;
+fail_write:
+       UnlockPage(page);
+       page_cache_release(page);
+fail:
+       return err;
+}
+
 /*
  * Try to increase the number of buffers available: the size argument
  * is used to determine what kind of buffers we want.
index ecdf4968d9f60af3291559c25fb77b9a736899f0..872370cf9fcd044d1eb81291527be11c6f0ff330 100644 (file)
@@ -14,9 +14,6 @@
 #include <linux/stat.h>
 #include <linux/errno.h>
 #include <linux/locks.h>
-#include <asm/segment.h>
-#include <asm/uaccess.h>
-#include <linux/string.h>
 
 #include <linux/coda.h>
 #include <linux/coda_linux.h>
 #include <linux/coda_cache.h>
 #include <linux/coda_proc.h>
 
-static int coda_readlink(struct dentry *de, char *buffer, int length);
-static struct dentry *coda_follow_link(struct dentry *, struct dentry *, 
-                                      unsigned int);
-
-struct inode_operations coda_symlink_inode_operations = {
-       NULL,                   /* no file-operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       coda_readlink,          /* readlink */
-       coda_follow_link,       /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-        NULL                    /* revalidate */
-};
-
-static int coda_readlink(struct dentry *de, char *buffer, int length)
+static int coda_symlink_filler(struct dentry *dentry, struct page *page)
 {
-       struct inode *inode = de->d_inode;
-        int len;
-       int error;
-        char *buf;
-       struct coda_inode_info *cp;
-        ENTRY;
-
-        cp = ITOC(inode);
-       coda_vfs_stat.readlink++;
-
-        /* the maximum length we receive is len */
-        if ( length > CODA_MAXPATHLEN ) 
-               len = CODA_MAXPATHLEN;
-       else
-               len = length;
-       CODA_ALLOC(buf, char *, len);
-       if ( !buf ) 
-               return -ENOMEM;
-       
-       error = venus_readlink(inode->i_sb, &(cp->c_fid), buf, &len);
-
-        CDEBUG(D_INODE, "result %s\n", buf);
-       if (! error) {
-               copy_to_user(buffer, buf, len);
-               put_user('\0', buffer + len);
-               error = len;
-       }
-       if ( buf )
-               CODA_FREE(buf, len);
-       return error;
-}
-
-static struct dentry *coda_follow_link(struct dentry *de, struct dentry *base,
-                                      unsigned int follow)
-{
-       struct inode *inode = de->d_inode;
+       struct inode *inode = dentry->d_inode;
        int error;
        struct coda_inode_info *cnp;
-       unsigned int len;
-       char mem[CODA_MAXPATHLEN];
-       char *path;
-       ENTRY;
-       CDEBUG(D_INODE, "(%x/%ld)\n", inode->i_dev, inode->i_ino);
-       
+       unsigned int len = PAGE_SIZE;
+       char *p = (char*)kmap(page);
+
         cnp = ITOC(inode);
        coda_vfs_stat.follow_link++;
 
-       len = CODA_MAXPATHLEN;
-       error = venus_readlink(inode->i_sb, &(cnp->c_fid), mem, &len);
-
-       if (error) {
-               dput(base);
-               return ERR_PTR(error);
-       }
-       len = strlen(mem);
-       path = kmalloc(len + 1, GFP_KERNEL);
-       if (!path) {
-               dput(base);
-               return ERR_PTR(-ENOMEM);
-       }
-       memcpy(path, mem, len);
-       path[len] = 0;
-
-       base = lookup_dentry(path, base, follow);
-       kfree(path);
-       return base;
+       error = venus_readlink(inode->i_sb, &(cnp->c_fid), p, &len);
+       if (error)
+               goto fail;
+       SetPageUptodate(page);
+       kunmap(page);
+       UnlockPage(page);
+       return 0;
+
+fail:
+       SetPageError(page);
+       kunmap(page);
+       UnlockPage(page);
+       return error;
 }
+
+struct inode_operations coda_symlink_inode_operations = {
+       readlink:       page_readlink,
+       follow_link:    page_follow_link,
+       readpage:       coda_symlink_filler
+};
index 21a636a474891af0cd888a1d17273bd370efbb40..b4f952d07301284b7d80f8fb5de8af551efb0f2d 100644 (file)
@@ -713,7 +713,9 @@ void ext2_read_inode (struct inode * inode)
        else if (S_ISDIR(inode->i_mode))
                inode->i_op = &ext2_dir_inode_operations;
        else if (S_ISLNK(inode->i_mode))
-               inode->i_op = &ext2_symlink_inode_operations;
+               inode->i_op = inode->i_blocks
+                               ?&ext2_symlink_inode_operations
+                               :&ext2_fast_symlink_inode_operations;
        else 
                init_special_inode(inode, inode->i_mode,
                                   le32_to_cpu(raw_inode->i_block[0]));
index 81a4368484af26c004175fdad9011a68f494f682..87f39796a059e94583963be4662f931d4c5fe6ff 100644 (file)
@@ -677,48 +677,32 @@ end_unlink:
 
 int ext2_symlink (struct inode * dir, struct dentry *dentry, const char * symname)
 {
-       struct ext2_dir_entry_2 * de;
        struct inode * inode;
-       struct buffer_head * bh = NULL, * name_block = NULL;
-       char * link;
-       int i, l, err = -EIO;
-       char c;
+       struct ext2_dir_entry_2 * de;
+       struct buffer_head * bh = NULL;
+       int l, err;
 
-       if (!(inode = ext2_new_inode (dir, S_IFLNK, &err))) {
-               return err;
-       }
-       inode->i_mode = S_IFLNK | S_IRWXUGO;
-       inode->i_op = &ext2_symlink_inode_operations;
-       for (l = 0; l < inode->i_sb->s_blocksize - 1 &&
-            symname [l]; l++)
-               ;
-       if (l >= sizeof (inode->u.ext2_i.i_data)) {
-
-               ext2_debug ("l=%d, normal symlink\n", l);
-
-               name_block = ext2_bread (inode, 0, 1, &err);
-               if (!name_block) {
-                       inode->i_nlink--;
-                       mark_inode_dirty(inode);
-                       iput (inode);
-                       return err;
-               }
-               link = name_block->b_data;
-       } else {
-               link = (char *) inode->u.ext2_i.i_data;
+       err = -ENAMETOOLONG;
+       l = strlen(symname)+1;
+       if (l > dir->i_sb->s_blocksize)
+               goto out;
 
-               ext2_debug ("l=%d, fast symlink\n", l);
+       err = -EIO;
+       if (!(inode = ext2_new_inode (dir, S_IFLNK, &err)))
+               goto out;
 
+       inode->i_mode = S_IFLNK | S_IRWXUGO;
+
+       if (l > sizeof (inode->u.ext2_i.i_data)) {
+               inode->i_op = &ext2_symlink_inode_operations;
+               err = block_symlink(inode, symname, l);
+               if (err)
+                       goto out_no_entry;
+       } else {
+               inode->i_op = &ext2_fast_symlink_inode_operations;
+               memcpy((char*)&inode->u.ext2_i.i_data,symname,l);
+               inode->i_size = l-1;
        }
-       i = 0;
-       while (i < inode->i_sb->s_blocksize - 1 && (c = *(symname++)))
-               link[i++] = c;
-       link[i] = 0;
-       if (name_block) {
-               mark_buffer_dirty(name_block, 1);
-               brelse (name_block);
-       }
-       inode->i_size = i;
        mark_inode_dirty(inode);
 
        bh = ext2_add_entry (dir, dentry->d_name.name, dentry->d_name.len, &de, &err);
index c830d5baa3dfbea3f15703ca6399bd5e805b184d..18fbbb368b4eb2ddff70e821f2d65645d70899cd 100644 (file)
  */
 
 #include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/ext2_fs.h>
 
-
-
-static int ext2_readlink (struct dentry *, char *, int);
-static struct dentry *ext2_follow_link(struct dentry *, struct dentry *, unsigned int);
-
-/*
- * symlinks can't do much...
- */
-struct inode_operations ext2_symlink_inode_operations = {
-       NULL,                   /* no file-operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       ext2_readlink,          /* readlink */
-       ext2_follow_link,       /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
-};
-
-static struct dentry * ext2_follow_link(struct dentry * dentry,
-                                       struct dentry *base,
-                                       unsigned int follow)
+static int ext2_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head * bh = NULL;
-       int error;
-       char * link;
-
-       link = (char *) inode->u.ext2_i.i_data;
-       if (inode->i_blocks) {
-               if (!(bh = ext2_bread (inode, 0, 0, &error))) {
-                       dput(base);
-                       return ERR_PTR(-EIO);
-               }
-               link = bh->b_data;
-       }
-       UPDATE_ATIME(inode);
-       base = lookup_dentry(link, base, follow);
-       if (bh)
-               brelse(bh);
-       return base;
+       char *s = (char *)dentry->d_inode->u.ext2_i.i_data;
+       return vfs_readlink(dentry, buffer, buflen, s);
 }
 
-static int ext2_readlink (struct dentry * dentry, char * buffer, int buflen)
+static struct dentry *ext2_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
 {
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head * bh = NULL;
-       char * link;
-       int i;
-
-       if (buflen > inode->i_sb->s_blocksize - 1)
-               buflen = inode->i_sb->s_blocksize - 1;
+       char *s = (char *)dentry->d_inode->u.ext2_i.i_data;
+       return vfs_follow_link(dentry, base, flags, s);
+}
 
-       link = (char *) inode->u.ext2_i.i_data;
-       if (inode->i_blocks) {
-               int err;
-               bh = ext2_bread (inode, 0, 0, &err);
-               if (!bh) {
-                       if(err < 0) /* indicate type of error */
-                               return err;
-                       return 0;
-               }
-               link = bh->b_data;
-       }
+struct inode_operations ext2_fast_symlink_inode_operations = {
+       readlink:       ext2_readlink,
+       follow_link:    ext2_follow_link,
+};
 
-       i = 0;
-       while (i < buflen && link[i])
-               i++;
-       if (copy_to_user(buffer, link, i))
-               i = -EFAULT;
-       if (bh)
-               brelse (bh);
-       return i;
-}
+struct inode_operations ext2_symlink_inode_operations = {
+       readlink:       page_readlink,
+       follow_link:    page_follow_link,
+       get_block:      ext2_get_block,
+       readpage:       block_read_full_page,
+};
index 255267da8f2f5e6946bcf8930bc7e153de01074d..fbde959d50038e8ca5a0a4583434b8a6c159fcd4 100644 (file)
@@ -215,6 +215,17 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
                case F_SETLKW:
                        err = fcntl_setlk(fd, cmd, (struct flock *) arg);
                        break;
+#if BITS_PER_LONG == 32 /* LFS only on 32 bit platforms */
+               case F_GETLK64:
+                       err = fcntl_getlk64(fd, (struct flock64 *) arg);
+                       break;
+               case F_SETLK64:
+                       err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
+                       break;
+               case F_SETLKW64:
+                       err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
+                       break;
+#endif
                case F_GETOWN:
                        /*
                         * XXX If f_owner is a process group, the
index 73caf63492f6a19271d4718caf952209dca5c274..ca7677cb7da7458005186dca096759aaa23f8979 100644 (file)
@@ -93,12 +93,12 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove)
        struct nlm_block        **head, *block;
        struct file_lock        *fl;
 
-       dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %ld-%ld ty=%d\n",
+       dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
                                file, lock->fl.fl_pid, lock->fl.fl_start,
                                lock->fl.fl_end, lock->fl.fl_type);
        for (head = &nlm_blocked; (block = *head); head = &block->b_next) {
                fl = &block->b_call.a_args.lock.fl;
-               dprintk("       check f=%p pd=%d %ld-%ld ty=%d\n",
+               dprintk("       check f=%p pd=%d %Ld-%Ld ty=%d\n",
                                block->b_file, fl->fl_pid, fl->fl_start,
                                fl->fl_end, fl->fl_type);
                if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
@@ -282,7 +282,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
        struct nlm_block        *block;
        int                     error;
 
-       dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %ld-%ld, bl=%d)\n",
+       dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
                                file->f_file.f_dentry->d_inode->i_dev,
                                file->f_file.f_dentry->d_inode->i_ino,
                                lock->fl.fl_type, lock->fl.fl_pid,
@@ -355,7 +355,7 @@ nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
 {
        struct file_lock        *fl;
 
-       dprintk("lockd: nlmsvc_testlock(%04x/%ld, ty=%d, %ld-%ld)\n",
+       dprintk("lockd: nlmsvc_testlock(%04x/%ld, ty=%d, %Ld-%Ld)\n",
                                file->f_file.f_dentry->d_inode->i_dev,
                                file->f_file.f_dentry->d_inode->i_ino,
                                lock->fl.fl_type,
@@ -363,7 +363,7 @@ nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
                                lock->fl.fl_end);
 
        if ((fl = posix_test_lock(&file->f_file, &lock->fl)) != NULL) {
-               dprintk("lockd: conflicting lock(ty=%d, %ld-%ld)\n",
+               dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
                                fl->fl_type, fl->fl_start, fl->fl_end);
                conflock->caller = "somehost";  /* FIXME */
                conflock->oh.len = 0;           /* don't return OH info */
@@ -386,7 +386,7 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
 {
        int     error;
 
-       dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %ld-%ld)\n",
+       dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %Ld-%Ld)\n",
                                file->f_file.f_dentry->d_inode->i_dev,
                                file->f_file.f_dentry->d_inode->i_ino,
                                lock->fl.fl_pid,
@@ -414,7 +414,7 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
 {
        struct nlm_block        *block;
 
-       dprintk("lockd: nlmsvc_cancel(%04x/%ld, pi=%d, %ld-%ld)\n",
+       dprintk("lockd: nlmsvc_cancel(%04x/%ld, pi=%d, %Ld-%Ld)\n",
                                file->f_file.f_dentry->d_inode->i_dev,
                                file->f_file.f_dentry->d_inode->i_ino,
                                lock->fl.fl_pid,
index 85fb7c7290909289437fffe4e97b8824c135c073..e3ec35109ea9adc112193d47d901d23a988b51ef 100644 (file)
@@ -142,7 +142,7 @@ nlm_decode_lock(u32 *p, struct nlm_lock *lock)
        fl->fl_pid   = ntohl(*p++);
        fl->fl_flags = FL_POSIX;
        fl->fl_type  = F_RDLCK;         /* as good as anything else */
-       fl->fl_start = ntohl(*p++);
+       fl->fl_start = (u_long)ntohl(*p++); // Up to 4G-1
        len = ntohl(*p++);
        if (len == 0 || (fl->fl_end = fl->fl_start + len - 1) < 0)
                fl->fl_end = NLM_OFFSET_MAX;
@@ -163,11 +163,11 @@ nlm_encode_lock(u32 *p, struct nlm_lock *lock)
                return NULL;
 
        *p++ = htonl(fl->fl_pid);
-       *p++ = htonl(lock->fl.fl_start);
+       *p++ = htonl((u_long)lock->fl.fl_start);
        if (lock->fl.fl_end == NLM_OFFSET_MAX)
                *p++ = xdr_zero;
        else
-               *p++ = htonl(lock->fl.fl_end - lock->fl.fl_start + 1);
+               *p++ = htonl((u_long)(lock->fl.fl_end - lock->fl.fl_start + 1));
 
        return p;
 }
@@ -192,11 +192,11 @@ nlm_encode_testres(u32 *p, struct nlm_res *resp)
                if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
                        return 0;
 
-               *p++ = htonl(fl->fl_start);
+               *p++ = htonl((u_long)fl->fl_start);
                if (fl->fl_end == NLM_OFFSET_MAX)
                        *p++ = xdr_zero;
                else
-                       *p++ = htonl(fl->fl_end - fl->fl_start + 1);
+                       *p++ = htonl((u_long)(fl->fl_end - fl->fl_start + 1));
        }
 
        return p;
@@ -425,7 +425,7 @@ nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
 
                fl->fl_flags = FL_POSIX;
                fl->fl_type  = excl? F_WRLCK : F_RDLCK;
-               fl->fl_start = ntohl(*p++);
+               fl->fl_start = (u_long)ntohl(*p++);
                len = ntohl(*p++);
                if (len == 0 || (fl->fl_end = fl->fl_start + len - 1) < 0)
                        fl->fl_end = NLM_OFFSET_MAX;
index 802958a68b9feed041e33f5e3319cbb58cdc2190..15fc586b9f5155c3a9614ad531154e9cf9d4461a 100644 (file)
 
 #include <asm/uaccess.h>
 
-#define OFFSET_MAX     ((off_t)LONG_MAX)       /* FIXME: move elsewhere? */
+#define OFFSET_MAX     (~(loff_t)0ULL >> 1)    /* FIXME: move elsewhere? */
 
 static int flock_make_lock(struct file *filp, struct file_lock *fl,
                               unsigned int cmd);
 static int posix_make_lock(struct file *filp, struct file_lock *fl,
-                              struct flock *l);
+                              struct flock64 *l);
 static int flock_locks_conflict(struct file_lock *caller_fl,
                                struct file_lock *sys_fl);
 static int posix_locks_conflict(struct file_lock *caller_fl,
@@ -195,7 +195,7 @@ static void locks_insert_block(struct file_lock *blocker,
 
        if (waiter->fl_prevblock) {
                printk(KERN_ERR "locks_insert_block: remove duplicated lock "
-                       "(pid=%d %ld-%ld type=%d)\n",
+                       "(pid=%d %Ld-%Ld type=%d)\n",
                        waiter->fl_pid, waiter->fl_start,
                        waiter->fl_end, waiter->fl_type);
                locks_delete_block(waiter->fl_prevblock, waiter);
@@ -319,18 +319,14 @@ out:
 /* Report the first existing lock that would conflict with l.
  * This implements the F_GETLK command of fcntl().
  */
-int fcntl_getlk(unsigned int fd, struct flock *l)
+static int do_fcntl_getlk(unsigned int fd, struct flock64 *flock)
 {
        struct file *filp;
        struct file_lock *fl,file_lock;
-       struct flock flock;
        int error;
 
-       error = -EFAULT;
-       if (copy_from_user(&flock, l, sizeof(flock)))
-               goto out;
        error = -EINVAL;
-       if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
+       if ((flock->l_type != F_RDLCK) && (flock->l_type != F_WRLCK))
                goto out;
 
        error = -EBADF;
@@ -342,7 +338,7 @@ int fcntl_getlk(unsigned int fd, struct flock *l)
        if (!filp->f_dentry || !filp->f_dentry->d_inode)
                goto out_putf;
 
-       if (!posix_make_lock(filp, &file_lock, &flock))
+       if (!posix_make_lock(filp, &file_lock, flock))
                goto out_putf;
 
        if (filp->f_op->lock) {
@@ -358,18 +354,15 @@ int fcntl_getlk(unsigned int fd, struct flock *l)
                fl = posix_test_lock(filp, &file_lock);
        }
  
-       flock.l_type = F_UNLCK;
+       flock->l_type = F_UNLCK;
        if (fl != NULL) {
-               flock.l_pid = fl->fl_pid;
-               flock.l_start = fl->fl_start;
-               flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
+               flock->l_pid = fl->fl_pid;
+               flock->l_start = fl->fl_start;
+               flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
                        fl->fl_end - fl->fl_start + 1;
-               flock.l_whence = 0;
-               flock.l_type = fl->fl_type;
+               flock->l_whence = 0;
+               flock->l_type = fl->fl_type;
        }
-       error = -EFAULT;
-       if (!copy_to_user(l, &flock, sizeof(flock)))
-               error = 0;
   
 out_putf:
        fput(filp);
@@ -380,22 +373,14 @@ out:
 /* Apply the lock described by l to an open file descriptor.
  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
  */
-int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
+static int do_fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock64 *flock)
 {
        struct file *filp;
        struct file_lock file_lock;
-       struct flock flock;
        struct dentry * dentry;
        struct inode *inode;
        int error;
 
-       /*
-        * This might block, so we do it before checking the inode.
-        */
-       error = -EFAULT;
-       if (copy_from_user(&flock, l, sizeof(flock)))
-               goto out;
-
        /* Get arguments and validate them ...
         */
 
@@ -428,11 +413,11 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
        }
 
        error = -EINVAL;
-       if (!posix_make_lock(filp, &file_lock, &flock))
+       if (!posix_make_lock(filp, &file_lock, flock))
                goto out_putf;
        
        error = -EBADF;
-       switch (flock.l_type) {
+       switch (flock->l_type) {
        case F_RDLCK:
                if (!(filp->f_mode & FMODE_READ))
                        goto out_putf;
@@ -478,6 +463,95 @@ out:
        return error;
 }
 
+int fcntl_getlk(unsigned int fd, struct flock *l)
+{
+       struct flock flock;
+       struct flock64 fl64;
+       int error;
+
+       error = -EFAULT;
+       if (copy_from_user(&flock, l, sizeof(flock)))
+               goto out;
+
+       /* Convert to 64-bit offsets for internal use */
+       fl64.l_type     = flock.l_type;
+       fl64.l_whence   = flock.l_whence;
+       fl64.l_start    = (unsigned long)flock.l_start;
+       fl64.l_len      = (unsigned long)flock.l_len;
+       fl64.l_pid      = flock.l_pid;
+
+       error = do_fcntl_getlk(fd, &fl64);
+       if (error)
+               goto out;
+
+       /* and back again... */
+       flock.l_type    = fl64.l_type;
+       flock.l_whence  = fl64.l_whence;
+       flock.l_start   = (unsigned long)fl64.l_start;
+       flock.l_len     = (unsigned long)fl64.l_len;
+       flock.l_pid     = fl64.l_pid;
+
+       if (copy_to_user(l, &flock, sizeof(flock)))
+               error = -EFAULT;
+out:
+       return error;
+}
+
+int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
+{
+       struct flock flock;
+       struct flock64 fl64;
+       int error;
+
+       error = -EFAULT;
+       if (copy_from_user(&flock, l, sizeof(flock)))
+               goto out;
+
+       /* Convert to 64-bit offsets for internal use */
+       fl64.l_type     = flock.l_type;
+       fl64.l_whence   = flock.l_whence;
+       fl64.l_start    = (unsigned long)flock.l_start;
+       fl64.l_len      = (unsigned long)flock.l_len;
+       fl64.l_pid      = flock.l_pid;
+
+       error = do_fcntl_setlk(fd, cmd, &fl64);
+out:
+       return error;
+}
+
+#if BITS_PER_LONG == 32        /* LFS versions for 32 bit platforms */
+int fcntl_getlk64(unsigned int fd, struct flock64 *l)
+{
+       struct flock64 fl64;
+       int error;
+
+       error = -EFAULT;
+       if (copy_from_user(&fl64, l, sizeof(fl64)))
+               goto out;
+
+       error = do_fcntl_getlk(fd, &fl64);
+
+       if (!error && copy_to_user(l, &fl64, sizeof(fl64)))
+               error = -EFAULT;
+out:
+       return error;
+}
+
+int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l)
+{
+       struct flock64 fl64;
+       int error;
+
+       error = -EFAULT;
+       if (copy_from_user(&fl64, l, sizeof(fl64)))
+               goto out;
+
+       error = do_fcntl_setlk(fd, cmd, &fl64);
+out:
+       return error;
+}
+#endif
+
 /*
  * This function is called when the file is being removed
  * from the task's fd array.
@@ -647,7 +721,7 @@ repeat:
  * style lock.
  */
 static int posix_make_lock(struct file *filp, struct file_lock *fl,
-                          struct flock *l)
+                          struct flock64 *l)
 {
        off_t start;
 
@@ -1200,7 +1274,7 @@ static char *lock_get_status(struct file_lock *fl, int id, char *pfx)
                p += sprintf(p, "FLOCK  ADVISORY  ");
        }
        p += sprintf(p, "%s ", (fl->fl_type == F_RDLCK) ? "READ " : "WRITE");
-       p += sprintf(p, "%d %s:%ld %ld %ld ",
+       p += sprintf(p, "%d %s:%ld %Ld %Ld ",
                     fl->fl_pid,
                     kdevname(inode->i_dev), inode->i_ino, fl->fl_start,
                     fl->fl_end);
index b268a467650df2524cee75912426c0bd628bd441..a3e73dbcdfb55f6001b812a806af831d8ae0c8ef 100644 (file)
@@ -461,46 +461,43 @@ int minix_symlink(struct inode * dir, struct dentry *dentry,
 {
        struct minix_dir_entry * de;
        struct inode * inode = NULL;
-       struct buffer_head * bh = NULL, * name_block = NULL;
+       struct buffer_head * bh = NULL;
        int i;
-       char c;
-
-       inode = minix_new_inode(dir, &i);
-       if (i)
-               return i;
+       int err;
+
+       err = -ENAMETOOLONG;
+       i = strlen(symname)+1;
+       if (i>1024)
+               goto out;
+       inode = minix_new_inode(dir, &err);
+       if (err)
+               goto out;
+       err = -ENOSPC;
        if (!inode)
-               return -ENOSPC;
+               goto out;
 
        inode->i_mode = S_IFLNK | 0777;
        inode->i_op = &minix_symlink_inode_operations;
-       name_block = minix_bread(inode,0,1);
-       if (!name_block) {
-               inode->i_nlink--;
-               mark_inode_dirty(inode);
-               iput(inode);
-               return -ENOSPC;
-       }
-       i = 0;
-       while (i < 1023 && (c=*(symname++)))
-               name_block->b_data[i++] = c;
-       name_block->b_data[i] = 0;
-       mark_buffer_dirty(name_block, 1);
-       brelse(name_block);
-       inode->i_size = i;
-       mark_inode_dirty(inode);
-       i = minix_add_entry(dir, dentry->d_name.name,
+       err = block_symlink(inode, symname, i);
+       if (err)
+               goto fail;
+
+       err = minix_add_entry(dir, dentry->d_name.name,
                            dentry->d_name.len, &bh, &de);
-       if (i) {
-               inode->i_nlink--;
-               mark_inode_dirty(inode);
-               iput(inode);
-               return i;
-       }
+       if (err)
+               goto fail;
+
        de->inode = inode->i_ino;
        mark_buffer_dirty(bh, 1);
        brelse(bh);
        d_instantiate(dentry, inode);
-       return 0;
+out:
+       return err;
+fail:
+       inode->i_nlink--;
+       mark_inode_dirty(inode);
+       iput(inode);
+       goto out;
 }
 
 int minix_link(struct dentry * old_dentry, struct inode * dir,
index 2fa5a34a2aaa416d576df0b0043fae4cd1a61ac9..7e1d03a10c235e8494e53dd95e03b28de615c5e9 100644 (file)
@@ -4,77 +4,19 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
  *  minix symlink handling code
+ *
+ *  Code removed. 1999, AV ;-)
  */
 
-#include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/minix_fs.h>
-#include <linux/stat.h>
-
-#include <asm/uaccess.h>
-
-static int minix_readlink(struct dentry *, char *, int);
-static struct dentry *minix_follow_link(struct dentry *, struct dentry *, unsigned int);
 
 /*
  * symlinks can't do much...
  */
 struct inode_operations minix_symlink_inode_operations = {
-       NULL,                   /* no file-operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       minix_readlink,         /* readlink */
-       minix_follow_link,      /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
+       readlink:       page_readlink,
+       follow_link:    page_follow_link,
+       get_block:      minix_get_block,
+       readpage:       block_read_full_page
 };
-
-static struct dentry * minix_follow_link(struct dentry * dentry,
-                                       struct dentry * base,
-                                       unsigned int follow)
-{
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head * bh;
-
-       bh = minix_bread(inode, 0, 0);
-       if (!bh) {
-               dput(base);
-               return ERR_PTR(-EIO);
-       }
-       UPDATE_ATIME(inode);
-       base = lookup_dentry(bh->b_data, base, follow);
-       brelse(bh);
-       return base;
-}
-
-static int minix_readlink(struct dentry * dentry, char * buffer, int buflen)
-{
-       struct buffer_head * bh;
-       int i;
-       char c;
-
-       if (buflen > 1023)
-               buflen = 1023;
-       bh = minix_bread(dentry->d_inode, 0, 0);
-       if (!bh)
-               return 0;
-       i = 0;
-       while (i<buflen && (c = bh->b_data[i])) {
-               i++;
-               put_user(c,buffer++);
-       }
-       brelse(bh);
-       return i;
-}
index eba55a751f745519f2fc218ab332eb1cf2a7eaa8..1aac7fb0cebd56512b4ef7fccda6b33a1184170e 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/proc_fs.h>
 #include <linux/smp_lock.h>
 #include <linux/quotaops.h>
+#include <linux/pagemap.h>
 
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
@@ -1419,3 +1420,85 @@ asmlinkage long sys_rename(const char * oldname, const char * newname)
        unlock_kernel();
        return error;
 }
+
+int vfs_readlink(struct dentry *dentry, char *buffer, int buflen, char *link)
+{
+       u32 len;
+
+       len = PTR_ERR(link);
+       if (IS_ERR(link))
+               goto out;
+
+       len = strlen(link);
+       if (len > buflen)
+               len = buflen;
+       copy_to_user(buffer, link, len);
+out:
+       return len;
+}
+
+struct dentry *
+vfs_follow_link(struct dentry *dentry, struct dentry *base,
+unsigned int follow, char *link)
+{
+       struct dentry *result;
+       UPDATE_ATIME(dentry->d_inode);
+
+       if (IS_ERR(link))
+               goto fail;
+
+       result = lookup_dentry(link, base, follow);
+       return result;
+
+fail:
+       dput(base);
+       return (struct dentry *)link;
+}
+
+/* get the link contents into pagecache */
+static char *page_getlink(struct dentry * dentry, struct page **ppage)
+{
+       struct page * page;
+       page = read_cache_page(&dentry->d_inode->i_data, 0,
+                               (filler_t *)dentry->d_inode->i_op->readpage,
+                               dentry);
+       if (IS_ERR(page))
+               goto sync_fail;
+       wait_on_page(page);
+       if (!Page_Uptodate(page))
+               goto async_fail;
+       *ppage = page;
+       return (char*) kmap(page);
+
+async_fail:
+       page_cache_release(page);
+       return ERR_PTR(-EIO);
+
+sync_fail:
+       return (char*)page;
+}
+
+int page_readlink(struct dentry *dentry, char *buffer, int buflen)
+{
+       struct page *page = NULL;
+       char *s = page_getlink(dentry, &page);
+       int res = vfs_readlink(dentry,buffer,buflen,s);
+       if (page) {
+               kunmap(page);
+               page_cache_release(page);
+       }
+       return res;
+}
+
+struct dentry *
+page_follow_link(struct dentry *dentry, struct dentry *base, unsigned int follow)
+{
+       struct page *page = NULL;
+       char *s = page_getlink(dentry, &page);
+       struct dentry *res = vfs_follow_link(dentry,base,follow,s);
+       if (page) {
+               kunmap(page);
+               page_cache_release(page);
+       }
+       return res;
+}
index 7bb207067b1349715736aef2f43fc703079b35a0..788307cc261b980d12a599c65707e12695789d9a 100644 (file)
@@ -418,7 +418,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
        struct dentry *dentry = filp->f_dentry;
        struct inode *inode = dentry->d_inode;
-       struct page *page, **hash;
+       struct page *page;
        long offset;
        int res;
 
@@ -432,8 +432,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if ((offset = nfs_readdir_offset(inode, filp->f_pos)) < 0)
                goto no_dirent_page;
 
-       hash = page_hash(&inode->i_data, offset);
-       page = __find_get_page(&inode->i_data, offset, hash);
+       page = find_get_page(&inode->i_data, offset);
        if (!page)
                goto no_dirent_page;
        if (!Page_Uptodate(page))
index 00279fc6a4b9e1bdd1bebd456125205d20906a59..7d2901a565489d71511359e01e7ed356d80be90f 100644 (file)
@@ -226,7 +226,7 @@ nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
        dprintk("NFS: nfs_lock(f=%4x/%ld, t=%x, fl=%x, r=%ld:%ld)\n",
                        inode->i_dev, inode->i_ino,
                        fl->fl_type, fl->fl_flags,
-                       fl->fl_start, fl->fl_end);
+                       (unsigned long) fl->fl_start, (unsigned long) fl->fl_end);
 
        if (!inode)
                return -EINVAL;
index 1dd02bb78a00e944bc1a8d86eb44efe1b46889d5..ab0ebf1454aa9e38f505852e065f545672793217 100644 (file)
 #include <linux/malloc.h>
 #include <linux/string.h>
 
-#include <asm/uaccess.h>
-
-static int nfs_readlink(struct dentry *, char *, int);
-static struct dentry *nfs_follow_link(struct dentry *, struct dentry *, unsigned int);
-
-/*
- * symlinks can't do much...
- */
-struct inode_operations nfs_symlink_inode_operations = {
-       NULL,                   /* no file-operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       nfs_readlink,           /* readlink */
-       nfs_follow_link,        /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
-};
-
 /* Symlink caching in the page cache is even more simplistic
  * and straight-forward than readdir caching.
  */
@@ -91,12 +62,12 @@ static char *nfs_getlink(struct dentry *dentry, struct page **ppage)
        if (IS_ERR(page))
                goto read_failed;
        if (!Page_Uptodate(page))
-               goto followlink_read_error;
+               goto getlink_read_error;
        *ppage = page;
        p = (u32 *) kmap(page);
        return (char*)(p+1);
                
-followlink_read_error:
+getlink_read_error:
        page_cache_release(page);
        return ERR_PTR(-EIO);
 read_failed:
@@ -106,41 +77,31 @@ read_failed:
 static int nfs_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
        struct page *page = NULL;
-       u32 len;
-       char *s = nfs_getlink(dentry, &page);
-       UPDATE_ATIME(dentry->d_inode);
-
-       len = PTR_ERR(s);
-       if (IS_ERR(s))
-               goto out;
-
-       len = strlen(s);
-       if (len > buflen)
-               len = buflen;
-       copy_to_user(buffer, s, len);
-       kunmap(page);
-       page_cache_release(page);
-out:
-       return len;
+       int res = vfs_readlink(dentry,buffer,buflen,nfs_getlink(dentry,&page));
+       if (page) {
+               kunmap(page);
+               page_cache_release(page);
+       }
+       return res;
 }
 
 static struct dentry *
 nfs_follow_link(struct dentry *dentry, struct dentry *base, unsigned int follow)
 {
-       struct dentry *result;
        struct page *page = NULL;
-       char *s = nfs_getlink(dentry, &page);
-       UPDATE_ATIME(dentry->d_inode);
-
-       if (IS_ERR(s))
-               goto fail;
-
-       result = lookup_dentry(s, base, follow);
-
-       kunmap(page);
-       page_cache_release(page);
-       return result;
-
-fail:
-       return (struct dentry *)s;
+       struct dentry *res = vfs_follow_link(dentry, base, follow,
+                                            nfs_getlink(dentry, &page));
+       if (page) {
+               kunmap(page);
+               page_cache_release(page);
+       }
+       return res;
 }
+
+/*
+ * symlinks can't do much...
+ */
+struct inode_operations nfs_symlink_inode_operations = {
+       readlink:       nfs_readlink,
+       follow_link:    nfs_follow_link,
+};
index d5b1537db0f204b51f5c07038c2664492c5eb393..14b55c4501ba76eeeb393da8f320461f9377ad09 100644 (file)
@@ -105,20 +105,6 @@ out:
        return exp;
 }
 
-/*
- * Check whether there are any exports for a device.
- */
-static int
-exp_device_in_use(kdev_t dev)
-{
-       struct svc_client *clp;
-
-       for (clp = clients; clp; clp = clp->cl_next) {
-               if (exp_find(clp, dev))
-                       return 1;
-       }
-       return 0;
-}
 
 /*
  * Look up the device of the parent fs.
@@ -286,6 +272,12 @@ exp_export(struct nfsctl_export *nxp)
                goto finish;
 
        err = -EINVAL;
+       if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) ||
+           inode->i_sb->s_op->read_inode == NULL) {
+               dprintk("exp_export: export of invalid fs type.\n");
+               goto finish;
+       }
+
        if ((parent = exp_child(clp, dev, dentry)) != NULL) {
                dprintk("exp_export: export not valid (Rule 3).\n");
                goto finish;
index 9daf3a8a09ddef6cf60ddd4314c04ad46f11ff7e..3b0de5545286b20dc77359005adaad18cd53fd86 100644 (file)
@@ -37,8 +37,8 @@ static int    nfs3_ftypes[] = {
        S_IFBLK,                /* NF3BLK */
        S_IFCHR,                /* NF3CHR */
        S_IFLNK,                /* NF3LNK */
-       S_IFIFO,                /* NF3FIFO */
        S_IFSOCK,               /* NF3SOCK */
+       S_IFIFO,                /* NF3FIFO */
 };
 
 /*
@@ -344,7 +344,7 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp, struct nfsd3_mknodargs *argp,
                        return nfserr_inval;
                rdev = ((argp->major) << 8) | (argp->minor);
        } else
-               if (argp->ftype != NF3SOCK || argp->ftype != NF3FIFO)
+               if (argp->ftype != NF3SOCK && argp->ftype != NF3FIFO)
                        return nfserr_inval;
 
        type = nfs3_ftypes[argp->ftype];
index d952ffac90a28e2138c6db1aaf8fa33b0c06a618..aaea951e71b94f2ab1dd2d1a4b5e7be27f92fc05 100644 (file)
@@ -127,26 +127,6 @@ fh_lock_parent(struct svc_fh *parent_fh, struct dentry *dchild)
        return nfserr_noent;
 }
 
-/*
- * Deny access to certain file systems
- */
-static inline int
-fs_off_limits(struct super_block *sb)
-{
-       return !sb || sb->s_magic == NFS_SUPER_MAGIC
-                  || sb->s_magic == PROC_SUPER_MAGIC;
-}
-
-/*
- * Check whether directory is a mount point, but it is all right if
- * this is precisely the local mount point being exported.
- */
-static inline int
-nfsd_iscovered(struct dentry *dentry, struct svc_export *exp)
-{
-       return (dentry != dentry->d_covers &&
-               dentry != exp->ex_dentry);
-}
 
 /*
  * Look up one component of a pathname.
@@ -183,12 +163,7 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
        if (err)
                goto out;
 #endif
-       err = nfserr_noent;
-       if (fs_off_limits(dparent->d_sb))
-               goto out;
        err = nfserr_acces;
-       if (nfsd_iscovered(dparent, exp))
-               goto out;
 
        /* Lookup the name, but don't follow links */
        dchild = lookup_dentry(name, dget(dparent), 0);
@@ -1203,8 +1178,6 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
        dentry = fhp->fh_dentry;
 
        err = nfserr_perm;
-       if (nfsd_iscovered(dentry, fhp->fh_export))
-               goto out;
        dirp = dentry->d_inode;
        if (!dirp->i_op || !dirp->i_op->symlink)
                goto out;
@@ -1297,10 +1270,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        dold = tfhp->fh_dentry;
        dest = dold->d_inode;
 
-       err = nfserr_acces;
-       if (nfsd_iscovered(ddir, ffhp->fh_export))
-               goto out_unlock;
-       /* FIXME: nxdev for NFSv3 */
+       err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
        if (dirp->i_dev != dest->i_dev)
                goto out_unlock;
 
@@ -1380,6 +1350,10 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
        tdentry = tfhp->fh_dentry;
        tdir = tdentry->d_inode;
 
+       err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+       if (fdir->i_dev != tdir->i_dev)
+               goto out;
+
        /* N.B. We shouldn't need this ... dentry layer handles it */
        err = nfserr_perm;
        if (!flen || (fname[0] == '.' && 
@@ -1691,17 +1665,10 @@ nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc)
        dprintk("      owner %d/%d user %d/%d\n",
                inode->i_uid, inode->i_gid, current->fsuid, current->fsgid);
 #endif
-#ifndef CONFIG_NFSD_SUN
-        if (dentry->d_mounts != dentry) {
-               return nfserr_perm;
-       }
-#endif
 
        if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
                if (EX_RDONLY(exp) || IS_RDONLY(inode))
                        return nfserr_rofs;
-               if (S_ISDIR(inode->i_mode) && nfsd_iscovered(dentry, exp))
-                       return nfserr_perm;
                if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
                        return nfserr_perm;
        }
index 95dfd7f24fb8716bc00b23e71e498dd839a3d8ae..b087bd7f69f92811721c439da46467ca70b94b74 100644 (file)
@@ -812,6 +812,7 @@ static int ntfs_statfs(struct super_block *sb, struct statfs *sf, int bufsize)
        struct statfs fs;
        struct inode *mft;
        ntfs_volume *vol;
+       ntfs_u64 size;
        int error;
 
        ntfs_debug(DEBUG_OTHER, "ntfs_statfs\n");
@@ -820,16 +821,21 @@ static int ntfs_statfs(struct super_block *sb, struct statfs *sf, int bufsize)
        fs.f_type=NTFS_SUPER_MAGIC;
        fs.f_bsize=vol->clustersize;
 
-       error = ntfs_get_volumesize( NTFS_SB2VOL( sb ), &fs.f_blocks );
+       error = ntfs_get_volumesize( NTFS_SB2VOL( sb ), &size );
        if( error )
                return -error;
+       fs.f_blocks = size;     /* volumesize is in clusters */
        fs.f_bfree=ntfs_get_free_cluster_count(vol->bitmap);
        fs.f_bavail=fs.f_bfree;
 
        /* Number of files is limited by free space only, so we lie here */
        fs.f_ffree=0;
        mft=iget(sb,FILE_MFT);
-       fs.f_files=mft->i_size >> vol->mft_recordbits;
+       if (!mft)
+               return -EIO;
+       /* So ... we lie... thus this following cast of loff_t value
+          is ok here.. */
+       fs.f_files = (unsigned long)mft->i_size / vol->mft_recordsize;
        iput(mft);
 
        /* should be read from volume */
index c4d104cbffc29f662d9e0648c0e2853f5e3c37d7..d7dcb127fa2a33f08c11b485d4ec1fbded8b82f3 100644 (file)
@@ -91,13 +91,12 @@ int ntfs_init_volume(ntfs_volume *vol,char *boot)
        if(vol->mft_clusters_per_record<0 && vol->mft_clusters_per_record!=-10)
                ntfs_error("Unexpected data #4 in boot block\n");
 
-       vol->clustersize = vol->blocksize * vol->clusterfactor;
-       if (vol->mft_clusters_per_record > 0)
-               vol->mft_recordbits = vol->clustersize * vol->mft_clusters_per_record;
+       vol->clustersize=vol->blocksize*vol->clusterfactor;
+       if(vol->mft_clusters_per_record>0)
+               vol->mft_recordsize=
+                       vol->clustersize*vol->mft_clusters_per_record;
        else
-               vol->mft_recordbits = -vol->mft_clusters_per_record;
-
-       vol->mft_recordsize = 1 << vol->mft_recordbits;
+               vol->mft_recordsize=1<<(-vol->mft_clusters_per_record);
        vol->index_recordsize=vol->clustersize*vol->index_clusters_per_record;
        /* FIXME: long long value */
        vol->mft_cluster=NTFS_GETU64(boot+0x30);
@@ -254,10 +253,9 @@ int ntfs_release_volume(ntfs_volume *vol)
  * Writes the volume size into vol_size. Returns 0 if successful
  * or error.
  */
-int ntfs_get_volumesize(ntfs_volume *vol, long *vol_size )
+int ntfs_get_volumesize(ntfs_volume *vol, ntfs_u64 *vol_size )
 {
        ntfs_io io;
-       ntfs_u64 size;
        char *cluster0;
 
        if( !vol_size )
@@ -273,11 +271,8 @@ int ntfs_get_volumesize(ntfs_volume *vol, long *vol_size )
        io.do_read=1;
        io.size=vol->clustersize;
        ntfs_getput_clusters(vol,0,0,&io);
-       size=NTFS_GETU64(cluster0+0x28);
+       *vol_size = NTFS_GETU64(cluster0+0x28);
        ntfs_free(cluster0);
-       /* FIXME: more than 2**32 cluster */
-       /* FIXME: gcc will emit udivdi3 if we don't truncate it */
-       *vol_size = ((unsigned long)size)/vol->clusterfactor;
        return 0;
 }
 
index b747721437e905585d0ce1caa85792f859fabcd9..3afbf7e476c92754e8972c54a14439921bcc3561 100644 (file)
@@ -10,7 +10,7 @@
 #define ALLOC_REQUIRE_SIZE     2
 
 int ntfs_get_free_cluster_count(ntfs_inode *bitmap);
-int ntfs_get_volumesize(ntfs_volume *vol, long *vol_size );
+int ntfs_get_volumesize(ntfs_volume *vol, ntfs_u64 *vol_size );
 int ntfs_init_volume(ntfs_volume *vol,char *boot);
 int ntfs_load_special_files(ntfs_volume *vol);
 int ntfs_release_volume(ntfs_volume *vol);
index 25594b93b0a4aacd15c31eae144abe30fdae171e..ac9d133cfca61e81aa1dcedeadf518e3cf2d9045 100644 (file)
@@ -18,7 +18,8 @@ else
     define_bool CONFIG_MSDOS_PARTITION y
   fi
 fi
-if [ "$CONFIG_MSDOS_PARTITION" = "y" ]; then
+if [ "$CONFIG_PARTITION_ADVANCED" = "y" -a \
+     "$CONFIG_MSDOS_PARTITION" = "y" ]; then
   bool '  BSD disklabel (FreeBSD partition tables) support' CONFIG_BSD_DISKLABEL
   bool '  Solaris (x86) partition table support' CONFIG_SOLARIS_X86_PARTITION
   bool '  Unixware slices support' CONFIG_UNIXWARE_DISKLABEL
index 39009ec4d946b24f9dfcb82376e7aeb9c54ce990..5f43c691e885be88acb97b4348d8e73d8dcf233d 100644 (file)
@@ -199,40 +199,21 @@ static int make_inode_number(void)
        return PROC_DYNAMIC_FIRST + i;
 }
 
-static int proc_readlink(struct dentry * dentry, char * buffer, int buflen)
+static int proc_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
-       struct inode *inode = dentry->d_inode;
-       struct proc_dir_entry * de;
-       int len;
-       de = (struct proc_dir_entry *) inode->u.generic_ip;
-       len = de->size+1;
-       if (len > buflen)
-               len = buflen;
-       copy_to_user(buffer, de->data, len);
-       return len;
+       char *s=((struct proc_dir_entry *)dentry->d_inode->u.generic_ip)->data;
+       return vfs_readlink(dentry, buffer, buflen, s);
 }
 
-struct dentry * proc_follow_link(struct dentry * dentry, struct dentry *base, unsigned int follow)
+static struct dentry *proc_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
 {
-       struct inode *inode = dentry->d_inode;
-       struct proc_dir_entry * de;
-       de = (struct proc_dir_entry *) inode->u.generic_ip;
-       return lookup_dentry(de->data, base, follow);
+       char *s=((struct proc_dir_entry *)dentry->d_inode->u.generic_ip)->data;
+       return vfs_follow_link(dentry, base, flags, s);
 }
 
 static struct inode_operations proc_link_inode_operations = {
-       NULL,                   /* no file-ops */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       proc_readlink,          /* readlink */
-       proc_follow_link,       /* follow_link */
+       readlink:       proc_readlink,
+       follow_link:    proc_follow_link
 };
 
 /*
index 15032d2cdd1f999a98cbad18a4f16ea02d7c7f39..83a0d76195a239ed92d59d61485d1875d07b0c32 100644 (file)
@@ -27,14 +27,9 @@ struct proc_dir_entry *proc_sys_root;
  */
 static int proc_self_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
-       int len;
        char tmp[30];
-
-       len = sprintf(tmp, "%d", current->pid);
-       if (buflen < len)
-               len = buflen;
-       copy_to_user(buffer, tmp, len);
-       return len;
+       sprintf(tmp, "%d", current->pid);
+       return vfs_readlink(dentry,buffer,buflen,tmp);
 }
 
 static struct dentry * proc_self_follow_link(struct dentry *dentry,
@@ -42,24 +37,13 @@ static struct dentry * proc_self_follow_link(struct dentry *dentry,
                                                unsigned int follow)
 {
        char tmp[30];
-
        sprintf(tmp, "%d", current->pid);
-       return lookup_dentry(tmp, base, follow);
+       return vfs_follow_link(dentry,base,follow,tmp);
 }      
 
 static struct inode_operations proc_self_inode_operations = {
-       NULL,                   /* no file-ops */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       proc_self_readlink,     /* readlink */
-       proc_self_follow_link,  /* follow_link */
+       readlink:       proc_self_readlink,
+       follow_link:    proc_self_follow_link
 };
 
 static struct proc_dir_entry proc_root_self = {
index 6f22d104704a9737937701c165a97e2b696bf3b8..287f4f60b45f57393e076a504047052b62e42cb9 100644 (file)
@@ -444,63 +444,6 @@ romfs_readpage(struct dentry * dentry, struct page * page)
        return result;
 }
 
-static int
-romfs_readlink(struct dentry *dentry, char *buffer, int len)
-{
-       struct inode *inode = dentry->d_inode;
-       int mylen;
-       char buf[ROMFS_MAXFN];          /* XXX dynamic */
-
-       if (!inode || !S_ISLNK(inode->i_mode)) {
-               mylen = -EBADF;
-               goto out;
-       }
-
-       mylen = min(sizeof(buf), inode->i_size);
-
-       if (romfs_copyfrom(inode, buf, inode->u.romfs_i.i_dataoffset, mylen) <= 0) {
-               mylen = -EIO;
-               goto out;
-       }
-       copy_to_user(buffer, buf, mylen);
-
-out:
-       return mylen;
-}
-
-static struct dentry *romfs_follow_link(struct dentry *dentry,
-                                       struct dentry *base,
-                                       unsigned int follow)
-{
-       struct inode *inode = dentry->d_inode;
-       char *link;
-       int len, cnt;
-
-       len = inode->i_size;
-
-       dentry = ERR_PTR(-EAGAIN);                      /* correct? */
-       if (!(link = kmalloc(len+1, GFP_KERNEL)))
-               goto outnobuf;
-
-       cnt = romfs_copyfrom(inode, link, inode->u.romfs_i.i_dataoffset, len);
-       if (len != cnt) {
-               dentry = ERR_PTR(-EIO);
-               goto out;
-       } else
-               link[len] = 0;
-
-       dentry = lookup_dentry(link, base, follow);
-       kfree(link);
-
-       if (0) {
-out:
-               kfree(link);
-outnobuf:
-               dput(base);
-       }
-       return dentry;
-}
-
 /* Mapping from our types to the kernel */
 
 static struct file_operations romfs_file_operations = {
@@ -584,24 +527,9 @@ static struct inode_operations romfs_dir_inode_operations = {
 };
 
 static struct inode_operations romfs_link_inode_operations = {
-       NULL,                   /* no file operations on symlinks */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       romfs_readlink,         /* readlink */
-       romfs_follow_link,      /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
+       readlink:       page_readlink,
+       follow_link:    page_follow_link,
+       readpage:       romfs_readpage
 };
 
 static mode_t romfs_modemap[] =
index eb66df0d05481e42d3b634054d9037085ebee4c0..9f901236be09e09e1351c46f4da9fb688487ad57 100644 (file)
@@ -58,10 +58,9 @@ int nr_super_blocks = 0;
 int max_super_blocks = NR_SUPER;
 LIST_HEAD(super_blocks);
 
-static struct file_system_type *file_systems = (struct file_system_type *) NULL;
-struct vfsmount *vfsmntlist = (struct vfsmount *) NULL;
-static struct vfsmount *vfsmnttail = (struct vfsmount *) NULL,
-                       *mru_vfsmnt = (struct vfsmount *) NULL;
+static struct file_system_type *file_systems = NULL;
+struct vfsmount *vfsmntlist = NULL;
+static struct vfsmount *vfsmnttail = NULL, *mru_vfsmnt = NULL;
 
 /* 
  * This part handles the management of the list of mounted filesystems.
@@ -70,23 +69,19 @@ struct vfsmount *lookup_vfsmnt(kdev_t dev)
 {
        struct vfsmount *lptr;
 
-       if (vfsmntlist == (struct vfsmount *)NULL)
-               return ((struct vfsmount *)NULL);
+       if (vfsmntlist == NULL)
+               return NULL;
 
-       if (mru_vfsmnt != (struct vfsmount *)NULL &&
-           mru_vfsmnt->mnt_dev == dev)
+       if (mru_vfsmnt != NULL && mru_vfsmnt->mnt_dev == dev)
                return (mru_vfsmnt);
 
-       for (lptr = vfsmntlist;
-            lptr != (struct vfsmount *)NULL;
-            lptr = lptr->mnt_next)
+       for (lptr = vfsmntlist; lptr != NULL; lptr = lptr->mnt_next)
                if (lptr->mnt_dev == dev) {
                        mru_vfsmnt = lptr;
                        return (lptr);
                }
 
-       return ((struct vfsmount *)NULL);
-       /* NOTREACHED */
+       return NULL;
 }
 
 static struct vfsmount *add_vfsmnt(struct super_block *sb,
@@ -140,7 +135,7 @@ void remove_vfsmnt(kdev_t dev)
 {
        struct vfsmount *lptr, *tofree;
 
-       if (vfsmntlist == (struct vfsmount *)NULL)
+       if (vfsmntlist == NULL)
                return;
        lptr = vfsmntlist;
        if (lptr->mnt_dev == dev) {
@@ -149,13 +144,13 @@ void remove_vfsmnt(kdev_t dev)
                if (vfsmnttail->mnt_dev == dev)
                        vfsmnttail = vfsmntlist;
        } else {
-               while (lptr->mnt_next != (struct vfsmount *)NULL) {
+               while (lptr->mnt_next != NULL) {
                        if (lptr->mnt_next->mnt_dev == dev)
                                break;
                        lptr = lptr->mnt_next;
                }
                tofree = lptr->mnt_next;
-               if (tofree == (struct vfsmount *)NULL)
+               if (tofree == NULL)
                        return;
                lptr->mnt_next = lptr->mnt_next->mnt_next;
                if (vfsmnttail->mnt_dev == dev)
index 2a1626bdd5ad95584a2238cb82a44384b92525cd..84491289892066787c39511b36b171c02f0b2589 100644 (file)
@@ -440,59 +440,41 @@ end_unlink:
 int sysv_symlink(struct inode * dir, struct dentry * dentry, 
                 const char * symname)
 {
-       struct sysv_dir_entry * de;
        struct inode * inode;
-       struct buffer_head * name_block;
-       char * name_block_data;
-       struct super_block * sb;
-       int i;
-       char c;
+       struct sysv_dir_entry * de;
        struct buffer_head * bh;
-
+       int err;
+       int l;
+
+       err = -ENAMETOOLONG;
+       l = strlen(symname)+1;
+       if (l > dir->i_sb->sv_block_size_1)
+               goto out;
+       err = -ENOSPC;
        if (!(inode = sysv_new_inode(dir)))
-               return -ENOSPC;
+               goto out;
 
        inode->i_mode = S_IFLNK | 0777;
        inode->i_op = &sysv_symlink_inode_operations;
-       name_block = sysv_file_bread(inode, 0, 1);
-       if (!name_block) {
-               inode->i_nlink--;
-               mark_inode_dirty(inode);
-               iput(inode);
-               return -ENOSPC;
-       }
-       sb = inode->i_sb;
-       name_block_data = name_block->b_data;
-       i = 0;
-       while (i < sb->sv_block_size_1 && (c = *(symname++)))
-               name_block_data[i++] = c;
-       name_block_data[i] = 0;
-       mark_buffer_dirty(name_block, 1);
-       brelse(name_block);
-       inode->i_size = i;
+       err = block_symlink(inode, symname, l);
+       if (err)
+               goto out_no_entry;
        mark_inode_dirty(inode);
-       bh = sysv_find_entry(dir, dentry->d_name.name,
-                             dentry->d_name.len, &de);
-       if (bh) {
-               inode->i_nlink--;
-               mark_inode_dirty(inode);
-               iput(inode);
-               brelse(bh);
-               return -EEXIST;
-       }
-       i = sysv_add_entry(dir, dentry->d_name.name,
+       err = sysv_add_entry(dir, dentry->d_name.name,
                            dentry->d_name.len, &bh, &de);
-       if (i) {
-               inode->i_nlink--;
-               mark_inode_dirty(inode);
-               iput(inode);
-               return i;
-       }
+       if (err)
+               goto out_no_entry;
        de->inode = inode->i_ino;
        mark_buffer_dirty(bh, 1);
        brelse(bh);
         d_instantiate(dentry, inode);
-       return 0;
+out:
+       return err;
+out_no_entry:
+       inode->i_nlink--;
+       mark_inode_dirty(inode);
+       iput(inode);
+       goto out;
 }
 
 int sysv_link(struct dentry * old_dentry, struct inode * dir, 
index b84e4504e8c6a7098b216a7306b5caa29302916a..3f77f831e91087c581be9331f138d510bc55dac2 100644 (file)
  *  SystemV/Coherent symlink handling code
  */
 
-#include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/sysv_fs.h>
-#include <linux/stat.h>
-
-#include <asm/uaccess.h>
-
-static int sysv_readlink(struct dentry *, char *, int);
-static struct dentry *sysv_follow_link(struct dentry *, struct dentry *, unsigned int);
 
 /*
  * symlinks can't do much...
  */
 struct inode_operations sysv_symlink_inode_operations = {
-       NULL,                   /* no file-operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       sysv_readlink,          /* readlink */
-       sysv_follow_link,       /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
+       readlink:       page_readlink,
+       follow_link:    page_follow_link,
+       get_block:      sysv_get_block,
+       readpage:       block_read_full_page
 };
-
-static struct dentry *sysv_follow_link(struct dentry * dentry,
-                                       struct dentry * base,
-                                       unsigned int follow)
-{
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head * bh;
-
-       bh = sysv_file_bread(inode, 0, 0);
-       if (!bh) {
-               dput(base);
-               return ERR_PTR(-EIO);
-       }
-       UPDATE_ATIME(inode);
-       base = lookup_dentry(bh->b_data, base, follow);
-       brelse(bh);
-       return base;
-}
-
-static int sysv_readlink(struct dentry * dentry, char * buffer, int buflen)
-{
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head * bh;
-       char * bh_data;
-       int i;
-       char c;
-
-       if (buflen > inode->i_sb->sv_block_size_1)
-               buflen = inode->i_sb->sv_block_size_1;
-       bh = sysv_file_bread(inode, 0, 0);
-       if (!bh)
-               return 0;
-       bh_data = bh->b_data;
-       i = 0;
-       while (i<buflen && (c = bh_data[i])) {
-               i++;
-               put_user(c,buffer++);
-       }
-       brelse(bh);
-       return i;
-}
index f1b076263df45be9d5fdf714149195ae695da419..63ebb571395273ddff0271bff994dcdd45c5e26e 100644 (file)
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/malloc.h>
+#include <linux/pagemap.h>
 #include "udf_i.h"
 
-static int udf_readlink(struct dentry *, char *, int);
-static struct dentry * udf_follow_link(struct dentry * dentry,
-       struct dentry * base, unsigned int follow);
-
-/*
- * symlinks can't do much...
- */
-struct inode_operations udf_symlink_inode_operations = {
-       NULL,                   /* no file-operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       udf_readlink,           /* readlink */
-       udf_follow_link,        /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
-};
-
-int udf_pc_to_char(char *from, int fromlen, char **to)
+static void udf_pc_to_char(char *from, int fromlen, char *to)
 {
        struct PathComponent *pc;
        int elen = 0, len = 0;
+       char *p = to;
 
-       *to = (char *)kmalloc(fromlen, GFP_KERNEL);
-
-       if (!(*to))
-               return -1;
-
-       while (elen < fromlen)
-       {
+       while (elen < fromlen) {
                pc = (struct PathComponent *)(from + elen);
-               if (pc->componentType == 1 && pc->lengthComponentIdent == 0)
-               {
-                       (*to)[0] = '/';
-                       len = 1;
-               }
-               else if (pc->componentType == 3)
-               {
-                       memcpy(&(*to)[len], "../", 3);
-                       len += 3;
-               }
-        else if (pc->componentType == 4)
-               {
-                       memcpy(&(*to)[len], "./", 2);
-                       len += 2;
-               }
-               else if (pc->componentType == 5)
-               {
-                       memcpy(&(*to)[len], pc->componentIdent, pc->lengthComponentIdent);
-                       len += pc->lengthComponentIdent + 1;
-                       (*to)[len-1] = '/';
+               switch (pc->componentType) {
+                       case 1:
+                               if (pc->lengthComponentIdent == 0) {
+                                       p = to;
+                                       *p++ = '/';
+                               }
+                               break;
+                       case 3:
+                               memcpy(p, "../", 3);
+                               p += 3;
+                               break;
+                       case 4:
+                               memcpy(p, "./", 2);
+                               p += 2;
+                               /* that would be . - just ignore */
+                               break;
+                       case 5:
+                               memcpy(p+len, pc->componentIdent,
+                                       pc->lengthComponentIdent);
+                               p += pc->lengthComponentIdent;
+                               *p++ = '/';
                }
                elen += sizeof(struct PathComponent) + pc->lengthComponentIdent;
        }
 
-       if (len)
-       {
-               len --;
-               (*to)[len] = '\0';
+       if (p>to+1) {
+               p[-1] = '\0';
        }
-       return len;
 }
 
-static struct dentry * udf_follow_link(struct dentry * dentry,
-       struct dentry * base, unsigned int follow)
+static int udf_symlink_filler(struct dentry * dentry, struct page *page)
 {
        struct inode *inode = dentry->d_inode;
        struct buffer_head *bh = NULL;
-       char *symlink, *tmpbuf;
-       int len;
-       
-       if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
-       {
-               bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
-
-               if (!bh)
-                       return 0;
-
-               symlink = bh->b_data + udf_file_entry_alloc_offset(inode);
-       }
-       else
-       {
-               bh = bread(inode->i_dev, udf_block_map(inode, 0), inode->i_sb->s_blocksize);
-
-               if (!bh)
-                       return 0;
-
-               symlink = bh->b_data;
-       }
-
-       if ((len = udf_pc_to_char(symlink, inode->i_size, &tmpbuf)) >= 0)
-       {
-               base = lookup_dentry(tmpbuf, base, follow);
-               kfree(tmpbuf);
-               return base;
-       }
-       else
-               return ERR_PTR(-ENOMEM);
-}
+       char *symlink;
+       int err;
 
-static int udf_readlink(struct dentry * dentry, char * buffer, int buflen)
-{
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head *bh = NULL;
-       char *symlink, *tmpbuf;
-       int len;
+       char *p = (char*)kmap(page);
        
-       if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
-       {
-               bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+       err = -EIO;
+       if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB) {
+               bh = udf_tread(inode->i_sb, inode->i_ino,
+                               inode->i_sb->s_blocksize);
 
                if (!bh)
-                       return 0;
+                       goto out;
 
                symlink = bh->b_data + udf_file_entry_alloc_offset(inode);
-       }
-       else
-       {
-               bh = bread(inode->i_dev, udf_block_map(inode, 0), inode->i_sb->s_blocksize);
+       } else {
+               bh = bread(inode->i_dev, udf_block_map(inode, 0),
+                               inode->i_sb->s_blocksize);
 
                if (!bh)
-                       return 0;
+                       goto out;
 
                symlink = bh->b_data;
        }
 
-       if ((len = udf_pc_to_char(symlink, inode->i_size, &tmpbuf)) >= 0)
-       {
-               if (copy_to_user(buffer, tmpbuf, len > buflen ? buflen : len))
-                       len = -EFAULT;
-               kfree(tmpbuf);
-       }
-       else
-               len = -ENOMEM;
-
-       UPDATE_ATIME(inode);
-       if (bh)
-               udf_release_data(bh);
-       return len;
+       udf_pc_to_char(symlink, inode->i_size, p);
+       udf_release_data(bh);
+       SetPageUptodate(page);
+       kunmap(page);
+       UnlockPage(page);
+       return 0;
+out:
+       SetPageError(page);
+       kunmap(page);
+       UnlockPage(page);
+       return -EIO;
 }
+
+/*
+ * symlinks can't do much...
+ */
+struct inode_operations udf_symlink_inode_operations = {
+       readlink:       page_readlink,
+       follow_link:    page_follow_link,
+       readpage:       udf_symlink_filler,
+};
index afcdcd60022785750a80e216aa088753eb88f0e4..21171a86481d0c68d5eda57ae0028da4aa89cfbe 100644 (file)
@@ -631,7 +631,9 @@ void ufs_read_inode (struct inode * inode)
        else if (S_ISDIR(inode->i_mode))
                inode->i_op = &ufs_dir_inode_operations;
        else if (S_ISLNK(inode->i_mode))
-               inode->i_op = &ufs_symlink_inode_operations;
+               inode->i_op = inode->i_blocks
+                               ?&ufs_symlink_inode_operations
+                               :&ufs_fast_symlink_inode_operations;
        else
                init_special_inode(inode, inode->i_mode,
                                   SWAB32(ufs_inode->ui_u2.ui_addr.ui_db[0]));
index 278f8826aa993081bc5d0bb5764102aebb63a2dd..497f69867ec61fe7d64927d8e6a96246490ca94f 100644 (file)
@@ -751,55 +751,42 @@ end_unlink:
 int ufs_symlink (struct inode * dir, struct dentry * dentry,
        const char * symname)
 {
-       struct super_block * sb;
+       struct super_block * sb = dir->i_sb;
        struct ufs_dir_entry * de;
        struct inode * inode;
-       struct buffer_head * bh, * name_block;
-       char * link;
-       unsigned i, l;
+       struct buffer_head * bh = NULL;
+       unsigned l;
        int err;
-       char c;
-       unsigned swab;
+       unsigned swab = sb->u.ufs_sb.s_swab;
        
        UFSD(("ENTER\n"))
        
-       sb = dir->i_sb;
-       swab = sb->u.ufs_sb.s_swab;
-       bh = name_block = NULL;
+
+       err = -ENAMETOOLONG;
+       l = strlen(symname)+1;
+       if (l > dir->i_sb->s_blocksize)
+               goto out;
+
        err = -EIO;
        
        if (!(inode = ufs_new_inode (dir, S_IFLNK, &err))) {
                return err;
        }
        inode->i_mode = S_IFLNK | S_IRWXUGO;
-       inode->i_op = &ufs_symlink_inode_operations;
-       for (l = 0; l < sb->s_blocksize - 1 && symname [l]; l++);
 
-       /***if (l >= sizeof (inode->u.ufs_i.i_data)) {***/
+       /***if (l > sizeof (inode->u.ufs_i.i_data)) {***/
        if (1) {
                /* slow symlink */
-               name_block = ufs_bread (inode, 0, 1, &err);
-               if (!name_block) {
-                       inode->i_nlink--;
-                       mark_inode_dirty(inode);
-                       iput (inode);
-                       return err;
-               }
-               link = name_block->b_data;
-               
+               inode->i_op = &ufs_symlink_inode_operations;
+               err = block_symlink(inode, symname, l);
+               if (err)
+                       goto out_no_entry;
        } else {
                /* fast symlink */
-               link = (char *) inode->u.ufs_i.i_u1.i_data;
-       }
-       i = 0;
-       while (i < sb->s_blocksize - 1 && (c = *(symname++)))
-               link[i++] = c;
-       link[i] = 0;
-       if (name_block) {
-               mark_buffer_dirty(name_block, 1);
-               brelse (name_block);
+               inode->i_op = &ufs_fast_symlink_inode_operations;
+               memcpy((char*)&inode->u.ufs_i.i_u1.i_data,symname,l);
+               inode->i_size = l-1;
        }
-       inode->i_size = i;
        mark_inode_dirty(inode);
 
        bh = ufs_add_entry (dir, dentry->d_name.name, dentry->d_name.len, &de, &err);
@@ -828,16 +815,12 @@ out_no_entry:
 int ufs_link (struct dentry * old_dentry, struct inode * dir,
        struct dentry *dentry)
 {
-       struct super_block * sb;
        struct inode *inode = old_dentry->d_inode;
+       struct super_block * sb = inode->i_sb;
        struct ufs_dir_entry * de;
        struct buffer_head * bh;
        int err;
-       unsigned swab;
-
-       inode = old_dentry->d_inode;
-       sb = inode->i_sb;
-       swab = sb->u.ufs_sb.s_swab;
+       unsigned swab = sb->u.ufs_sb.s_swab;
        
        if (S_ISDIR(inode->i_mode))
                return -EPERM;
index 519d27b267be259674e67aba50670184f885e570..06ee827402d546a6a6833e866c02fe0b70c9d591 100644 (file)
  *  ext2 symlink handling code
  */
 
-#include <asm/uaccess.h>
-
-#include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/ufs_fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/stat.h>
-
-
-#undef UFS_SYMLINK_DEBUG
 
-#ifdef UFS_SYMLINK_DEBUG
-#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
-#else
-#define UFSD(x)
-#endif
-
-
-static struct dentry * ufs_follow_link(struct dentry * dentry,
-       struct dentry * base, unsigned int follow)
+static int ufs_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
-       struct inode * inode;
-       struct buffer_head * bh;
-       int error;
-       char * link;
-
-       UFSD(("ENTER\n"))
-
-       inode = dentry->d_inode;
-       bh = NULL;
-       /* slow symlink */          
-       if (inode->i_blocks) {
-               if (!(bh = ufs_bread (inode, 0, 0, &error))) {
-                       dput(base);
-                       return ERR_PTR(-EIO);
-               }
-               link = bh->b_data;
-       }
-       /* fast symlink */
-       else {
-               link = (char *) inode->u.ufs_i.i_u1.i_symlink;
-       }
-       UPDATE_ATIME(inode);
-       base = lookup_dentry(link, base, follow);
-       if (bh)
-               brelse(bh);
-       UFSD(("EXIT\n"))
-       return base;
+       char *s = (char *)dentry->d_inode->u.ufs_i.i_u1.i_symlink;
+       return vfs_readlink(dentry, buffer, buflen, s);
 }
 
-static int ufs_readlink (struct dentry * dentry, char * buffer, int buflen)
+static struct dentry *ufs_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
 {
-       struct super_block * sb;
-       struct inode * inode;
-       struct buffer_head * bh;
-       char * link;
-       int i;
-
-       UFSD(("ENTER\n"))
-
-       inode = dentry->d_inode;
-       sb = inode->i_sb;
-       bh = NULL;
-       if (buflen > sb->s_blocksize - 1)
-               buflen = sb->s_blocksize - 1;
-       /* slow symlink */
-       if (inode->i_blocks) {
-               int err;
-               bh = ufs_bread (inode, 0, 0, &err);
-               if (!bh) {
-                       if(err < 0) /* indicate type of error */
-                               return err;
-                       return 0;
-               }
-               link = bh->b_data;
-       }
-       /* fast symlink */
-       else {
-               link = (char *) inode->u.ufs_i.i_u1.i_symlink;
-       }
-       i = 0;
-       while (i < buflen && link[i])
-               i++;
-       if (copy_to_user(buffer, link, i))
-               i = -EFAULT;
-       UPDATE_ATIME(inode);
-       if (bh)
-               brelse (bh);
-       UFSD(("ENTER\n"))
-       return i;
+       char *s = (char *)dentry->d_inode->u.ufs_i.i_u1.i_symlink;
+       return vfs_follow_link(dentry, base, flags, s);
 }
 
+struct inode_operations ufs_fast_symlink_inode_operations = {
+       readlink:       ufs_readlink,
+       follow_link:    ufs_follow_link,
+};
+
 struct inode_operations ufs_symlink_inode_operations = {
-       NULL,                   /* no file-operations */
-       NULL,                   /* create */
-       NULL,                   /* lookup */
-       NULL,                   /* link */
-       NULL,                   /* unlink */
-       NULL,                   /* symlink */
-       NULL,                   /* mkdir */
-       NULL,                   /* rmdir */
-       NULL,                   /* mknod */
-       NULL,                   /* rename */
-       ufs_readlink,           /* readlink */
-       ufs_follow_link,        /* follow_link */
-       NULL,                   /* get_block */
-       NULL,                   /* readpage */
-       NULL,                   /* writepage */
-       NULL,                   /* truncate */
-       NULL,                   /* permission */
-       NULL                    /* revalidate */
+       readlink:       page_readlink,
+       follow_link:    page_follow_link,
+       get_block:      ufs_getfrag_block,
+       readpage:       block_read_full_page
 };
index f53426a13264976987f2eebdf24d45c882ec60ea..9c09891de727555380fae3e32697dec105e87d84 100644 (file)
@@ -11,6 +11,16 @@ I'd call it pre-release, and ask for as many people as possible to
 come and test it! See notes below for some more information, or if
 you are trying to use UMSDOS as root partition.
 
+Userland NOTE: new umsdos_progs (umssync, umssetup, udosctl & friends) that
+will compile and work on 2.2.x kernels and glibc based systems may be found
+at http://cvs.linux.hr/
+
+Also look at the quick-hack "homepage" for umsdos filesystem at 
+http://www.voyager.hr/~mnalis/umsdos
+
+Information below is getting outdated slowly -- I'll fix it one day when I
+get enough time - there are more important things to fix right now.
+
 Legend: those lines marked with '+' on the beggining of line indicates it
 passed all of my tests, and performed perfect in all of them.
 
index 1eda3df939617d2225383738ed9dc2fa9316202c..6eebff74a08a18b82a946e855354c24dccb32a8b 100644 (file)
@@ -59,6 +59,14 @@ void check_page_tables (void)
 
 
 #if UMS_DEBUG
+/*
+ * check for wait queue in 2.3.x
+ */
+inline void uq_log (char *txt, struct inode *inode)
+{
+       printk (KERN_ERR "%s: (%lu) magic=%lu creator=%lu lock=%u\n", txt, inode->i_ino, inode->u.umsdos_i.dir_info.p.__magic, inode->u.umsdos_i.dir_info.p.__creator, inode->u.umsdos_i.dir_info.p.lock.lock);
+}
+
 /*
  * check a superblock
  */
@@ -213,6 +221,7 @@ void check_dentry_path (struct dentry *dentry, const char *desc)
        }
 }
 #else
+inline void uq_log (char *txt, struct inode *inode) {};
 void check_sb (struct super_block *sb, const char c) {};
 void check_inode (struct inode *inode) {};
 void checkd_inode (struct inode *inode) {};
index 670f3fd5d8d0d44fcaa216e0fddc7f334a5beeaf..af875aa7952175254699fd174310ed82b5881a2d 100644 (file)
@@ -49,9 +49,7 @@ struct dentry_operations umsdos_dentry_operations =
        umsdos_dentry_validate, /* d_revalidate(struct dentry *, int) */
        NULL,                   /* d_hash */
        NULL,                   /* d_compare */
-       umsdos_dentry_dput,     /* d_delete(struct dentry *) */
-       NULL,
-       NULL,
+       umsdos_dentry_dput      /* d_delete(struct dentry *) */
 };
 
 
@@ -836,5 +834,5 @@ struct inode_operations umsdos_dir_inode_operations =
        NULL,                   /* writepage */
        NULL,                   /* truncate */
        NULL,                   /* permission */
-       NULL,                   /* revalidate */
+       NULL                    /* revalidate */
 };
index 171aa15dc0225e2b3bb1d970a14a66077038a971..252f8a48b747acd5ffd4139e97673ff62351afbf 100644 (file)
@@ -94,6 +94,11 @@ void umsdos_setup_dir(struct dentry *dir)
                printk(KERN_ERR "umsdos_setup_dir: %s/%s not a dir!\n",
                        dir->d_parent->d_name.name, dir->d_name.name);
 
+       init_waitqueue_head (&inode->u.umsdos_i.dir_info.p);
+       inode->u.umsdos_i.dir_info.looking = 0;
+       inode->u.umsdos_i.dir_info.creating = 0;
+       inode->u.umsdos_i.dir_info.pid = 0;
+
        inode->i_op = &umsdos_rdir_inode_operations;
        if (umsdos_have_emd(dir)) {
 Printk((KERN_DEBUG "umsdos_setup_dir: %s/%s using EMD\n",
@@ -311,7 +316,7 @@ static struct super_operations umsdos_sops =
        NULL,                   /* write_super */
        fat_statfs,             /* statfs */
        NULL,                   /* remount_fs */
-       fat_clear_inode,        /* clear_inode */
+       fat_clear_inode         /* clear_inode */
 };
 
 /*
@@ -333,7 +338,7 @@ struct super_block *UMSDOS_read_super (struct super_block *sb, void *data,
        if (!res)
                goto out_fail;
 
-       printk (KERN_INFO "UMSDOS 0.85 "
+       printk (KERN_INFO "UMSDOS 0.86 "
                "(compatibility level %d.%d, fast msdos)\n", 
                UMSDOS_VERSION, UMSDOS_RELEASE);
 
index f26d19ba8bc4194fbc077b03b2d2aef1f8725f2f..2390433183b3906ed11de4dbb504da55bd63635b 100644 (file)
@@ -217,14 +217,11 @@ dentry->d_parent->d_name.name, dentry->d_name.name, cmd, data_ptr));
                 * 
                 * Return 0 if success.
                 */
-               extern struct inode_operations umsdos_rdir_inode_operations;
 
                ret = umsdos_make_emd(dentry);
 Printk(("UMSDOS_ioctl_dir: INIT_EMD %s/%s, ret=%d\n",
 dentry->d_parent->d_name.name, dentry->d_name.name, ret));
-               dir->i_op = (ret == 0)
-                   ? &umsdos_dir_inode_operations
-                   : &umsdos_rdir_inode_operations;
+               umsdos_setup_dir (dentry);
                goto out;
        }
 
@@ -280,6 +277,8 @@ printk("umsdos_ioctl: renaming %s/%s to %s/%s\n",
 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
                        ret = msdos_rename (dir, old_dentry, dir, new_dentry);
+                       d_drop(new_dentry);
+                       d_drop(old_dentry);
                        dput(new_dentry);
                }
                dput(old_dentry);
index 444e9ffae2cde57f30ab5320dea5f427934d5720..415f5e3fbe6f258157c3bb6e62f4adf583e5633a 100644 (file)
 #include <linux/umsdos_fs.h>
 #include <linux/malloc.h>
 
-#if 1
+#define UMSDOS_DIR_LOCK
+
+#ifdef UMSDOS_DIR_LOCK
+
+static inline void u_sleep_on (struct inode *dir)
+{
+       sleep_on (&dir->u.umsdos_i.dir_info.p);
+}
+
+static inline void u_wake_up (struct inode *dir)
+{
+       wake_up (&dir->u.umsdos_i.dir_info.p);
+}
+
 /*
  * Wait for creation exclusivity.
  * Return 0 if the dir was already available.
@@ -34,9 +47,10 @@ static int umsdos_waitcreate (struct inode *dir)
 {
        int ret = 0;
 
-       if (dir->u.umsdos_i.u.dir_info.creating
-           && dir->u.umsdos_i.u.dir_info.pid != current->pid) {
-               sleep_on (&dir->u.umsdos_i.u.dir_info.p);
+       if (dir->u.umsdos_i.dir_info.creating
+           && dir->u.umsdos_i.dir_info.pid != current->pid) {
+               PRINTK (("creating && dir_info.pid=%lu, current->pid=%u\n", dir->u.umsdos_i.dir_info.pid, current->pid));
+               u_sleep_on (dir);
                ret = 1;
        }
        return ret;
@@ -47,8 +61,8 @@ static int umsdos_waitcreate (struct inode *dir)
  */
 static void umsdos_waitlookup (struct inode *dir)
 {
-       while (dir->u.umsdos_i.u.dir_info.looking) {
-               sleep_on (&dir->u.umsdos_i.u.dir_info.p);
+       while (dir->u.umsdos_i.dir_info.looking) {
+               u_sleep_on (dir);
        }
 }
 
@@ -90,8 +104,8 @@ void umsdos_lockcreate (struct inode *dir)
         * if we (the process) own the lock
         */
        while (umsdos_waitcreate (dir) != 0);
-       dir->u.umsdos_i.u.dir_info.creating++;
-       dir->u.umsdos_i.u.dir_info.pid = current->pid;
+       dir->u.umsdos_i.dir_info.creating++;
+       dir->u.umsdos_i.dir_info.pid = current->pid;
        umsdos_waitlookup (dir);
 }
 
@@ -110,10 +124,10 @@ static void umsdos_lockcreate2 (struct inode *dir1, struct inode *dir2)
                if (umsdos_waitcreate (dir1) == 0
                    && umsdos_waitcreate (dir2) == 0) {
                        /* We own both now */
-                       dir1->u.umsdos_i.u.dir_info.creating++;
-                       dir1->u.umsdos_i.u.dir_info.pid = current->pid;
-                       dir2->u.umsdos_i.u.dir_info.creating++;
-                       dir2->u.umsdos_i.u.dir_info.pid = current->pid;
+                       dir1->u.umsdos_i.dir_info.creating++;
+                       dir1->u.umsdos_i.dir_info.pid = current->pid;
+                       dir2->u.umsdos_i.dir_info.creating++;
+                       dir2->u.umsdos_i.dir_info.pid = current->pid;
                        break;
                }
        }
@@ -127,7 +141,7 @@ static void umsdos_lockcreate2 (struct inode *dir1, struct inode *dir2)
 void umsdos_startlookup (struct inode *dir)
 {
        while (umsdos_waitcreate (dir) != 0);
-       dir->u.umsdos_i.u.dir_info.looking++;
+       dir->u.umsdos_i.dir_info.looking++;
 }
 
 /*
@@ -135,12 +149,12 @@ void umsdos_startlookup (struct inode *dir)
  */
 void umsdos_unlockcreate (struct inode *dir)
 {
-       dir->u.umsdos_i.u.dir_info.creating--;
-       if (dir->u.umsdos_i.u.dir_info.creating < 0) {
-               printk ("UMSDOS: dir->u.umsdos_i.u.dir_info.creating < 0: %d"
-                       ,dir->u.umsdos_i.u.dir_info.creating);
+       dir->u.umsdos_i.dir_info.creating--;
+       if (dir->u.umsdos_i.dir_info.creating < 0) {
+               printk ("UMSDOS: dir->u.umsdos_i.dir_info.creating < 0: %d"
+                       ,dir->u.umsdos_i.dir_info.creating);
        }
-       wake_up (&dir->u.umsdos_i.u.dir_info.p);
+       u_wake_up (dir);
 }
 
 /*
@@ -148,12 +162,12 @@ void umsdos_unlockcreate (struct inode *dir)
  */
 void umsdos_endlookup (struct inode *dir)
 {
-       dir->u.umsdos_i.u.dir_info.looking--;
-       if (dir->u.umsdos_i.u.dir_info.looking < 0) {
-               printk ("UMSDOS: dir->u.umsdos_i.u.dir_info.looking < 0: %d"
-                       ,dir->u.umsdos_i.u.dir_info.looking);
+       dir->u.umsdos_i.dir_info.looking--;
+       if (dir->u.umsdos_i.dir_info.looking < 0) {
+               printk ("UMSDOS: dir->u.umsdos_i.dir_info.looking < 0: %d"
+                       ,dir->u.umsdos_i.dir_info.looking);
        }
-       wake_up (&dir->u.umsdos_i.u.dir_info.p);
+       u_wake_up (dir);
 }
 
 #else
@@ -475,8 +489,6 @@ out:
  * Let's go for simplicity...
  */
 
-extern struct inode_operations umsdos_symlink_inode_operations;
-
 /*
  * AV. Should be called with dir->i_sem down.
  */
index c2c5e2905a32cc45a6fe44c4665c18f3deebb553..2b972a524a99af48d42c88043cba503dd544056f 100644 (file)
@@ -252,5 +252,5 @@ struct inode_operations umsdos_rdir_inode_operations =
        NULL,                   /* get_block */
        NULL,                   /* truncate */
        NULL,                   /* permission */
-       NULL,                   /* revalidate */
+       NULL                    /* revalidate */
 };
index 9ba84dbf2cd6e4160e370a5bbc9a4a997d970502..00b5fd164d1cae54863a7167e6345c2ac57aae09 100644 (file)
@@ -62,4 +62,8 @@ struct flock {
        __kernel_pid_t l_pid;
 };
 
+#ifdef __KERNEL__
+#define flock64        flock
+#endif
+
 #endif
index 5ea0193be323ff934ac22027f902912614937600..2be0881d43cce91e108667ef082457fec642ae02 100644 (file)
@@ -176,20 +176,25 @@ extern struct pgtable_cache_struct {
 
 extern __inline__ pgd_t *get_pgd_slow(void)
 {
-       pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+       pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
        
        if (ret) {
-               init = pgd_offset(&init_mm, 0UL);
                memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-               memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
-               pgd_val(ret[PTRS_PER_PGD])
-                 = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+               pgd_val(ret[PTRS_PER_PGD]) =
+                       pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
        }
        return ret;
 }
 
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+       pgd_t *init;
+
+       init = pgd_offset(&init_mm, 0UL);
+       memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 extern __inline__ pgd_t *get_pgd_fast(void)
 {
        unsigned long *ret;
@@ -198,8 +203,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
-       } else
-               ret = (unsigned long *)get_pgd_slow();
+       }
        return (pgd_t *)ret;
 }
 
@@ -275,7 +279,6 @@ extern void __bad_pmd(pgd_t *pgd);
 #define pmd_free_kernel(pmd)   free_pmd_fast(pmd)
 #define pmd_free(pmd)          free_pmd_fast(pmd)
 #define pgd_free(pgd)          free_pgd_fast(pgd)
-#define pgd_alloc()            get_pgd_fast()
 
 extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
 {
@@ -320,18 +323,13 @@ extern int do_check_pgt_cache(int, int);
 
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
-       struct task_struct * p;
        pgd_t *pgd;
         
-       read_lock(&tasklist_lock);
-       for_each_task(p) {
-               if (!p->mm)
-                       continue;
-               *pgd_offset(p->mm,address) = entry;
-       }
-       read_unlock(&tasklist_lock);
+       mmlist_access_lock();
+       mmlist_set_pgdir(address, entry);
        for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
+       mmlist_access_unlock();
 }
 
 #endif /* _ALPHA_PGALLOC_H */
index 50967ef96141c6c9bd15aaace180ffa4ea45385a..357a44704954e4ddd8be0d84d45373d3f116a4dc 100644 (file)
@@ -12,6 +12,7 @@ typedef unsigned int  __kernel_ino_t;
 typedef unsigned int   __kernel_mode_t;
 typedef unsigned int   __kernel_nlink_t;
 typedef long           __kernel_off_t;
+typedef long           __kernel_loff_t;
 typedef int            __kernel_pid_t;
 typedef int            __kernel_ipc_pid_t;
 typedef unsigned int   __kernel_uid_t;
@@ -26,10 +27,6 @@ typedef int          __kernel_daddr_t;
 typedef char *         __kernel_caddr_t;
 typedef unsigned long  __kernel_sigset_t;      /* at least 32 bits */
 
-#ifdef __GNUC__
-typedef long long      __kernel_loff_t;
-#endif
-
 typedef struct {
        int     val[2];
 } __kernel_fsid_t;
index 234cbfa2f5499db96caeecb59562367c13b75c4b..0f2ddd20c9058b5498c1c7835936647033f1ff60 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.  Fine, it's unsigned, but
+ * we retain the old value for compatibility, especially with DU. 
+ * When you run into the 2^63 barrier, you call me.
+ */
+#define RLIM_INFINITY  0x7ffffffffffffffful
+
 #ifdef __KERNEL__
 
 #define INIT_RLIMITS                                                   \
index a0174f310cbbece79528e037776a17a24a9cee98..60f155085e65c37a0c6b09038fabf866de49a407 100644 (file)
 #define F_SETSIG       10      /*  for sockets. */
 #define F_GETSIG       11      /*  for sockets. */
 
+#define F_GETLK64      12      /*  using 'struct flock64' */
+#define F_SETLK64      13
+#define F_SETLKW64     14
+
 /* for F_[GET|SET]FL */
 #define FD_CLOEXEC     1       /* actually anything with low bit set goes */
 
@@ -62,4 +66,12 @@ struct flock {
        pid_t l_pid;
 };
 
+struct flock64 {
+       short  l_type;
+       short  l_whence;
+       loff_t l_start;
+       loff_t l_len;
+       pid_t  l_pid;
+};
+
 #endif
index b5c6ccc6fbf7d2c0d17ac70f68215db67a9e6512..aea5122a05e6289627a312c7e909ef790d87b99d 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
 #ifdef __KERNEL__
 
 #define INIT_RLIMITS                                   \
index 08df0f278bca06f129ed8dab2b9bd54665b1fc0c..86068d069773e15cab4bf26226ea1568246a6ef4 100644 (file)
@@ -132,8 +132,7 @@ extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
 
        if (!size)
                return 0;
-       __asm__("cld\n\t"
-               "movl $-1,%%eax\n\t"
+       __asm__("movl $-1,%%eax\n\t"
                "xorl %%edx,%%edx\n\t"
                "repe; scasl\n\t"
                "je 1f\n\t"
index eff29ac8f90e62cdb0b900b66054f4fa9b23b60c..5fbfc35fcfe054f54788c8319d6685b035549021 100644 (file)
 #define F_SETSIG       10      /*  for sockets. */
 #define F_GETSIG       11      /*  for sockets. */
 
+#define F_GETLK64      12      /*  using 'struct flock64' */
+#define F_SETLK64      13
+#define F_SETLKW64     14
+
 /* for F_[GET|SET]FL */
 #define FD_CLOEXEC     1       /* actually anything with low bit set goes */
 
@@ -62,4 +66,12 @@ struct flock {
        pid_t l_pid;
 };
 
+struct flock64 {
+       short  l_type;
+       short  l_whence;
+       loff_t l_start;
+       loff_t l_len;
+       pid_t  l_pid;
+};
+
 #endif
index 2e2d53ab626d0b6236de0351d2ca847f63ac2e7e..7a4e9facc0d4b11e1dcd161c6314020da44cda53 100644 (file)
@@ -71,12 +71,12 @@ __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i
 
 #define __INS(s) \
 extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
+{ __asm__ __volatile__ ("rep ; ins" #s \
 : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
 
 #define __OUTS(s) \
 extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
+{ __asm__ __volatile__ ("rep ; outs" #s \
 : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
 
 #define RETURN_TYPE unsigned char
index 3cdfac12fa09ca7e4da9ea9817b82c20e677485b..ac9a75f508f8ab1d30729d0d2f9b1caa4755216b 100644 (file)
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <linux/threads.h>
 
-#define pgd_quicklist (current_cpu_data.pgd_quick)
+extern unsigned long *pgd_quicklist;
 #define pmd_quicklist (current_cpu_data.pmd_quick)
 #define pte_quicklist (current_cpu_data.pte_quick)
 #define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
@@ -35,11 +35,16 @@ extern __inline__ pgd_t *get_pgd_slow(void)
 #else
                memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 #endif
-               memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return ret;
 }
 
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+       memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, 
+               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 extern __inline__ pgd_t *get_pgd_fast(void)
 {
        unsigned long *ret;
@@ -48,8 +53,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
-       } else
-               ret = (unsigned long *)get_pgd_slow();
+       }
        return (pgd_t *)ret;
 }
 
@@ -94,8 +98,7 @@ extern __inline__ void free_pte_slow(pte_t *pte)
 
 #define pte_free_kernel(pte)    free_pte_slow(pte)
 #define pte_free(pte)     free_pte_slow(pte)
-#define pgd_free(pgd)     free_pgd_slow(pgd)
-#define pgd_alloc()         get_pgd_fast()
+#define pgd_free(pgd)     free_pgd_fast(pgd)
 
 extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
 {
@@ -154,29 +157,14 @@ extern int do_check_pgt_cache(int, int);
 
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
-       struct task_struct * p;
        pgd_t *pgd;
-#ifdef __SMP__
-       int i;
-#endif 
-
-       read_lock(&tasklist_lock);
-       for_each_task(p) {
-               if (!p->mm)
-                       continue;
-               *pgd_offset(p->mm,address) = entry;
-       }
-       read_unlock(&tasklist_lock);
-#ifndef __SMP__
-       for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+
+       mmlist_access_lock();
+       mmlist_set_pgdir(address, entry);
+       for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned 
+                                                               long *)pgd)
                pgd[address >> PGDIR_SHIFT] = entry;
-#else
-       /* To pgd_alloc/pgd_free, one holds master kernel lock and so does our callee, so we can
-          modify pgd caches of other CPUs as well. -jj */
-       for (i = 0; i < NR_CPUS; i++)
-               for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
-                       pgd[address >> PGDIR_SHIFT] = entry;
-#endif
+       mmlist_access_unlock();
 }
 
 /*
index 878c637f7cea012afaf58309add49a65086efbbe..7f59238bb1922f7754b6fb5ee66941696b06c324 100644 (file)
@@ -45,7 +45,6 @@ struct cpuinfo_x86 {
        int     f00f_bug;
        int     coma_bug;
        unsigned long loops_per_sec;
-       unsigned long *pgd_quick;
        unsigned long *pmd_quick;
        unsigned long *pte_quick;
        unsigned long pgtable_cache_sz;
index 0f43dba41c98cfb3c1b42a431a230b57cc9010c4..e49c5b8c3453347a30def0896a7b91dd1d3ff06a 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
 #ifdef __KERNEL__
 
 #define INIT_RLIMITS                                   \
index 4c2531712e3c9b613a73a4726d6ef13d14ac9ccb..7c3ee1a1bfb34befe20b4c75b1288446625214b1 100644 (file)
@@ -187,7 +187,6 @@ extern inline char * strrchr(const char * s, int c)
 int    d0, d1;
 register char * __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "movb %%al,%%ah\n"
        "1:\tlodsb\n\t"
        "cmpb %%ah,%%al\n\t"
@@ -206,7 +205,6 @@ extern inline size_t strspn(const char * cs, const char * ct)
 int    d0, d1;
 register char * __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "movl %6,%%edi\n\t"
        "repne\n\t"
        "scasb\n\t"
@@ -234,7 +232,6 @@ extern inline size_t strcspn(const char * cs, const char * ct)
 int    d0, d1;
 register char * __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "movl %6,%%edi\n\t"
        "repne\n\t"
        "scasb\n\t"
@@ -263,7 +260,6 @@ extern inline char * strpbrk(const char * cs,const char * ct)
 int    d0, d1;
 register char * __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "movl %6,%%edi\n\t"
        "repne\n\t"
        "scasb\n\t"
@@ -296,7 +292,6 @@ extern inline char * strstr(const char * cs,const char * ct)
 int    d0, d1;
 register char * __res;
 __asm__ __volatile__(
-       "cld\n\t" \
        "movl %6,%%edi\n\t"
        "repne\n\t"
        "scasb\n\t"
@@ -378,7 +373,6 @@ __asm__ __volatile__(
        "1:\txorl %0,%0\n\t"
        "movl $-1,%%ecx\n\t"
        "xorl %%eax,%%eax\n\t"
-       "cld\n\t"
        "movl %4,%%edi\n\t"
        "repne\n\t"
        "scasb\n\t"
@@ -474,7 +468,6 @@ extern inline void * __memcpy_g(void * to, const void * from, size_t n)
 int    d0, d1, d2;
 register void *tmp = (void *)to;
 __asm__ __volatile__ (
-       "cld\n\t"
        "shrl $1,%%ecx\n\t"
        "jnc 1f\n\t"
        "movsb\n"
@@ -554,7 +547,6 @@ int d0, d1, d2;
 register void *tmp = (void *)dest;
 if (dest<src)
 __asm__ __volatile__ (
-       "cld\n\t"
        "rep\n\t"
        "movsb"
        :"=&c" (d0), "=&S" (d1), "=&D" (d2)
@@ -577,7 +569,6 @@ extern inline int memcmp(const void * cs,const void * ct,size_t count)
 int    d0, d1, d2;
 register int __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "repe\n\t"
        "cmpsb\n\t"
        "je 1f\n\t"
@@ -597,7 +588,6 @@ register void * __res;
 if (!count)
        return NULL;
 __asm__ __volatile__(
-       "cld\n\t"
        "repne\n\t"
        "scasb\n\t"
        "je 1f\n\t"
@@ -753,8 +743,7 @@ extern inline void * memscan(void * addr, int c, size_t size)
 {
        if (!size)
                return addr;
-       __asm__("cld
-               repnz; scasb
+       __asm__("repnz; scasb
                jnz 1f
                dec %%edi
 1:             "
index ea2e9f85d65bf40f2f9515435b66d73a9702994e..515ffa7d58246f9db9cb24324bae4e4946fdf1f1 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _I386_STRING_H_
 #define _I386_STRING_H_
 
+#ifdef __KERNEL__
 /*
  * On a 486 or Pentium, we are better off not using the
  * byte string operations. But on a 386 or a PPro the
@@ -32,7 +33,6 @@ extern inline char * strcpy(char * dest,const char *src)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
-       "cld\n"
        "1:\tlodsb\n\t"
        "stosb\n\t"
        "testb %%al,%%al\n\t"
@@ -47,7 +47,6 @@ extern inline char * strncpy(char * dest,const char *src,size_t count)
 {
 int d0, d1, d2, d3;
 __asm__ __volatile__(
-       "cld\n"
        "1:\tdecl %2\n\t"
        "js 2f\n\t"
        "lodsb\n\t"
@@ -67,7 +66,6 @@ extern inline char * strcat(char * dest,const char * src)
 {
 int d0, d1, d2, d3;
 __asm__ __volatile__(
-       "cld\n\t"
        "repne\n\t"
        "scasb\n\t"
        "decl %1\n"
@@ -85,7 +83,6 @@ extern inline char * strncat(char * dest,const char * src,size_t count)
 {
 int d0, d1, d2, d3;
 __asm__ __volatile__(
-       "cld\n\t"
        "repne\n\t"
        "scasb\n\t"
        "decl %1\n\t"
@@ -110,7 +107,6 @@ extern inline int strcmp(const char * cs,const char * ct)
 int d0, d1;
 register int __res;
 __asm__ __volatile__(
-       "cld\n"
        "1:\tlodsb\n\t"
        "scasb\n\t"
        "jne 2f\n\t"
@@ -132,7 +128,6 @@ extern inline int strncmp(const char * cs,const char * ct,size_t count)
 register int __res;
 int d0, d1, d2;
 __asm__ __volatile__(
-       "cld\n"
        "1:\tdecl %3\n\t"
        "js 2f\n\t"
        "lodsb\n\t"
@@ -156,7 +151,6 @@ extern inline char * strchr(const char * s, int c)
 int d0;
 register char * __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "movb %%al,%%ah\n"
        "1:\tlodsb\n\t"
        "cmpb %%ah,%%al\n\t"
@@ -176,7 +170,6 @@ extern inline char * strrchr(const char * s, int c)
 int d0, d1;
 register char * __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "movb %%al,%%ah\n"
        "1:\tlodsb\n\t"
        "cmpb %%ah,%%al\n\t"
@@ -194,7 +187,6 @@ extern inline size_t strlen(const char * s)
 int d0;
 register int __res;
 __asm__ __volatile__(
-       "cld\n\t"
        "repne\n\t"
        "scasb\n\t"
        "notl %0\n\t"
@@ -207,7 +199,6 @@ extern inline void * __memcpy(void * to, const void * from, size_t n)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
-       "cld\n\t"
        "rep ; movsl\n\t"
        "testb $2,%b4\n\t"
        "je 1f\n\t"
@@ -273,7 +264,6 @@ extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
        }
 #define COMMON(x) \
 __asm__ __volatile__( \
-       "cld\n\t" \
        "rep ; movsl" \
        x \
        : "=&c" (d0), "=&D" (d1), "=&S" (d2) \
@@ -343,13 +333,28 @@ extern __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
 
 #endif
 
+/*
+ * struct_cpy(x,y), copy structure *x into (matching structure) *y.
+ *
+ * We get link-time errors if the structure sizes do not match.
+ * There is no runtime overhead, it's all optimized away at
+ * compile time.
+ */
+extern void __struct_cpy_bug (void);
+
+#define struct_cpy(x,y)                        \
+({                                             \
+       if (sizeof(*(x)) != sizeof(*(y)))       \
+               __struct_cpy_bug;               \
+       memcpy(x, y, sizeof(*(x)));             \
+})
+
 #define __HAVE_ARCH_MEMMOVE
 extern inline void * memmove(void * dest,const void * src, size_t n)
 {
 int d0, d1, d2;
 if (dest<src)
 __asm__ __volatile__(
-       "cld\n\t"
        "rep\n\t"
        "movsb"
        : "=&c" (d0), "=&S" (d1), "=&D" (d2)
@@ -379,7 +384,6 @@ register void * __res;
 if (!count)
        return NULL;
 __asm__ __volatile__(
-       "cld\n\t"
        "repne\n\t"
        "scasb\n\t"
        "je 1f\n\t"
@@ -393,7 +397,6 @@ extern inline void * __memset_generic(void * s, char c,size_t count)
 {
 int d0, d1;
 __asm__ __volatile__(
-       "cld\n\t"
        "rep\n\t"
        "stosb"
        : "=&c" (d0), "=&D" (d1)
@@ -414,7 +417,6 @@ extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count
 {
 int d0, d1;
 __asm__ __volatile__(
-       "cld\n\t"
        "rep ; stosl\n\t"
        "testb $2,%b3\n\t"
        "je 1f\n\t"
@@ -475,7 +477,7 @@ extern inline void * __constant_c_and_count_memset(void * s, unsigned long patte
                        return s;
        }
 #define COMMON(x) \
-__asm__  __volatile__("cld\n\t" \
+__asm__  __volatile__( \
        "rep ; stosl" \
        x \
        : "=&c" (d0), "=&D" (d1) \
@@ -518,8 +520,7 @@ extern inline void * memscan(void * addr, int c, size_t size)
 {
        if (!size)
                return addr;
-       __asm__("cld
-               repnz; scasb
+       __asm__("repnz; scasb
                jnz 1f
                dec %%edi
 1:             "
@@ -528,5 +529,7 @@ extern inline void * memscan(void * addr, int c, size_t size)
        return addr;
 }
 
+#endif /* __KERNEL__ */
+
 #endif
 #endif
index 2235bd5bb7ede5a7ff857181fd9e469945ea5f4c..365958e258398c192615efe6350890de377f3067 100644 (file)
 #define F_SETSIG       10      /*  for sockets. */
 #define F_GETSIG       11      /*  for sockets. */
 
+#define F_GETLK64      12      /*  using 'struct flock64' */
+#define F_SETLK64      13
+#define F_SETLKW64     14
+
 /* for F_[GET|SET]FL */
 #define FD_CLOEXEC     1       /* actually anything with low bit set goes */
 
@@ -60,4 +64,12 @@ struct flock {
        pid_t l_pid;
 };
 
+struct flock64 {
+       short  l_type;
+       short  l_whence;
+       loff_t l_start;
+       loff_t l_len;
+       pid_t  l_pid;
+};
+
 #endif /* _M68K_FCNTL_H */
index 11376a494017e04c031723bf7742c48b949f0e0c..f2a6f0f410ad196b726b0b2e0f9124b20d42ca4b 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
 #ifdef __KERNEL__
 
 #define INIT_RLIMITS   \
index 2c9088a36255126b9f348f0a4d6eec44bc1e0f74..0d9909b6dbfd1f2fd07b0b3c22f63e05e7a3f11d 100644 (file)
 #define F_SETSIG       10      /*  for sockets. */
 #define F_GETSIG       11      /*  for sockets. */
 
+#define F_GETLK64      12      /*  using 'struct flock64' */
+#define F_SETLK64      13
+#define F_SETLKW64     14
+
 /* for F_[GET|SET]FL */
 #define FD_CLOEXEC     1       /* actually anything with low bit set goes */
 
@@ -73,4 +77,12 @@ typedef struct flock {
        long  pad[4];                   /* ZZZZZZZZZZZZZZZZZZZZZZZZZZ */
 } flock_t;
 
+typedef struct flock64 {
+       short  l_type;
+       short  l_whence;
+       loff_t l_start;
+       loff_t l_len;
+       pid_t  l_pid;
+} flock64_t;
+
 #endif /* __ASM_MIPS_FCNTL_H */
index 2048867038f8791f4e0284cff3ab33ac09a26d85..ca3f76f6160a58299d8492d076763f75c2a60127 100644 (file)
@@ -404,17 +404,22 @@ extern void (*pgd_init)(unsigned long page);
 
 extern __inline__ pgd_t *get_pgd_slow(void)
 {
-       pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+       pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
 
-       if (ret) {
-               init = pgd_offset(&init_mm, 0);
+       if (ret)
                pgd_init((unsigned long)ret);
-               memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-       }
        return ret;
 }
 
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+       pgd_t *init;
+
+       init = pgd_offset(&init_mm, 0);
+       memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 extern __inline__ pgd_t *get_pgd_fast(void)
 {
        unsigned long *ret;
@@ -423,8 +428,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
-       } else
-               ret = (unsigned long *)get_pgd_slow();
+       }
        return (pgd_t *)ret;
 }
 
@@ -487,7 +491,6 @@ extern void __bad_pte_kernel(pmd_t *pmd);
 #define pte_free_kernel(pte)    free_pte_fast(pte)
 #define pte_free(pte)           free_pte_fast(pte)
 #define pgd_free(pgd)           free_pgd_fast(pgd)
-#define pgd_alloc()             get_pgd_fast()
 
 extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
 {
@@ -547,19 +550,13 @@ extern int do_check_pgt_cache(int, int);
 
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
-       struct task_struct * p;
        pgd_t *pgd;
 #ifdef __SMP__
        int i;
 #endif 
-        
-       read_lock(&tasklist_lock);
-       for_each_task(p) {
-               if (!p->mm)
-                       continue;
-               *pgd_offset(p->mm,address) = entry;
-       }
-       read_unlock(&tasklist_lock);
+       mmlist_access_lock();
+       mmlist_set_pgdir(address, entry);       
 #ifndef __SMP__
        for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                pgd[address >> PGDIR_SHIFT] = entry;
@@ -570,6 +567,7 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry)
                for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                        pgd[address >> PGDIR_SHIFT] = entry;
 #endif
+       mmlist_access_unlock();
 }
 
 extern pgd_t swapper_pg_dir[1024];
index b0596d63abcd55199f4c421ac53192c7eb7f3233..48bf48e1387194132889fd1fa7346bfa754cdc6b 100644 (file)
 
 #define RLIM_NLIMITS 10                        /* Number of limit flavors.  */
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
 #ifdef __KERNEL__
 
 #define INIT_RLIMITS                                   \
index c1b149d47345bb1187b979584c77430d696ff770..3409d6923c995a46c3279f06ccfaee830882e82d 100644 (file)
 #define F_SETSIG       10      /*  for sockets. */
 #define F_GETSIG       11      /*  for sockets. */
 
+#define F_GETLK64      12      /*  using 'struct flock64' */
+#define F_SETLK64      13
+#define F_SETLKW64     14
+
 /* for F_[GET|SET]FL */
 #define FD_CLOEXEC     1       /* actually anything with low bit set goes */
 
@@ -68,4 +72,12 @@ struct flock {
        pid_t l_pid;
 };
 
+struct flock64 {
+       short  l_type;
+       short  l_whence;
+       loff_t l_start;
+       loff_t l_len;
+       pid_t  l_pid;
+};
+
 #endif
index 869a0e14392a758cf546fbf83f408e1c2516138e..9ad968cdccb5523443d7aa6f96a1d671b4df489c 100644 (file)
@@ -52,19 +52,13 @@ extern void __bad_pte(pmd_t *pmd);
 
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
-       struct task_struct * p;
        pgd_t *pgd;
 #ifdef __SMP__
        int i;
 #endif 
-        
-       read_lock(&tasklist_lock);
-       for_each_task(p) {
-               if (!p->mm)
-                       continue;
-               *pgd_offset(p->mm,address) = entry;
-       }
-       read_unlock(&tasklist_lock);
+       mmlist_access_lock();
+       mmlist_set_pgdir(address, entry);       
 #ifndef __SMP__
        for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                pgd[address >> PGDIR_SHIFT] = entry;
@@ -75,6 +69,7 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry)
                for (pgd = (pgd_t *)cpu_data[i].pgd_cache; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                        pgd[address >> PGDIR_SHIFT] = entry;
 #endif
+       mmlist_access_unlock();
 }
 
 /* We don't use pmd cache, so this is a dummy routine */
@@ -110,18 +105,22 @@ extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
 
 extern __inline__ pgd_t *get_pgd_slow(void)
 {
-       pgd_t *ret, *init;
-       /*if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )*/
-       if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
+       pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+       if (ret)
                memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-       if (ret) {
-               init = pgd_offset(&init_mm, 0);
-               memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-       }
        return ret;
 }
 
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+       pgd_t *init;
+
+       init = pgd_offset(&init_mm, 0);
+       memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 extern __inline__ pgd_t *get_pgd_fast(void)
 {
         unsigned long *ret;
@@ -130,8 +129,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
                 pgd_quicklist = (unsigned long *)(*ret);
                 ret[0] = 0;
                 pgtable_cache_size--;
-        } else
-                ret = (unsigned long *)get_pgd_slow();
+        }
         return (pgd_t *)ret;
 }
 
@@ -176,7 +174,6 @@ extern __inline__ void free_pte_slow(pte_t *pte)
 #define pte_free_kernel(pte)    free_pte_fast(pte)
 #define pte_free(pte)           free_pte_fast(pte)
 #define pgd_free(pgd)           free_pgd_fast(pgd)
-#define pgd_alloc()             get_pgd_fast()
 
 extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
 {
index 9f565771a7dc048c7832a952101d1d8734ec1ee5..c27aa85910c3319b6d9812890cdb4dfb3142e8f4 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
+
 #ifdef __KERNEL__
 
 #define INIT_RLIMITS                                                   \
index 5ecc358e9645f521d0a424aefbeb173812ea3c6f..beb6597b3ef505dc6a66392c6d44c1db24bde609 100644 (file)
 #define F_SETSIG       10      /*  for sockets. */
 #define F_GETSIG       11      /*  for sockets. */
 
+#define F_GETLK64      12      /* for LFS */
+#define F_SETLK64      13
+#define F_SETLKW64     14
+
 /* for F_[GET|SET]FL */
 #define FD_CLOEXEC     1       /* actually anything with low bit set goes */
 
@@ -62,5 +66,13 @@ struct flock {
        pid_t l_pid;
 };
 
+struct flock64 {
+       short l_type;
+       short l_whence;
+       loff_t l_start;
+       loff_t l_len;
+       pid_t l_pid;
+};
+
 #endif /* __ASM_SH_FCNTL_H */
 
index 97a9a3ad8fb5ab3fd48946596775c50d0857ee26..3d50bc85f1bb1c2cef5adfbc309d197ba530d4da 100644 (file)
@@ -273,18 +273,18 @@ extern __inline__ pgd_t *get_pgd_slow(void)
 {
        pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
 
-       if (ret) {
-               /* Clear User space */
+       if (ret)
                memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-
-               /* XXX: Copy vmalloc-ed space??? */
-               memcpy(ret + USER_PTRS_PER_PGD,
-                      swapper_pg_dir + USER_PTRS_PER_PGD,
-                      (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-       }
        return ret;
 }
 
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+       /* XXX: Copy vmalloc-ed space??? */
+       memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
+               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 extern __inline__ pgd_t *get_pgd_fast(void)
 {
        unsigned long *ret;
@@ -293,8 +293,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
-       } else
-               ret = (unsigned long *)get_pgd_slow();
+       }
        return (pgd_t *)ret;
 }
 
@@ -340,7 +339,6 @@ extern __inline__ void free_pte_slow(pte_t *pte)
 #define pte_free_kernel(pte)    free_pte_slow(pte)
 #define pte_free(pte)           free_pte_slow(pte)
 #define pgd_free(pgd)           free_pgd_slow(pgd)
-#define pgd_alloc()             get_pgd_fast()
 
 extern __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
 {
@@ -398,18 +396,13 @@ extern int do_check_pgt_cache(int, int);
 
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
-       struct task_struct * p;
        pgd_t *pgd;
         
-       read_lock(&tasklist_lock);
-       for_each_task(p) {
-               if (!p->mm)
-                       continue;
-               *pgd_offset(p->mm,address) = entry;
-       }
-       read_unlock(&tasklist_lock);
+       mmlist_access_lock();
+       mmlist_set_pgdir(address, entry);
        for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                pgd[address >> PGDIR_SHIFT] = entry;
+       mmlist_access_unlock();
 }
 
 extern pgd_t swapper_pg_dir[1024];
index 857c371666061f5084ffee49099faa7777479ced..ef89ec7e663232fd21ab93db1396f11fefb74c86 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
 #ifdef __KERNEL__
 
 #define INIT_RLIMITS                                   \
index 466cfc1eab2e437399a3dc542b73e5c06fb5308c..ff079180f9ad4f505893b9c0cfdf72ae404e67e9 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
 #ifdef __KERNEL__
 #define INIT_RLIMITS                           \
 {                                              \
index e0352b8d921b88b35a57492735c0ebc2f0a7bcc5..c7a023d8ba43bba1bfac83da76e5f80a657707b8 100644 (file)
@@ -70,6 +70,8 @@ struct flock32 {
        __kernel_pid_t32 l_pid;
        short __unused;
 };
+
+#define flock64 flock
 #endif
 
 #endif /* !(_SPARC64_FCNTL_H) */
index 5b31257da022b9e54ff68e991ec5838a6552c3bb..545d2dd760800782c19eaa413ff60e193b59239f 100644 (file)
@@ -380,21 +380,28 @@ extern __inline__ pgd_t *get_pgd_fast(void)
                        pgd_quicklist = (unsigned long *)ret->next_hash;
                 ret = (struct page *)(page_address(ret) + off);
                 pgd_cache_size--;
-        } else {
-               ret = (struct page *) __get_free_page(GFP_KERNEL);
-               if(ret) {
-                       struct page *page = mem_map + MAP_NR(ret);
-                       
-                       memset(ret, 0, PAGE_SIZE);
-                       (unsigned long)page->pprev_hash = 2;
-                       (unsigned long *)page->next_hash = pgd_quicklist;
-                       pgd_quicklist = (unsigned long *)page;
-                       pgd_cache_size++;
-               }
         }
         return (pgd_t *)ret;
 }
 
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+       pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+
+       if(ret)
+               memset(ret, 0, PAGE_SIZE);
+       return (pgd_t *)ret;
+}
+
+extern __inline__ pgd_t *get_pgd_uptodate(pgd_t *pgd)
+{
+       struct page *page = mem_map + MAP_NR(pgd);
+
+       (unsigned long)page->pprev_hash = 2;
+       (unsigned long *)page->next_hash = pgd_quicklist;
+       pgd_quicklist = (unsigned long *)page;
+       pgd_cache_size++;
+}
 #else /* __SMP__ */
 
 extern __inline__ void free_pgd_fast(pgd_t *pgd)
@@ -412,14 +419,23 @@ extern __inline__ pgd_t *get_pgd_fast(void)
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
-       } else {
-               ret = (unsigned long *) __get_free_page(GFP_KERNEL);
-               if(ret)
-                       memset(ret, 0, PAGE_SIZE);
        }
        return (pgd_t *)ret;
 }
 
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+       pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+       if(ret)
+               memset(ret, 0, PAGE_SIZE);
+       return(ret);
+}
+
+extern __inline__ pgd_t *get_pgd_uptodate(pgd_t *pgd)
+{
+}
+
 extern __inline__ void free_pgd_slow(pgd_t *pgd)
 {
        free_page((unsigned long)pgd);
@@ -484,7 +500,6 @@ extern __inline__ void free_pte_slow(pte_t *pte)
 #define pmd_free_kernel(pmd)   free_pmd_fast(pmd)
 #define pmd_free(pmd)          free_pmd_fast(pmd)
 #define pgd_free(pgd)          free_pgd_fast(pgd)
-#define pgd_alloc()            get_pgd_fast()
 
 extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
 {
index a8ecef2a7753f2a5351c5bb6642a100b573b1e66..0bd968298089c404b94371980534c91ccb8c6d04 100644 (file)
 
 #define RLIM_NLIMITS   10
 
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY  (~0UL)
+
 #ifdef __KERNEL__
 #define INIT_RLIMITS                           \
 {                                              \
index 1f87c99b7d465170210eb11348628df14a13960a..d2ae68864e2b0209cce923fd5606e3c2beed5c8b 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * AGPGART module version 0.99
  * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -103,7 +103,7 @@ typedef struct _agp_memory {
 extern void agp_free_memory(agp_memory *);
 
 /*
- * void agp_free_memory(agp_memory *curr) :
+ * agp_free_memory :
  * 
  * This function frees memory associated with
  * an agp_memory pointer.  It is the only function
@@ -118,7 +118,7 @@ extern void agp_free_memory(agp_memory *);
 extern agp_memory *agp_allocate_memory(size_t, u32);
 
 /*
- * agp_memory *agp_allocate_memory(size_t page_count, u32 type) :
+ * agp_allocate_memory :
  * 
  * This function allocates a group of pages of
  * a certain type.
@@ -136,7 +136,7 @@ extern agp_memory *agp_allocate_memory(size_t, u32);
 extern void agp_copy_info(agp_kern_info *);
 
 /*
- * void agp_copy_info(agp_kern_info *info) :
+ * agp_copy_info :
  * 
  * This function copies information about the
  * agp bridge device and the state of the agp
@@ -151,7 +151,7 @@ extern void agp_copy_info(agp_kern_info *);
 extern int agp_bind_memory(agp_memory *, off_t);
 
 /*
- * int agp_bind_memory(agp_memory *curr, off_t pg_start) :
+ * agp_bind_memory :
  * 
  * This function binds an agp_memory structure
  * into the graphics aperture translation table.
@@ -168,7 +168,7 @@ extern int agp_bind_memory(agp_memory *, off_t);
 extern int agp_unbind_memory(agp_memory *);
 
 /* 
- * int agp_unbind_memory(agp_memory *curr) :
+ * agp_unbind_memory :
  * 
  * This function removes an agp_memory structure
  * from the graphics aperture translation table.
@@ -185,7 +185,7 @@ extern int agp_unbind_memory(agp_memory *);
 extern void agp_enable(u32);
 
 /* 
- * void agp_enable(u32 mode) :
+ * agp_enable :
  * 
  * This function initializes the agp point-to-point
  * connection.
@@ -197,7 +197,7 @@ extern void agp_enable(u32);
 extern int agp_backend_acquire(void);
 
 /*
- * int agp_backend_acquire(void) :
+ * agp_backend_acquire :
  * 
  * This Function attempts to acquire the agp
  * backend.
@@ -209,7 +209,7 @@ extern int agp_backend_acquire(void);
 extern void agp_backend_release(void);
 
 /*
- * void agp_backend_release(void) :
+ * agp_backend_release :
  * 
  * This Function releases the lock on the agp
  * backend.
index cc9dc84898dbc6f95b69542e47f4b437b51125a0..86617386e55df8f1b4c4163794b5b95e7f5d7df3 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * AGPGART module version 0.99
  * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
index d0db1bce698c855487d9873e0d543ea519d17e31..594f891afb5bcf86fe8b9da021c3d80175908447 100644 (file)
@@ -157,9 +157,7 @@ static void floppy_off(unsigned int nr);
 #elif (SCSI_DISK_MAJOR(MAJOR_NR))
 
 #define DEVICE_NAME "scsidisk"
-#define DEVICE_INTR do_sd  
 #define TIMEOUT_VALUE (2*HZ)
-#define DEVICE_REQUEST do_sd_request
 #define DEVICE_NR(device) (((MAJOR(device) & SD_MAJOR_MASK) << (8 - 4)) + (MINOR(device) >> 4))
 #define DEVICE_ON(device)
 #define DEVICE_OFF(device)
@@ -184,8 +182,6 @@ static void floppy_off(unsigned int nr);
 #elif (MAJOR_NR == SCSI_CDROM_MAJOR)
 
 #define DEVICE_NAME "CD-ROM"
-#define DEVICE_INTR do_sr
-#define DEVICE_REQUEST do_sr_request
 #define DEVICE_NR(device) (MINOR(device))
 #define DEVICE_ON(device)
 #define DEVICE_OFF(device)
@@ -387,7 +383,7 @@ static void floppy_off(unsigned int nr);
 #if !defined(IDE_DRIVER)
 
 #ifndef CURRENT
-#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#define CURRENT (blk_dev[MAJOR_NR].request_queue.current_request)
 #endif
 
 #ifndef DEVICE_NAME
@@ -421,7 +417,9 @@ else \
 
 #endif /* DEVICE_TIMEOUT */
 
-static void (DEVICE_REQUEST)(void);
+#ifdef DEVICE_REQUEST
+static void (DEVICE_REQUEST)(request_queue_t *);
+#endif 
   
 #ifdef DEVICE_INTR
 #define CLEAR_INTR SET_INTR(NULL)
index 0567e080ead5426ee0a15072c0514eabb50e0bcb..77d31cc9a6732efd1183d075d1a08d66d98c2bda 100644 (file)
@@ -27,6 +27,7 @@ struct request {
        unsigned long nr_sectors;
        unsigned long nr_segments;
        unsigned long current_nr_sectors;
+       void * special;
        char * buffer;
        struct semaphore * sem;
        struct buffer_head * bh;
@@ -34,19 +35,57 @@ struct request {
        struct request * next;
 };
 
-typedef void (request_fn_proc) (void);
-typedef struct request ** (queue_proc) (kdev_t dev);
+typedef struct request_queue request_queue_t;
+typedef int (merge_request_fn) (request_queue_t *, 
+                               struct request  * req,
+                               struct buffer_head *);
+typedef int (merge_requests_fn) (request_queue_t *, 
+                                struct request  * req,
+                                struct request  * req2);
+typedef void (request_fn_proc) (request_queue_t *);
+typedef request_queue_t * (queue_proc) (kdev_t dev);
+
+struct request_queue
+{
+       struct request          * current_request;
+       request_fn_proc         * request_fn;
+       merge_request_fn        * merge_fn;
+       merge_requests_fn       * merge_requests_fn;
+       /*
+        * The queue owner gets to use this for whatever they like.
+        * ll_rw_blk doesn't touch it.
+        */
+       void                    * queuedata;
+
+       /*
+        * This is used to remove the plug when tq_disk runs.
+        */
+       struct tq_struct          plug_tq;
+       /*
+        * Boolean that indicates whether this queue is plugged or not.
+        */
+       char                      plugged;
+
+       /*
+        * Boolean that indicates whether current_request is active or
+        * not.
+        */
+       char                      head_active;
+
+       /*
+        * Boolean that indicates whether we should use plugging on
+        * this queue or not.
+        */
+       char                      use_plug; 
+};
 
 struct blk_dev_struct {
-       request_fn_proc         *request_fn;
        /*
         * queue_proc has to be atomic
         */
+       request_queue_t         request_queue;
        queue_proc              *queue;
        void                    *data;
-       struct request          *current_request;
-       struct request   plug;
-       struct tq_struct plug_tq;
 };
 
 struct sec_size {
@@ -54,6 +93,14 @@ struct sec_size {
        unsigned block_size_bits;
 };
 
+/*
+ * Used to indicate the default queue for drivers that don't bother
+ * to implement multiple queues.  We have this access macro here
+ * so as to eliminate the need for each and every block device
+ * driver to know about the internal structure of blk_dev[].
+ */
+#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue
+
 extern struct sec_size * blk_sec[MAX_BLKDEV];
 extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
 extern wait_queue_head_t wait_for_request;
@@ -61,6 +108,14 @@ extern void resetup_one_dev(struct gendisk *dev, int drive);
 extern void unplug_device(void * data);
 extern void make_request(int major,int rw, struct buffer_head * bh);
 
+/*
+ * Access functions for manipulating queue properties
+ */
+extern void blk_init_queue(request_queue_t *, request_fn_proc *);
+extern void blk_cleanup_queue(request_queue_t *);
+extern void blk_queue_headactive(request_queue_t *, int);
+extern void blk_queue_pluggable(request_queue_t *, int);
+
 /* md needs this function to remap requests */
 extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
 extern int md_make_request (int minor, int rw, struct buffer_head * bh);
index 1ed6fe5e131804e180c0498e018d295ba41690da..836038e59d6e2af1b939757e67bd5a64d33d0905 100644 (file)
@@ -615,6 +615,7 @@ extern struct inode_operations ext2_file_inode_operations;
 
 /* symlink.c */
 extern struct inode_operations ext2_symlink_inode_operations;
+extern struct inode_operations ext2_fast_symlink_inode_operations;
 
 #endif /* __KERNEL__ */
 
index 9112e5f963ba0fc2d50b78a5c13eaecf2a949721..f94cd91a356b8bc7a2186c48db9dc2a113993da6 100644 (file)
@@ -478,8 +478,8 @@ struct file_lock {
        struct file *fl_file;
        unsigned char fl_flags;
        unsigned char fl_type;
-       off_t fl_start;
-       off_t fl_end;
+       loff_t fl_start;
+       loff_t fl_end;
 
        void (*fl_notify)(struct file_lock *);  /* unblock callback */
 
@@ -495,6 +495,9 @@ extern struct file_lock                     *file_lock_table;
 extern int fcntl_getlk(unsigned int, struct flock *);
 extern int fcntl_setlk(unsigned int, unsigned int, struct flock *);
 
+extern int fcntl_getlk64(unsigned int fd, struct flock64 *l);
+extern int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l);
+
 /* fs/locks.c */
 extern void locks_remove_posix(struct file *, fl_owner_t);
 extern void locks_remove_flock(struct file *);
@@ -946,12 +949,24 @@ extern int block_read_full_page(struct dentry *, struct page *);
 extern int block_write_full_page (struct dentry *, struct page *);
 extern int block_write_partial_page (struct file *, struct page *, unsigned long, unsigned long, const char *);
 extern int block_write_cont_page (struct file *, struct page *, unsigned long, unsigned long, const char *);
+extern int block_write_zero_range(struct inode *, struct page *, unsigned, unsigned, unsigned, const char *);
+extern inline int block_write_range(struct inode *inode, struct page *page,
+                               unsigned from, unsigned len,const char *buf) 
+{
+       return block_write_zero_range(inode, page, from, from, from+len, buf);
+}
 extern int block_flushpage(struct page *, unsigned long);
+extern int block_symlink(struct inode *, const char *, int);
 
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
 extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *, writepage_t);
-extern void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor);
+extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t);
+
+extern int vfs_readlink(struct dentry *, char *, int, char *);
+extern struct dentry *vfs_follow_link(struct dentry *, struct dentry *, unsigned, char *);
+extern int page_readlink(struct dentry *, char *, int);
+extern struct dentry *page_follow_link(struct dentry *, struct dentry *, unsigned);
 
 extern struct super_block *get_super(kdev_t);
 struct super_block *get_empty_super(void);
index 67376905e81e75fb1760beaaf521b04154979c89..cd813e9c6e6a4154756bf7c359ef5619227ab9f3 100644 (file)
@@ -231,7 +231,7 @@ typedef union {
 } special_t;
 
 typedef struct ide_drive_s {
-       struct request          *queue; /* request queue */
+       request_queue_t          queue; /* request queue */
        struct ide_drive_s      *next;  /* circular list of hwgroup drives */
        unsigned long sleep;            /* sleep until this time */
        unsigned long service_start;    /* time we started last request */
@@ -744,7 +744,7 @@ void ide_stall_queue (ide_drive_t *drive, unsigned long timeout);
 /*
  * ide_get_queue() returns the queue which corresponds to a given device.
  */
-struct request **ide_get_queue (kdev_t dev);
+request_queue_t *ide_get_queue (kdev_t dev);
 
 /*
  * CompactFlash cards and their brethern pretend to be removable hard disks,
@@ -757,33 +757,33 @@ int  ide_spin_wait_hwgroup(ide_drive_t *drive, unsigned long *flags);
 void ide_timer_expiry (unsigned long data);
 void ide_intr (int irq, void *dev_id, struct pt_regs *regs);
 void ide_geninit (struct gendisk *gd);
-void do_ide0_request (void);
+void do_ide0_request (request_queue_t * q);
 #if MAX_HWIFS > 1
-void do_ide1_request (void);
+void do_ide1_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 2
-void do_ide2_request (void);
+void do_ide2_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 3
-void do_ide3_request (void);
+void do_ide3_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 4
-void do_ide4_request (void);
+void do_ide4_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 5
-void do_ide5_request (void);
+void do_ide5_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 6
-void do_ide6_request (void);
+void do_ide6_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 7
-void do_ide7_request (void);
+void do_ide7_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 8
-void do_ide8_request (void);
+void do_ide8_request (request_queue_t * q);
 #endif
 #if MAX_HWIFS > 9
-void do_ide9_request (void);
+void do_ide9_request (request_queue_t * q);
 #endif
 void ide_init_subdrivers (void);
 
index fabab7ab70724001e36518a19a0603e1176d9940..be8018e8cdb3296f9758c69d88fb10be8a503839 100644 (file)
@@ -154,8 +154,10 @@ typedef struct page {
 #define PG_error                1
 #define PG_referenced           2
 #define PG_uptodate             3
+#define PG__unused_00           4
 #define PG_decr_after           5
-#define PG_DMA                  7
+#define PG_unused_01            6
+#define PG__unused_02           7
 #define PG_slab                         8
 #define PG_swap_cache           9
 #define PG_skip                        10
@@ -181,7 +183,7 @@ typedef struct page {
 #define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
 #define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
 #define PageDecrAfter(page)    test_bit(PG_decr_after, &(page)->flags)
-#define PageDMA(page)          test_bit(PG_DMA, &(page)->flags)
+#define PageDMA(page)          (contig_page_data.node_zones + ZONE_DMA == (page)->zone)
 #define PageSlab(page)         test_bit(PG_slab, &(page)->flags)
 #define PageSwapCache(page)    test_bit(PG_swap_cache, &(page)->flags)
 #define PageReserved(page)     test_bit(PG_reserved, &(page)->flags)
@@ -218,8 +220,8 @@ typedef struct page {
  * PG_reserved is set for a page which must never be accessed (which
  * may not even be present).
  *
- * PG_DMA is set for those pages which lie in the range of
- * physical addresses capable of carrying DMA transfers.
+ * PG_DMA has been removed, page->zone now tells exactly wether the
+ * page is suited to do DMAing into.
  *
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
@@ -518,6 +520,25 @@ extern struct vm_area_struct *find_extend_vma(struct task_struct *tsk, unsigned
 #define vmlist_modify_lock(mm)         vmlist_access_lock(mm)
 #define vmlist_modify_unlock(mm)       vmlist_access_unlock(mm)
 
+extern spinlock_t mm_lock;
+#define mmlist_access_lock()           spin_lock(&mm_lock)
+#define mmlist_access_unlock()         spin_unlock(&mm_lock)
+#define mmlist_modify_lock()           mmlist_access_lock()
+#define mmlist_modify_unlock()         mmlist_access_unlock()
+
+#define for_each_mm(mm) \
+       for (mm = list_entry(init_mm.mmlist.next, struct mm_struct, mmlist); \
+               (mm != &init_mm);  \
+               (mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist)))
+
+static inline void mmlist_set_pgdir(unsigned long address, pgd_t entry)
+{
+       struct mm_struct *mm;
+
+       for_each_mm(mm)
+               *pgd_offset(mm,address) = entry;
+}
+
 #endif /* __KERNEL__ */
 
 #endif
index a0fc64c887a81073ade8f3fbf8d6858f6918a655..ce2d3aa98a0991c32b344ddb2276062da0afc192 100644 (file)
@@ -102,6 +102,7 @@ struct module_info
 #define MOD_VISITED            8
 #define MOD_USED_ONCE          16
 #define MOD_JUST_FREED         32
+#define MOD_INITIALIZING       64
 
 /* Values for query_module's which.  */
 
@@ -111,6 +112,9 @@ struct module_info
 #define QM_SYMBOLS     4
 #define QM_INFO                5
 
+/* Can the module be queried? */
+#define MOD_CAN_QUERY(mod) (((mod)->flags & (MOD_RUNNING | MOD_INITIALIZING)) && !((mod)->flags & MOD_DELETED))
+
 /* When struct module is extended, we must test whether the new member
    is present in the header received from insmod before we can use it.  
    This function returns true if the member is present.  */
index 0303ea57f4fbe198cda4b58023efe8b8bec82bd5..25b95ee449d40943f8a49e830baf9dd1c8191c52 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef LINUX_NFSD_NFSD_H
 #define LINUX_NFSD_NFSD_H
 
+#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/unistd.h>
 #include <linux/dirent.h>
index 726e8f27f580912bc9b5cdd7f409ee6dc6984bf8..ac73ca4096e8ec6223f047cca981b59232669c4d 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <asm/types.h>
 #ifdef __KERNEL__
+# include <linux/config.h>
 # include <linux/types.h>
 # include <linux/string.h>
 # include <linux/fs.h>
index 811c7e81dff98fbd2424f08226346d57123cdfc1..898ef7105af69caae3cf6d2ea69d9bcf111e5d46 100644 (file)
@@ -26,7 +26,6 @@ struct ntfs_sb_info{
        int clusterfactor;
        int clustersize;
        int mft_recordsize;
-       int mft_recordbits;
        int mft_clusters_per_record;
        int index_recordsize;
        int index_clusters_per_record;
index 5b38bb9e95b87da563d6203ad2227808fd6a9e79..0b7585f25d4b8dfdba54df8420b2756e9fe7cfc9 100644 (file)
@@ -37,13 +37,6 @@ struct       rusage {
        long    ru_nivcsw;              /* involuntary " */
 };
 
-/*
- * SuS says limits have to be unsigned.
- *
- * Which makes a ton more sense anyway.
- */
-#define RLIM_INFINITY  (~0UL)
-
 struct rlimit {
        unsigned long   rlim_cur;
        unsigned long   rlim_max;
index 36590d49cbdc8582f99e1e28d1f118c8a4098ed0..7a6e336051fb98f079b87ee489a34bbc400df0e1 100644 (file)
@@ -223,6 +223,7 @@ struct mm_struct {
        unsigned long cpu_vm_mask;
        unsigned long swap_cnt; /* number of pages to swap on next pass */
        unsigned long swap_address;
+       struct list_head mmlist;                /* active mm list */
        /*
         * This is an architecture-specific pointer: the portable
         * part of Linux does not know about any segments.
@@ -241,7 +242,8 @@ struct mm_struct {
                0, 0, 0,                                \
                0, 0, 0, 0,                             \
                0, 0, 0,                                \
-               0, 0, 0, 0, NULL }
+               0, 0, 0, 0,                             \
+               LIST_HEAD_INIT(init_mm.mmlist), NULL }
 
 struct signal_struct {
        atomic_t                count;
index d938298a3c11ffb1e54c9a9a04508fd2e55ca65c..b650d7897f14325921d8139e7a2e29cf9469b2bb 100644 (file)
@@ -563,6 +563,7 @@ extern void ufs_write_super (struct super_block *);
 
 /* symlink.c */
 extern struct inode_operations ufs_symlink_inode_operations;
+extern struct inode_operations ufs_fast_symlink_inode_operations;
 
 /* truncate.c */
 extern void ufs_truncate (struct inode *);
index 395226a23c443d47cc29c19c2d2d8c27a1174984..d4a675e78f573f8f9ccb34701c4e6eb24a9eb1f4 100644 (file)
@@ -2,7 +2,7 @@
 #define LINUX_UMSDOS_FS_H
 
 
-#define UMS_DEBUG 1    /* define for check_* functions */
+/*#define UMS_DEBUG 1  // define for check_* functions */
 /*#define UMSDOS_DEBUG 1*/
 #define UMSDOS_PARANOIA 1
 
index 96135ec0fbe5afc315d7dfbb1f3b7634ca92bd1f..e123efb102e007ec84bc2b3eeeaf6e9c2c961741 100644 (file)
@@ -47,15 +47,13 @@ struct dir_locking_info {
                                 *  Only one at a time, although one
                                 *  may recursively lock, so it is a counter
                                 */
-       long pid;               /* pid of the process owning the creation */
-       /* lock */
+       long pid;               /* pid of the process owning the creation
+                                * lock */
 };
 
 struct umsdos_inode_info {
-       union {
-               struct msdos_inode_info msdos_info;
-               struct dir_locking_info dir_info;
-       } u;
+       struct msdos_inode_info msdos_info;
+       struct dir_locking_info dir_info;
        int i_patched;                  /* Inode has been patched */
        int i_is_hlink;                 /* Resolved hardlink inode? */
        unsigned long i_emd_owner;      /* Is this the EMD file inode? */
index 086ad3681cee09416d0758cf032992a23a239938..76e63aa75aeb60a323e54c3492df12d6e8bf62aa 100644 (file)
 #define LOG_SELECT            0x4c
 #define LOG_SENSE             0x4d
 #define MODE_SELECT_10        0x55
+#define RESERVE_10            0x56
+#define RELEASE_10            0x57
 #define MODE_SENSE_10         0x5a
+#define PERSISTENT_RESERVE_IN 0x5e
+#define PERSISTENT_RESERVE_OUT 0x5f
 #define MOVE_MEDIUM           0xa5
 #define READ_12               0xa8
 #define WRITE_12              0xaa
index 05357e3480e969556cf43fc58f716d2a5c72f3a5..00d0dfa6939be0ffd27eeabbcdc623524e8b2a01 100644 (file)
@@ -13,7 +13,7 @@
 O_TARGET := kernel.o
 O_OBJS    = sched.o dma.o fork.o exec_domain.o panic.o printk.o sys.o \
            module.o exit.o itimer.o info.o time.o softirq.o resource.o \
-           sysctl.o acct.o capability.o ptrace.o
+           sysctl.o acct.o capability.o ptrace.o timer.o
 
 OX_OBJS  += signal.o
 
index de0b59bac65dde543404f5901fa7dc2866b38b9d..462373b21e5e70984cd75983e8fc6e52616e1a76 100644 (file)
@@ -33,6 +33,7 @@ int last_pid=0;
 
 /* SLAB cache for mm_struct's. */
 kmem_cache_t *mm_cachep;
+spinlock_t mm_lock = SPIN_LOCK_UNLOCKED;
 
 /* SLAB cache for files structs */
 kmem_cache_t *files_cachep; 
@@ -305,9 +306,21 @@ struct mm_struct * mm_alloc(void)
                atomic_set(&mm->mm_count, 1);
                init_MUTEX(&mm->mmap_sem);
                mm->page_table_lock = SPIN_LOCK_UNLOCKED;
-               mm->pgd = pgd_alloc();
-               if (mm->pgd)
+               mmlist_modify_lock();
+               if ((mm->pgd = get_pgd_fast())) {
+                       list_add_tail(&mm->mmlist, &init_mm.mmlist);
+                       mmlist_modify_unlock();
                        return mm;
+               }
+               mmlist_modify_unlock();
+               if ((mm->pgd = get_pgd_slow())) {
+                       mmlist_modify_lock();
+                       get_pgd_uptodate(mm->pgd);
+                       list_add_tail(&mm->mmlist, &init_mm.mmlist);
+                       mmlist_modify_unlock();
+                       return mm;
+               }
+               mmlist_modify_unlock();
                kmem_cache_free(mm_cachep, mm);
        }
        return NULL;
@@ -321,8 +334,11 @@ struct mm_struct * mm_alloc(void)
 inline void __mmdrop(struct mm_struct *mm)
 {
        if (mm == &init_mm) BUG();
+       mmlist_modify_lock();
        pgd_free(mm->pgd);
        destroy_context(mm);
+       list_del(&mm->mmlist);
+       mmlist_modify_unlock();
        kmem_cache_free(mm_cachep, mm);
 }
 
index 8fe894c5445ff76e5e9b2966776dc21bd234eb0d..e976cc7fe229fccc03b1ebee9f2f5f73e2845611 100644 (file)
@@ -188,6 +188,7 @@ EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_write_full_page);
 EXPORT_SYMBOL(block_write_partial_page);
 EXPORT_SYMBOL(block_write_cont_page);
+EXPORT_SYMBOL(block_write_zero_range);
 EXPORT_SYMBOL(generic_file_read);
 EXPORT_SYMBOL(do_generic_file_read);
 EXPORT_SYMBOL(generic_file_write);
@@ -218,6 +219,11 @@ EXPORT_SYMBOL(__find_get_page);
 EXPORT_SYMBOL(__find_lock_page);
 EXPORT_SYMBOL(grab_cache_page);
 EXPORT_SYMBOL(read_cache_page);
+EXPORT_SYMBOL(vfs_readlink);
+EXPORT_SYMBOL(vfs_follow_link);
+EXPORT_SYMBOL(page_readlink);
+EXPORT_SYMBOL(page_follow_link);
+EXPORT_SYMBOL(block_symlink);
 
 #if !defined(CONFIG_NFSD) && defined(CONFIG_NFSD_MODULE)
 EXPORT_SYMBOL(do_nfsservctl);
index 9ceb36f9e06dcb574053dee7fa320f2d6e367273..fb9d4ef8d0ce019f88329e4dc06f95582923064e 100644 (file)
@@ -13,6 +13,7 @@
  * 0.99.14 version by Jon Tombs <jon@gtex02.us.es>,
  * Heavily modified by Bjorn Ekwall <bj0rn@blox.se> May 1994 (C)
  * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
+ * Add MOD_INITIALIZING Keith Owens <kaos@ocs.com.au> Nov 1999
  *
  * This source is covered by the GNU GPL, the same as all kernel sources.
  */
@@ -323,16 +324,18 @@ sys_init_module(const char *name_user, struct module *mod_user)
        put_mod_name(name);
 
        /* Initialize the module.  */
+       mod->flags |= MOD_INITIALIZING;
        atomic_set(&mod->uc.usecount,1);
        if (mod->init && mod->init() != 0) {
                atomic_set(&mod->uc.usecount,0);
+               mod->flags &= ~MOD_INITIALIZING;
                error = -EBUSY;
                goto err0;
        }
        atomic_dec(&mod->uc.usecount);
 
        /* And set it running.  */
-       mod->flags |= MOD_RUNNING;
+       mod->flags = (mod->flags | MOD_RUNNING) & ~MOD_INITIALIZING;
        error = 0;
        goto err0;
 
@@ -456,7 +459,7 @@ qm_deps(struct module *mod, char *buf, size_t bufsize, size_t *ret)
 
        if (mod == &kernel_module)
                return -EINVAL;
-       if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+       if (!MOD_CAN_QUERY(mod))
                if (put_user(0, ret))
                        return -EFAULT;
                else
@@ -500,7 +503,7 @@ qm_refs(struct module *mod, char *buf, size_t bufsize, size_t *ret)
 
        if (mod == &kernel_module)
                return -EINVAL;
-       if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+       if (!MOD_CAN_QUERY(mod))
                if (put_user(0, ret))
                        return -EFAULT;
                else
@@ -544,7 +547,7 @@ qm_symbols(struct module *mod, char *buf, size_t bufsize, size_t *ret)
        char *strings;
        unsigned long *vals;
 
-       if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+       if (!MOD_CAN_QUERY(mod))
                if (put_user(0, ret))
                        return -EFAULT;
                else
@@ -710,7 +713,7 @@ sys_get_kernel_syms(struct kernel_sym *table)
                struct module_symbol *msym;
                unsigned int j;
 
-               if ((mod->flags & (MOD_RUNNING|MOD_DELETED)) != MOD_RUNNING)
+               if (!MOD_CAN_QUERY(mod))
                        continue;
 
                /* magic: write module info as a pseudo symbol */
@@ -859,7 +862,10 @@ int get_module_list(char *p)
                                safe_copy_cstr(" (autoclean)");
                        if (!(mod->flags & MOD_USED_ONCE))
                                safe_copy_cstr(" (unused)");
-               } else
+               }
+               else if (mod->flags & MOD_INITIALIZING)
+                       safe_copy_cstr(" (initializing)");
+               else
                        safe_copy_cstr(" (uninitialized)");
 
                if ((ref = mod->refs) != NULL) {
@@ -903,7 +909,7 @@ get_ksyms_list(char *buf, char **start, off_t offset, int length)
                unsigned i;
                struct module_symbol *sym;
 
-               if (!(mod->flags & MOD_RUNNING) || (mod->flags & MOD_DELETED))
+               if (!MOD_CAN_QUERY(mod))
                        continue;
 
                for (i = mod->nsyms, sym = mod->syms; i > 0; --i, ++sym) {
@@ -951,7 +957,7 @@ get_module_symbol(char *modname, char *symname)
 
        for (mp = module_list; mp; mp = mp->next) {
                if (((modname == NULL) || (strcmp(mp->name, modname) == 0)) &&
-                       (mp->flags & (MOD_RUNNING | MOD_DELETED)) == MOD_RUNNING &&
+                       MOD_CAN_QUERY(mp) &&
                        (mp->nsyms > 0)) {
                        for (i = mp->nsyms, sym = mp->syms;
                                i > 0; --i, ++sym) {
index de470727e009dd7a841ac995f1a67ca974a19a81..e653fd22cdca570d3d11900aa80bbca6f7163dab 100644 (file)
@@ -1,20 +1,15 @@
 /*
  *  linux/kernel/sched.c
  *
+ *  Kernel scheduler and related syscalls
+ *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
  *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
  *              make semaphores SMP safe
- *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
- *  1997-09-10 Updated NTP code according to technical memorandum Jan '96
- *             "A Kernel Model for Precision Timekeeping" by Dave Mills
  *  1998-11-19 Implemented schedule_timeout() and related stuff
  *             by Andrea Arcangeli
- *  1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
- *             serialize accesses to xtime/lost_ticks).
- *                             Copyright (C) 1998  Andrea Arcangeli
  *  1998-12-28  Implemented better SMP scheduling by Ingo Molnar
- *  1999-03-10 Improved NTP compatibility by Ulrich Windl
  */
 
 /*
  */
 
 #include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/fdreg.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
 
-#include <asm/io.h>
 #include <asm/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 
-#include <linux/timex.h>
+
+extern void timer_bh(void);
+extern void tqueue_bh(void);
+extern void immediate_bh(void);
 
 /*
- * kernel variables
+ * scheduler variables
  */
 
 unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
 
-long tick = (1000000 + HZ/2) / HZ;     /* timer interrupt period */
-
-/* The current time */
-volatile struct timeval xtime __attribute__ ((aligned (16)));
-
-/* Don't completely fail for HZ > 500.  */
-int tickadj = 500/HZ ? : 1;            /* microsecs */
-
-DECLARE_TASK_QUEUE(tq_timer);
-DECLARE_TASK_QUEUE(tq_immediate);
-DECLARE_TASK_QUEUE(tq_scheduler);
-
-/*
- * phase-lock loop variables
- */
-/* TIME_ERROR prevents overwriting the CMOS clock */
-int time_state = TIME_OK;      /* clock synchronization status */
-int time_status = STA_UNSYNC;  /* clock status bits */
-long time_offset = 0;          /* time adjustment (us) */
-long time_constant = 2;                /* pll time constant */
-long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
-long time_precision = 1;       /* clock precision (us) */
-long time_maxerror = NTP_PHASE_LIMIT;  /* maximum error (us) */
-long time_esterror = NTP_PHASE_LIMIT;  /* estimated error (us) */
-long time_phase = 0;           /* phase offset (scaled us) */
-long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC; /* frequency offset (scaled ppm) */
-long time_adj = 0;             /* tick adjust (scaled 1 / HZ) */
-long time_reftime = 0;         /* time at last adjustment (s) */
-
-long time_adjust = 0;
-long time_adjust_step = 0;
-
-unsigned long event = 0;
-
-extern int do_setitimer(int, struct itimerval *, struct itimerval *);
-unsigned int * prof_buffer = NULL;
-unsigned long prof_len = 0;
-unsigned long prof_shift = 0;
-
 extern void mem_use(void);
 
-unsigned long volatile jiffies=0;
-
 /*
  *     Init task must be ok at boot for the ix86 as we will check its signals
  *     via the SMP irq return path.
@@ -398,139 +351,6 @@ static void process_timeout(unsigned long __data)
        wake_up_process(p);
 }
 
-/*
- * Event timer code
- */
-#define TVN_BITS 6
-#define TVR_BITS 8
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-
-struct timer_vec {
-        int index;
-        struct timer_list *vec[TVN_SIZE];
-};
-
-struct timer_vec_root {
-        int index;
-        struct timer_list *vec[TVR_SIZE];
-};
-
-static struct timer_vec tv5 = { 0 };
-static struct timer_vec tv4 = { 0 };
-static struct timer_vec tv3 = { 0 };
-static struct timer_vec tv2 = { 0 };
-static struct timer_vec_root tv1 = { 0 };
-
-static struct timer_vec * const tvecs[] = {
-       (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
-};
-
-#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
-
-static unsigned long timer_jiffies = 0;
-
-static inline void insert_timer(struct timer_list *timer,
-                               struct timer_list **vec, int idx)
-{
-       if ((timer->next = vec[idx]))
-               vec[idx]->prev = timer;
-       vec[idx] = timer;
-       timer->prev = (struct timer_list *)&vec[idx];
-}
-
-static inline void internal_add_timer(struct timer_list *timer)
-{
-       /*
-        * must be cli-ed when calling this
-        */
-       unsigned long expires = timer->expires;
-       unsigned long idx = expires - timer_jiffies;
-
-       if (idx < TVR_SIZE) {
-               int i = expires & TVR_MASK;
-               insert_timer(timer, tv1.vec, i);
-       } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
-               int i = (expires >> TVR_BITS) & TVN_MASK;
-               insert_timer(timer, tv2.vec, i);
-       } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
-               int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
-               insert_timer(timer, tv3.vec, i);
-       } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
-               int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
-               insert_timer(timer, tv4.vec, i);
-       } else if ((signed long) idx < 0) {
-               /* can happen if you add a timer with expires == jiffies,
-                * or you set a timer to go off in the past
-                */
-               insert_timer(timer, tv1.vec, tv1.index);
-       } else if (idx <= 0xffffffffUL) {
-               int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
-               insert_timer(timer, tv5.vec, i);
-       } else {
-               /* Can only get here on architectures with 64-bit jiffies */
-               timer->next = timer->prev = timer;
-       }
-}
-
-spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
-
-void add_timer(struct timer_list *timer)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&timerlist_lock, flags);
-       if (timer->prev)
-               goto bug;
-       internal_add_timer(timer);
-out:
-       spin_unlock_irqrestore(&timerlist_lock, flags);
-       return;
-
-bug:
-       printk("bug: kernel timer added twice at %p.\n",
-                       __builtin_return_address(0));
-       goto out;
-}
-
-static inline int detach_timer(struct timer_list *timer)
-{
-       struct timer_list *prev = timer->prev;
-       if (prev) {
-               struct timer_list *next = timer->next;
-               prev->next = next;
-               if (next)
-                       next->prev = prev;
-               return 1;
-       }
-       return 0;
-}
-
-void mod_timer(struct timer_list *timer, unsigned long expires)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&timerlist_lock, flags);
-       timer->expires = expires;
-       detach_timer(timer);
-       internal_add_timer(timer);
-       spin_unlock_irqrestore(&timerlist_lock, flags);
-}
-
-int del_timer(struct timer_list * timer)
-{
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&timerlist_lock, flags);
-       ret = detach_timer(timer);
-       timer->next = timer->prev = 0;
-       spin_unlock_irqrestore(&timerlist_lock, flags);
-       return ret;
-}
-
 signed long schedule_timeout(signed long timeout)
 {
        struct timer_list timer;
@@ -944,548 +764,8 @@ long sleep_on_timeout(wait_queue_head_t *q, long timeout)
 
 void scheduling_functions_end_here(void) { }
 
-static inline void cascade_timers(struct timer_vec *tv)
-{
-        /* cascade all the timers from tv up one level */
-        struct timer_list *timer;
-        timer = tv->vec[tv->index];
-        /*
-         * We are removing _all_ timers from the list, so we don't  have to
-         * detach them individually, just clear the list afterwards.
-         */
-        while (timer) {
-                struct timer_list *tmp = timer;
-                timer = timer->next;
-                internal_add_timer(tmp);
-        }
-        tv->vec[tv->index] = NULL;
-        tv->index = (tv->index + 1) & TVN_MASK;
-}
-
-static inline void run_timer_list(void)
-{
-       spin_lock_irq(&timerlist_lock);
-       while ((long)(jiffies - timer_jiffies) >= 0) {
-               struct timer_list *timer;
-               if (!tv1.index) {
-                       int n = 1;
-                       do {
-                               cascade_timers(tvecs[n]);
-                       } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
-               }
-               while ((timer = tv1.vec[tv1.index])) {
-                       void (*fn)(unsigned long) = timer->function;
-                       unsigned long data = timer->data;
-                       detach_timer(timer);
-                       timer->next = timer->prev = NULL;
-                       spin_unlock_irq(&timerlist_lock);
-                       fn(data);
-                       spin_lock_irq(&timerlist_lock);
-               }
-               ++timer_jiffies; 
-               tv1.index = (tv1.index + 1) & TVR_MASK;
-       }
-       spin_unlock_irq(&timerlist_lock);
-}
-
-
-static inline void run_old_timers(void)
-{
-       struct timer_struct *tp;
-       unsigned long mask;
-
-       for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
-               if (mask > timer_active)
-                       break;
-               if (!(mask & timer_active))
-                       continue;
-               if (time_after(tp->expires, jiffies))
-                       continue;
-               timer_active &= ~mask;
-               tp->fn();
-               sti();
-       }
-}
-
-spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
-
-void tqueue_bh(void)
-{
-       run_task_queue(&tq_timer);
-}
-
-void immediate_bh(void)
-{
-       run_task_queue(&tq_immediate);
-}
-
-unsigned long timer_active = 0;
-struct timer_struct timer_table[32];
-
-/*
- * Hmm.. Changed this, as the GNU make sources (load.c) seems to
- * imply that avenrun[] is the standard name for this kind of thing.
- * Nothing else seems to be standardized: the fractional size etc
- * all seem to differ on different machines.
- */
-unsigned long avenrun[3] = { 0,0,0 };
-
-/*
- * Nr of active tasks - counted in fixed-point numbers
- */
-static unsigned long count_active_tasks(void)
-{
-       struct task_struct *p;
-       unsigned long nr = 0;
-
-       read_lock(&tasklist_lock);
-       for_each_task(p) {
-               if ((p->state == TASK_RUNNING ||
-                    (p->state & TASK_UNINTERRUPTIBLE) ||
-                    (p->state & TASK_SWAPPING)))
-                       nr += FIXED_1;
-       }
-       read_unlock(&tasklist_lock);
-       return nr;
-}
-
-static inline void calc_load(unsigned long ticks)
-{
-       unsigned long active_tasks; /* fixed-point */
-       static int count = LOAD_FREQ;
-
-       count -= ticks;
-       if (count < 0) {
-               count += LOAD_FREQ;
-               active_tasks = count_active_tasks();
-               CALC_LOAD(avenrun[0], EXP_1, active_tasks);
-               CALC_LOAD(avenrun[1], EXP_5, active_tasks);
-               CALC_LOAD(avenrun[2], EXP_15, active_tasks);
-       }
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- *
- */
-static void second_overflow(void)
-{
-    long ltemp;
-
-    /* Bump the maxerror field */
-    time_maxerror += time_tolerance >> SHIFT_USEC;
-    if ( time_maxerror > NTP_PHASE_LIMIT ) {
-        time_maxerror = NTP_PHASE_LIMIT;
-       time_status |= STA_UNSYNC;
-    }
-
-    /*
-     * Leap second processing. If in leap-insert state at
-     * the end of the day, the system clock is set back one
-     * second; if in leap-delete state, the system clock is
-     * set ahead one second. The microtime() routine or
-     * external clock driver will insure that reported time
-     * is always monotonic. The ugly divides should be
-     * replaced.
-     */
-    switch (time_state) {
-
-    case TIME_OK:
-       if (time_status & STA_INS)
-           time_state = TIME_INS;
-       else if (time_status & STA_DEL)
-           time_state = TIME_DEL;
-       break;
-
-    case TIME_INS:
-       if (xtime.tv_sec % 86400 == 0) {
-           xtime.tv_sec--;
-           time_state = TIME_OOP;
-           printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
-       }
-       break;
-
-    case TIME_DEL:
-       if ((xtime.tv_sec + 1) % 86400 == 0) {
-           xtime.tv_sec++;
-           time_state = TIME_WAIT;
-           printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
-       }
-       break;
-
-    case TIME_OOP:
-       time_state = TIME_WAIT;
-       break;
-
-    case TIME_WAIT:
-       if (!(time_status & (STA_INS | STA_DEL)))
-           time_state = TIME_OK;
-    }
-
-    /*
-     * Compute the phase adjustment for the next second. In
-     * PLL mode, the offset is reduced by a fixed factor
-     * times the time constant. In FLL mode the offset is
-     * used directly. In either mode, the maximum phase
-     * adjustment for each second is clamped so as to spread
-     * the adjustment over not more than the number of
-     * seconds between updates.
-     */
-    if (time_offset < 0) {
-       ltemp = -time_offset;
-       if (!(time_status & STA_FLL))
-           ltemp >>= SHIFT_KG + time_constant;
-       if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
-           ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
-       time_offset += ltemp;
-       time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
-    } else {
-       ltemp = time_offset;
-       if (!(time_status & STA_FLL))
-           ltemp >>= SHIFT_KG + time_constant;
-       if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
-           ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
-       time_offset -= ltemp;
-       time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
-    }
-
-    /*
-     * Compute the frequency estimate and additional phase
-     * adjustment due to frequency error for the next
-     * second. When the PPS signal is engaged, gnaw on the
-     * watchdog counter and update the frequency computed by
-     * the pll and the PPS signal.
-     */
-    pps_valid++;
-    if (pps_valid == PPS_VALID) {      /* PPS signal lost */
-       pps_jitter = MAXTIME;
-       pps_stabil = MAXFREQ;
-       time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
-                        STA_PPSWANDER | STA_PPSERROR);
-    }
-    ltemp = time_freq + pps_freq;
-    if (ltemp < 0)
-       time_adj -= -ltemp >>
-           (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
-    else
-       time_adj += ltemp >>
-           (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
-
-#if HZ == 100
-    /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
-     * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
-     */
-    if (time_adj < 0)
-       time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
-    else
-       time_adj += (time_adj >> 2) + (time_adj >> 5);
-#endif
-}
-
-/* in the NTP reference this is called "hardclock()" */
-static void update_wall_time_one_tick(void)
-{
-       if ( (time_adjust_step = time_adjust) != 0 ) {
-           /* We are doing an adjtime thing. 
-            *
-            * Prepare time_adjust_step to be within bounds.
-            * Note that a positive time_adjust means we want the clock
-            * to run faster.
-            *
-            * Limit the amount of the step to be in the range
-            * -tickadj .. +tickadj
-            */
-            if (time_adjust > tickadj)
-               time_adjust_step = tickadj;
-            else if (time_adjust < -tickadj)
-               time_adjust_step = -tickadj;
-            
-           /* Reduce by this step the amount of time left  */
-           time_adjust -= time_adjust_step;
-       }
-       xtime.tv_usec += tick + time_adjust_step;
-       /*
-        * Advance the phase, once it gets to one microsecond, then
-        * advance the tick more.
-        */
-       time_phase += time_adj;
-       if (time_phase <= -FINEUSEC) {
-               long ltemp = -time_phase >> SHIFT_SCALE;
-               time_phase += ltemp << SHIFT_SCALE;
-               xtime.tv_usec -= ltemp;
-       }
-       else if (time_phase >= FINEUSEC) {
-               long ltemp = time_phase >> SHIFT_SCALE;
-               time_phase -= ltemp << SHIFT_SCALE;
-               xtime.tv_usec += ltemp;
-       }
-}
-
-/*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing this this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks
- */
-static void update_wall_time(unsigned long ticks)
-{
-       do {
-               ticks--;
-               update_wall_time_one_tick();
-       } while (ticks);
-
-       if (xtime.tv_usec >= 1000000) {
-           xtime.tv_usec -= 1000000;
-           xtime.tv_sec++;
-           second_overflow();
-       }
-}
-
-static inline void do_process_times(struct task_struct *p,
-       unsigned long user, unsigned long system)
-{
-       unsigned long psecs;
-
-       psecs = (p->times.tms_utime += user);
-       psecs += (p->times.tms_stime += system);
-       if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
-               /* Send SIGXCPU every second.. */
-               if (!(psecs % HZ))
-                       send_sig(SIGXCPU, p, 1);
-               /* and SIGKILL when we go over max.. */
-               if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
-                       send_sig(SIGKILL, p, 1);
-       }
-}
-
-static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
-{
-       unsigned long it_virt = p->it_virt_value;
-
-       if (it_virt) {
-               if (it_virt <= ticks) {
-                       it_virt = ticks + p->it_virt_incr;
-                       send_sig(SIGVTALRM, p, 1);
-               }
-               p->it_virt_value = it_virt - ticks;
-       }
-}
-
-static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
-{
-       unsigned long it_prof = p->it_prof_value;
-
-       if (it_prof) {
-               if (it_prof <= ticks) {
-                       it_prof = ticks + p->it_prof_incr;
-                       send_sig(SIGPROF, p, 1);
-               }
-               p->it_prof_value = it_prof - ticks;
-       }
-}
-
-void update_one_process(struct task_struct *p,
-       unsigned long ticks, unsigned long user, unsigned long system, int cpu)
-{
-       p->per_cpu_utime[cpu] += user;
-       p->per_cpu_stime[cpu] += system;
-       do_process_times(p, user, system);
-       do_it_virt(p, user);
-       do_it_prof(p, ticks);
-}      
-
-static void update_process_times(unsigned long ticks, unsigned long system)
-{
-/*
- * SMP does this on a per-CPU basis elsewhere
- */
-#ifndef  __SMP__
-       struct task_struct * p = current;
-       unsigned long user = ticks - system;
-       if (p->pid) {
-               p->counter -= ticks;
-               if (p->counter <= 0) {
-                       p->counter = 0;
-                       p->need_resched = 1;
-               }
-               if (p->priority < DEF_PRIORITY)
-                       kstat.cpu_nice += user;
-               else
-                       kstat.cpu_user += user;
-               kstat.cpu_system += system;
-       }
-       update_one_process(p, ticks, user, system, 0);
-#endif
-}
-
-volatile unsigned long lost_ticks = 0;
-static unsigned long lost_ticks_system = 0;
-
-/*
- * This spinlock protect us from races in SMP while playing with xtime. -arca
- */
-rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
-
-static inline void update_times(void)
-{
-       unsigned long ticks;
-
-       /*
-        * update_times() is run from the raw timer_bh handler so we
-        * just know that the irqs are locally enabled and so we don't
-        * need to save/restore the flags of the local CPU here. -arca
-        */
-       write_lock_irq(&xtime_lock);
-
-       ticks = lost_ticks;
-       lost_ticks = 0;
-
-       if (ticks) {
-               unsigned long system;
-               system = xchg(&lost_ticks_system, 0);
-
-               calc_load(ticks);
-               update_wall_time(ticks);
-               write_unlock_irq(&xtime_lock);
-               
-               update_process_times(ticks, system);
-
-       } else
-               write_unlock_irq(&xtime_lock);
-}
-
-static void timer_bh(void)
-{
-       update_times();
-       run_old_timers();
-       run_timer_list();
-}
-
-void do_timer(struct pt_regs * regs)
-{
-       (*(unsigned long *)&jiffies)++;
-       lost_ticks++;
-       mark_bh(TIMER_BH);
-       if (!user_mode(regs))
-               lost_ticks_system++;
-       if (tq_timer)
-               mark_bh(TQUEUE_BH);
-}
-
-#if !defined(__alpha__) && !defined(__ia64__)
-
-/*
- * For backwards compatibility?  This can be done in libc so Alpha
- * and all newer ports shouldn't need it.
- */
-asmlinkage unsigned long sys_alarm(unsigned int seconds)
-{
-       struct itimerval it_new, it_old;
-       unsigned int oldalarm;
-
-       it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
-       it_new.it_value.tv_sec = seconds;
-       it_new.it_value.tv_usec = 0;
-       do_setitimer(ITIMER_REAL, &it_new, &it_old);
-       oldalarm = it_old.it_value.tv_sec;
-       /* ehhh.. We can't return 0 if we have an alarm pending.. */
-       /* And we'd better return too much than too little anyway */
-       if (it_old.it_value.tv_usec)
-               oldalarm++;
-       return oldalarm;
-}
-
-#endif
-
 #ifndef __alpha__
 
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
- * should be moved into arch/i386 instead?
- */
-asmlinkage long sys_getpid(void)
-{
-       /* This is SMP safe - current->pid doesn't change */
-       return current->pid;
-}
-
-/*
- * This is not strictly SMP safe: p_opptr could change
- * from under us. However, rather than getting any lock
- * we can use an optimistic algorithm: get the parent
- * pid, and go back and check that the parent is still
- * the same. If it has changed (which is extremely unlikely
- * indeed), we just try again..
- *
- * NOTE! This depends on the fact that even if we _do_
- * get an old value of "parent", we can happily dereference
- * the pointer: we just can't necessarily trust the result
- * until we know that the parent pointer is valid.
- *
- * The "mb()" macro is a memory barrier - a synchronizing
- * event. It also makes sure that gcc doesn't optimize
- * away the necessary memory references.. The barrier doesn't
- * have to have all that strong semantics: on x86 we don't
- * really require a synchronizing instruction, for example.
- * The barrier is more important for code generation than
- * for any real memory ordering semantics (even if there is
- * a small window for a race, using the old pointer is
- * harmless for a while).
- */
-asmlinkage long sys_getppid(void)
-{
-       int pid;
-       struct task_struct * me = current;
-       struct task_struct * parent;
-
-       parent = me->p_opptr;
-       for (;;) {
-               pid = parent->pid;
-#if __SMP__
-{
-               struct task_struct *old = parent;
-               mb();
-               parent = me->p_opptr;
-               if (old != parent)
-                       continue;
-}
-#endif
-               break;
-       }
-       return pid;
-}
-
-asmlinkage long sys_getuid(void)
-{
-       /* Only we change this so SMP safe */
-       return current->uid;
-}
-
-asmlinkage long sys_geteuid(void)
-{
-       /* Only we change this so SMP safe */
-       return current->euid;
-}
-
-asmlinkage long sys_getgid(void)
-{
-       /* Only we change this so SMP safe */
-       return current->gid;
-}
-
-asmlinkage long sys_getegid(void)
-{
-       /* Only we change this so SMP safe */
-       return  current->egid;
-}
-
 /*
  * This has been replaced by sys_setpriority.  Maybe it should be
  * moved into the arch dependent tree for those ports that require
@@ -1742,47 +1022,6 @@ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
        return 0;
 }
 
-asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
-{
-       struct timespec t;
-       unsigned long expire;
-
-       if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
-               return -EFAULT;
-
-       if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
-               return -EINVAL;
-
-
-       if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
-           current->policy != SCHED_OTHER)
-       {
-               /*
-                * Short delay requests up to 2 ms will be handled with
-                * high precision by a busy wait for all real-time processes.
-                *
-                * Its important on SMP not to do this holding locks.
-                */
-               udelay((t.tv_nsec + 999) / 1000);
-               return 0;
-       }
-
-       expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire);
-
-       if (expire) {
-               if (rmtp) {
-                       jiffies_to_timespec(expire, &t);
-                       if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
-                               return -EFAULT;
-               }
-               return -EINTR;
-       }
-       return 0;
-}
-
 static void show_task(struct task_struct * p)
 {
        unsigned long free = 0;
diff --git a/kernel/timer.c b/kernel/timer.c
new file mode 100644 (file)
index 0000000..c54460d
--- /dev/null
@@ -0,0 +1,791 @@
+/*
+ *  linux/kernel/ktimer.c
+ *
+ *  Kernel internal timers, kernel timekeeping, basic process system calls
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
+ *
+ *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
+ *              "A Kernel Model for Precision Timekeeping" by Dave Mills
+ *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ *              serialize accesses to xtime/lost_ticks).
+ *                              Copyright (C) 1998  Andrea Arcangeli
+ *  1999-03-10  Improved NTP compatibility by Ulrich Windl
+ */
+
+#include <linux/mm.h>
+#include <linux/timex.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * Timekeeping variables
+ */
+
+long tick = (1000000 + HZ/2) / HZ;     /* timer interrupt period */
+
+/* The current time */
+volatile struct timeval xtime __attribute__ ((aligned (16)));
+
+/* Don't completely fail for HZ > 500.  */
+int tickadj = 500/HZ ? : 1;            /* microsecs */
+
+DECLARE_TASK_QUEUE(tq_timer);
+DECLARE_TASK_QUEUE(tq_immediate);
+DECLARE_TASK_QUEUE(tq_scheduler);
+
+/*
+ * phase-lock loop variables
+ */
+/* TIME_ERROR prevents overwriting the CMOS clock */
+int time_state = TIME_OK;              /* clock synchronization status */
+int time_status = STA_UNSYNC;          /* clock status bits            */
+long time_offset = 0;                  /* time adjustment (us)         */
+long time_constant = 2;                        /* pll time constant            */
+long time_tolerance = MAXFREQ;         /* frequency tolerance (ppm)    */
+long time_precision = 1;               /* clock precision (us)         */
+long time_maxerror = NTP_PHASE_LIMIT;  /* maximum error (us)           */
+long time_esterror = NTP_PHASE_LIMIT;  /* estimated error (us)         */
+long time_phase = 0;                   /* phase offset (scaled us)     */
+long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC;
+                                       /* frequency offset (scaled ppm)*/
+long time_adj = 0;                     /* tick adjust (scaled 1 / HZ)  */
+long time_reftime = 0;                 /* time at last adjustment (s)  */
+
+long time_adjust = 0;
+long time_adjust_step = 0;
+
+unsigned long event = 0;
+
+extern int do_setitimer(int, struct itimerval *, struct itimerval *);
+
+unsigned long volatile jiffies = 0;
+
+unsigned int * prof_buffer = NULL;
+unsigned long prof_len = 0;
+unsigned long prof_shift = 0;
+
+/*
+ * Event timer code
+ */
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+struct timer_vec {
+        int index;
+        struct timer_list *vec[TVN_SIZE];
+};
+
+struct timer_vec_root {
+        int index;
+        struct timer_list *vec[TVR_SIZE];
+};
+
+static struct timer_vec tv5 = { 0 };
+static struct timer_vec tv4 = { 0 };
+static struct timer_vec tv3 = { 0 };
+static struct timer_vec tv2 = { 0 };
+static struct timer_vec_root tv1 = { 0 };
+
+static struct timer_vec * const tvecs[] = {
+       (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
+};
+
+#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+
+static unsigned long timer_jiffies = 0;
+
+static inline void insert_timer(struct timer_list *timer,
+                               struct timer_list **vec, int idx)
+{
+       if ((timer->next = vec[idx]))
+               vec[idx]->prev = timer;
+       vec[idx] = timer;
+       timer->prev = (struct timer_list *)&vec[idx];
+}
+
+static inline void internal_add_timer(struct timer_list *timer)
+{
+       /*
+        * must be cli-ed when calling this
+        */
+       unsigned long expires = timer->expires;
+       unsigned long idx = expires - timer_jiffies;
+
+       if (idx < TVR_SIZE) {
+               int i = expires & TVR_MASK;
+               insert_timer(timer, tv1.vec, i);
+       } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
+               int i = (expires >> TVR_BITS) & TVN_MASK;
+               insert_timer(timer, tv2.vec, i);
+       } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
+               int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+               insert_timer(timer, tv3.vec, i);
+       } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
+               int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+               insert_timer(timer, tv4.vec, i);
+       } else if ((signed long) idx < 0) {
+               /* can happen if you add a timer with expires == jiffies,
+                * or you set a timer to go off in the past
+                */
+               insert_timer(timer, tv1.vec, tv1.index);
+       } else if (idx <= 0xffffffffUL) {
+               int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+               insert_timer(timer, tv5.vec, i);
+       } else {
+               /* Can only get here on architectures with 64-bit jiffies */
+               timer->next = timer->prev = timer;
+       }
+}
+
+spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
+
+void add_timer(struct timer_list *timer)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&timerlist_lock, flags);
+       if (timer->prev)
+               goto bug;
+       internal_add_timer(timer);
+out:
+       spin_unlock_irqrestore(&timerlist_lock, flags);
+       return;
+
+bug:
+       printk("bug: kernel timer added twice at %p.\n",
+                       __builtin_return_address(0));
+       goto out;
+}
+
+static inline int detach_timer(struct timer_list *timer)
+{
+       struct timer_list *prev = timer->prev;
+       if (prev) {
+               struct timer_list *next = timer->next;
+               prev->next = next;
+               if (next)
+                       next->prev = prev;
+               return 1;
+       }
+       return 0;
+}
+
+void mod_timer(struct timer_list *timer, unsigned long expires)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&timerlist_lock, flags);
+       timer->expires = expires;
+       detach_timer(timer);
+       internal_add_timer(timer);
+       spin_unlock_irqrestore(&timerlist_lock, flags);
+}
+
+int del_timer(struct timer_list * timer)
+{
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&timerlist_lock, flags);
+       ret = detach_timer(timer);
+       timer->next = timer->prev = 0;
+       spin_unlock_irqrestore(&timerlist_lock, flags);
+       return ret;
+}
+
+static inline void cascade_timers(struct timer_vec *tv)
+{
+        /* cascade all the timers from tv up one level */
+        struct timer_list *timer;
+        timer = tv->vec[tv->index];
+        /*
+         * We are removing _all_ timers from the list, so we don't  have to
+         * detach them individually, just clear the list afterwards.
+         */
+        while (timer) {
+                struct timer_list *tmp = timer;
+                timer = timer->next;
+                internal_add_timer(tmp);
+        }
+        tv->vec[tv->index] = NULL;
+        tv->index = (tv->index + 1) & TVN_MASK;
+}
+
+static inline void run_timer_list(void)
+{
+       spin_lock_irq(&timerlist_lock);
+       while ((long)(jiffies - timer_jiffies) >= 0) {
+               struct timer_list *timer;
+               if (!tv1.index) {
+                       int n = 1;
+                       do {
+                               cascade_timers(tvecs[n]);
+                       } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+               }
+               while ((timer = tv1.vec[tv1.index])) {
+                       void (*fn)(unsigned long) = timer->function;
+                       unsigned long data = timer->data;
+                       detach_timer(timer);
+                       timer->next = timer->prev = NULL;
+                       spin_unlock_irq(&timerlist_lock);
+                       fn(data);
+                       spin_lock_irq(&timerlist_lock);
+               }
+               ++timer_jiffies; 
+               tv1.index = (tv1.index + 1) & TVR_MASK;
+       }
+       spin_unlock_irq(&timerlist_lock);
+}
+
+
+static inline void run_old_timers(void)
+{
+       struct timer_struct *tp;
+       unsigned long mask;
+
+       for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
+               if (mask > timer_active)
+                       break;
+               if (!(mask & timer_active))
+                       continue;
+               if (time_after(tp->expires, jiffies))
+                       continue;
+               timer_active &= ~mask;
+               tp->fn();
+               sti();
+       }
+}
+
+spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
+
+void tqueue_bh(void)
+{
+       run_task_queue(&tq_timer);
+}
+
+void immediate_bh(void)
+{
+       run_task_queue(&tq_immediate);
+}
+
+unsigned long timer_active = 0;
+struct timer_struct timer_table[32];
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ */
+static void second_overflow(void)
+{
+    long ltemp;
+
+    /* Bump the maxerror field */
+    time_maxerror += time_tolerance >> SHIFT_USEC;
+    if ( time_maxerror > NTP_PHASE_LIMIT ) {
+        time_maxerror = NTP_PHASE_LIMIT;
+       time_status |= STA_UNSYNC;
+    }
+
+    /*
+     * Leap second processing. If in leap-insert state at
+     * the end of the day, the system clock is set back one
+     * second; if in leap-delete state, the system clock is
+     * set ahead one second. The microtime() routine or
+     * external clock driver will insure that reported time
+     * is always monotonic. The ugly divides should be
+     * replaced.
+     */
+    switch (time_state) {
+
+    case TIME_OK:
+       if (time_status & STA_INS)
+           time_state = TIME_INS;
+       else if (time_status & STA_DEL)
+           time_state = TIME_DEL;
+       break;
+
+    case TIME_INS:
+       if (xtime.tv_sec % 86400 == 0) {
+           xtime.tv_sec--;
+           time_state = TIME_OOP;
+           printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+       }
+       break;
+
+    case TIME_DEL:
+       if ((xtime.tv_sec + 1) % 86400 == 0) {
+           xtime.tv_sec++;
+           time_state = TIME_WAIT;
+           printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+       }
+       break;
+
+    case TIME_OOP:
+       time_state = TIME_WAIT;
+       break;
+
+    case TIME_WAIT:
+       if (!(time_status & (STA_INS | STA_DEL)))
+           time_state = TIME_OK;
+    }
+
+    /*
+     * Compute the phase adjustment for the next second. In
+     * PLL mode, the offset is reduced by a fixed factor
+     * times the time constant. In FLL mode the offset is
+     * used directly. In either mode, the maximum phase
+     * adjustment for each second is clamped so as to spread
+     * the adjustment over not more than the number of
+     * seconds between updates.
+     */
+    if (time_offset < 0) {
+       ltemp = -time_offset;
+       if (!(time_status & STA_FLL))
+           ltemp >>= SHIFT_KG + time_constant;
+       if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+           ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+       time_offset += ltemp;
+       time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+    } else {
+       ltemp = time_offset;
+       if (!(time_status & STA_FLL))
+           ltemp >>= SHIFT_KG + time_constant;
+       if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+           ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+       time_offset -= ltemp;
+       time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+    }
+
+    /*
+     * Compute the frequency estimate and additional phase
+     * adjustment due to frequency error for the next
+     * second. When the PPS signal is engaged, gnaw on the
+     * watchdog counter and update the frequency computed by
+     * the pll and the PPS signal.
+     */
+    pps_valid++;
+    if (pps_valid == PPS_VALID) {      /* PPS signal lost */
+       pps_jitter = MAXTIME;
+       pps_stabil = MAXFREQ;
+       time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+                        STA_PPSWANDER | STA_PPSERROR);
+    }
+    ltemp = time_freq + pps_freq;
+    if (ltemp < 0)
+       time_adj -= -ltemp >>
+           (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+    else
+       time_adj += ltemp >>
+           (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+
+#if HZ == 100
+    /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
+     * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
+     */
+    if (time_adj < 0)
+       time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
+    else
+       time_adj += (time_adj >> 2) + (time_adj >> 5);
+#endif
+}
+
+/* in the NTP reference this is called "hardclock()" */
+/*
+ * Advance xtime by exactly one tick: apply this tick's share of the
+ * adjtime() slew (time_adjust) and accumulate the NTP phase
+ * adjustment (time_adj) into time_phase, spilling whole microseconds
+ * into xtime.tv_usec.  Called from update_times() with xtime_lock
+ * held for writing.
+ */
+static void update_wall_time_one_tick(void)
+{
+       if ( (time_adjust_step = time_adjust) != 0 ) {
+           /* We are doing an adjtime thing. 
+            *
+            * Prepare time_adjust_step to be within bounds.
+            * Note that a positive time_adjust means we want the clock
+            * to run faster.
+            *
+            * Limit the amount of the step to be in the range
+            * -tickadj .. +tickadj
+            */
+            if (time_adjust > tickadj)
+               time_adjust_step = tickadj;
+            else if (time_adjust < -tickadj)
+               time_adjust_step = -tickadj;
+            
+           /* Reduce by this step the amount of time left  */
+           time_adjust -= time_adjust_step;
+       }
+       /* Base tick length plus this tick's (bounded) adjtime() step. */
+       xtime.tv_usec += tick + time_adjust_step;
+       /*
+        * Advance the phase, once it gets to one microsecond, then
+        * advance the tick more.
+        */
+       time_phase += time_adj;
+       if (time_phase <= -FINEUSEC) {
+               long ltemp = -time_phase >> SHIFT_SCALE;
+               time_phase += ltemp << SHIFT_SCALE;
+               xtime.tv_usec -= ltemp;
+       }
+       else if (time_phase >= FINEUSEC) {
+               long ltemp = time_phase >> SHIFT_SCALE;
+               time_phase -= ltemp << SHIFT_SCALE;
+               xtime.tv_usec += ltemp;
+       }
+}
+
+/*
+ * Using a loop looks inefficient, but "ticks" is
+ * usually just one (we shouldn't be losing ticks,
+ * we're doing this this way mainly for interrupt
+ * latency reasons, not because we think we'll
+ * have lots of lost timer ticks
+ */
+/*
+ * Fold "ticks" lost ticks into xtime, one tick at a time, then handle
+ * a possible second rollover.  The do/while relies on the caller
+ * passing ticks != 0 (update_times() only calls this when ticks is
+ * non-zero).
+ */
+static void update_wall_time(unsigned long ticks)
+{
+       do {
+               ticks--;
+               update_wall_time_one_tick();
+       } while (ticks);
+
+       /* NOTE(review): only one second of overflow is folded per call;
+        * this assumes tv_usec cannot have advanced by >= 2s here --
+        * confirm for very large tick backlogs. */
+       if (xtime.tv_usec >= 1000000) {
+           xtime.tv_usec -= 1000000;
+           xtime.tv_sec++;
+           second_overflow();
+       }
+}
+
+/*
+ * Charge "user" and "system" ticks to p's tms accounting, and enforce
+ * the RLIMIT_CPU soft/hard limits on the combined CPU time:
+ * SIGXCPU once per second over the soft limit, SIGKILL over the hard
+ * limit.
+ */
+static inline void do_process_times(struct task_struct *p,
+       unsigned long user, unsigned long system)
+{
+       unsigned long psecs;
+
+       /* psecs = total (user + system) ticks consumed so far. */
+       psecs = (p->times.tms_utime += user);
+       psecs += (p->times.tms_stime += system);
+       if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
+               /* Send SIGXCPU every second.. */
+               if (!(psecs % HZ))
+                       send_sig(SIGXCPU, p, 1);
+               /* and SIGKILL when we go over max.. */
+               if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
+                       send_sig(SIGKILL, p, 1);
+       }
+}
+
+/*
+ * Tick down p's ITIMER_VIRTUAL by "ticks".  When it expires, deliver
+ * SIGVTALRM and re-arm with the interval (it_virt_incr); a value of 0
+ * means the timer is disarmed.
+ */
+static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
+{
+       unsigned long it_virt = p->it_virt_value;
+
+       if (it_virt) {
+               if (it_virt <= ticks) {
+                       /* Expired: reload so the subtraction below
+                        * leaves it_virt_incr remaining. */
+                       it_virt = ticks + p->it_virt_incr;
+                       send_sig(SIGVTALRM, p, 1);
+               }
+               p->it_virt_value = it_virt - ticks;
+       }
+}
+
+/*
+ * Tick down p's ITIMER_PROF by "ticks".  When it expires, deliver
+ * SIGPROF and re-arm with the interval (it_prof_incr); a value of 0
+ * means the timer is disarmed.  Mirrors do_it_virt() above.
+ */
+static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
+{
+       unsigned long it_prof = p->it_prof_value;
+
+       if (it_prof) {
+               if (it_prof <= ticks) {
+                       /* Expired: reload so the subtraction below
+                        * leaves it_prof_incr remaining. */
+                       it_prof = ticks + p->it_prof_incr;
+                       send_sig(SIGPROF, p, 1);
+               }
+               p->it_prof_value = it_prof - ticks;
+       }
+}
+
+/*
+ * Per-task tick accounting for one CPU: charge per-cpu user/system
+ * time, update tms/rlimit accounting, and tick the interval timers.
+ * Note the virtual itimer is advanced by user ticks only, while the
+ * profiling itimer is advanced by all ticks.
+ */
+void update_one_process(struct task_struct *p,
+       unsigned long ticks, unsigned long user, unsigned long system, int cpu)
+{
+       p->per_cpu_utime[cpu] += user;
+       p->per_cpu_stime[cpu] += system;
+       do_process_times(p, user, system);
+       do_it_virt(p, user);
+       do_it_prof(p, ticks);
+}      
+
+/*
+ * Uniprocessor-only path: charge "ticks" (of which "system" were in
+ * kernel mode) to the current task, decrement its scheduling counter,
+ * flag a reschedule when it runs out, and update kernel-wide CPU
+ * statistics.  The idle task (pid 0) is excluded from the counter and
+ * kstat updates but still gets update_one_process() accounting.
+ */
+static void update_process_times(unsigned long ticks, unsigned long system)
+{
+/*
+ * SMP does this on a per-CPU basis elsewhere
+ */
+#ifndef  __SMP__
+       struct task_struct * p = current;
+       unsigned long user = ticks - system;
+       if (p->pid) {
+               p->counter -= ticks;
+               if (p->counter <= 0) {
+                       p->counter = 0;
+                       p->need_resched = 1;
+               }
+               /* "nice"d tasks are accounted separately from normal
+                * user time. */
+               if (p->priority < DEF_PRIORITY)
+                       kstat.cpu_nice += user;
+               else
+                       kstat.cpu_user += user;
+               kstat.cpu_system += system;
+       }
+       update_one_process(p, ticks, user, system, 0);
+#endif
+}
+
+/*
+ * Nr of active tasks - counted in fixed-point numbers
+ */
+/*
+ * Count tasks that contribute to the load average (running,
+ * uninterruptible, or swapping), scaled by FIXED_1 for the
+ * fixed-point CALC_LOAD math in calc_load().  Walks the task list
+ * under tasklist_lock (read side).
+ */
+static unsigned long count_active_tasks(void)
+{
+       struct task_struct *p;
+       unsigned long nr = 0;
+
+       read_lock(&tasklist_lock);
+       for_each_task(p) {
+               if ((p->state == TASK_RUNNING ||
+                    (p->state & TASK_UNINTERRUPTIBLE) ||
+                    (p->state & TASK_SWAPPING)))
+                       nr += FIXED_1;
+       }
+       read_unlock(&tasklist_lock);
+       return nr;
+}
+
+/*
+ * Hmm.. Changed this, as the GNU make sources (load.c) seems to
+ * imply that avenrun[] is the standard name for this kind of thing.
+ * Nothing else seems to be standardized: the fractional size etc
+ * all seem to differ on different machines.
+ */
+unsigned long avenrun[3] = { 0,0,0 };
+
+/*
+ * Recompute the 1/5/15-minute load averages once every LOAD_FREQ
+ * ticks.  "count" carries the tick budget across calls; folding
+ * "ticks" at once keeps the schedule correct even when ticks were
+ * lost and are delivered in a batch.
+ */
+static inline void calc_load(unsigned long ticks)
+{
+       unsigned long active_tasks; /* fixed-point */
+       static int count = LOAD_FREQ;
+
+       count -= ticks;
+       if (count < 0) {
+               count += LOAD_FREQ;
+               active_tasks = count_active_tasks();
+               CALC_LOAD(avenrun[0], EXP_1, active_tasks);
+               CALC_LOAD(avenrun[1], EXP_5, active_tasks);
+               CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+       }
+}
+
+/* Ticks counted by do_timer() but not yet folded into xtime/process
+ * accounting by timer_bh()/update_times(). */
+volatile unsigned long lost_ticks = 0;
+/* Subset of lost_ticks that occurred in kernel (non-user) mode; see
+ * do_timer(). */
+static unsigned long lost_ticks_system = 0;
+
+/*
+ * This spinlock protects us from races in SMP while playing with xtime. -arca
+ */
+rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
+
+/*
+ * Consume the lost_ticks/lost_ticks_system accumulated by do_timer():
+ * update the load average and wall time under xtime_lock, then do the
+ * per-process accounting outside the lock.  lost_ticks_system is
+ * fetched with xchg() since do_timer() may still be incrementing it.
+ */
+static inline void update_times(void)
+{
+       unsigned long ticks;
+
+       /*
+        * update_times() is run from the raw timer_bh handler so we
+        * just know that the irqs are locally enabled and so we don't
+        * need to save/restore the flags of the local CPU here. -arca
+        */
+       write_lock_irq(&xtime_lock);
+
+       ticks = lost_ticks;
+       lost_ticks = 0;
+
+       if (ticks) {
+               unsigned long system;
+               system = xchg(&lost_ticks_system, 0);
+
+               calc_load(ticks);
+               update_wall_time(ticks);
+               write_unlock_irq(&xtime_lock);
+               
+               update_process_times(ticks, system);
+
+       } else
+               write_unlock_irq(&xtime_lock);
+}
+
+/*
+ * Timer bottom half (TIMER_BH, marked from do_timer()): catch up on
+ * tick accounting, then run both the old-style and list-based timers.
+ */
+void timer_bh(void)
+{
+       update_times();
+       run_old_timers();
+       run_timer_list();
+}
+
+/*
+ * Hardware timer interrupt hook: bump jiffies and lost_ticks (plus
+ * lost_ticks_system when the tick hit kernel mode), and schedule the
+ * bottom halves.  Heavy work is deferred to timer_bh().
+ */
+void do_timer(struct pt_regs * regs)
+{
+       /* jiffies is declared volatile elsewhere; cast to increment it
+        * without the volatile access. */
+       (*(unsigned long *)&jiffies)++;
+       lost_ticks++;
+       mark_bh(TIMER_BH);
+       if (!user_mode(regs))
+               lost_ticks_system++;
+       if (tq_timer)
+               mark_bh(TQUEUE_BH);
+}
+
+#if !defined(__alpha__) && !defined(__ia64__)
+
+/*
+ * For backwards compatibility?  This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+/*
+ * alarm(2): arm a one-shot ITIMER_REAL for "seconds" seconds
+ * (0 cancels it) and return the whole seconds that were left on the
+ * previous alarm, rounded up so a pending alarm never reports 0.
+ */
+asmlinkage unsigned long sys_alarm(unsigned int seconds)
+{
+       struct itimerval it_new, it_old;
+       unsigned int oldalarm;
+
+       it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+       it_new.it_value.tv_sec = seconds;
+       it_new.it_value.tv_usec = 0;
+       do_setitimer(ITIMER_REAL, &it_new, &it_old);
+       oldalarm = it_old.it_value.tv_sec;
+       /* ehhh.. We can't return 0 if we have an alarm pending.. */
+       /* And we'd better return too much than too little anyway */
+       if (it_old.it_value.tv_usec)
+               oldalarm++;
+       return oldalarm;
+}
+
+#endif
+
+#ifndef __alpha__
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
+ * should be moved into arch/i386 instead?
+ */
+/* getpid(2): return the caller's process id. */
+asmlinkage long sys_getpid(void)
+{
+       /* This is SMP safe - current->pid doesn't change */
+       return current->pid;
+}
+
+/*
+ * This is not strictly SMP safe: p_opptr could change
+ * from under us. However, rather than getting any lock
+ * we can use an optimistic algorithm: get the parent
+ * pid, and go back and check that the parent is still
+ * the same. If it has changed (which is extremely unlikely
+ * indeed), we just try again..
+ *
+ * NOTE! This depends on the fact that even if we _do_
+ * get an old value of "parent", we can happily dereference
+ * the pointer: we just can't necessarily trust the result
+ * until we know that the parent pointer is valid.
+ *
+ * The "mb()" macro is a memory barrier - a synchronizing
+ * event. It also makes sure that gcc doesn't optimize
+ * away the necessary memory references.. The barrier doesn't
+ * have to have all that strong semantics: on x86 we don't
+ * really require a synchronizing instruction, for example.
+ * The barrier is more important for code generation than
+ * for any real memory ordering semantics (even if there is
+ * a small window for a race, using the old pointer is
+ * harmless for a while).
+ */
+/*
+ * getppid(2): lock-free optimistic read of the parent pid.  On SMP,
+ * re-read p_opptr after the pid load (with a memory barrier) and
+ * retry if the parent changed underneath us; see the long comment
+ * above for why dereferencing a stale parent pointer is harmless.
+ */
+asmlinkage long sys_getppid(void)
+{
+       int pid;
+       struct task_struct * me = current;
+       struct task_struct * parent;
+
+       parent = me->p_opptr;
+       for (;;) {
+               pid = parent->pid;
+#if __SMP__
+{
+               struct task_struct *old = parent;
+               mb();
+               parent = me->p_opptr;
+               if (old != parent)
+                       continue;
+}
+#endif
+               break;
+       }
+       return pid;
+}
+
+/* getuid(2): return the caller's real user id. */
+asmlinkage long sys_getuid(void)
+{
+       /* Only we change this so SMP safe */
+       return current->uid;
+}
+
+/* geteuid(2): return the caller's effective user id. */
+asmlinkage long sys_geteuid(void)
+{
+       /* Only we change this so SMP safe */
+       return current->euid;
+}
+
+/* getgid(2): return the caller's real group id. */
+asmlinkage long sys_getgid(void)
+{
+       /* Only we change this so SMP safe */
+       return current->gid;
+}
+
+/* getegid(2): return the caller's effective group id. */
+asmlinkage long sys_getegid(void)
+{
+       /* Only we change this so SMP safe */
+       return  current->egid;
+}
+
+#endif
+
+/*
+ * nanosleep(2): sleep for the requested timespec.  Returns 0 on a
+ * full sleep, -EINVAL on a malformed timespec, -EFAULT on a bad user
+ * pointer, and -EINTR when interrupted (writing the remaining time to
+ * *rmtp if supplied).  Real-time tasks requesting <= 2ms busy-wait
+ * for precision instead of sleeping.
+ */
+asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
+{
+       struct timespec t;
+       unsigned long expire;
+
+       if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
+               return -EFAULT;
+
+       if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
+               return -EINVAL;
+
+
+       if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
+           current->policy != SCHED_OTHER)
+       {
+               /*
+                * Short delay requests up to 2 ms will be handled with
+                * high precision by a busy wait for all real-time processes.
+                *
+                * Its important on SMP not to do this holding locks.
+                */
+               udelay((t.tv_nsec + 999) / 1000);
+               return 0;
+       }
+
+       /* Round up by one jiffy for any non-zero request so we never
+        * sleep less than asked. */
+       expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
+
+       current->state = TASK_INTERRUPTIBLE;
+       expire = schedule_timeout(expire);
+
+       /* Non-zero leftover means we were woken early by a signal. */
+       if (expire) {
+               if (rmtp) {
+                       jiffies_to_timespec(expire, &t);
+                       if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
+                               return -EFAULT;
+               }
+               return -EINTR;
+       }
+       return 0;
+}
+
index 6b13b94476219ad28aa7140c8d7c20757404de34..463073251212c030ea62d610b93636574c12b802 100644 (file)
@@ -235,8 +235,6 @@ static unsigned long __init free_all_bootmem_core(int nid, bootmem_data_t *bdata
                        count++;
                        ClearPageReserved(page);
                        set_page_count(page, 1);
-                       if ((i+(bdata->node_boot_start >> PAGE_SHIFT)) >= (virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT))
-                               clear_bit(PG_DMA, &page->flags);
                        __free_page(page);
                }
        }
index 822c654472af198dbb1b10b34ee21b438737e58b..bdad6323349db4572cb951021027b24cdab376b8 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -545,8 +545,7 @@ static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area,
                mpnt->vm_page_prot = area->vm_page_prot;
                mpnt->vm_flags = area->vm_flags;
                mpnt->vm_ops = area->vm_ops;
-               mpnt->vm_pgoff = area->vm_pgoff;
-               area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
+               mpnt->vm_pgoff = area->vm_pgoff + ((end - area->vm_start) >> PAGE_SHIFT);
                mpnt->vm_file = area->vm_file;
                mpnt->vm_private_data = area->vm_private_data;
                if (mpnt->vm_file)
index fdbb8b10a0f7252bec1d41fc48faaf66bd2e68b8..c607af403237cb4936f802088d84b3d3724520c0 100644 (file)
@@ -557,7 +557,6 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
         */
        for (p = lmem_map; p < lmem_map + totalpages; p++) {
                set_page_count(p, 0);
-               p->flags = (1 << PG_DMA);
                SetPageReserved(p);
                init_waitqueue_head(&p->wait);
                memlist_init(&p->list);
index 0472ba4f85b055749f1a2a7a88cc5c2eb30e93ca..14fa4f80d7944fd8ce6ed6162c28cdea9624970a 100644 (file)
@@ -2760,7 +2760,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                if(!sk->dead) {
                        wake_up_interruptible(sk->sleep);
-                       sock_wake_async(sk->socket, 0, POLL_IN);
+                       sock_wake_async(sk->socket, 0, POLL_OUT);
                }
                return -1;
        }
index bc14b4034da832883f52a0f356e8625f6d41a102..0e6617c1f58013b4de9567f73b575baffafbb2d6 100644 (file)
@@ -1852,9 +1852,6 @@ int init_module(void)
 void __init packet_proto_init(struct net_proto *pro)
 #endif
 {
-#ifdef CONFIG_PROC_FS
-       struct proc_dir_entry *ent;
-#endif
        sock_register(&packet_family_ops);
        register_netdevice_notifier(&packet_netdev_notifier);
 #ifdef CONFIG_PROC_FS
index f5abe4440a22bb72011d4a7bde91adf4ecea3a15..328c01b080a0e4a2a702396b03f86f2d36ba5c56 100644 (file)
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Script to apply kernel patches.
-#   usage: patch-kernel [ sourcedir [ patchdir ] ]
+#   usage: patch-kernel [ sourcedir [ patchdir [ stopversion ] ] ]
 #     The source directory defaults to /usr/src/linux, and the patch
 #     directory defaults to the current directory.
 #
 # gzip, bzip, bzip2, zip, compress, and plaintext. 
 #
 #       Adam Sulmicki <adam@cfar.umd.edu>, 1st January 1997.
+#
+# Added ability to stop at a given version number
+# Put the full version number (i.e. 2.3.31) as the last parameter
+#       Dave Gilbert <linux@treblig.org>, 11th December 1999.
 
 # Set directories from arguments, or use defaults.
 sourcedir=${1-/usr/src/linux}
 patchdir=${2-.}
+stopvers=${3-imnotaversion}
 
 # set current VERSION, PATCHLEVEL, SUBLEVEL
 eval `sed -n 's/^\([A-Z]*\) = \([0-9]*\)$/\1=\2/p' $sourcedir/Makefile`
@@ -34,7 +39,9 @@ echo "Current kernel version is $VERSION.$PATCHLEVEL.$SUBLEVEL"
 while :
 do
     SUBLEVEL=`expr $SUBLEVEL + 1`
-    patch=patch-$VERSION.$PATCHLEVEL.$SUBLEVEL 
+    FULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL"
+
+    patch=patch-$FULLVERSION
     if [ -r $patchdir/${patch}.gz ]; then
         ext=".gz"
         name="gzip"
@@ -78,4 +85,10 @@ do
     fi
     # Remove backup files
     find $sourcedir/ '(' -name '*.orig' -o -name '.*.orig' ')' -exec rm -f {} \;
+
+    if [ $stopvers = $FULLVERSION ]
+    then
+        echo "Stoping at $FULLVERSION as requested. Enjoy."
+        break
+    fi
 done