From a0fb7c2516b081a568fab3d523b905ada3c6919f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 23 Nov 2007 15:29:11 -0500 Subject: [PATCH] Import 2.3.32pre3 --- Documentation/Configure.help | 22 +- MAINTAINERS | 7 +- Makefile | 4 + arch/arm/def-configs/brutus | 3 - arch/i386/defconfig | 6 +- arch/i386/kernel/apm.c | 4 +- arch/i386/kernel/pci-pc.c | 20 +- arch/i386/kernel/process.c | 4 +- arch/i386/mm/init.c | 6 +- arch/ppc/configs/common_defconfig | 3 - arch/ppc/configs/gemini_defconfig | 3 - arch/ppc/configs/oak_defconfig | 3 - arch/ppc/configs/walnut_defconfig | 3 - arch/ppc/defconfig | 3 - arch/sh/defconfig | 3 - arch/sparc64/defconfig | 3 - arch/sparc64/kernel/sys_sparc32.c | 16 +- drivers/ap1000/ap.c | 8 +- drivers/ap1000/ddv.c | 6 +- drivers/block/DAC960.c | 26 +- drivers/block/DAC960.h | 16 +- drivers/block/acsi.c | 8 +- drivers/block/amiflop.c | 6 +- drivers/block/ataflop.c | 6 +- drivers/block/cpqarray.c | 29 +- drivers/block/floppy.c | 10 +- drivers/block/hd.c | 4 +- drivers/block/ide-disk.c | 3 + drivers/block/ide-probe.c | 18 +- drivers/block/ide.c | 52 +- drivers/block/ll_rw_blk.c | 271 +++-- drivers/block/loop.c | 4 +- drivers/block/md.c | 5 +- drivers/block/nbd.c | 4 +- drivers/block/paride/pcd.c | 14 +- drivers/block/paride/pd.c | 20 +- drivers/block/paride/pf.c | 20 +- drivers/block/ps2esdi.c | 30 +- drivers/block/rd.c | 6 +- drivers/block/swim3.c | 6 +- drivers/block/swim_iop.c | 6 +- drivers/block/xd.c | 6 +- drivers/block/xd.h | 2 +- drivers/block/z2ram.c | 4 +- drivers/cdrom/aztcd.c | 6 +- drivers/cdrom/cdu31a.c | 4 +- drivers/cdrom/cm206.c | 7 +- drivers/cdrom/gscd.c | 14 +- drivers/cdrom/mcd.c | 4 +- drivers/cdrom/mcdx.c | 6 +- drivers/cdrom/optcd.c | 4 +- drivers/cdrom/sbpcd.c | 4 +- drivers/cdrom/sjcd.c | 4 +- drivers/cdrom/sonycd535.c | 4 +- drivers/char/Config.in | 14 +- drivers/char/Makefile | 8 +- drivers/char/agp/Makefile | 31 +- drivers/char/agp/{agp_backendP.h => agp.h} | 31 +- .../char/agp/{agp_backend.c => agpgart_be.c} | 
526 +++++---- drivers/char/agp/agpgart_fe.c | 81 +- drivers/char/drm/drmP.h | 6 +- drivers/char/drm/fops.c | 1 + drivers/char/drm/init.c | 10 + drivers/char/synclink.c | 11 +- drivers/i2o/i2o_block.c | 55 +- drivers/net/tlan.c | 7 +- drivers/net/tlan.h | 4 + drivers/net/tokenring/ibmtr.c | 398 ++++--- drivers/net/tokenring/ibmtr.h | 2 +- drivers/pci/names.c | 1 + drivers/scsi/Config.in | 6 +- drivers/scsi/Makefile | 11 +- drivers/scsi/advansys.c | 11 +- drivers/scsi/aha1542.c | 4 + drivers/scsi/atp870u.c | 711 +++++++----- drivers/scsi/atp870u.h | 59 +- drivers/scsi/eata.c | 2 +- drivers/scsi/eata_dma.c | 2 +- drivers/scsi/g_NCR5380.c | 36 +- drivers/scsi/gdth_proc.c | 2 - drivers/scsi/hosts.c | 11 +- drivers/scsi/hosts.h | 30 +- drivers/scsi/ibmmca.c | 31 +- drivers/scsi/inia100.c | 1 + drivers/scsi/ips.c | 2 +- drivers/scsi/megaraid.c | 4 +- drivers/scsi/scsi.c | 773 ++++++------- drivers/scsi/scsi.h | 128 +-- drivers/scsi/scsi_debug.c | 112 +- drivers/scsi/scsi_debug.h | 5 +- drivers/scsi/scsi_error.c | 72 +- drivers/scsi/scsi_ioctl.c | 66 +- drivers/scsi/scsi_lib.c | 781 +++++++++++++ drivers/scsi/scsi_merge.c | 770 +++++++++++++ drivers/scsi/scsi_obsolete.c | 53 +- drivers/scsi/scsi_queue.c | 155 +-- drivers/scsi/scsi_syms.c | 13 +- drivers/scsi/sd.c | 1022 ++++------------- drivers/scsi/sd.h | 5 +- drivers/scsi/sg.c | 10 +- drivers/scsi/sr.c | 796 ++++--------- drivers/scsi/sr.h | 5 +- drivers/scsi/sr_ioctl.c | 685 ++++++----- drivers/scsi/sr_vendor.c | 222 ++-- drivers/scsi/st.c | 7 +- drivers/scsi/u14-34f.c | 2 +- drivers/sound/sb_card.c | 52 +- drivers/video/fbgen.c | 1 + fs/Config.in | 7 +- fs/autofs/symlink.c | 42 +- fs/buffer.c | 57 +- fs/coda/symlink.c | 114 +- fs/ext2/inode.c | 4 +- fs/ext2/namei.c | 58 +- fs/ext2/symlink.c | 97 +- fs/fcntl.c | 11 + fs/lockd/svclock.c | 14 +- fs/lockd/xdr.c | 12 +- fs/locks.c | 138 ++- fs/minix/namei.c | 57 +- fs/minix/symlink.c | 70 +- fs/namei.c | 83 ++ fs/nfs/dir.c | 5 +- fs/nfs/file.c | 2 +- fs/nfs/symlink.c | 85 
+- fs/nfsd/export.c | 20 +- fs/nfsd/nfs3proc.c | 4 +- fs/nfsd/vfs.c | 43 +- fs/ntfs/fs.c | 10 +- fs/ntfs/super.c | 19 +- fs/ntfs/super.h | 2 +- fs/partitions/Config.in | 3 +- fs/proc/generic.c | 35 +- fs/proc/root.c | 26 +- fs/romfs/inode.c | 78 +- fs/super.c | 27 +- fs/sysv/namei.c | 64 +- fs/sysv/symlink.c | 71 +- fs/udf/symlink.c | 182 +-- fs/ufs/inode.c | 4 +- fs/ufs/namei.c | 57 +- fs/ufs/symlink.c | 117 +- fs/umsdos/README-WIP.txt | 10 + fs/umsdos/check.c | 9 + fs/umsdos/dir.c | 6 +- fs/umsdos/inode.c | 9 +- fs/umsdos/ioctl.c | 7 +- fs/umsdos/namei.c | 62 +- fs/umsdos/rdir.c | 2 +- include/asm-alpha/fcntl.h | 4 + include/asm-alpha/pgalloc.h | 34 +- include/asm-alpha/posix_types.h | 5 +- include/asm-alpha/resource.h | 7 + include/asm-arm/fcntl.h | 12 + include/asm-arm/resource.h | 6 + include/asm-i386/bitops.h | 3 +- include/asm-i386/fcntl.h | 12 + include/asm-i386/io.h | 4 +- include/asm-i386/pgalloc.h | 42 +- include/asm-i386/processor.h | 1 - include/asm-i386/resource.h | 6 + include/asm-i386/string-486.h | 13 +- include/asm-i386/string.h | 39 +- include/asm-m68k/fcntl.h | 12 + include/asm-m68k/resource.h | 6 + include/asm-mips/fcntl.h | 12 + include/asm-mips/pgtable.h | 34 +- include/asm-mips/resource.h | 6 + include/asm-ppc/fcntl.h | 12 + include/asm-ppc/pgalloc.h | 37 +- include/asm-ppc/resource.h | 7 + include/asm-sh/fcntl.h | 12 + include/asm-sh/pgtable.h | 31 +- include/asm-sh/resource.h | 6 + include/asm-sparc/resource.h | 6 + include/asm-sparc64/fcntl.h | 2 + include/asm-sparc64/pgtable.h | 47 +- include/asm-sparc64/resource.h | 6 + include/linux/agp_backend.h | 20 +- include/linux/agpgart.h | 4 +- include/linux/blk.h | 10 +- include/linux/blkdev.h | 67 +- include/linux/ext2_fs.h | 1 + include/linux/fs.h | 21 +- include/linux/ide.h | 24 +- include/linux/mm.h | 29 +- include/linux/module.h | 4 + include/linux/nfsd/nfsd.h | 1 + include/linux/nfsd/nfsfh.h | 1 + include/linux/ntfs_fs_sb.h | 1 - include/linux/resource.h | 7 - include/linux/sched.h | 4 +- 
include/linux/ufs_fs.h | 1 + include/linux/umsdos_fs.h | 2 +- include/linux/umsdos_fs_i.h | 10 +- include/scsi/scsi.h | 4 + kernel/Makefile | 2 +- kernel/fork.c | 20 +- kernel/ksyms.c | 6 + kernel/module.c | 22 +- kernel/sched.c | 781 +------------ kernel/timer.c | 791 +++++++++++++ mm/bootmem.c | 2 - mm/mmap.c | 3 +- mm/page_alloc.c | 1 - net/ipv4/tcp_input.c | 2 +- net/packet/af_packet.c | 3 - scripts/patch-kernel | 17 +- 208 files changed, 6661 insertions(+), 5951 deletions(-) rename drivers/char/agp/{agp_backendP.h => agp.h} (89%) rename drivers/char/agp/{agp_backend.c => agpgart_be.c} (79%) create mode 100644 drivers/scsi/scsi_lib.c create mode 100644 drivers/scsi/scsi_merge.c create mode 100644 kernel/timer.c diff --git a/Documentation/Configure.help b/Documentation/Configure.help index 30c8f3901568..660e5157eb63 100644 --- a/Documentation/Configure.help +++ b/Documentation/Configure.help @@ -4081,6 +4081,16 @@ CONFIG_CHR_DEV_SG Documentation/scsi.txt. The module will be called sg.o. If unsure, say N. +Debug new queueing code for SCSI +CONFIG_SCSI_DEBUG_QUEUES + This option turns on a lot of additional consistency checking for the new + queueing code. This will adversely affect performance, but it is likely + that bugs will be caught sooner if this is turned on. This will typically + cause the kernel to panic if an error is detected, but it would have probably + crashed if the panic weren't there. Comments/questions/problems to + linux-scsi mailing list please. See http://www.andante.org/scsi_queue.html + for more uptodate information. + Probe all LUNs on each SCSI device CONFIG_SCSI_MULTI_LUN If you have a SCSI device that supports more than one LUN (Logical @@ -8307,6 +8317,10 @@ CONFIG_UMSDOS_FS MSDOS floppies. You will need a program called umssync in order to make use of umsdos; read Documentation/filesystems/umsdos.txt. 
+ To get utilities for initializing/checking UMSDOS filesystem, or + latest patches and/or information, visit UMSDOS homepage at + http://www.voyager.hr/~mnalis/umsdos/ . + This option enlarges your kernel by about 28 KB and it only works if you said Y to both "fat fs support" and "msdos fs support" above. If you want to compile this as a module ( = code which can be inserted @@ -8407,14 +8421,6 @@ CONFIG_NFSD The module is called nfsd.o. If you want to compile it as a module, say M here and read Documentation/modules.txt. If unsure, say N. -Emulate SUN NFS server -CONFIG_NFSD_SUN - If you would like for the server to allow clients to access - directories that are mount points on the local filesystem (this is - how nfsd behaves on Sun systems), say Y here. - If you use Tru64 clients, say Y. - If unsure, say N. - Provide NFSv3 server support (EXPERIMENTAL) CONFIG_NFSD_V3 If you would like to include the NFSv3 server was well as the NFSv2 diff --git a/MAINTAINERS b/MAINTAINERS index d9eac38d8636..a2a2f385a996 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -883,8 +883,10 @@ M: kgb@manjak.knm.org.pl S: Maintained TLAN NETWORK DRIVER +P: Torben Mathiasen +M: torben.mathiasen@compaq.com L: tlan@vuser.vu.union.edu -S: Orphan +S: Maintained TOKEN-RING NETWORK DRIVER P: Paul Norton @@ -917,8 +919,9 @@ S: Maintained UMSDOS FILESYSTEM P: Matija Nalis -M: mnalis@jagor.srce.hr +M: Matija Nalis L: linux-kernel@vger.rutgers.edu +W: http://www.voyager.hr/~mnalis/umsdos/ S: Maintained UNIFORM CDROM DRIVER diff --git a/Makefile b/Makefile index c6397bb8fa8b..5c26884b20bb 100644 --- a/Makefile +++ b/Makefile @@ -121,6 +121,10 @@ ifdef CONFIG_DRM DRIVERS += drivers/char/drm/drm.o endif +ifdef CONFIG_AGP +DRIVERS += drivers/char/agp/agp.o +endif + ifdef CONFIG_NUBUS DRIVERS := $(DRIVERS) drivers/nubus/nubus.a endif diff --git a/arch/arm/def-configs/brutus b/arch/arm/def-configs/brutus index a700f7d17cdd..aade81639813 100644 --- a/arch/arm/def-configs/brutus +++ 
b/arch/arm/def-configs/brutus @@ -203,9 +203,6 @@ CONFIG_EXT2_FS=y # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/i386/defconfig b/arch/i386/defconfig index 1779090aa96a..206cad096e1c 100644 --- a/arch/i386/defconfig +++ b/arch/i386/defconfig @@ -381,6 +381,9 @@ CONFIG_PSMOUSE=y # Ftape, the floppy tape device driver # # CONFIG_FTAPE is not set +CONFIG_DRM=y +CONFIG_DRM_TDFX=y +# CONFIG_DRM_GAMMA is not set # # PCMCIA character device support @@ -438,9 +441,6 @@ CONFIG_LOCKD=y # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index a2453d9f3769..564e6b42b2ca 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c @@ -380,7 +380,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in, __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" + "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "; cld\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -413,7 +413,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax) __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" + "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry)"; cld\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" diff --git a/arch/i386/kernel/pci-pc.c b/arch/i386/kernel/pci-pc.c index 2df21faec626..9ee1d6355785 100644 --- a/arch/i386/kernel/pci-pc.c +++ b/arch/i386/kernel/pci-pc.c @@ -342,7 
+342,7 @@ static unsigned long bios32_service(unsigned long service) unsigned long flags; __save_flags(flags); __cli(); - __asm__("lcall (%%edi)" + __asm__("lcall (%%edi); cld" : "=a" (return_code), "=b" (address), "=c" (length), @@ -383,7 +383,7 @@ static int __init check_pcibios(void) __save_flags(flags); __cli(); __asm__( - "lcall (%%edi)\n\t" + "lcall (%%edi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -427,7 +427,7 @@ static int __init pci_bios_find_device (unsigned short vendor, unsigned short de unsigned short bx; unsigned short ret; - __asm__("lcall (%%edi)\n\t" + __asm__("lcall (%%edi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -448,7 +448,7 @@ static int pci_bios_read_config_byte(struct pci_dev *dev, int where, u8 *value) unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -466,7 +466,7 @@ static int pci_bios_read_config_word(struct pci_dev *dev, int where, u16 *value) unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -484,7 +484,7 @@ static int pci_bios_read_config_dword(struct pci_dev *dev, int where, u32 *value unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -502,7 +502,7 @@ static int pci_bios_write_config_byte(struct pci_dev *dev, int where, u8 value) unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -520,7 +520,7 @@ static int pci_bios_write_config_word(struct pci_dev *dev, int where, u16 value) unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); 
cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -538,7 +538,7 @@ static int pci_bios_write_config_dword(struct pci_dev *dev, int where, u32 value unsigned long ret; unsigned long bx = (dev->bus->number << 8) | dev->devfn; - __asm__("lcall (%%esi)\n\t" + __asm__("lcall (%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -702,7 +702,7 @@ static struct irq_routing_table * __init pcibios_get_irq_routing_table(void) __asm__("push %%es\n\t" "push %%ds\n\t" "pop %%es\n\t" - "lcall (%%esi)\n\t" + "lcall (%%esi); cld\n\t" "pop %%es\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 680563959e2c..4f9c943534e8 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -462,7 +462,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, struct pt_regs * childregs; childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1; - *childregs = *regs; + struct_cpy(childregs, regs); childregs->eax = 0; childregs->esp = esp; @@ -475,7 +475,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, savesegment(gs,p->thread.gs); unlazy_fpu(current); - p->thread.i387 = current->thread.i387; + struct_cpy(&p->thread.i387, ¤t->thread.i387); return 0; } diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index b99daee8460b..7ff12d210739 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -36,6 +36,7 @@ #include unsigned long highstart_pfn, highend_pfn; +unsigned long *pgd_quicklist = (unsigned long *)0; static unsigned long totalram_pages = 0; static unsigned long totalhigh_pages = 0; @@ -162,7 +163,10 @@ int do_check_pgt_cache(int low, int high) if(pgtable_cache_size > high) { do { if(pgd_quicklist) - free_pgd_slow(get_pgd_fast()), freed++; + mmlist_modify_lock(), \ + free_pgd_slow(get_pgd_fast()), \ + mmlist_modify_unlock(), \ + freed++; if(pmd_quicklist) free_pmd_slow(get_pmd_fast()), freed++; if(pte_quicklist) diff --git 
a/arch/ppc/configs/common_defconfig b/arch/ppc/configs/common_defconfig index 19404751d5f3..9776262747b0 100644 --- a/arch/ppc/configs/common_defconfig +++ b/arch/ppc/configs/common_defconfig @@ -529,9 +529,6 @@ CONFIG_LOCKD=y # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/ppc/configs/gemini_defconfig b/arch/ppc/configs/gemini_defconfig index 2b0d3a11c3d9..b3129c7a54de 100644 --- a/arch/ppc/configs/gemini_defconfig +++ b/arch/ppc/configs/gemini_defconfig @@ -389,9 +389,6 @@ CONFIG_EXT2_FS=y # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/ppc/configs/oak_defconfig b/arch/ppc/configs/oak_defconfig index eb6d80f54e7a..1c2daf125f3d 100644 --- a/arch/ppc/configs/oak_defconfig +++ b/arch/ppc/configs/oak_defconfig @@ -276,9 +276,6 @@ CONFIG_LOCKD=y # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/ppc/configs/walnut_defconfig b/arch/ppc/configs/walnut_defconfig index 9903c893dda2..66edd11b61c3 100644 --- a/arch/ppc/configs/walnut_defconfig +++ b/arch/ppc/configs/walnut_defconfig @@ -276,9 +276,6 @@ CONFIG_LOCKD=y # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# 
CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/ppc/defconfig b/arch/ppc/defconfig index 671fc7637a7e..2a3aac0f73d0 100644 --- a/arch/ppc/defconfig +++ b/arch/ppc/defconfig @@ -529,9 +529,6 @@ CONFIG_LOCKD=y # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/sh/defconfig b/arch/sh/defconfig index 6a65ad411776..37440e7c09d5 100644 --- a/arch/sh/defconfig +++ b/arch/sh/defconfig @@ -82,9 +82,6 @@ CONFIG_EXT2_FS=y # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig index ef84238fd1a7..904027e0748e 100644 --- a/arch/sparc64/defconfig +++ b/arch/sparc64/defconfig @@ -331,9 +331,6 @@ CONFIG_NCP_FS=m # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_SGI_PARTITION is not set CONFIG_SUN_PARTITION=y CONFIG_NLS=y diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index a1e0f26dd9a6..e394ec35b2b2 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c @@ -2988,11 +2988,8 @@ qm_deps(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret) if (mod->next == NULL) return -EINVAL; - if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING) - if (put_user(0, ret)) - return -EFAULT; - else - return 0; + if (!MOD_CAN_QUERY(mod)) 
+ return put_user(0, ret); space = 0; for (i = 0; i < mod->ndeps; ++i) { @@ -3008,10 +3005,7 @@ qm_deps(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret) space += len; } - if (put_user(i, ret)) - return -EFAULT; - else - return 0; + return put_user(i, ret); calc_space_needed: space += len; @@ -3032,7 +3026,7 @@ qm_refs(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret) if (mod->next == NULL) return -EINVAL; - if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING) + if (!MOD_CAN_QUERY(mod)) if (put_user(0, ret)) return -EFAULT; else @@ -3076,7 +3070,7 @@ qm_symbols(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret char *strings; unsigned *vals; - if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING) + if (!MOD_CAN_QUERY(mod)) if (put_user(0, ret)) return -EFAULT; else diff --git a/drivers/ap1000/ap.c b/drivers/ap1000/ap.c index 64340bedaeda..d7ac5cac419f 100644 --- a/drivers/ap1000/ap.c +++ b/drivers/ap1000/ap.c @@ -53,7 +53,7 @@ static void ap_release(struct inode * inode, struct file * filp) MOD_DEC_USE_COUNT; } -static void ap_request(void) +static void ap_request(request_queue_t * q) { struct cap_request creq; unsigned int minor; @@ -160,7 +160,7 @@ void ap_complete(struct cap_request *creq) #endif end_request(1); request_count--; - ap_request(); + ap_request(NULL); } @@ -271,7 +271,7 @@ int ap_init(void) return -1; } printk("ap_init: register dev %d\n", MAJOR_NR); - blk_dev[MAJOR_NR].request_fn = &ap_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &ap_request); for (i=0;inext; free_irq(APOPT0_IRQ, NULL); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); } #endif /* MODULE */ diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 2da68259ea29..6c0120458ea5 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -1026,7 +1026,7 @@ static boolean DAC960_ReportDeviceConfiguration(DAC960_Controller_T *Controller) static 
boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) { - static void (*RequestFunctions[DAC960_MaxControllers])(void) = + static void (*RequestFunctions[DAC960_MaxControllers])(request_queue_t *) = { DAC960_RequestFunction0, DAC960_RequestFunction1, DAC960_RequestFunction2, DAC960_RequestFunction3, DAC960_RequestFunction4, DAC960_RequestFunction5, @@ -1046,8 +1046,8 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) /* Initialize the I/O Request Function. */ - blk_dev[MajorNumber].request_fn = - RequestFunctions[Controller->ControllerNumber]; + blk_init_queue(BLK_DEFAULT_QUEUE(MajorNumber), + RequestFunctions[Controller->ControllerNumber]); /* Initialize the Disk Partitions array, Partition Sizes array, Block Sizes array, Max Sectors per Request array, and Max Segments per Request array. @@ -1113,7 +1113,7 @@ static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller) /* Remove the I/O Request Function. */ - blk_dev[MajorNumber].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MajorNumber)); /* Remove the Disk Partitions array, Partition Sizes array, Block Sizes array, Max Sectors per Request array, and Max Segments per Request array. @@ -1272,7 +1272,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller, boolean WaitForCommand) { IO_Request_T **RequestQueuePointer = - &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].current_request; + &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.current_request; IO_Request_T *Request; DAC960_Command_T *Command; char *RequestBuffer; @@ -1375,7 +1375,7 @@ static inline void DAC960_ProcessRequests(DAC960_Controller_T *Controller) DAC960_RequestFunction0 is the I/O Request Function for DAC960 Controller 0. 
*/ -static void DAC960_RequestFunction0(void) +static void DAC960_RequestFunction0(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[0]; ProcessorFlags_T ProcessorFlags; @@ -1398,7 +1398,7 @@ static void DAC960_RequestFunction0(void) DAC960_RequestFunction1 is the I/O Request Function for DAC960 Controller 1. */ -static void DAC960_RequestFunction1(void) +static void DAC960_RequestFunction1(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[1]; ProcessorFlags_T ProcessorFlags; @@ -1421,7 +1421,7 @@ static void DAC960_RequestFunction1(void) DAC960_RequestFunction2 is the I/O Request Function for DAC960 Controller 2. */ -static void DAC960_RequestFunction2(void) +static void DAC960_RequestFunction2(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[2]; ProcessorFlags_T ProcessorFlags; @@ -1444,7 +1444,7 @@ static void DAC960_RequestFunction2(void) DAC960_RequestFunction3 is the I/O Request Function for DAC960 Controller 3. */ -static void DAC960_RequestFunction3(void) +static void DAC960_RequestFunction3(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[3]; ProcessorFlags_T ProcessorFlags; @@ -1467,7 +1467,7 @@ static void DAC960_RequestFunction3(void) DAC960_RequestFunction4 is the I/O Request Function for DAC960 Controller 4. */ -static void DAC960_RequestFunction4(void) +static void DAC960_RequestFunction4(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[4]; ProcessorFlags_T ProcessorFlags; @@ -1490,7 +1490,7 @@ static void DAC960_RequestFunction4(void) DAC960_RequestFunction5 is the I/O Request Function for DAC960 Controller 5. 
*/ -static void DAC960_RequestFunction5(void) +static void DAC960_RequestFunction5(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[5]; ProcessorFlags_T ProcessorFlags; @@ -1513,7 +1513,7 @@ static void DAC960_RequestFunction5(void) DAC960_RequestFunction6 is the I/O Request Function for DAC960 Controller 6. */ -static void DAC960_RequestFunction6(void) +static void DAC960_RequestFunction6(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[6]; ProcessorFlags_T ProcessorFlags; @@ -1536,7 +1536,7 @@ static void DAC960_RequestFunction6(void) DAC960_RequestFunction7 is the I/O Request Function for DAC960 Controller 7. */ -static void DAC960_RequestFunction7(void) +static void DAC960_RequestFunction7(request_queue_t * q) { DAC960_Controller_T *Controller = DAC960_Controllers[7]; ProcessorFlags_T ProcessorFlags; diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h index 1696c507ba62..e93448faba1e 100644 --- a/drivers/block/DAC960.h +++ b/drivers/block/DAC960.h @@ -2208,14 +2208,14 @@ DAC960_V3_ReadStatusRegister(void *ControllerBaseAddress) static void DAC960_FinalizeController(DAC960_Controller_T *); static int DAC960_Finalize(NotifierBlock_T *, unsigned long, void *); -static void DAC960_RequestFunction0(void); -static void DAC960_RequestFunction1(void); -static void DAC960_RequestFunction2(void); -static void DAC960_RequestFunction3(void); -static void DAC960_RequestFunction4(void); -static void DAC960_RequestFunction5(void); -static void DAC960_RequestFunction6(void); -static void DAC960_RequestFunction7(void); +static void DAC960_RequestFunction0(request_queue_t *); +static void DAC960_RequestFunction1(request_queue_t *); +static void DAC960_RequestFunction2(request_queue_t *); +static void DAC960_RequestFunction3(request_queue_t *); +static void DAC960_RequestFunction4(request_queue_t *); +static void DAC960_RequestFunction5(request_queue_t *); +static void DAC960_RequestFunction6(request_queue_t *); 
+static void DAC960_RequestFunction7(request_queue_t *); static void DAC960_InterruptHandler(int, void *, Registers_T *); static void DAC960_QueueMonitoringCommand(DAC960_Command_T *); static void DAC960_MonitoringTimerFunction(unsigned long); diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c index 80aa524723ef..d1631c973cbb 100644 --- a/drivers/block/acsi.c +++ b/drivers/block/acsi.c @@ -360,7 +360,7 @@ static void acsi_times_out( unsigned long dummy ); static void copy_to_acsibuffer( void ); static void copy_from_acsibuffer( void ); static void do_end_requests( void ); -static void do_acsi_request( void ); +static void do_acsi_request( request_queue_t * ); static void redo_acsi_request( void ); static int acsi_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg ); @@ -938,7 +938,7 @@ static void do_end_requests( void ) * ***********************************************************************/ -static void do_acsi_request( void ) +static void do_acsi_request( request_queue_t * q ) { stdma_lock( acsi_interrupt, NULL ); @@ -1808,7 +1808,7 @@ int acsi_init( void ) phys_acsi_buffer = virt_to_phys( acsi_buffer ); STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 
0x00000000 : 0xff000000; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */ acsi_gendisk.next = gendisk_head; gendisk_head = &acsi_gendisk; @@ -1838,7 +1838,7 @@ void cleanup_module(void) struct gendisk ** gdp; del_timer( &acsi_timer ); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); atari_stram_free( acsi_buffer ); if (unregister_blkdev( MAJOR_NR, "ad" ) != 0) diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index a3ee727bde29..1c36e09f27cb 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1484,7 +1484,7 @@ static void redo_fd_request(void) goto repeat; } -static void do_fd_request(void) +static void do_fd_request(request_queue_t * q) { redo_fd_request(); } @@ -1869,7 +1869,7 @@ int __init amiga_floppy_init(void) post_write_timer.data = 0; post_write_timer.function = post_write; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = floppy_blocksizes; blk_size[MAJOR_NR] = floppy_sizes; @@ -1911,7 +1911,7 @@ void cleanup_module(void) amiga_chip_free(raw_buf); blk_size[MAJOR_NR] = NULL; blksize_size[MAJOR_NR] = NULL; - blk_dev[MAJOR_NR].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); unregister_blkdev(MAJOR_NR, "fd"); } #endif diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index b8df872053af..b47904cdbc64 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1529,7 +1529,7 @@ repeat: } -void do_fd_request(void) +void do_fd_request(request_queue_t * q) { unsigned long flags; @@ -2051,7 +2051,7 @@ int __init atari_floppy_init (void) blk_size[MAJOR_NR] = floppy_sizes; blksize_size[MAJOR_NR] = floppy_blocksizes; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); printk(KERN_INFO "Atari 
floppy driver: max. %cD, %strack buffering\n", DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E', @@ -2103,7 +2103,7 @@ void cleanup_module (void) { unregister_blkdev(MAJOR_NR, "fd"); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); timer_active &= ~(1 << FLOPPY_TIMER); timer_table[FLOPPY_TIMER].fn = 0; atari_stram_free( DMABuffer ); diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 930e4857104c..5d5799a89e5f 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -140,14 +140,14 @@ static void do_ida_request(int i); */ #define DO_IDA_REQUEST(x) { do_ida_request(x); } -static void do_ida_request0(void) DO_IDA_REQUEST(0); -static void do_ida_request1(void) DO_IDA_REQUEST(1); -static void do_ida_request2(void) DO_IDA_REQUEST(2); -static void do_ida_request3(void) DO_IDA_REQUEST(3); -static void do_ida_request4(void) DO_IDA_REQUEST(4); -static void do_ida_request5(void) DO_IDA_REQUEST(5); -static void do_ida_request6(void) DO_IDA_REQUEST(6); -static void do_ida_request7(void) DO_IDA_REQUEST(7); +static void do_ida_request0(request_queue_t * q) DO_IDA_REQUEST(0); +static void do_ida_request1(request_queue_t * q) DO_IDA_REQUEST(1); +static void do_ida_request2(request_queue_t * q) DO_IDA_REQUEST(2); +static void do_ida_request3(request_queue_t * q) DO_IDA_REQUEST(3); +static void do_ida_request4(request_queue_t * q) DO_IDA_REQUEST(4); +static void do_ida_request5(request_queue_t * q) DO_IDA_REQUEST(5); +static void do_ida_request6(request_queue_t * q) DO_IDA_REQUEST(6); +static void do_ida_request7(request_queue_t * q) DO_IDA_REQUEST(7); static void start_io(ctlr_info_t *h); @@ -379,7 +379,7 @@ void cleanup_module(void) */ void __init cpqarray_init(void) { - void (*request_fns[MAX_CTLR])(void) = { + void (*request_fns[MAX_CTLR])(request_queue_t *) = { do_ida_request0, do_ida_request1, do_ida_request2, do_ida_request3, do_ida_request4, do_ida_request5, @@ -480,7 +480,9 @@ void __init 
cpqarray_init(void) ida_gendisk[i].sizes = ida_sizes + (i*256); /* ida_gendisk[i].nr_real is handled by getgeometry */ - blk_dev[MAJOR_NR+i].request_fn = request_fns[i]; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i), request_fns[i]); + blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR + i), 0); + blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256); hardsect_size[MAJOR_NR+i] = ida_hardsizes + (i*256); read_ahead[MAJOR_NR+i] = READ_AHEAD; @@ -894,10 +896,13 @@ static void do_ida_request(int ctlr) cmdlist_t *c; int seg, sect; char *lastdataend; + request_queue_t * q; struct buffer_head *bh; struct request *creq; - creq = blk_dev[MAJOR_NR+ctlr].current_request; + q = &blk_dev[MAJOR_NR+ctlr].request_queue; + + creq = q->current_request; if (creq == NULL || creq->rq_status == RQ_INACTIVE) goto doreq_done; @@ -974,7 +979,7 @@ DBGPX( printk("More to do on same request %p\n", creq); ); } else { DBGPX( printk("Done with %p, queueing %p\n", creq, creq->next); ); creq->rq_status = RQ_INACTIVE; - blk_dev[MAJOR_NR+ctlr].current_request = creq->next; + q->current_request = creq->next; wake_up(&wait_for_request); } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 3f08912a4eda..24dd21c3a8aa 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2930,7 +2930,7 @@ static void process_fd_request(void) schedule_bh( (void *)(void *) redo_fd_request); } -static void do_fd_request(void) +static void do_fd_request(request_queue_t * q) { if(usage_count == 0) { printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT); @@ -4130,7 +4130,7 @@ int __init floppy_init(void) blk_size[MAJOR_NR] = floppy_sizes; blksize_size[MAJOR_NR] = floppy_blocksizes; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT); config_types(); @@ -4159,7 +4159,7 @@ int __init floppy_init(void) fdc = 0; /* reset fdc in case of unexpected interrupt */ if 
(floppy_grab_irq_and_dma()){ del_timer(&fd_timeout); - blk_dev[MAJOR_NR].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); unregister_blkdev(MAJOR_NR,"fd"); del_timer(&fd_timeout); return -EBUSY; @@ -4225,7 +4225,7 @@ int __init floppy_init(void) schedule(); if (usage_count) floppy_release_irq_and_dma(); - blk_dev[MAJOR_NR].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); unregister_blkdev(MAJOR_NR,"fd"); } return have_no_fdc; @@ -4447,7 +4447,7 @@ void cleanup_module(void) unregister_blkdev(MAJOR_NR, "fd"); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); /* eject disk, if any */ dummy = fd_eject(0); } diff --git a/drivers/block/hd.c b/drivers/block/hd.c index 49527026a3f6..8cf37c3de59a 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -585,7 +585,7 @@ repeat: panic("unknown hd-command"); } -static void do_hd_request (void) +static void do_hd_request (request_queue_t * q) { disable_irq(HD_IRQ); hd_request(); @@ -813,7 +813,7 @@ int __init hd_init(void) printk("hd: unable to get major %d for hard disk\n",MAJOR_NR); return -1; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */ hd_gendisk.next = gendisk_head; gendisk_head = &hd_gendisk; diff --git a/drivers/block/ide-disk.c b/drivers/block/ide-disk.c index 15b9cd100351..133d9cd85ca4 100644 --- a/drivers/block/ide-disk.c +++ b/drivers/block/ide-disk.c @@ -242,7 +242,10 @@ int ide_multwrite (ide_drive_t *drive, unsigned int mcount) rq->sector += nsect; #endif if ((rq->nr_sectors -= nsect) <= 0) + { + spin_unlock_irqrestore(&io_request_lock, flags); break; + } if ((rq->current_nr_sectors -= nsect) == 0) { if ((rq->bh = rq->bh->b_reqnext) != NULL) { rq->current_nr_sectors = rq->bh->b_size>>9; diff --git a/drivers/block/ide-probe.c b/drivers/block/ide-probe.c index 34ab3eb7b56f..c4dc22408c17 100644 --- 
a/drivers/block/ide-probe.c +++ b/drivers/block/ide-probe.c @@ -704,7 +704,8 @@ static void init_gendisk (ide_hwif_t *hwif) static int hwif_init (ide_hwif_t *hwif) { - void (*rfn)(void); + ide_drive_t *drive; + void (*rfn)(request_queue_t *); if (!hwif->present) return 0; @@ -786,11 +787,24 @@ static int hwif_init (ide_hwif_t *hwif) init_gendisk(hwif); blk_dev[hwif->major].data = hwif; - blk_dev[hwif->major].request_fn = rfn; blk_dev[hwif->major].queue = ide_get_queue; read_ahead[hwif->major] = 8; /* (4kB) */ hwif->present = 1; /* success */ + /* + * FIXME(eric) - This needs to be tested. I *think* that this + * is correct. Also, I believe that there is no longer any + * reason to have multiple functions (do_ide[0-7]_request) + * functions - the queuedata field could be used to indicate + * the correct hardware group - either this, or we could add + * a new field to request_queue_t to hold this information. + */ + drive = &hwif->drives[0]; + blk_init_queue(&drive->queue, rfn); + + drive = &hwif->drives[1]; + blk_init_queue(&drive->queue, rfn); + #if (DEBUG_SPINLOCK > 0) { static int done = 0; diff --git a/drivers/block/ide.c b/drivers/block/ide.c index cfd9f36f9d1c..0222f8a0c3ef 100644 --- a/drivers/block/ide.c +++ b/drivers/block/ide.c @@ -493,8 +493,8 @@ void ide_end_request (byte uptodate, ide_hwgroup_t *hwgroup) if (!end_that_request_first(rq, uptodate, hwgroup->drive->name)) { add_blkdev_randomness(MAJOR(rq->rq_dev)); - hwgroup->drive->queue = rq->next; - blk_dev[MAJOR(rq->rq_dev)].current_request = NULL; + hwgroup->drive->queue.current_request = rq->next; + blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL; hwgroup->rq = NULL; end_that_request_last(rq); } @@ -755,8 +755,8 @@ void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err) } } spin_lock_irqsave(&io_request_lock, flags); - drive->queue = rq->next; - blk_dev[MAJOR(rq->rq_dev)].current_request = NULL; + drive->queue.current_request = rq->next; + 
blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL; HWGROUP(drive)->rq = NULL; rq->rq_status = RQ_INACTIVE; spin_unlock_irqrestore(&io_request_lock, flags); @@ -1059,7 +1059,7 @@ static ide_startstop_t start_request (ide_drive_t *drive) { ide_startstop_t startstop; unsigned long block, blockend; - struct request *rq = drive->queue; + struct request *rq = drive->queue.current_request; unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS; ide_hwif_t *hwif = HWIF(drive); @@ -1142,13 +1142,13 @@ repeat: best = NULL; drive = hwgroup->drive; do { - if (drive->queue && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) { + if (drive->queue.current_request && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) { if (!best || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep))) || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive)))) { struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major]; - if (bdev->current_request != &bdev->plug) + if( !bdev->request_queue.plugged ) best = drive; } } @@ -1228,8 +1228,8 @@ static void ide_do_request (ide_hwgroup_t *hwgroup) drive = hwgroup->drive; do { bdev = &blk_dev[HWIF(drive)->major]; - if (bdev->current_request != &bdev->plug) /* FIXME: this will do for now */ - bdev->current_request = NULL; /* (broken since patch-2.1.15) */ + if( !bdev->request_queue.plugged ) + bdev->request_queue.current_request = NULL; /* (broken since patch-2.1.15) */ if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep))) sleep = drive->sleep; } while ((drive = drive->next) != hwgroup->drive); @@ -1267,9 +1267,9 @@ static void ide_do_request (ide_hwgroup_t *hwgroup) drive->service_start = jiffies; bdev = &blk_dev[hwif->major]; - if (bdev->current_request == &bdev->plug) /* FIXME: paranoia */ + if( bdev->request_queue.plugged ) /* FIXME: paranoia */ printk("%s: Huh? 
nuking plugged queue\n", drive->name); - bdev->current_request = hwgroup->rq = drive->queue; + bdev->request_queue.current_request = hwgroup->rq = drive->queue.current_request; spin_unlock(&io_request_lock); if (!hwif->serialized) /* play it safe with buggy hardware */ ide__sti(); @@ -1283,76 +1283,76 @@ static void ide_do_request (ide_hwgroup_t *hwgroup) /* * ide_get_queue() returns the queue which corresponds to a given device. */ -struct request **ide_get_queue (kdev_t dev) +request_queue_t *ide_get_queue (kdev_t dev) { ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data; return &hwif->drives[DEVICE_NR(dev) & 1].queue; } -void do_ide0_request (void) +void do_ide0_request (request_queue_t *q) { ide_do_request (ide_hwifs[0].hwgroup); } #if MAX_HWIFS > 1 -void do_ide1_request (void) +void do_ide1_request (request_queue_t *q) { ide_do_request (ide_hwifs[1].hwgroup); } #endif /* MAX_HWIFS > 1 */ #if MAX_HWIFS > 2 -void do_ide2_request (void) +void do_ide2_request (request_queue_t *q) { ide_do_request (ide_hwifs[2].hwgroup); } #endif /* MAX_HWIFS > 2 */ #if MAX_HWIFS > 3 -void do_ide3_request (void) +void do_ide3_request (request_queue_t *q) { ide_do_request (ide_hwifs[3].hwgroup); } #endif /* MAX_HWIFS > 3 */ #if MAX_HWIFS > 4 -void do_ide4_request (void) +void do_ide4_request (request_queue_t *q) { ide_do_request (ide_hwifs[4].hwgroup); } #endif /* MAX_HWIFS > 4 */ #if MAX_HWIFS > 5 -void do_ide5_request (void) +void do_ide5_request (request_queue_t *q) { ide_do_request (ide_hwifs[5].hwgroup); } #endif /* MAX_HWIFS > 5 */ #if MAX_HWIFS > 6 -void do_ide6_request (void) +void do_ide6_request (request_queue_t *q) { ide_do_request (ide_hwifs[6].hwgroup); } #endif /* MAX_HWIFS > 6 */ #if MAX_HWIFS > 7 -void do_ide7_request (void) +void do_ide7_request (request_queue_t *q) { ide_do_request (ide_hwifs[7].hwgroup); } #endif /* MAX_HWIFS > 7 */ #if MAX_HWIFS > 8 -void do_ide8_request (void) +void do_ide8_request (request_queue_t *q) { ide_do_request 
(ide_hwifs[8].hwgroup); } #endif /* MAX_HWIFS > 8 */ #if MAX_HWIFS > 9 -void do_ide9_request (void) +void do_ide9_request (request_queue_t *q) { ide_do_request (ide_hwifs[9].hwgroup); } @@ -1576,10 +1576,12 @@ void ide_intr (int irq, void *dev_id, struct pt_regs *regs) hwgroup->handler = NULL; del_timer(&hwgroup->timer); spin_unlock(&io_request_lock); + if (drive->unmask) ide__sti(); /* local CPU only */ startstop = handler(drive); /* service this interrupt, may set handler for next interrupt */ spin_lock_irq(&io_request_lock); + /* * Note that handler() may have set things up for another * interrupt to occur soon, but it cannot happen until @@ -1683,10 +1685,10 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio if (action == ide_wait) rq->sem = &sem; spin_lock_irqsave(&io_request_lock, flags); - cur_rq = drive->queue; + cur_rq = drive->queue.current_request; if (cur_rq == NULL || action == ide_preempt) { rq->next = cur_rq; - drive->queue = rq; + drive->queue.current_request = rq; if (action == ide_preempt) hwgroup->rq = NULL; } else { @@ -1993,7 +1995,7 @@ void ide_unregister (unsigned int index) kfree(blksize_size[hwif->major]); kfree(max_sectors[hwif->major]); kfree(max_readahead[hwif->major]); - blk_dev[hwif->major].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(hwif->major)); blk_dev[hwif->major].data = NULL; blk_dev[hwif->major].queue = NULL; blksize_size[hwif->major] = NULL; diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index 1da3702b0b21..416586d0b84e 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c @@ -142,14 +142,49 @@ static inline int get_max_segments(kdev_t dev) * NOTE: the device-specific queue() functions * have to be atomic! 
*/ -static inline struct request **get_queue(kdev_t dev) +static inline request_queue_t *get_queue(kdev_t dev) { int major = MAJOR(dev); struct blk_dev_struct *bdev = blk_dev + major; if (bdev->queue) return bdev->queue(dev); - return &blk_dev[major].current_request; + return &blk_dev[major].request_queue; +} + +void blk_cleanup_queue(request_queue_t * q) +{ + memset(q, 0, sizeof(*q)); +} + +void blk_queue_headactive(request_queue_t * q, int active) +{ + q->head_active = active; +} + +void blk_queue_pluggable(request_queue_t * q, int use_plug) +{ + q->use_plug = use_plug; +} + +void blk_init_queue(request_queue_t * q, request_fn_proc * rfn) +{ + q->request_fn = rfn; + q->current_request = NULL; + q->merge_fn = NULL; + q->merge_requests_fn = NULL; + q->plug_tq.sync = 0; + q->plug_tq.routine = &unplug_device; + q->plug_tq.data = q; + q->plugged = 0; + /* + * These booleans describe the queue properties. We set the + * default (and most common) values here. Other drivers can + * use the appropriate functions to alter the queue properties. + * as appropriate. + */ + q->use_plug = 1; + q->head_active = 1; } /* @@ -157,22 +192,18 @@ static inline struct request **get_queue(kdev_t dev) */ void unplug_device(void * data) { - struct blk_dev_struct * dev = (struct blk_dev_struct *) data; - int queue_new_request=0; + request_queue_t * q = (request_queue_t *) data; unsigned long flags; spin_lock_irqsave(&io_request_lock,flags); - if (dev->current_request == &dev->plug) { - struct request * next = dev->plug.next; - dev->current_request = next; - if (next || dev->queue) { - dev->plug.next = NULL; - queue_new_request = 1; + if( q->plugged ) + { + q->plugged = 0; + if( q->current_request != NULL ) + { + (q->request_fn)(q); } } - if (queue_new_request) - (dev->request_fn)(); - spin_unlock_irqrestore(&io_request_lock,flags); } @@ -184,12 +215,13 @@ void unplug_device(void * data) * This is called with interrupts off and no requests on the queue. 
* (and with the request spinlock aquired) */ -static inline void plug_device(struct blk_dev_struct * dev) +static inline void plug_device(request_queue_t * q) { - if (dev->current_request) + if (q->current_request) return; - dev->current_request = &dev->plug; - queue_task(&dev->plug_tq, &tq_disk); + + q->plugged = 1; + queue_task(&q->plug_tq, &tq_disk); } /* @@ -221,6 +253,7 @@ static inline struct request * get_request(int n, kdev_t dev) prev_found = req; req->rq_status = RQ_ACTIVE; req->rq_dev = dev; + req->special = NULL; return req; } @@ -335,12 +368,11 @@ static inline void drive_stat_acct(struct request *req, * which is important for drive_stat_acct() above. */ -void add_request(struct blk_dev_struct * dev, struct request * req) +static void add_request(request_queue_t * q, struct request * req) { int major = MAJOR(req->rq_dev); - struct request * tmp, **current_request; + struct request * tmp; unsigned long flags; - int queue_new_request = 0; drive_stat_acct(req, req->nr_sectors, 1); req->next = NULL; @@ -349,12 +381,9 @@ void add_request(struct blk_dev_struct * dev, struct request * req) * We use the goto to reduce locking complexity */ spin_lock_irqsave(&io_request_lock,flags); - current_request = get_queue(req->rq_dev); - if (!(tmp = *current_request)) { - *current_request = req; - if (dev->current_request != &dev->plug) - queue_new_request = 1; + if (!(tmp = q->current_request)) { + q->current_request = req; goto out; } for ( ; tmp->next ; tmp = tmp->next) { @@ -372,26 +401,34 @@ void add_request(struct blk_dev_struct * dev, struct request * req) req->next = tmp->next; tmp->next = req; -/* for SCSI devices, call request_fn unconditionally */ - if (scsi_blk_major(major)) - queue_new_request = 1; - if (major >= COMPAQ_SMART2_MAJOR+0 && - major <= COMPAQ_SMART2_MAJOR+7) - queue_new_request = 1; + /* + * FIXME(eric) I don't understand why there is a need for this + * special case code. 
It clearly doesn't fit any more with + * the new queueing architecture, and it got added in 2.3.10. + * I am leaving this in here until I hear back from the COMPAQ + * people. + */ + if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7) + { + (q->request_fn)(q); + } + if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7) - queue_new_request = 1; + { + (q->request_fn)(q); + } + out: - if (queue_new_request) - (dev->request_fn)(); spin_unlock_irqrestore(&io_request_lock,flags); } /* * Has to be called with the request spinlock aquired */ -static inline void attempt_merge (struct request *req, - int max_sectors, - int max_segments) +static inline void attempt_merge (request_queue_t * q, + struct request *req, + int max_sectors, + int max_segments) { struct request *next = req->next; int total_segments; @@ -407,16 +444,37 @@ static inline void attempt_merge (struct request *req, total_segments--; if (total_segments > max_segments) return; + + if( q->merge_requests_fn != NULL ) + { + /* + * If we are not allowed to merge these requests, then + * return. If we are allowed to merge, then the count + * will have been updated to the appropriate number, + * and we shouldn't do it here too. 
+ */ + if( !(q->merge_requests_fn)(q, req, next) ) + { + return; + } + } + else + { + req->nr_segments = total_segments; + } + req->bhtail->b_reqnext = next->bh; req->bhtail = next->bhtail; req->nr_sectors += next->nr_sectors; - req->nr_segments = total_segments; next->rq_status = RQ_INACTIVE; req->next = next->next; wake_up (&wait_for_request); } -void make_request(int major,int rw, struct buffer_head * bh) +static void __make_request(request_queue_t * q, + int major, + int rw, + struct buffer_head * bh) { unsigned int sector, count; struct request * req; @@ -519,13 +577,20 @@ void make_request(int major,int rw, struct buffer_head * bh) * not to schedule or do something nonatomic */ spin_lock_irqsave(&io_request_lock,flags); - req = *get_queue(bh->b_rdev); + req = q->current_request; if (!req) { /* MD and loop can't handle plugging without deadlocking */ if (major != MD_MAJOR && major != LOOP_MAJOR && - major != DDV_MAJOR && major != NBD_MAJOR) - plug_device(blk_dev + major); /* is atomic */ + major != DDV_MAJOR && major != NBD_MAJOR + && q->use_plug) + plug_device(q); /* is atomic */ } else switch (major) { + /* + * FIXME(eric) - this entire switch statement is going away + * soon, and we will instead key off of q->head_active to decide + * whether the top request in the queue is active on the device + * or not. + */ case IDE0_MAJOR: /* same as HD_MAJOR */ case IDE1_MAJOR: case FLOPPY_MAJOR: @@ -548,7 +613,7 @@ void make_request(int major,int rw, struct buffer_head * bh) * All other drivers need to jump over the first entry, as that * entry may be busy being processed and we thus can't change it. */ - if (req == blk_dev[major].current_request) + if (req == q->current_request) req = req->next; if (!req) break; @@ -592,25 +657,71 @@ void make_request(int major,int rw, struct buffer_head * bh) continue; /* Can we add it to the end of this request? 
*/ if (req->sector + req->nr_sectors == sector) { - if (req->bhtail->b_data + req->bhtail->b_size - != bh->b_data) { - if (req->nr_segments < max_segments) - req->nr_segments++; - else continue; + /* + * The merge_fn is a more advanced way + * of accomplishing the same task. Instead + * of applying a fixed limit of some sort + * we instead define a function which can + * determine whether or not it is safe to + * merge the request or not. + */ + if( q->merge_fn == NULL ) + { + if (req->bhtail->b_data + req->bhtail->b_size + != bh->b_data) { + if (req->nr_segments < max_segments) + req->nr_segments++; + else continue; + } + } + else + { + /* + * See if this queue has rules that + * may suggest that we shouldn't merge + * this + */ + if( !(q->merge_fn)(q, req, bh) ) + { + continue; + } } req->bhtail->b_reqnext = bh; req->bhtail = bh; req->nr_sectors += count; drive_stat_acct(req, count, 0); /* Can we now merge this req with the next? */ - attempt_merge(req, max_sectors, max_segments); + attempt_merge(q, req, max_sectors, max_segments); /* or to the beginning? */ } else if (req->sector - count == sector) { - if (bh->b_data + bh->b_size - != req->bh->b_data) { - if (req->nr_segments < max_segments) - req->nr_segments++; - else continue; + /* + * The merge_fn is a more advanced way + * of accomplishing the same task. Instead + * of applying a fixed limit of some sort + * we instead define a function which can + * determine whether or not it is safe to + * merge the request or not. 
+ */ + if( q->merge_fn == NULL ) + { + if (bh->b_data + bh->b_size + != req->bh->b_data) { + if (req->nr_segments < max_segments) + req->nr_segments++; + else continue; + } + } + else + { + /* + * See if this queue has rules that + * may suggest that we shouldn't merge + * this + */ + if( !(q->merge_fn)(q, req, bh) ) + { + continue; + } } bh->b_reqnext = req->bh; req->bh = bh; @@ -645,20 +756,37 @@ void make_request(int major,int rw, struct buffer_head * bh) req->errors = 0; req->sector = sector; req->nr_sectors = count; - req->nr_segments = 1; req->current_nr_sectors = count; + req->nr_segments = 1; /* Always 1 for a new request. */ req->buffer = bh->b_data; req->sem = NULL; req->bh = bh; req->bhtail = bh; req->next = NULL; - add_request(major+blk_dev,req); + add_request(q, req); return; end_io: bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state)); } +void make_request(int major,int rw, struct buffer_head * bh) +{ + request_queue_t * q; + unsigned long flags; + + q = get_queue(bh->b_dev); + + __make_request(q, major, rw, bh); + + spin_lock_irqsave(&io_request_lock,flags); + if( !q->plugged ) + (q->request_fn)(q); + spin_unlock_irqrestore(&io_request_lock,flags); +} + + + /* This function can be used to request a number of buffers from a block device. 
Currently the only restriction is that all buffers must belong to the same device */ @@ -667,13 +795,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bh[]) { unsigned int major; int correct_size; - struct blk_dev_struct * dev; + request_queue_t * q; + unsigned long flags; int i; - dev = NULL; - if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV) - dev = blk_dev + major; - if (!dev || !dev->request_fn) { + + major = MAJOR(bh[0]->b_dev); + if (!(q = get_queue(bh[0]->b_dev))) { printk(KERN_ERR "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n", kdevname(bh[0]->b_dev), bh[0]->b_blocknr); @@ -726,8 +854,15 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bh[]) continue; } #endif - make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]); + __make_request(q, MAJOR(bh[i]->b_rdev), rw, bh[i]); + } + + spin_lock_irqsave(&io_request_lock,flags); + if( !q->plugged ) + { + (q->request_fn)(q); } + spin_unlock_irqrestore(&io_request_lock,flags); return; sorry: @@ -801,15 +936,8 @@ int __init blk_dev_init(void) struct blk_dev_struct *dev; for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) { - dev->request_fn = NULL; dev->queue = NULL; - dev->current_request = NULL; - dev->plug.rq_status = RQ_INACTIVE; - dev->plug.cmd = -1; - dev->plug.next = NULL; - dev->plug_tq.sync = 0; - dev->plug_tq.routine = &unplug_device; - dev->plug_tq.data = dev; + blk_init_queue(&dev->request_queue, NULL); } req = all_requests + NR_REQUEST; @@ -924,3 +1052,6 @@ int __init blk_dev_init(void) EXPORT_SYMBOL(io_request_lock); EXPORT_SYMBOL(end_that_request_first); EXPORT_SYMBOL(end_that_request_last); +EXPORT_SYMBOL(blk_init_queue); +EXPORT_SYMBOL(blk_cleanup_queue); +EXPORT_SYMBOL(blk_queue_headactive); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 3459ec1fd01e..a950172ff2e8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -164,7 +164,7 @@ static void figure_loop_size(struct loop_device *lo) loop_sizes[lo->lo_number] = size; } -static void 
do_lo_request(void) +static void do_lo_request(request_queue_t * q) { int real_block, block, offset, len, blksize, size; char *dest_addr; @@ -754,7 +754,7 @@ int __init loop_init(void) return -ENOMEM; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); for (i=0; i < max_loop; i++) { memset(&loop_dev[i], 0, sizeof(struct loop_device)); loop_dev[i].lo_number = i; diff --git a/drivers/block/md.c b/drivers/block/md.c index 2a791dede326..b525ef2e973e 100644 --- a/drivers/block/md.c +++ b/drivers/block/md.c @@ -761,7 +761,7 @@ int md_make_request (int minor, int rw, struct buffer_head * bh) } } -static void do_md_request (void) +static void do_md_request (request_queue_t * q) { printk ("Got md request, not good..."); return; @@ -1274,8 +1274,7 @@ int __init md_init (void) return (-1); } - blk_dev[MD_MAJOR].request_fn=DEVICE_REQUEST; - blk_dev[MD_MAJOR].current_request=NULL; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MD_MAJOR]=INT_MAX; memset(md_dev, 0, MAX_MD_DEV * sizeof (struct md_dev)); md_gendisk.next=gendisk_head; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 7c24449ac014..cda45cc0193a 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -290,7 +290,7 @@ void nbd_clear_que(struct nbd_device *lo) #undef FAIL #define FAIL( s ) { printk( KERN_ERR "NBD, minor %d: " s "\n", dev ); goto error_out; } -static void do_nbd_request(void) +static void do_nbd_request(request_queue_t * q) { struct request *req; int dev; @@ -488,7 +488,7 @@ int nbd_init(void) #endif blksize_size[MAJOR_NR] = nbd_blksizes; blk_size[MAJOR_NR] = nbd_sizes; - blk_dev[MAJOR_NR].request_fn = do_nbd_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_nbd_request); for (i = 0; i < MAX_NBD; i++) { nbd_dev[i].refcnt = 0; nbd_dev[i].file = NULL; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 96e0e421e82c..7db6626f4602 100644 --- a/drivers/block/paride/pcd.c +++ 
b/drivers/block/paride/pcd.c @@ -220,7 +220,7 @@ static int pcd_packet(struct cdrom_device_info *cdi, static int pcd_detect(void); static void pcd_probe_capabilities(void); static void do_pcd_read_drq(void); -static void do_pcd_request(void); +static void do_pcd_request(request_queue_t * q); static void do_pcd_read(void); static int pcd_blocksizes[PCD_UNITS]; @@ -343,7 +343,7 @@ int pcd_init (void) /* preliminary initialisation */ for (unit=0;unitsector, ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects); end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(q); } } /* main strategy routine */ @@ -598,11 +598,11 @@ static void ps2esdi_readwrite(int cmd, u_char drive, u_int block, u_int count) if (ps2esdi_out_cmd_blk(cmd_blk)) { printk("%s: Controller failed\n", DEVICE_NAME); if ((++CURRENT->errors) < MAX_RETRIES) - return do_ps2esdi_request(); + return do_ps2esdi_request(NULL); else { end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } } /* check for failure to put out the command block */ @@ -901,11 +901,11 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code) outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN); outb(CTRL_ENABLE_INTR, ESDI_CONTROL); if ((++CURRENT->errors) < MAX_RETRIES) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); else { end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } break; } @@ -947,11 +947,11 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code) outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN); outb(CTRL_ENABLE_INTR, ESDI_CONTROL); if ((++CURRENT->errors) < MAX_RETRIES) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); else { end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } break; @@ -961,7 +961,7 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code) outb(CTRL_ENABLE_INTR, ESDI_CONTROL); end_request(FAIL); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); 
break; case INT_CMD_FORMAT: @@ -993,11 +993,11 @@ static void ps2esdi_continue_request(void) if (CURRENT->nr_sectors -= CURRENT->current_nr_sectors) { CURRENT->buffer += CURRENT->current_nr_sectors * SECT_SIZE; CURRENT->sector += CURRENT->current_nr_sectors; - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } else { end_request(SUCCES); if (CURRENT) - do_ps2esdi_request(); + do_ps2esdi_request(NULL); } } diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 75bbae97befc..f83a7616299b 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -181,7 +181,7 @@ __setup("ramdisk_size=", ramdisk_size2); * allocated size, we must get rid of it... * */ -static void rd_request(void) +static void rd_request(request_queue_t * q) { unsigned int minor; unsigned long offset, len; @@ -350,7 +350,7 @@ static void __exit rd_cleanup (void) invalidate_buffers(MKDEV(MAJOR_NR, i)); unregister_blkdev( MAJOR_NR, "ramdisk" ); - blk_dev[MAJOR_NR].request_fn = 0; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); } /* This is the registration and initialization section of the RAM disk driver */ @@ -371,7 +371,7 @@ int __init rd_init (void) return -EIO; } - blk_dev[MAJOR_NR].request_fn = &rd_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &rd_request); for (i = 0; i < NUM_RAMDISKS; i++) { /* rd_size is given in kB */ diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index cee5493da0ec..06cd279af91b 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -219,7 +219,7 @@ static unsigned short write_postamble[] = { static void swim3_select(struct floppy_state *fs, int sel); static void swim3_action(struct floppy_state *fs, int action); static int swim3_readbit(struct floppy_state *fs, int bit); -static void do_fd_request(void); +static void do_fd_request(request_queue_t * q); static void start_request(struct floppy_state *fs); static void set_timeout(struct floppy_state *fs, int nticks, void (*proc)(unsigned long)); @@ -290,7 +290,7 @@ static int 
swim3_readbit(struct floppy_state *fs, int bit) return (stat & DATA) == 0; } -static void do_fd_request(void) +static void do_fd_request(request_queue_t * q) { int i; for(i=0;i sector, CURRENT -> nr_sectors,jiffies); @@ -1798,7 +1798,7 @@ int __init aztcd_init(void) MAJOR_NR); return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); #ifndef AZT_KERNEL_PRIOR_2_1 blksize_size[MAJOR_NR] = aztcd_blocksizes; #endif diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c index edb90ee76e45..8e4a8536bc3e 100644 --- a/drivers/cdrom/cdu31a.c +++ b/drivers/cdrom/cdu31a.c @@ -1641,7 +1641,7 @@ read_data_block(char *buffer, * data access on a CD is done sequentially, this saves a lot of operations. */ static void -do_cdu31a_request(void) +do_cdu31a_request(request_queue_t * q) { int block; int nblock; @@ -3497,7 +3497,7 @@ cdu31a_init(void) is_a_cdu31a = strcmp("CD-ROM CDU31A", drive_config.product_id) == 0; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = CDU31A_READAHEAD; cdu31a_block_size = 1024; /* 1kB default block size */ /* use 'mount -o block=2048' */ diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c index 3ab0533f8bc5..edb0ab34ae19 100644 --- a/drivers/cdrom/cm206.c +++ b/drivers/cdrom/cm206.c @@ -209,6 +209,8 @@ static int auto_probe=1; /* Yes, why not? */ static int cm206_base = CM206_BASE; static int cm206_irq = CM206_IRQ; +static int cm206[2] = {0,0}; /* for compatible `insmod' parameter passing */ + MODULE_PARM(cm206_base, "i"); /* base */ MODULE_PARM(cm206_irq, "i"); /* irq */ MODULE_PARM(cm206, "1-2i"); /* base,irq or irq,base */ @@ -801,7 +803,7 @@ int try_adapter(int sector) /* This is not a very smart implementation. We could optimize for consecutive block numbers. I'm not convinced this would really bring down the processor load. 
*/ -static void do_cm206_request(void) +static void do_cm206_request(request_queue_t * q) { long int i, cd_sec_no; int quarter, error; @@ -1394,7 +1396,7 @@ int __init cm206_init(void) cleanup(3); return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = cm206_blocksizes; read_ahead[MAJOR_NR] = 16; /* reads ahead what? */ init_bh(CM206_BH, cm206_bh); @@ -1411,7 +1413,6 @@ int __init cm206_init(void) #ifdef MODULE -static int cm206[2] = {0,0}; /* for compatible `insmod' parameter passing */ void __init parse_options(void) { diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c index 07c36f51b3ca..7913f7f0db90 100644 --- a/drivers/cdrom/gscd.c +++ b/drivers/cdrom/gscd.c @@ -86,7 +86,8 @@ static void gscd_bin2bcd (unsigned char *p); /* Schnittstellen zum Kern/FS */ -static void do_gscd_request (void); +static void do_gscd_request (request_queue_t *); +static void __do_gscd_request (void); static int gscd_ioctl (struct inode *, struct file *, unsigned int, unsigned long); static int gscd_open (struct inode *, struct file *); static int gscd_release (struct inode *, struct file *); @@ -260,7 +261,12 @@ long offs; * I/O request routine called from Linux kernel. 
*/ -static void do_gscd_request (void) +static void do_gscd_request (request_queue_t * q) +{ + __do_gscd_request(); +} + +static void __do_gscd_request (void) { unsigned int block,dev; unsigned int nsect; @@ -355,7 +361,7 @@ char cmd[] = { CMD_READ, 0x80, 0,0,0, 0,1 }; /* cmd mode M-S-F secth sectl */ end_request(1); } } - SET_TIMER(do_gscd_request, 1); + SET_TIMER(__do_gscd_request, 1); } @@ -1060,7 +1066,7 @@ int result; return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = gscd_blocksizes; read_ahead[MAJOR_NR] = 4; diff --git a/drivers/cdrom/mcd.c b/drivers/cdrom/mcd.c index 81f9bd7626a3..c8adc0200b91 100644 --- a/drivers/cdrom/mcd.c +++ b/drivers/cdrom/mcd.c @@ -648,7 +648,7 @@ mcd_interrupt(int irq, void *dev_id, struct pt_regs * regs) static void -do_mcd_request(void) +do_mcd_request(request_queue_t * q) { #ifdef TEST2 printk(" do_mcd_request(%ld+%ld)\n", CURRENT -> sector, CURRENT -> nr_sectors); @@ -1179,7 +1179,7 @@ int __init mcd_init(void) } blksize_size[MAJOR_NR] = mcd_blocksizes; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 4; /* check for card */ diff --git a/drivers/cdrom/mcdx.c b/drivers/cdrom/mcdx.c index fa3f36310356..38f838797b8b 100644 --- a/drivers/cdrom/mcdx.c +++ b/drivers/cdrom/mcdx.c @@ -208,7 +208,7 @@ struct s_drive_stuff { /* declared in blk.h */ int mcdx_init(void); -void do_mcdx_request(void); +void do_mcdx_request(request_queue_t * q); /* already declared in init/main */ void mcdx_setup(char *, int *); @@ -521,7 +521,7 @@ static int mcdx_audio_ioctl(struct cdrom_device_info * cdi, unsigned int cmd, } } -void do_mcdx_request() +void do_mcdx_request(request_queue_t * q) { int dev; struct s_drive_stuff *stuffp; @@ -1116,7 +1116,7 @@ int __init mcdx_init_drive(int drive) return 1; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + 
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = READ_AHEAD; blksize_size[MAJOR_NR] = mcdx_blocksizes; diff --git a/drivers/cdrom/optcd.c b/drivers/cdrom/optcd.c index 68a22cbd1160..94ad9f091494 100644 --- a/drivers/cdrom/optcd.c +++ b/drivers/cdrom/optcd.c @@ -1360,7 +1360,7 @@ static void poll(void) } -static void do_optcd_request(void) +static void do_optcd_request(request_queue_t * q) { DEBUG((DEBUG_REQUEST, "do_optcd_request(%ld+%ld)", CURRENT -> sector, CURRENT -> nr_sectors)); @@ -2067,7 +2067,7 @@ int __init optcd_init(void) hardsect_size[MAJOR_NR] = &hsecsize; blksize_size[MAJOR_NR] = &blksize; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = 4; request_region(optcd_port, 4, "optcd"); diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c index 3e704b0d81a2..7b0fcc10ceea 100644 --- a/drivers/cdrom/sbpcd.c +++ b/drivers/cdrom/sbpcd.c @@ -4794,7 +4794,7 @@ static inline void sbpcd_end_request(struct request *req, int uptodate) { /* * I/O request routine, called from Linux kernel. 
*/ -static void DO_SBPCD_REQUEST(void) +static void DO_SBPCD_REQUEST(request_queue_t * q) { u_int block; u_int nsect; @@ -5725,7 +5725,7 @@ int __init SBPCD_INIT(void) goto init_done; #endif MODULE } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[MAJOR_NR] = buffers * (CD_FRAMESIZE / 512); request_region(CDo_command,4,major_name); diff --git a/drivers/cdrom/sjcd.c b/drivers/cdrom/sjcd.c index 35697369fec7..b5edc20574c3 100644 --- a/drivers/cdrom/sjcd.c +++ b/drivers/cdrom/sjcd.c @@ -1272,7 +1272,7 @@ static void sjcd_poll( void ){ SJCD_SET_TIMER( sjcd_poll, 1 ); } -static void do_sjcd_request( void ){ +static void do_sjcd_request( request_queue_t * q ){ #if defined( SJCD_TRACE ) printk( "SJCD: do_sjcd_request(%ld+%ld)\n", CURRENT->sector, CURRENT->nr_sectors ); @@ -1475,7 +1475,7 @@ int __init sjcd_init( void ){ return( -EIO ); } - blk_dev[ MAJOR_NR ].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); read_ahead[ MAJOR_NR ] = 4; if( check_region( sjcd_base, 4 ) ){ diff --git a/drivers/cdrom/sonycd535.c b/drivers/cdrom/sonycd535.c index 79bad95de934..947277f3d219 100644 --- a/drivers/cdrom/sonycd535.c +++ b/drivers/cdrom/sonycd535.c @@ -781,7 +781,7 @@ size_to_buf(unsigned int size, Byte *buf) * data access on a CD is done sequentially, this saves a lot of operations. 
*/ static void -do_cdu535_request(void) +do_cdu535_request(request_queue_t * q) { unsigned int dev; unsigned int read_size; @@ -1601,7 +1601,7 @@ sony535_init(void) MAJOR_NR, CDU535_MESSAGE_NAME); return -EIO; } - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST); blksize_size[MAJOR_NR] = &sonycd535_block_size; read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */ diff --git a/drivers/char/Config.in b/drivers/char/Config.in index cd8185778666..b32ea52fe907 100644 --- a/drivers/char/Config.in +++ b/drivers/char/Config.in @@ -214,12 +214,10 @@ if [ "$CONFIG_FTAPE" != "n" ]; then fi endmenu -if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - bool 'Direct Rendering Manager (XFree86 DRI support) (EXPERIMENTAL)' CONFIG_DRM - dep_tristate ' 3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM - if [ "$CONFIG_DRM" = "y" ]; then - dep_tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA m - fi +bool 'Direct Rendering Manager (XFree86 DRI support) (EXPERIMENTAL)' CONFIG_DRM +dep_tristate ' 3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM +if [ "$CONFIG_DRM" = "y" ]; then + dep_tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA m fi if [ "$CONFIG_PCMCIA" != "n" ]; then @@ -227,8 +225,8 @@ if [ "$CONFIG_PCMCIA" != "n" ]; then fi if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - dep_tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP m - if [ "$CONFIG_AGP" = "m" ]; then + tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP + if [ "$CONFIG_AGP" != "n" ]; then bool ' Intel 440LX/BX/GX support' CONFIG_AGP_INTEL bool ' Intel I810/I810 DC100/I810e support' CONFIG_AGP_I810 bool ' VIA VP3/MVP3/Apollo Pro support' CONFIG_AGP_VIA diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 38c4bc55fd40..899679f79442 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -636,9 +636,15 @@ else endif endif -ifeq ($(CONFIG_AGP), m) +ifeq ($(CONFIG_AGP), y) + SUB_DIRS += agp ALL_SUB_DIRS += agp MOD_SUB_DIRS += agp 
+else + ifeq ($(CONFIG_AGP), m) + ALL_SUB_DIRS += agp + MOD_SUB_DIRS += agp + endif endif include $(TOPDIR)/Rules.make diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile index c03310f6e432..39a0e3672ad8 100644 --- a/drivers/char/agp/Makefile +++ b/drivers/char/agp/Makefile @@ -3,30 +3,17 @@ # space ioctl interface to use agp memory. It also adds a kernel interface # that other drivers could use to manipulate agp memory. -M_OBJS := agpgart.o +O_TARGET := agp.o -CFLAGS_agp_backend.o := - -ifdef CONFIG_AGP_I810 -CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_I810 -endif -ifdef CONFIG_AGP_INTEL -CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_GENERIC -endif -ifdef CONFIG_AGP_VIA -CFLAGS_agp_backend.o += -DAGP_BUILD_VIA_GENERIC -endif -ifdef CONFIG_AGP_AMD -CFLAGS_agp_backend.o += -DAGP_BUILD_AMD_IRONGATE -endif -ifdef CONFIG_AGP_SIS -CFLAGS_agp_backend.o += -DAGP_BUILD_SIS_GENERIC -endif -ifdef CONFIG_AGP_ALI -CFLAGS_agp_backend.o += -DAGP_BUILD_ALI_M1541 +ifeq ($(CONFIG_AGP),y) + O_OBJS += agpgart.o +else + ifeq ($(CONFIG_AGP), m) + M_OBJS += agpgart.o + endif endif include $(TOPDIR)/Rules.make -agpgart.o: agp_backend.o agpgart_fe.o - $(LD) $(LD_RFLAG) -r -o $@ agp_backend.o agpgart_fe.o +agpgart.o: agpgart_be.o agpgart_fe.o + $(LD) $(LD_RFLAG) -r -o $@ agpgart_be.o agpgart_fe.o diff --git a/drivers/char/agp/agp_backendP.h b/drivers/char/agp/agp.h similarity index 89% rename from drivers/char/agp/agp_backendP.h rename to drivers/char/agp/agp.h index 59beb02599a3..55d00d8233b7 100644 --- a/drivers/char/agp/agp_backendP.h +++ b/drivers/char/agp/agp.h @@ -1,8 +1,8 @@ /* * AGPGART module version 0.99 * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight - * Copyright (C) 1999 Xi Graphics + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -97,7 +97,7 @@ struct agp_bridge_data { /* Links to driver specific functions */ - int (*fetch_size) (void); /* returns the index into the size table */ + int (*fetch_size) (void); int (*configure) (void); void (*agp_enable) (u32); void (*cleanup) (void); @@ -112,22 +112,22 @@ struct agp_bridge_data { void (*free_by_type) (agp_memory *); /* Links to vendor/device specific setup functions */ -#ifdef AGP_BUILD_INTEL_GENERIC +#ifdef CONFIG_AGP_INTEL void (*intel_generic_setup) (void); #endif -#ifdef AGP_BUILD_INTEL_I810 +#ifdef CONFIG_AGP_I810 void (*intel_i810_setup) (struct pci_dev *); #endif -#ifdef AGP_BUILD_VIA_GENERIC +#ifdef CONFIG_AGP_VIA void (*via_generic_setup) (void); #endif -#ifdef AGP_BUILD_SIS_GENERIC +#ifdef CONFIG_AGP_SIS void (*sis_generic_setup) (void); #endif -#ifdef AGP_BUILD_AMD_IRONGATE +#ifdef CONFIG_AGP_AMD void (*amd_irongate_setup) (void); #endif -#ifdef AGP_BUILD_ALI_M1541 +#ifdef CONFIG_AGP_ALI void (*ali_generic_setup) (void); #endif }; @@ -140,8 +140,19 @@ struct agp_bridge_data { #define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr)) #define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr)) +#define CACHE_FLUSH agp_bridge.cache_flush +#define A_SIZE_8(x) ((aper_size_info_8 *) x) +#define A_SIZE_16(x) ((aper_size_info_16 *) x) +#define A_SIZE_32(x) ((aper_size_info_32 *) x) +#define A_SIZE_FIX(x) ((aper_size_info_fixed *) x) +#define A_IDX8() (A_SIZE_8(agp_bridge.aperture_sizes) + i) +#define A_IDX16() (A_SIZE_16(agp_bridge.aperture_sizes) + i) +#define A_IDX32() (A_SIZE_32(agp_bridge.aperture_sizes) + i) +#define A_IDXFIX() (A_SIZE_FIX(agp_bridge.aperture_sizes) + i) +#define MAXKEY (4096 * 32) + #ifndef min -#define min(a,b) (((a)<(b))?(a):(b)) +#define min(a,b) (((a)<(b))?(a):(b)) #endif #define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page) diff 
--git a/drivers/char/agp/agp_backend.c b/drivers/char/agp/agpgart_be.c similarity index 79% rename from drivers/char/agp/agp_backend.c rename to drivers/char/agp/agpgart_be.c index 246cbbb46d6a..72f87791a19c 100644 --- a/drivers/char/agp/agp_backend.c +++ b/drivers/char/agp/agpgart_be.c @@ -1,8 +1,8 @@ /* * AGPGART module version 0.99 * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight - * Copyright (C) 1999 Xi Graphics + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -46,11 +46,7 @@ #include #include -#include "agp_backendP.h" - -static struct agp_bridge_data agp_bridge; - -#define CACHE_FLUSH agp_bridge.cache_flush +#include "agp.h" MODULE_AUTHOR("Jeff Hartmann "); MODULE_PARM(agp_try_unsupported, "1i"); @@ -63,36 +59,13 @@ EXPORT_SYMBOL(agp_enable); EXPORT_SYMBOL(agp_backend_acquire); EXPORT_SYMBOL(agp_backend_release); -static int agp_try_unsupported __initdata = 0; +static void flush_cache(void); +static struct agp_bridge_data agp_bridge; +static int agp_try_unsupported __initdata = 0; #ifdef __SMP__ static atomic_t cpus_waiting; -#endif - -int agp_backend_acquire(void) -{ - atomic_inc(&(agp_bridge.agp_in_use)); - if (atomic_read(&(agp_bridge.agp_in_use)) != 1) { - atomic_dec(&(agp_bridge.agp_in_use)); - return -EBUSY; - } - MOD_INC_USE_COUNT; - return 0; -} - -void agp_backend_release(void) -{ - atomic_dec(&(agp_bridge.agp_in_use)); - MOD_DEC_USE_COUNT; -} - -static void flush_cache(void) -{ - asm volatile ("wbinvd":::"memory"); -} - -#ifdef __SMP__ static void ipi_handler(void *null) { flush_cache(); @@ -110,7 +83,34 @@ static void smp_flush_cache(void) while (atomic_read(&cpus_waiting) > 0) barrier(); } -#endif +#define global_cache_flush smp_flush_cache +#else /* __SMP__ */ +#define global_cache_flush flush_cache +#endif /* __SMP__ */ + 
+static void flush_cache(void) +{ + asm volatile ("wbinvd":::"memory"); +} + + +int agp_backend_acquire(void) +{ + atomic_inc(&agp_bridge.agp_in_use); + + if (atomic_read(&agp_bridge.agp_in_use) != 1) { + atomic_dec(&agp_bridge.agp_in_use); + return -EBUSY; + } + MOD_INC_USE_COUNT; + return 0; +} + +void agp_backend_release(void) +{ + atomic_dec(&agp_bridge.agp_in_use); + MOD_DEC_USE_COUNT; +} /* * Basic Page Allocation Routines - @@ -121,30 +121,32 @@ static void smp_flush_cache(void) * against a maximum value. */ -static void *agp_alloc_page(void) +static unsigned long agp_alloc_page(void) { void *pt; pt = (void *) __get_free_page(GFP_KERNEL); if (pt == NULL) { - return NULL; + return 0; } - atomic_inc(&(mem_map[MAP_NR(pt)].count)); + atomic_inc(&mem_map[MAP_NR(pt)].count); set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); - atomic_inc(&(agp_bridge.current_memory_agp)); - return pt; + atomic_inc(&agp_bridge.current_memory_agp); + return (unsigned long) pt; } -static void agp_destroy_page(void *pt) +static void agp_destroy_page(unsigned long page) { - if (pt == NULL) - return; + void *pt = (void *) page; - atomic_dec(&(mem_map[MAP_NR(pt)].count)); + if (pt == NULL) { + return; + } + atomic_dec(&mem_map[MAP_NR(pt)].count); clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); wake_up(&mem_map[MAP_NR(pt)].wait); free_page((unsigned long) pt); - atomic_dec(&(agp_bridge.current_memory_agp)); + atomic_dec(&agp_bridge.current_memory_agp); } /* End Basic Page Allocation Routines */ @@ -155,7 +157,6 @@ static void agp_destroy_page(void *pt) * brunt of the work. 
*/ -#define MAXKEY (4096 * 32) static void agp_free_key(int key) { @@ -225,7 +226,8 @@ void agp_free_memory(agp_memory * curr) if (curr->page_count != 0) { for (i = 0; i < curr->page_count; i++) { curr->memory[i] &= ~(0x00000fff); - agp_destroy_page((void *) phys_to_virt(curr->memory[i])); + agp_destroy_page((unsigned long) + phys_to_virt(curr->memory[i])); } } agp_free_key(curr->key); @@ -242,7 +244,7 @@ agp_memory *agp_allocate_memory(size_t page_count, u32 type) agp_memory *new; int i; - if ((atomic_read(&(agp_bridge.current_memory_agp)) + page_count) > + if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) > agp_bridge.max_memory_agp) { return NULL; } @@ -258,15 +260,17 @@ agp_memory *agp_allocate_memory(size_t page_count, u32 type) return NULL; } for (i = 0; i < page_count; i++) { - new->memory[i] = (unsigned long) agp_alloc_page(); + new->memory[i] = agp_alloc_page(); - if ((void *) new->memory[i] == NULL) { + if (new->memory[i] == 0) { /* Free this structure */ agp_free_memory(new); return NULL; } new->memory[i] = - agp_bridge.mask_memory(virt_to_phys((void *) new->memory[i]), type); + agp_bridge.mask_memory( + virt_to_phys((void *) new->memory[i]), + type); new->page_count++; } @@ -285,16 +289,16 @@ static int agp_return_size(void) switch (agp_bridge.size_type) { case U8_APER_SIZE: - current_size = ((aper_size_info_8 *) temp)->size; + current_size = A_SIZE_8(temp)->size; break; case U16_APER_SIZE: - current_size = ((aper_size_info_16 *) temp)->size; + current_size = A_SIZE_16(temp)->size; break; case U32_APER_SIZE: - current_size = ((aper_size_info_32 *) temp)->size; + current_size = A_SIZE_32(temp)->size; break; case FIXED_APER_SIZE: - current_size = ((aper_size_info_fixed *) temp)->size; + current_size = A_SIZE_FIX(temp)->size; break; default: current_size = 0; @@ -396,7 +400,8 @@ static void agp_generic_agp_enable(u32 mode) * AGP devices and collect their data. 
*/ - while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) { + while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, + device)) != NULL) { pci_read_config_dword(device, 0x04, &scratch); if (!(scratch & 0x00100000)) @@ -406,7 +411,8 @@ static void agp_generic_agp_enable(u32 mode) if (cap_ptr != 0x00) { do { - pci_read_config_dword(device, cap_ptr, &cap_id); + pci_read_config_dword(device, + cap_ptr, &cap_id); if ((cap_id & 0xff) != 0x02) cap_ptr = (cap_id >> 8) & 0xff; @@ -415,8 +421,8 @@ static void agp_generic_agp_enable(u32 mode) } if (cap_ptr != 0x00) { /* - * Ok, here we have a AGP device. Disable impossible settings, - * and adjust the readqueue to the minimum. + * Ok, here we have a AGP device. Disable impossible + * settings, and adjust the readqueue to the minimum. */ pci_read_config_dword(device, cap_ptr + 4, &scratch); @@ -424,23 +430,35 @@ static void agp_generic_agp_enable(u32 mode) /* adjust RQ depth */ command = ((command & ~0xff000000) | - min((mode & 0xff000000), min((command & 0xff000000), (scratch & 0xff000000)))); + min((mode & 0xff000000), + min((command & 0xff000000), + (scratch & 0xff000000)))); /* disable SBA if it's not supported */ - if (!((command & 0x00000200) && (scratch & 0x00000200) && (mode & 0x00000200))) + if (!((command & 0x00000200) && + (scratch & 0x00000200) && + (mode & 0x00000200))) command &= ~0x00000200; /* disable FW if it's not supported */ - if (!((command & 0x00000010) && (scratch & 0x00000010) && (mode & 0x00000010))) + if (!((command & 0x00000010) && + (scratch & 0x00000010) && + (mode & 0x00000010))) command &= ~0x00000010; - if (!((command & 4) && (scratch & 4) && (mode & 4))) + if (!((command & 4) && + (scratch & 4) && + (mode & 4))) command &= ~0x00000004; - if (!((command & 2) && (scratch & 2) && (mode & 2))) + if (!((command & 2) && + (scratch & 2) && + (mode & 2))) command &= ~0x00000002; - if (!((command & 1) && (scratch & 1) && (mode & 1))) + if (!((command & 1) && + (scratch & 1) 
&& + (mode & 1))) command &= ~0x00000001; } } @@ -469,7 +487,8 @@ static void agp_generic_agp_enable(u32 mode) * command registers. */ - while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) { + while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, + device)) != NULL) { pci_read_config_dword(device, 0x04, &scratch); if (!(scratch & 0x00100000)) @@ -479,7 +498,8 @@ static void agp_generic_agp_enable(u32 mode) if (cap_ptr != 0x00) { do { - pci_read_config_dword(device, cap_ptr, &cap_id); + pci_read_config_dword(device, + cap_ptr, &cap_id); if ((cap_id & 0xff) != 0x02) cap_ptr = (cap_id >> 8) & 0xff; @@ -510,52 +530,58 @@ static int agp_generic_create_gatt_table(void) do { switch (agp_bridge.size_type) { case U8_APER_SIZE: - size = ((aper_size_info_8 *) temp)->size; - page_order = ((aper_size_info_8 *) temp)->page_order; - num_entries = ((aper_size_info_8 *) temp)->num_entries; + size = A_SIZE_8(temp)->size; + page_order = + A_SIZE_8(temp)->page_order; + num_entries = + A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: - size = ((aper_size_info_16 *) temp)->size; - page_order = ((aper_size_info_16 *) temp)->page_order; - num_entries = ((aper_size_info_16 *) temp)->num_entries; + size = A_SIZE_16(temp)->size; + page_order = A_SIZE_16(temp)->page_order; + num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: - size = ((aper_size_info_32 *) temp)->size; - page_order = ((aper_size_info_32 *) temp)->page_order; - num_entries = ((aper_size_info_32 *) temp)->num_entries; + size = A_SIZE_32(temp)->size; + page_order = A_SIZE_32(temp)->page_order; + num_entries = A_SIZE_32(temp)->num_entries; break; - /* This case will never really happen */ + /* This case will never really happen. 
*/ case FIXED_APER_SIZE: default: size = page_order = num_entries = 0; break; } - table = (char *) __get_free_pages(GFP_KERNEL, page_order); + table = (char *) __get_free_pages(GFP_KERNEL, + page_order); if (table == NULL) { i++; - switch (agp_bridge.size_type) { case U8_APER_SIZE: - agp_bridge.current_size = (((aper_size_info_8 *) agp_bridge.aperture_sizes) + i); + agp_bridge.current_size = A_IDX8(); break; case U16_APER_SIZE: - agp_bridge.current_size = (((aper_size_info_16 *) agp_bridge.aperture_sizes) + i); + agp_bridge.current_size = A_IDX16(); break; case U32_APER_SIZE: - agp_bridge.current_size = (((aper_size_info_32 *) agp_bridge.aperture_sizes) + i); + agp_bridge.current_size = A_IDX32(); break; - /* This case will never really happen */ + /* This case will never really + * happen. + */ case FIXED_APER_SIZE: default: - size = page_order = num_entries = 0; + agp_bridge.current_size = + agp_bridge.current_size; break; } } else { agp_bridge.aperture_size_idx = i; } - } while ((table == NULL) && (i < agp_bridge.num_aperture_sizes)); + } while ((table == NULL) && + (i < agp_bridge.num_aperture_sizes)); } else { size = ((aper_size_info_fixed *) temp)->size; page_order = ((aper_size_info_fixed *) temp)->page_order; @@ -590,7 +616,8 @@ static int agp_generic_create_gatt_table(void) agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); for (i = 0; i < num_entries; i++) { - agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page; + agp_bridge.gatt_table[i] = + (unsigned long) agp_bridge.scratch_page; } return 0; @@ -607,16 +634,16 @@ static int agp_generic_free_gatt_table(void) switch (agp_bridge.size_type) { case U8_APER_SIZE: - page_order = ((aper_size_info_8 *) temp)->page_order; + page_order = A_SIZE_8(temp)->page_order; break; case U16_APER_SIZE: - page_order = ((aper_size_info_16 *) temp)->page_order; + page_order = A_SIZE_16(temp)->page_order; break; case U32_APER_SIZE: - page_order = ((aper_size_info_32 *) temp)->page_order; + 
page_order = A_SIZE_32(temp)->page_order; break; case FIXED_APER_SIZE: - page_order = ((aper_size_info_fixed *) temp)->page_order; + page_order = A_SIZE_FIX(temp)->page_order; break; default: page_order = 0; @@ -650,16 +677,16 @@ static int agp_generic_insert_memory(agp_memory * mem, switch (agp_bridge.size_type) { case U8_APER_SIZE: - num_entries = ((aper_size_info_8 *) temp)->num_entries; + num_entries = A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: - num_entries = ((aper_size_info_16 *) temp)->num_entries; + num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: - num_entries = ((aper_size_info_32 *) temp)->num_entries; + num_entries = A_SIZE_32(temp)->num_entries; break; case FIXED_APER_SIZE: - num_entries = ((aper_size_info_fixed *) temp)->num_entries; + num_entries = A_SIZE_FIX(temp)->num_entries; break; default: num_entries = 0; @@ -704,7 +731,8 @@ static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, return -EINVAL; } for (i = pg_start; i < (mem->page_count + pg_start); i++) { - agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page; + agp_bridge.gatt_table[i] = + (unsigned long) agp_bridge.scratch_page; } agp_bridge.tlb_flush(mem); @@ -732,8 +760,7 @@ void agp_enable(u32 mode) /* End - Generic Agp routines */ -#ifdef AGP_BUILD_INTEL_I810 - +#ifdef CONFIG_AGP_I810 static aper_size_info_fixed intel_i810_sizes[] = { {64, 16384, 4}, @@ -751,7 +778,7 @@ static gatt_mask intel_i810_masks[] = static struct _intel_i810_private { struct pci_dev *i810_dev; /* device one */ - volatile unsigned char *registers; + volatile u8 *registers; int num_dcache_entries; } intel_i810_private; @@ -761,7 +788,7 @@ static int intel_i810_fetch_size(void) aper_size_info_fixed *values; pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc); - values = (aper_size_info_fixed *) agp_bridge.aperture_sizes; + values = A_SIZE_FIX(agp_bridge.aperture_sizes); if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { 
printk("agpgart: i810 is disabled\n"); @@ -788,18 +815,19 @@ static int intel_i810_configure(void) u32 temp; int i; - current_size = (aper_size_info_fixed *) agp_bridge.current_size; + current_size = A_SIZE_FIX(agp_bridge.current_size); pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); temp &= 0xfff80000; intel_i810_private.registers = - (volatile unsigned char *) ioremap(temp, 128 * 4096); + (volatile u8 *) ioremap(temp, 128 * 4096); if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL) & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { /* This will need to be dynamically assigned */ - printk("agpgart: detected 4MB dedicated video ram.\n"); + printk(KERN_INFO + "agpgart: detected 4MB dedicated video ram.\n"); intel_i810_private.num_dcache_entries = 1024; } pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp); @@ -810,7 +838,8 @@ static int intel_i810_configure(void) if (agp_bridge.needs_scratch_page == TRUE) { for (i = 0; i < current_size->num_entries; i++) { - OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), agp_bridge.scratch_page); } } @@ -840,7 +869,7 @@ static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, void *temp; temp = agp_bridge.current_size; - num_entries = ((aper_size_info_fixed *) temp)->num_entries; + num_entries = A_SIZE_FIX(temp)->num_entries; if ((pg_start + mem->page_count) > num_entries) { return -EINVAL; @@ -856,9 +885,12 @@ static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, (mem->type == AGP_DCACHE_MEMORY)) { /* special insert */ - for (i = pg_start; i < (pg_start + mem->page_count); i++) { - OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), - (i * 4096) | I810_PTE_LOCAL | I810_PTE_VALID); + for (i = pg_start; + i < (pg_start + mem->page_count); i++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), + (i * 4096) | I810_PTE_LOCAL | + I810_PTE_VALID); } 
agp_bridge.tlb_flush(mem); @@ -885,7 +917,8 @@ static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start, int i; for (i = pg_start; i < (mem->page_count + pg_start); i++) { - OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), agp_bridge.scratch_page); } @@ -944,11 +977,7 @@ static void intel_i810_setup(struct pci_dev *i810_dev) agp_bridge.tlb_flush = intel_i810_tlbflush; agp_bridge.mask_memory = intel_i810_mask_memory; agp_bridge.agp_enable = intel_i810_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif + agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; agp_bridge.free_gatt_table = agp_generic_free_gatt_table; agp_bridge.insert_memory = intel_i810_insert_entries; @@ -959,7 +988,7 @@ static void intel_i810_setup(struct pci_dev *i810_dev) #endif -#ifdef AGP_BUILD_INTEL_GENERIC +#ifdef CONFIG_AGP_INTEL static int intel_fetch_size(void) { @@ -968,7 +997,7 @@ static int intel_fetch_size(void) aper_size_info_16 *values; pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp); - (void *) values = agp_bridge.aperture_sizes; + values = A_SIZE_16(agp_bridge.aperture_sizes); for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { if (temp == values[i].size_value) { @@ -993,10 +1022,11 @@ static void intel_cleanup(void) u16 temp; aper_size_info_16 *previous_size; - previous_size = (aper_size_info_16 *) agp_bridge.previous_size; + previous_size = A_SIZE_16(agp_bridge.previous_size); pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, previous_size->size_value); + pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, + previous_size->size_value); } static int intel_configure(void) @@ -1005,24 +1035,27 @@ static int 
intel_configure(void) u16 temp2; aper_size_info_16 *current_size; - current_size = (aper_size_info_16 *) agp_bridge.current_size; + current_size = A_SIZE_16(agp_bridge.current_size); /* aperture size */ - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); /* address to map to */ pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); /* paccfg/nbxcfg */ pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9)); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, + (temp2 & ~(1 << 10)) | (1 << 9)); /* clear any possible error conditions */ pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7); return 0; @@ -1068,11 +1101,7 @@ static void intel_generic_setup(void) agp_bridge.tlb_flush = intel_tlbflush; agp_bridge.mask_memory = intel_mask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif + agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; agp_bridge.free_gatt_table = agp_generic_free_gatt_table; agp_bridge.insert_memory = agp_generic_insert_memory; @@ -1083,7 +1112,7 @@ static void intel_generic_setup(void) #endif -#ifdef AGP_BUILD_VIA_GENERIC +#ifdef CONFIG_AGP_VIA static int via_fetch_size(void) { @@ -1091,7 +1120,7 @@ static int via_fetch_size(void) u8 temp; aper_size_info_8 *values; - (void *) values = agp_bridge.aperture_sizes; + values = 
A_SIZE_8(agp_bridge.aperture_sizes); pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp); for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { if (temp == values[i].size_value) { @@ -1110,9 +1139,10 @@ static int via_configure(void) u32 temp; aper_size_info_8 *current_size; - current_size = (aper_size_info_8 *) agp_bridge.current_size; + current_size = A_SIZE_8(agp_bridge.current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, current_size->size_value); + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + current_size->size_value); /* address to map too */ pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); @@ -1130,9 +1160,10 @@ static void via_cleanup(void) { aper_size_info_8 *previous_size; - previous_size = (aper_size_info_8 *) agp_bridge.previous_size; + previous_size = A_SIZE_8(agp_bridge.previous_size); pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0); - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, previous_size->size_value); + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + previous_size->size_value); } static void via_tlbflush(agp_memory * mem) @@ -1179,11 +1210,7 @@ static void via_generic_setup(void) agp_bridge.tlb_flush = via_tlbflush; agp_bridge.mask_memory = via_mask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif + agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; agp_bridge.free_gatt_table = agp_generic_free_gatt_table; agp_bridge.insert_memory = agp_generic_insert_memory; @@ -1194,7 +1221,7 @@ static void via_generic_setup(void) #endif -#ifdef AGP_BUILD_SIS_GENERIC +#ifdef CONFIG_AGP_SIS static int sis_fetch_size(void) { @@ -1203,10 +1230,11 @@ static int sis_fetch_size(void) aper_size_info_8 *values; pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, 
&temp_size); - (void *) values = agp_bridge.aperture_sizes; + values = A_SIZE_8(agp_bridge.aperture_sizes); for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { if ((temp_size == values[i].size_value) || - ((temp_size & ~(0x03)) == (values[i].size_value & ~(0x03)))) { + ((temp_size & ~(0x03)) == + (values[i].size_value & ~(0x03)))) { agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i); @@ -1229,12 +1257,14 @@ static int sis_configure(void) u32 temp; aper_size_info_8 *current_size; - current_size = (aper_size_info_8 *) agp_bridge.current_size; + current_size = A_SIZE_8(agp_bridge.current_size); pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05); pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp); agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, agp_bridge.gatt_bus_addr); - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, current_size->size_value); + pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, + agp_bridge.gatt_bus_addr); + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + current_size->size_value); return 0; } @@ -1242,8 +1272,9 @@ static void sis_cleanup(void) { aper_size_info_8 *previous_size; - previous_size = (aper_size_info_8 *) agp_bridge.previous_size; - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, (previous_size->size_value & ~(0x03))); + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + (previous_size->size_value & ~(0x03))); } static unsigned long sis_mask_memory(unsigned long addr, int type) @@ -1284,11 +1315,7 @@ static void sis_generic_setup(void) agp_bridge.tlb_flush = sis_tlbflush; agp_bridge.mask_memory = sis_mask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif + agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = 
agp_generic_create_gatt_table; agp_bridge.free_gatt_table = agp_generic_free_gatt_table; agp_bridge.insert_memory = agp_generic_insert_memory; @@ -1299,10 +1326,10 @@ static void sis_generic_setup(void) #endif -#ifdef AGP_BUILD_AMD_IRONGATE +#ifdef CONFIG_AGP_AMD static struct _amd_irongate_private { - volatile unsigned char *registers; + volatile u8 *registers; } amd_irongate_private; static int amd_irongate_fetch_size(void) @@ -1313,7 +1340,7 @@ static int amd_irongate_fetch_size(void) pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); temp = (temp & 0x0000000e); - (void *) values = agp_bridge.aperture_sizes; + values = A_SIZE_32(agp_bridge.aperture_sizes); for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge.previous_size = @@ -1333,15 +1360,16 @@ static int amd_irongate_configure(void) u32 temp; u16 enable_reg; - current_size = (aper_size_info_32 *) agp_bridge.current_size; + current_size = A_SIZE_32(agp_bridge.current_size); /* Get the memory mapped registers */ pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp); temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); - amd_irongate_private.registers = (volatile unsigned char *) ioremap(temp, 4096); + amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096); /* Write out the address of the gatt table */ - OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, agp_bridge.gatt_bus_addr); + OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, + agp_bridge.gatt_bus_addr); /* Write the Sync register */ pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); @@ -1353,7 +1381,8 @@ static int amd_irongate_configure(void) /* Write out the size register */ pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); - temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) + | 0x00000001); pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); /* Flush the tlb */ @@ -1372,7 
+1401,7 @@ static void amd_irongate_cleanup(void) u32 temp; u16 enable_reg; - previous_size = (aper_size_info_32 *) agp_bridge.previous_size; + previous_size = A_SIZE_32(agp_bridge.previous_size); enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); enable_reg = (enable_reg & ~(0x0004)); @@ -1436,11 +1465,7 @@ static void amd_irongate_setup(void) agp_bridge.tlb_flush = amd_irongate_tlbflush; agp_bridge.mask_memory = amd_irongate_mask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif + agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; agp_bridge.free_gatt_table = agp_generic_free_gatt_table; agp_bridge.insert_memory = agp_generic_insert_memory; @@ -1451,7 +1476,7 @@ static void amd_irongate_setup(void) #endif -#ifdef AGP_BUILD_ALI_M1541 +#ifdef CONFIG_AGP_ALI static int ali_fetch_size(void) { @@ -1461,7 +1486,7 @@ static int ali_fetch_size(void) pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); temp &= ~(0xfffffff0); - (void *) values = agp_bridge.aperture_sizes; + values = A_SIZE_32(agp_bridge.aperture_sizes); for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { if (temp == values[i].size_value) { @@ -1491,12 +1516,13 @@ static void ali_cleanup(void) aper_size_info_32 *previous_size; u32 temp; - previous_size = (aper_size_info_32 *) agp_bridge.previous_size; + previous_size = A_SIZE_32(agp_bridge.previous_size); pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000090)); - pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, previous_size->size_value); + pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, + previous_size->size_value); } static int ali_configure(void) @@ -1504,7 +1530,7 @@ static int ali_configure(void) u32 temp; aper_size_info_32 *current_size; - current_size = 
(aper_size_info_32 *) agp_bridge.current_size; + current_size = A_SIZE_32(agp_bridge.current_size); /* aperture size and gatt addr */ pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, @@ -1561,11 +1587,7 @@ static void ali_generic_setup(void) agp_bridge.tlb_flush = ali_tlbflush; agp_bridge.mask_memory = ali_mask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; -#ifdef __SMP__ - agp_bridge.cache_flush = smp_flush_cache; -#else - agp_bridge.cache_flush = flush_cache; -#endif + agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; agp_bridge.free_gatt_table = agp_generic_free_gatt_table; agp_bridge.insert_memory = agp_generic_insert_memory; @@ -1593,7 +1615,7 @@ static void agp_find_supported_device(void) agp_bridge.dev = dev; /* Need to test for I810 here */ -#ifdef AGP_BUILD_INTEL_I810 +#ifdef CONFIG_AGP_I810 if (dev->vendor == PCI_VENDOR_ID_INTEL) { struct pci_dev *i810_dev; @@ -1603,11 +1625,14 @@ static void agp_find_supported_device(void) PCI_DEVICE_ID_INTEL_810_1, NULL); if (i810_dev == NULL) { - printk("agpgart: Detected an Intel i810, but could not find the secondary device.\n"); + printk("agpgart: Detected an Intel i810," + " but could not find the secondary" + " device.\n"); agp_bridge.type = NOT_SUPPORTED; return; } - printk("agpgart: Detected an Intel i810 Chipset.\n"); + printk(KERN_INFO "agpgart: Detected an Intel " + "i810 Chipset.\n"); agp_bridge.type = INTEL_I810; agp_bridge.intel_i810_setup(i810_dev); return; @@ -1617,11 +1642,14 @@ static void agp_find_supported_device(void) PCI_DEVICE_ID_INTEL_810_DC100_1, NULL); if (i810_dev == NULL) { - printk("agpgart: Detected an Intel i810 DC100, but could not find the secondary device.\n"); + printk("agpgart: Detected an Intel i810 " + "DC100, but could not find the " + "secondary device.\n"); agp_bridge.type = NOT_SUPPORTED; return; } - printk("agpgart: Detected an Intel i810 DC100 Chipset.\n"); + printk(KERN_INFO "agpgart: Detected an Intel i810 
" + "DC100 Chipset.\n"); agp_bridge.type = INTEL_I810; agp_bridge.intel_i810_setup(i810_dev); return; @@ -1631,11 +1659,14 @@ static void agp_find_supported_device(void) PCI_DEVICE_ID_INTEL_810_E_1, NULL); if (i810_dev == NULL) { - printk("agpgart: Detected an Intel i810 E, but could not find the secondary device.\n"); + printk("agpgart: Detected an Intel i810 E" + ", but could not find the secondary " + "device.\n"); agp_bridge.type = NOT_SUPPORTED; return; } - printk("agpgart: Detected an Intel i810 E Chipset.\n"); + printk(KERN_INFO "agpgart: Detected an Intel i810 E " + "Chipset.\n"); agp_bridge.type = INTEL_I810; agp_bridge.intel_i810_setup(i810_dev); return; @@ -1674,35 +1705,42 @@ static void agp_find_supported_device(void) &agp_bridge.mode); switch (dev->vendor) { -#ifdef AGP_BUILD_INTEL_GENERIC +#ifdef CONFIG_AGP_INTEL case PCI_VENDOR_ID_INTEL: switch (dev->device) { case PCI_DEVICE_ID_INTEL_82443LX_0: agp_bridge.type = INTEL_LX; - printk("agpgart: Detected an Intel 440LX Chipset.\n"); + printk(KERN_INFO "agpgart: Detected an Intel 440LX" + " Chipset.\n"); agp_bridge.intel_generic_setup(); return; case PCI_DEVICE_ID_INTEL_82443BX_0: agp_bridge.type = INTEL_BX; - printk("agpgart: Detected an Intel 440BX Chipset.\n"); + printk(KERN_INFO "agpgart: Detected an Intel 440BX " + "Chipset.\n"); agp_bridge.intel_generic_setup(); return; case PCI_DEVICE_ID_INTEL_82443GX_0: agp_bridge.type = INTEL_GX; - printk("agpgart: Detected an Intel 440GX Chipset.\n"); + printk(KERN_INFO "agpgart: Detected an Intel 440GX " + "Chipset.\n"); agp_bridge.intel_generic_setup(); return; default: if (agp_try_unsupported != 0) { - printk("agpgart: Trying generic intel routines for device id: %x\n", dev->device); + printk("agpgart: Trying generic intel " + "routines for device id: %x\n", + dev->device); agp_bridge.type = INTEL_GENERIC; agp_bridge.intel_generic_setup(); return; } else { - printk("agpgart: Unsupported intel chipset, you might want to try agp_try_unsupported=1.\n"); + 
printk("agpgart: Unsupported intel chipset," + " you might want to try " + "agp_try_unsupported=1.\n"); agp_bridge.type = NOT_SUPPORTED; return; } @@ -1710,35 +1748,41 @@ static void agp_find_supported_device(void) break; #endif -#ifdef AGP_BUILD_VIA_GENERIC +#ifdef CONFIG_AGP_VIA case PCI_VENDOR_ID_VIA: switch (dev->device) { case PCI_DEVICE_ID_VIA_82C597_0: agp_bridge.type = VIA_VP3; - printk("agpgart: Detected a VIA VP3 Chipset.\n"); + printk(KERN_INFO "agpgart: Detected a VIA VP3 " + "Chipset.\n"); agp_bridge.via_generic_setup(); return; case PCI_DEVICE_ID_VIA_82C598_0: agp_bridge.type = VIA_MVP3; - printk("agpgart: Detected a VIA MVP3 Chipset.\n"); + printk(KERN_INFO "agpgart: Detected a VIA MVP3 " + "Chipset.\n"); agp_bridge.via_generic_setup(); return; case PCI_DEVICE_ID_VIA_82C691_0: agp_bridge.type = VIA_APOLLO_PRO; - printk("agpgart: Detected a VIA Apollo Pro Chipset.\n"); + printk(KERN_INFO "agpgart: Detected a VIA Apollo " + "Pro Chipset.\n"); agp_bridge.via_generic_setup(); return; default: if (agp_try_unsupported != 0) { - printk("agpgart: Trying generic VIA routines for device id: %x\n", dev->device); + printk("agpgart: Trying generic VIA routines" + " for device id: %x\n", dev->device); agp_bridge.type = VIA_GENERIC; agp_bridge.via_generic_setup(); return; } else { - printk("agpgart: Unsupported VIA chipset, you might want to try agp_try_unsupported=1.\n"); + printk("agpgart: Unsupported VIA chipset," + " you might want to try " + "agp_try_unsupported=1.\n"); agp_bridge.type = NOT_SUPPORTED; return; } @@ -1746,18 +1790,23 @@ static void agp_find_supported_device(void) break; #endif -#ifdef AGP_BUILD_SIS_GENERIC +#ifdef CONFIG_AGP_SIS case PCI_VENDOR_ID_SI: switch (dev->device) { - /* ToDo need to find out the specific devices supported */ + /* ToDo need to find out the + * specific devices supported. 
+ */ default: if (agp_try_unsupported != 0) { - printk("agpgart: Trying generic SiS routines for device id: %x\n", dev->device); + printk("agpgart: Trying generic SiS routines" + " for device id: %x\n", dev->device); agp_bridge.type = SIS_GENERIC; agp_bridge.sis_generic_setup(); return; } else { - printk("agpgart: Unsupported SiS chipset, you might want to try agp_try_unsupported=1.\n"); + printk("agpgart: Unsupported SiS chipset, " + "you might want to try " + "agp_try_unsupported=1.\n"); agp_bridge.type = NOT_SUPPORTED; return; } @@ -1765,23 +1814,28 @@ static void agp_find_supported_device(void) break; #endif -#ifdef AGP_BUILD_AMD_IRONGATE +#ifdef CONFIG_AGP_AMD case PCI_VENDOR_ID_AMD: switch (dev->device) { case PCI_DEVICE_ID_AMD_IRONGATE_0: agp_bridge.type = AMD_IRONGATE; - printk("agpgart: Detected an AMD Irongate Chipset.\n"); + printk(KERN_INFO "agpgart: Detected an AMD Irongate" + " Chipset.\n"); agp_bridge.amd_irongate_setup(); return; default: if (agp_try_unsupported != 0) { - printk("agpgart: Trying Amd irongate routines for device id: %x\n", dev->device); + printk("agpgart: Trying Amd irongate" + " routines for device id: %x\n", + dev->device); agp_bridge.type = AMD_GENERIC; agp_bridge.amd_irongate_setup(); return; } else { - printk("agpgart: Unsupported Amd chipset, you might want to try agp_try_unsupported=1.\n"); + printk("agpgart: Unsupported Amd chipset," + " you might want to try " + "agp_try_unsupported=1.\n"); agp_bridge.type = NOT_SUPPORTED; return; } @@ -1789,22 +1843,26 @@ static void agp_find_supported_device(void) break; #endif -#ifdef AGP_BUILD_ALI_M1541 +#ifdef CONFIG_AGP_ALI case PCI_VENDOR_ID_AL: switch (dev->device) { case PCI_DEVICE_ID_AL_M1541_0: agp_bridge.type = ALI_M1541; - printk("agpgart: Detected an ALi M1541 Chipset\n"); + printk(KERN_INFO "agpgart: Detected an ALi M1541" + " Chipset\n"); agp_bridge.ali_generic_setup(); return; default: if (agp_try_unsupported != 0) { - printk("agpgart: Trying ALi generic routines for device 
id: %x\n", dev->device); + printk("agpgart: Trying ALi generic routines" + " for device id: %x\n", dev->device); agp_bridge.type = ALI_GENERIC; agp_bridge.ali_generic_setup(); return; } else { - printk("agpgart: Unsupported ALi chipset, you might want to type agp_try_unsupported=1.\n"); + printk("agpgart: Unsupported ALi chipset," + " you might want to type " + "agp_try_unsupported=1.\n"); agp_bridge.type = NOT_SUPPORTED; return; } @@ -1822,7 +1880,7 @@ struct agp_max_table { int agp; }; -static struct agp_max_table agp_maxes_table[9] = +static struct agp_max_table maxes_table[9] = { {0, 0}, {32, 4}, @@ -1845,18 +1903,19 @@ static int agp_find_max(void) memory = virt_to_phys(high_memory) / 0x100000; index = 0; - while ((memory > agp_maxes_table[index].mem) && + while ((memory > maxes_table[index].mem) && (index < 8)) { index++; } - t = (memory - agp_maxes_table[index - 1].mem) / - (agp_maxes_table[index].mem - agp_maxes_table[index - 1].mem); + t = (memory - maxes_table[index - 1].mem) / + (maxes_table[index].mem - maxes_table[index - 1].mem); - result = agp_maxes_table[index - 1].agp + - (t * (agp_maxes_table[index].agp - agp_maxes_table[index - 1].agp)); + result = maxes_table[index - 1].agp + + (t * (maxes_table[index].agp - maxes_table[index - 1].agp)); - printk("agpgart: Maximum main memory to use for agp memory: %dM\n", result); + printk(KERN_INFO "agpgart: Maximum main memory to use " + "for agp memory: %dM\n", result); result = (result * 0x100000) / 4096; return result; } @@ -1876,22 +1935,22 @@ static int agp_backend_initialize(void) memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); agp_bridge.type = NOT_SUPPORTED; -#ifdef AGP_BUILD_INTEL_GENERIC +#ifdef CONFIG_AGP_INTEL agp_bridge.intel_generic_setup = intel_generic_setup; #endif -#ifdef AGP_BUILD_INTEL_I810 +#ifdef CONFIG_AGP_I810 agp_bridge.intel_i810_setup = intel_i810_setup; #endif -#ifdef AGP_BUILD_VIA_GENERIC +#ifdef CONFIG_AGP_VIA agp_bridge.via_generic_setup = via_generic_setup; #endif 
-#ifdef AGP_BUILD_SIS_GENERIC +#ifdef CONFIG_AGP_SIS agp_bridge.sis_generic_setup = sis_generic_setup; #endif -#ifdef AGP_BUILD_AMD_IRONGATE +#ifdef CONFIG_AGP_AMD agp_bridge.amd_irongate_setup = amd_irongate_setup; #endif -#ifdef AGP_BUILD_ALI_M1541 +#ifdef CONFIG_AGP_ALI agp_bridge.ali_generic_setup = ali_generic_setup; #endif agp_bridge.max_memory_agp = agp_find_max(); @@ -1899,14 +1958,17 @@ static int agp_backend_initialize(void) agp_find_supported_device(); if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page = (unsigned long) agp_alloc_page(); + agp_bridge.scratch_page = agp_alloc_page(); - if ((void *) (agp_bridge.scratch_page) == NULL) { - printk("agpgart: unable to get memory for scratch page.\n"); + if (agp_bridge.scratch_page == 0) { + printk("agpgart: unable to get memory for " + "scratch page.\n"); return -ENOMEM; } - agp_bridge.scratch_page = virt_to_phys((void *) agp_bridge.scratch_page); - agp_bridge.scratch_page = agp_bridge.mask_memory(agp_bridge.scratch_page, 0); + agp_bridge.scratch_page = + virt_to_phys((void *) agp_bridge.scratch_page); + agp_bridge.scratch_page = + agp_bridge.mask_memory(agp_bridge.scratch_page, 0); } if (agp_bridge.type == NOT_SUPPORTED) { printk("agpgart: no supported devices found.\n"); @@ -1919,7 +1981,8 @@ static int agp_backend_initialize(void) return -EINVAL; } if (agp_bridge.create_gatt_table()) { - printk("agpgart: unable to get memory for graphics translation table.\n"); + printk("agpgart: unable to get memory for graphics " + "translation table.\n"); return -ENOMEM; } agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); @@ -1937,8 +2000,10 @@ static int agp_backend_initialize(void) vfree(agp_bridge.key_list); return -EINVAL; } - printk("agpgart: Physical address of the agp aperture: 0x%lx\n", agp_bridge.gart_bus_addr); - printk("agpgart: Agp aperture is %dM in size.\n", size_value); + printk(KERN_INFO "agpgart: Physical address of the agp aperture:" + " 0x%lx\n", agp_bridge.gart_bus_addr); + 
printk(KERN_INFO "agpgart: Agp aperture is %dM in size.\n", + size_value); return 0; } @@ -1950,19 +2015,19 @@ static void agp_backend_cleanup(void) if (agp_bridge.needs_scratch_page == TRUE) { agp_bridge.scratch_page &= ~(0x00000fff); - agp_destroy_page((void *) phys_to_virt(agp_bridge.scratch_page)); + agp_destroy_page((unsigned long) + phys_to_virt(agp_bridge.scratch_page)); } } extern int agp_frontend_initialize(void); extern void agp_frontend_cleanup(void); -#ifdef MODULE -int init_module(void) +static int __init agp_init(void) { int ret_val; - printk("Linux agpgart interface v%d.%d (c) Jeff Hartmann\n", + printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n", AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); ret_val = agp_backend_initialize(); @@ -1978,10 +2043,11 @@ int init_module(void) return 0; } -void cleanup_module(void) +static void __exit agp_cleanup(void) { agp_frontend_cleanup(); agp_backend_cleanup(); } -#endif +module_init(agp_init); +module_exit(agp_cleanup); diff --git a/drivers/char/agp/agpgart_fe.c b/drivers/char/agp/agpgart_fe.c index 97d40b19474b..54ad294e7dd9 100644 --- a/drivers/char/agp/agpgart_fe.c +++ b/drivers/char/agp/agpgart_fe.c @@ -1,8 +1,8 @@ /* * AGPGART module frontend version 0.99 * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight - * Copyright (C) 1999 Xi Graphics + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -25,7 +25,6 @@ */ #define __NO_VERSION__ -#include #include #include #include @@ -44,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -187,7 +185,8 @@ static int agp_create_segment(agp_client * client, agp_region * region) agp_segment *user_seg; int i; - seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), GFP_KERNEL); + seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), + GFP_KERNEL); if (seg == NULL) { kfree(region->seg_list); return -ENOMEM; @@ -373,8 +372,8 @@ static void agp_remove_all_clients(agp_controller * controller) priv = agp_find_private(temp->pid); if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, &(priv->access_flags)); - clear_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); + clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); } client = client->next; kfree(temp); @@ -439,8 +438,8 @@ static void agp_controller_make_current(agp_controller * controller) priv = agp_find_private(clients->pid); if (priv != NULL) { - set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); - set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); } clients = clients->next; } @@ -453,7 +452,7 @@ static void agp_controller_release_current(agp_controller * controller, { agp_client *clients; - clear_bit(AGP_FF_IS_VALID, &(controller_priv->access_flags)); + clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); clients = controller->clients; while (clients != NULL) { @@ -462,7 +461,7 @@ static void agp_controller_release_current(agp_controller * controller, priv = agp_find_private(clients->pid); if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); } clients = 
clients->next; } @@ -610,7 +609,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma) AGP_UNLOCK(); return -EPERM; } - if (!(test_bit(AGP_FF_IS_VALID, &(priv->access_flags)))) { + if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) { AGP_UNLOCK(); return -EPERM; } @@ -620,7 +619,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma) current_size = current_size * 0x100000; offset = vma->vm_pgoff << PAGE_SHIFT; - if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { if ((size + offset) > current_size) { AGP_UNLOCK(); return -EINVAL; @@ -631,11 +630,13 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma) AGP_UNLOCK(); return -EPERM; } - if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) { + if (!agp_find_seg_in_client(client, offset, + size, vma->vm_page_prot)) { AGP_UNLOCK(); return -EINVAL; } - if (remap_page_range(vma->vm_start, (kerninfo.aper_base + offset), + if (remap_page_range(vma->vm_start, + (kerninfo.aper_base + offset), size, vma->vm_page_prot)) { AGP_UNLOCK(); return -EAGAIN; @@ -643,7 +644,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma) AGP_UNLOCK(); return 0; } - if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { if (size != current_size) { AGP_UNLOCK(); return -EINVAL; @@ -666,19 +667,20 @@ static int agp_release(struct inode *inode, struct file *file) AGP_LOCK(); - if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { agp_controller *controller; controller = agp_find_controller_by_pid(priv->my_pid); if (controller != NULL) { if (controller == agp_fe.current_controller) { - agp_controller_release_current(controller, priv); + agp_controller_release_current(controller, + priv); } agp_remove_controller(controller); } } - if (test_bit(AGP_FF_IS_CLIENT, 
&(priv->access_flags))) { + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { agp_remove_client(priv->my_pid); } agp_remove_file_private(priv); @@ -707,18 +709,18 @@ static int agp_open(struct inode *inode, struct file *file) return -ENOMEM; } memset(priv, 0, sizeof(agp_file_private)); - set_bit(AGP_FF_ALLOW_CLIENT, &(priv->access_flags)); + set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); priv->my_pid = current->pid; if ((current->uid == 0) || (current->suid == 0)) { /* Root priv, can be controller */ - set_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)); + set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); } client = agp_find_client_by_pid(current->pid); if (client != NULL) { - set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); } file->private_data = (void *) priv; agp_insert_file_private(priv); @@ -754,7 +756,8 @@ static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg) userinfo.version.major = kerninfo.version.major; userinfo.version.minor = kerninfo.version.minor; - userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16); + userinfo.bridge_id = kerninfo.device->vendor | + (kerninfo.device->device << 16); userinfo.agp_mode = kerninfo.mode; userinfo.aper_base = kerninfo.aper_base; userinfo.aper_size = kerninfo.aper_size; @@ -770,7 +773,7 @@ static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg) static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg) { agp_controller *controller; - if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)))) { + if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) { return -EPERM; } if (agp_fe.current_controller != NULL) { @@ -798,8 +801,8 @@ static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg) agp_controller_make_current(controller); } - 
set_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); return 0; } @@ -837,8 +840,10 @@ static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg) client_priv = agp_find_private(reserve.pid); if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags)); + set_bit(AGP_FF_IS_CLIENT, + &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, + &client_priv->access_flags); } if (client == NULL) { /* client is already removed */ @@ -848,12 +853,14 @@ static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg) } else { agp_segment *segment; - segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), GFP_KERNEL); + segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), + GFP_KERNEL); if (segment == NULL) { return -ENOMEM; } - if (copy_from_user(segment, (void *) reserve.seg_list, GFP_KERNEL)) { + if (copy_from_user(segment, (void *) reserve.seg_list, + GFP_KERNEL)) { kfree(segment); return -EFAULT; } @@ -870,8 +877,10 @@ static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg) client_priv = agp_find_private(reserve.pid); if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags)); - set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags)); + set_bit(AGP_FF_IS_CLIENT, + &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, + &client_priv->access_flags); } return agp_create_segment(client, &reserve); } else { @@ -972,10 +981,12 @@ static int agp_ioctl(struct inode *inode, struct file *file, return -EBUSY; } if (cmd != AGPIOC_ACQUIRE) { - if (!(test_bit(AGP_FF_IS_CONTROLLER, &(curr_priv->access_flags)))) { + if (!(test_bit(AGP_FF_IS_CONTROLLER, + &curr_priv->access_flags))) { return -EPERM; } - /* Use the original pid of the controller, in case it's threaded */ + 
/* Use the original pid of the controller, + * in case it's threaded */ if (agp_fe.current_controller->pid != curr_priv->my_pid) { return -EBUSY; diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h index 5e9d098696f6..fce2df7ec942 100644 --- a/drivers/char/drm/drmP.h +++ b/drivers/char/drm/drmP.h @@ -116,7 +116,6 @@ typedef struct wait_queue *wait_queue_head_t; #endif /* Generic cmpxchg added in 2.3.x */ -#if CPU != 386 #ifndef __HAVE_ARCH_CMPXCHG /* Include this here so that driver can be used with older kernels. */ @@ -150,10 +149,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #define cmpxchg(ptr,o,n) \ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ (unsigned long)(n),sizeof(*(ptr)))) -#endif -#else - /* Compiling for a 386 proper... */ -#error DRI not supported on Intel 80386 #endif /* Macros to make printk easier */ @@ -468,6 +463,7 @@ typedef struct drm_device { /* Misc. support (init.c) */ extern int drm_flags; extern void drm_parse_options(char *s); +extern int drm_cpu_valid(void); /* Device support (fops.c) */ diff --git a/drivers/char/drm/fops.c b/drivers/char/drm/fops.c index 47eacb833b9e..24b17356bdd4 100644 --- a/drivers/char/drm/fops.c +++ b/drivers/char/drm/fops.c @@ -40,6 +40,7 @@ int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev) drm_file_t *priv; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ + if (!drm_cpu_valid()) return -EINVAL; DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor); diff --git a/drivers/char/drm/init.c b/drivers/char/drm/init.c index 340ba8f5a87e..e6b78395c120 100644 --- a/drivers/char/drm/init.c +++ b/drivers/char/drm/init.c @@ -97,3 +97,13 @@ void drm_parse_options(char *s) } } +/* drm_cpu_valid returns non-zero if the DRI will run on this CPU, and 0 + * otherwise. 
*/ + +int drm_cpu_valid(void) +{ +#if defined(__i386__) + if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */ +#endif + return 1; +} diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index cb3f1e0dea6a..5feeeef8028d 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -1,7 +1,7 @@ /* * linux/drivers/char/synclink.c * - * ==FILEDATE 19990901== + * ==FILEDATE 19991207== * * Device driver for Microgate SyncLink ISA and PCI * high speed multiprotocol serial adapters. @@ -925,7 +925,7 @@ MODULE_PARM(maxframe,"1-" __MODULE_STRING(MAX_TOTAL_DEVICES) "i"); #endif static char *driver_name = "SyncLink serial driver"; -static char *driver_version = "1.14"; +static char *driver_version = "1.15"; static struct tty_driver serial_driver, callout_driver; static int serial_refcount; @@ -6981,7 +6981,6 @@ BOOLEAN mgsl_register_test( struct mgsl_struct *info ) spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); - spin_unlock_irqrestore(&info->irq_spinlock,flags); /* Verify the reset state of some registers. */ @@ -7015,7 +7014,6 @@ BOOLEAN mgsl_register_test( struct mgsl_struct *info ) } } - spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); @@ -7035,7 +7033,6 @@ BOOLEAN mgsl_irq_test( struct mgsl_struct *info ) spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); - spin_unlock_irqrestore(&info->irq_spinlock,flags); /* * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 
@@ -7057,6 +7054,8 @@ BOOLEAN mgsl_irq_test( struct mgsl_struct *info ) usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED); usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + EndTime=100; while( EndTime-- && !info->irq_occurred ) { set_current_state(TASK_INTERRUPTIBLE); @@ -7359,7 +7358,9 @@ BOOLEAN mgsl_dma_test( struct mgsl_struct *info ) } } + spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset( info ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); /* restore current port options */ memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); diff --git a/drivers/i2o/i2o_block.c b/drivers/i2o/i2o_block.c index 60412f5871ad..f83ecbe9bf6f 100644 --- a/drivers/i2o/i2o_block.c +++ b/drivers/i2o/i2o_block.c @@ -118,7 +118,7 @@ static int i2ob_timer_started = 0; static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int); static void i2ob_end_request(struct request *); -static void i2ob_request(void); +static void i2ob_request(request_queue_t * q); /* * Dump messages. @@ -135,7 +135,6 @@ static void i2ob_dump_msg(struct i2ob_device *dev,u32 *msg,int size) printk(KERN_INFO "\n"); } - /* * Get a message */ @@ -154,8 +153,8 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq, { struct i2o_controller *c = dev->controller; int tid = dev->tid; - u32 *msg; - u32 *mptr; + unsigned long msg; + unsigned long mptr; u64 offset; struct request *req = ireq->req; struct buffer_head *bh = req->bh; @@ -167,22 +166,22 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq, /* * Build the message based on the request. 
*/ - __raw_writel(i2ob_context|(unit<<8), &msg[2]); - __raw_writel(ireq->num, &msg[3]); - __raw_writel(req->nr_sectors << 9, &msg[5]); + __raw_writel(i2ob_context|(unit<<8), msg+8); + __raw_writel(ireq->num, msg+12); + __raw_writel(req->nr_sectors << 9, msg+20); /* This can be optimised later - just want to be sure its right for starters */ offset = ((u64)(req->sector+base)) << 9; - __raw_writel( offset & 0xFFFFFFFF, &msg[6]); - __raw_writel(offset>>32, &msg[7]); + __raw_writel( offset & 0xFFFFFFFF, msg+24); + __raw_writel(offset>>32, msg+28); mptr=msg+8; if(req->cmd == READ) { - __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, &msg[1]); + __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4); /* We don't yet do cache/readahead and other magic */ - __raw_writel(1<<16, &msg[4]); + __raw_writel(1<<16, msg+16); while(bh!=NULL) { /* @@ -191,31 +190,33 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq, * sucky to read. */ if(bh->b_reqnext) - __raw_writel(0x10000000|(bh->b_size), mptr++); + __raw_writel(0x10000000|(bh->b_size), mptr); else - __raw_writel(0xD0000000|(bh->b_size), mptr++); + __raw_writel(0xD0000000|(bh->b_size), mptr); - __raw_writel(virt_to_bus(bh->b_data), mptr++); + __raw_writel(virt_to_bus(bh->b_data), mptr+4); + mptr+=8; count -= bh->b_size; bh = bh->b_reqnext; } } else if(req->cmd == WRITE) { - __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, &msg[1]); - __raw_writel(1<<16, &msg[4]); + __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4); + __raw_writel(1<<16, msg+16); while(bh!=NULL) { if(bh->b_reqnext) - __raw_writel(0x14000000|(bh->b_size), mptr++); + __raw_writel(0x14000000|(bh->b_size), mptr); else - __raw_writel(0xD4000000|(bh->b_size), mptr++); + __raw_writel(0xD4000000|(bh->b_size), mptr); count -= bh->b_size; - __raw_writel(virt_to_bus(bh->b_data), mptr++); + __raw_writel(virt_to_bus(bh->b_data), mptr+4); + mptr+=8; bh = bh->b_reqnext; } } - __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | 
SGL_OFFSET_8, &msg[0]); + __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8, msg); if(req->current_nr_sectors > 8) printk("Gathered sectors %ld.\n", @@ -223,8 +224,7 @@ static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq, if(count != 0) { - printk("Request count botched by %d.\n", count); - msg[5] -= count; + printk(KERN_ERR "Request count botched by %d.\n", count); } i2o_post_message(c,m); @@ -399,7 +399,7 @@ static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, str */ atomic_dec(&queue_depth); - i2ob_request(); + i2ob_request(NULL); spin_unlock_irqrestore(&io_request_lock, flags); } @@ -437,7 +437,7 @@ static void i2ob_timer_handler(unsigned long dummy) /* * Restart any requests. */ - i2ob_request(); + i2ob_request(NULL); /* * Free the lock. @@ -453,7 +453,7 @@ static void i2ob_timer_handler(unsigned long dummy) * we use it. */ -static void i2ob_request(void) +static void i2ob_request(request_queue_t * q) { struct request *req; struct i2ob_request *ireq; @@ -527,7 +527,6 @@ static void i2ob_request(void) } } - /* * SCSI-CAM for ioctl geometry mapping * Duplicated with SCSI - this should be moved into somewhere common @@ -1086,7 +1085,9 @@ int i2o_block_init(void) blk_size[MAJOR_NR] = i2ob_sizes; max_sectors[MAJOR_NR] = i2ob_max_sectors; - blk_dev[MAJOR_NR].request_fn = i2ob_request; + blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), i2ob_request); + blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0); + for (i = 0; i < MAX_I2OB << 4; i++) { i2ob_dev[i].refcnt = 0; i2ob_dev[i].flags = 0; diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 53da1d861c65..00ae312fa9b1 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -31,6 +31,8 @@ * new PCI BIOS interface. * Alan Cox : Fixed the out of memory * handling. + * + * Torben Mathiasen New Maintainer! 
* ********************************************************************/ @@ -468,9 +470,6 @@ extern int tlan_probe( struct net_device *dev ) priv = (TLanPrivateInfo *) dev->priv; - dev->name = priv->devName; - strcpy( priv->devName, " " ); - dev = init_etherdev( dev, sizeof(TLanPrivateInfo) ); dev->base_addr = io_base; @@ -489,7 +488,7 @@ extern int tlan_probe( struct net_device *dev ) } priv->sa_int = dev->mem_start & 0x02; priv->debug = dev->mem_end; - + spin_lock_init(&priv->lock); printk("TLAN %d.%d: %s irq=%2d io=%04x, %s, Rev. %d\n", TLanVersionMajor, diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h index 7a49663d6351..4ae1cf278974 100644 --- a/drivers/net/tlan.h +++ b/drivers/net/tlan.h @@ -14,6 +14,10 @@ * ** This file is best viewed/edited with tabstop=4, colums>=132 * + * + * Dec 10, 1999 Torben Mathiasen + * New Maintainer + * ********************************************************************/ diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 85cdfb77448b..64ce20902d37 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -238,7 +238,7 @@ static void __init HWPrtChanID (__u32 pcid, short stride) { short i, j; for (i=0, j=0; i<24; i++, j+=stride) - printk("%1x", ((int)readb(pcid + j)) & 0x0f); + printk("%1x", ((int)isa_readb(pcid + j)) & 0x0f); printk("\n"); } @@ -267,14 +267,8 @@ int __init ibmtr_probe(struct net_device *dev) */ if (ibmtr_probe1(dev, base_addr)) - { -#ifndef MODULE -#ifndef PCMCIA - tr_freedev(dev); -#endif -#endif return -ENODEV; - } else + else return 0; } else if (base_addr != 0) /* Don't probe at all. 
*/ @@ -285,13 +279,7 @@ int __init ibmtr_probe(struct net_device *dev) int ioaddr = ibmtr_portlist[i]; if (check_region(ioaddr, IBMTR_IO_EXTENT)) continue; - if (ibmtr_probe1(dev, ioaddr)) { -#ifndef MODULE -#ifndef PCMCIA - tr_freedev(dev); -#endif -#endif - } else + if (!ibmtr_probe1(dev, ioaddr)) return 0; } @@ -351,7 +339,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) * Suboptimize knowing first byte different */ - ctemp = readb(cd_chanid) & 0x0f; + ctemp = isa_readb(cd_chanid) & 0x0f; if (ctemp != *tchanid) { /* NOT ISA card, try MCA */ tchanid=mcchannelid; cardpresent=TR_MCA; @@ -366,7 +354,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) */ for (i=2,j=1; i<=46; i=i+2,j++) { - if ((readb(cd_chanid+i) & 0x0f) != tchanid[j]) { + if ((isa_readb(cd_chanid+i) & 0x0f) != tchanid[j]) { cardpresent=NOTOK; /* match failed, not TR card */ break; } @@ -378,7 +366,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) * as it has different IRQ settings */ - if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio)==0x0e)) + if (cardpresent == TR_ISA && (isa_readb(AIPFID + t_mmio)==0x0e)) cardpresent=TR_ISAPNP; if (cardpresent == NOTOK) { /* "channel_id" did not match, report */ @@ -461,14 +449,14 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) if (intr==3) irq=11; timeout = jiffies + TR_SPIN_INTERVAL; - while(!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)) + while(!isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)) if (time_after(jiffies, timeout)) { DPRINTK("Hardware timeout during initialization.\n"); kfree_s(ti, sizeof(struct tok_info)); return -ENODEV; } - ti->sram=((__u32)readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12); + ti->sram=((__u32)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12); ti->global_int_enable=PIOaddr+ADAPTINTREL; ti->adapter_int_enable=PIOaddr+ADAPTINTREL; break; @@ -492,7 +480,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int 
PIOaddr) for (i=0; i<0x18; i=i+2) { /* technical reference states to do this */ - temp = readb(ti->mmio + AIP + i) & 0x0f; + temp = isa_readb(ti->mmio + AIP + i) & 0x0f; #if !TR_NEWFORMAT printk("%1X",ti->hw_address[j]=temp); #else @@ -507,13 +495,13 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) #endif /* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,...*/ - ti->adapter_type = readb(ti->mmio + AIPADAPTYPE); + ti->adapter_type = isa_readb(ti->mmio + AIPADAPTYPE); /* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */ - ti->data_rate = readb(ti->mmio + AIPDATARATE); + ti->data_rate = isa_readb(ti->mmio + AIPDATARATE); /* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */ - ti->token_release = readb(ti->mmio + AIPEARLYTOKEN); + ti->token_release = isa_readb(ti->mmio + AIPEARLYTOKEN); /* How much shared RAM is on adapter ? */ #ifdef PCMCIA @@ -524,10 +512,10 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) #endif /* We need to set or do a bunch of work here based on previous results.. */ /* Support paging? 
What sizes?: F=no, E=16k, D=32k, C=16 & 32k */ - ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE); + ti->shared_ram_paging = isa_readb(ti->mmio + AIPSHRAMPAGE); /* Available DHB 4Mb size: F=2048, E=4096, D=4464 */ - switch (readb(ti->mmio + AIP4MBDHB)) { + switch (isa_readb(ti->mmio + AIP4MBDHB)) { case 0xe : ti->dhb_size4mb = 4096; break; @@ -540,7 +528,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) } /* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */ - switch (readb(ti->mmio + AIP16MBDHB)) { + switch (isa_readb(ti->mmio + AIP16MBDHB)) { case 0xe : ti->dhb_size16mb = 4096; break; @@ -576,7 +564,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) /* * determine how much of total RAM is mapped into PC space */ - ti->mapped_ram_size=1<<((((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4); + ti->mapped_ram_size=1<<((((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4); ti->page_mask=0; if (ti->shared_ram_paging == 0xf) { /* No paging in adapter */ ti->mapped_ram_size = ti->avail_shared_ram; @@ -635,7 +623,7 @@ static int __init ibmtr_probe1(struct net_device *dev, int PIOaddr) static __u32 ram_bndry_mask[]={0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000}; __u32 new_base, rrr_32, chk_base, rbm; - rrr_32 = ((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003; + rrr_32 = ((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003; rbm = ram_bndry_mask[rrr_32]; new_base = (ibmtr_mem_base + (~rbm)) & rbm; /* up to boundary */ chk_base = new_base + (ti->mapped_ram_size<<9); @@ -765,11 +753,11 @@ static unsigned char __init get_sram_size(struct tok_info *adapt_info) 'B' - 64KB less 512 bytes at top (WARNING ... 
must zero top bytes in INIT */ - avail_sram_code=0xf-readb(adapt_info->mmio + AIPAVAILSHRAM); + avail_sram_code=0xf-isa_readb(adapt_info->mmio + AIPAVAILSHRAM); if (avail_sram_code) return size_code[avail_sram_code]; else /* for code 'F', must compute size from RRR(3,2) bits */ - return 1<<((readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4); + return 1<<((isa_readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4); } static int __init trdev_init(struct net_device *dev) @@ -816,20 +804,20 @@ static void tok_set_multicast_list(struct net_device *dev) } SET_PAGE(ti->srb); for (i=0; isrb+i); + isa_writeb(0, ti->srb+i); - writeb(DIR_SET_FUNC_ADDR, + isa_writeb(DIR_SET_FUNC_ADDR, ti->srb + offsetof(struct srb_set_funct_addr, command)); DPRINTK("Setting functional address: "); for (i=0; i<4; i++) { - writeb(address[i], + isa_writeb(address[i], ti->srb + offsetof(struct srb_set_funct_addr, funct_address)+i); printk("%02X ", address[i]); } - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); printk("\n"); } @@ -838,7 +826,7 @@ static int tok_open(struct net_device *dev) struct tok_info *ti=(struct tok_info *)dev->priv; /* init the spinlock */ - ti->lock = (spinlock_t) SPIN_LOCK_UNLOCKED; + spin_lock_init(&ti->lock); if (ti->open_status==CLOSED) tok_init_card(dev); @@ -862,17 +850,17 @@ static int tok_close(struct net_device *dev) struct tok_info *ti=(struct tok_info *) dev->priv; - writeb(DIR_CLOSE_ADAPTER, + isa_writeb(DIR_CLOSE_ADAPTER, ti->srb + offsetof(struct srb_close_adapter, command)); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); ti->open_status=CLOSED; sleep_on(&ti->wait_for_tok_int); - if (readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))) + if (isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))) DPRINTK("close adapter failed: %02X\n", - 
(int)readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))); + (int)isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code))); dev->start = 0; #ifdef PCMCIA @@ -899,7 +887,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) /* Disable interrupts till processing is finished */ dev->interrupt=1; - writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); + isa_writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); /* Reset interrupt for ISA boards */ if (ti->adapter_int_enable) @@ -916,7 +904,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) the extra levels of logic and call depth for the original solution. */ - status=readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD); + status=isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD); #ifdef PCMCIA /* Check if the PCMCIA card was pulled. */ if (status == 0xFF) @@ -928,7 +916,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) } /* Check ISRP EVEN too. 
*/ - if ( readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF) + if ( isa_readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF) { DPRINTK("PCMCIA card removed.\n"); spin_unlock(&(ti->lock)); @@ -943,26 +931,26 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) int i; __u32 check_reason; - check_reason=ti->mmio + ntohs(readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN)); + check_reason=ti->mmio + ntohs(isa_readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN)); DPRINTK("Adapter check interrupt\n"); DPRINTK("8 reason bytes follow: "); for(i=0; i<8; i++, check_reason++) - printk("%02X ", (int)readb(check_reason)); + printk("%02X ", (int)isa_readb(check_reason)); printk("\n"); - writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); dev->interrupt=0; - } else if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) + } else if (isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) & (TCR_INT | ERR_INT | ACCESS_INT)) { DPRINTK("adapter error: ISRP_EVEN : %02x\n", - (int)readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)); - writeb(~(TCR_INT | ERR_INT | ACCESS_INT), + (int)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)); + isa_writeb(~(TCR_INT | ERR_INT | ACCESS_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); dev->interrupt=0; } else if (status @@ -971,12 +959,12 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) if (status & SRB_RESP_INT) { /* SRB response */ - switch(readb(ti->srb)) { /* SRB command check */ + switch(isa_readb(ti->srb)) { /* SRB command check */ case XMIT_DIR_FRAME: { unsigned char xmit_ret_code; - xmit_ret_code=readb(ti->srb + 
offsetof(struct srb_xmit, ret_code)); + xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code)); if (xmit_ret_code != 0xff) { DPRINTK("error on xmit_dir_frame request: %02X\n", xmit_ret_code); @@ -993,7 +981,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) case XMIT_UI_FRAME: { unsigned char xmit_ret_code; - xmit_ret_code=readb(ti->srb + offsetof(struct srb_xmit, ret_code)); + xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code)); if (xmit_ret_code != 0xff) { DPRINTK("error on xmit_ui_frame request: %02X\n", xmit_ret_code); @@ -1011,14 +999,14 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) unsigned char open_ret_code; __u16 open_error_code; - ti->srb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr))); - ti->ssb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr))); - ti->arb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr))); - ti->asb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr))); + ti->srb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr))); + ti->ssb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr))); + ti->arb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr))); + ti->asb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr))); ti->current_skb=NULL; - open_ret_code = readb(ti->init_srb +offsetof(struct srb_open_response, ret_code)); - open_error_code = ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, error_code))); + open_ret_code = isa_readb(ti->init_srb +offsetof(struct srb_open_response, ret_code)); + open_error_code = ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, error_code))); if (open_ret_code==7) { @@ -1049,9 +1037,9 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) #else 
DPRINTK("Adapter initialized and opened.\n"); #endif - writeb(~(SRB_RESP_INT), + isa_writeb(~(SRB_RESP_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(~(CMD_IN_SRB), + isa_writeb(~(CMD_IN_SRB), ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); open_sap(EXTENDED_SAP,dev); @@ -1073,13 +1061,13 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) break; case DLC_OPEN_SAP: - if (readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) { + if (isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) { DPRINTK("open_sap failed: ret_code = %02X,retrying\n", - (int)readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))); + (int)isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))); ibmtr_reset_timer(&(ti->tr_timer), dev); } else { ti->exsap_station_id= - readw(ti->srb+offsetof(struct dlc_open_sap, station_id)); + isa_readw(ti->srb+offsetof(struct dlc_open_sap, station_id)); ti->open_status=SUCCESS; /* TR adapter is now available */ wake_up(&ti->wait_for_reset); } @@ -1090,16 +1078,16 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) case DIR_SET_GRP_ADDR: case DIR_SET_FUNC_ADDR: case DLC_CLOSE_SAP: - if (readb(ti->srb+offsetof(struct srb_interrupt, ret_code))) + if (isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code))) DPRINTK("error on %02X: %02X\n", - (int)readb(ti->srb+offsetof(struct srb_interrupt, command)), - (int)readb(ti->srb+offsetof(struct srb_interrupt, ret_code))); + (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, command)), + (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code))); break; case DIR_READ_LOG: - if (readb(ti->srb+offsetof(struct srb_read_log, ret_code))) + if (isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code))) DPRINTK("error on dir_read_log: %02X\n", - (int)readb(ti->srb+offsetof(struct srb_read_log, ret_code))); + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code))); else if (IBMTR_DEBUG_MESSAGES) { DPRINTK( @@ -1107,24 +1095,24 @@ void tok_interrupt 
(int irq, void *dev_id, struct pt_regs *regs) "A/C errors %02X, Abort delimiters %02X, Lost frames %02X\n" "Receive congestion count %02X, Frame copied errors %02X\n" "Frequency errors %02X, Token errors %02X\n", - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, line_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, internal_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, burst_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)), + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, abort_delimiters)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, lost_frames)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, recv_congest_count)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, frame_copied_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, frequency_errors)), - (int)readb(ti->srb+offsetof(struct srb_read_log, + (int)isa_readb(ti->srb+offsetof(struct srb_read_log, token_errors))); } dev->tbusy=0; @@ -1132,19 +1120,19 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) default: DPRINTK("Unknown command %02X encountered\n", - (int)readb(ti->srb)); + (int)isa_readb(ti->srb)); } /* SRB command check */ - writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); - writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); + isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); 
skip_reset: } /* SRB response */ if (status & ASB_FREE_INT) { /* ASB response */ - switch(readb(ti->asb)) { /* ASB command check */ + switch(isa_readb(ti->asb)) { /* ASB command check */ case REC_DATA: case XMIT_UI_FRAME: @@ -1153,25 +1141,25 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) default: DPRINTK("unknown command in asb %02X\n", - (int)readb(ti->asb)); + (int)isa_readb(ti->asb)); } /* ASB command check */ - if (readb(ti->asb+2)!=0xff) /* checks ret_code */ + if (isa_readb(ti->asb+2)!=0xff) /* checks ret_code */ DPRINTK("ASB error %02X in cmd %02X\n", - (int)readb(ti->asb+2),(int)readb(ti->asb)); - writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + (int)isa_readb(ti->asb+2),(int)isa_readb(ti->asb)); + isa_writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); } /* ASB response */ if (status & ARB_CMD_INT) { /* ARB response */ - switch (readb(ti->arb)) { /* ARB command check */ + switch (isa_readb(ti->arb)) { /* ARB command check */ case DLC_STATUS: DPRINTK("DLC_STATUS new status: %02X on station %02X\n", - ntohs(readw(ti->arb + offsetof(struct arb_dlc_status, status))), - ntohs(readw(ti->arb + ntohs(isa_readw(ti->arb + offsetof(struct arb_dlc_status, status))), + ntohs(isa_readw(ti->arb +offsetof(struct arb_dlc_status, station_id)))); break; @@ -1182,7 +1170,7 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) case RING_STAT_CHANGE: { unsigned short ring_status; - ring_status=ntohs(readw(ti->arb + ring_status=ntohs(isa_readw(ti->arb +offsetof(struct arb_ring_stat_change, ring_status))); if (ring_status & (SIGNAL_LOSS | LOBE_FAULT)) { @@ -1209,46 +1197,46 @@ void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs) default: DPRINTK("Unknown command %02X in arb\n", - (int)readb(ti->arb)); + (int)isa_readb(ti->arb)); break; } /* ARB command check */ - writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + 
isa_writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); } /* ARB response */ if (status & SSB_RESP_INT) { /* SSB response */ unsigned char retcode; - switch (readb(ti->ssb)) { /* SSB command check */ + switch (isa_readb(ti->ssb)) { /* SSB command check */ case XMIT_DIR_FRAME: case XMIT_UI_FRAME: - retcode = readb(ti->ssb+2); + retcode = isa_readb(ti->ssb+2); if (retcode && (retcode != 0x22)) /* checks ret_code */ DPRINTK("xmit ret_code: %02X xmit error code: %02X\n", - (int)retcode, (int)readb(ti->ssb+6)); + (int)retcode, (int)isa_readb(ti->ssb+6)); else ti->tr_stats.tx_packets++; break; case XMIT_XID_CMD: - DPRINTK("xmit xid ret_code: %02X\n", (int)readb(ti->ssb+2)); + DPRINTK("xmit xid ret_code: %02X\n", (int)isa_readb(ti->ssb+2)); default: - DPRINTK("Unknown command %02X in ssb\n", (int)readb(ti->ssb)); + DPRINTK("Unknown command %02X in ssb\n", (int)isa_readb(ti->ssb)); } /* SSB command check */ - writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); } /* SSB response */ } /* SRB, ARB, ASB or SSB response */ dev->interrupt=0; - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); break; case FIRST_INT: @@ -1278,12 +1266,12 @@ static void initial_tok_int(struct net_device *dev) /* we assign the shared-ram address for ISA devices */ if(!ti->sram) { - writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN); + isa_writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN); ti->sram=((__u32)ti->sram_base << 12); } ti->init_srb=ti->sram - +ntohs((unsigned short)readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN)); - SET_PAGE(ntohs((unsigned short)readw(ti->mmio+ACA_OFFSET + 
WRBR_EVEN))); + +ntohs((unsigned short)isa_readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN)); + SET_PAGE(ntohs((unsigned short)isa_readw(ti->mmio+ACA_OFFSET + WRBR_EVEN))); dev->mem_start = ti->sram; dev->mem_end = ti->sram + (ti->mapped_ram_size<<9) - 1; @@ -1292,12 +1280,12 @@ static void initial_tok_int(struct net_device *dev) { int i; DPRINTK("init_srb(%p):", ti->init_srb); - for (i=0;i<17;i++) printk("%02X ", (int)readb(ti->init_srb+i)); + for (i=0;i<17;i++) printk("%02X ", (int)isa_readb(ti->init_srb+i)); printk("\n"); } #endif - hw_encoded_addr = readw(ti->init_srb + hw_encoded_addr = isa_readw(ti->init_srb + offsetof(struct srb_init_response, encoded_address)); #if !TR_NEWFORMAT @@ -1307,7 +1295,7 @@ static void initial_tok_int(struct net_device *dev) #endif encoded_addr=(ti->sram + ntohs(hw_encoded_addr)); - ti->ring_speed = readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4; + ti->ring_speed = isa_readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4; #if !TR_NEWFORMAT DPRINTK("encoded addr (%04X,%04X,%08X): ", hw_encoded_addr, ntohs(hw_encoded_addr), encoded_addr); @@ -1316,12 +1304,12 @@ static void initial_tok_int(struct net_device *dev) ti->ring_speed, ti->sram); #endif - ti->auto_ringspeedsave=readb(ti->init_srb + ti->auto_ringspeedsave=isa_readb(ti->init_srb +offsetof(struct srb_init_response, init_status_2)) & 0x4 ? TRUE : FALSE; #if !TR_NEWFORMAT for(i=0;idev_addr[i]=readb(encoded_addr + i); + dev->dev_addr[i]=isa_readb(encoded_addr + i); printk("%02X%s", dev->dev_addr[i], (i==TR_ALEN-1) ? 
"" : ":" ); } printk("\n"); @@ -1346,10 +1334,10 @@ static int tok_init_card(struct net_device *dev) #ifdef ENABLE_PAGING if(ti->page_mask) - writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); + isa_writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); #endif - writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); + isa_writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); #if !TR_NEWFORMAT DPRINTK("resetting card\n"); @@ -1364,7 +1352,7 @@ static int tok_init_card(struct net_device *dev) #endif ti->open_status=IN_PROGRESS; - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); return 0; } @@ -1375,18 +1363,18 @@ static void open_sap(unsigned char type,struct net_device *dev) SET_PAGE(ti->srb); for (i=0; isrb+i); + isa_writeb(0, ti->srb+i); - writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command)); - writew(htons(MAX_I_FIELD), + isa_writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command)); + isa_writew(htons(MAX_I_FIELD), ti->srb + offsetof(struct dlc_open_sap, max_i_field)); - writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, + isa_writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb + offsetof(struct dlc_open_sap, sap_options)); - writeb(SAP_OPEN_STATION_CNT, + isa_writeb(SAP_OPEN_STATION_CNT, ti->srb + offsetof(struct dlc_open_sap, station_count)); - writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value)); + isa_writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value)); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); } @@ -1403,42 +1391,42 @@ void tok_open_adapter(unsigned long dev_addr) DPRINTK("now opening the board...\n"); #endif - writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); + 
isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD); for (i=0; iinit_srb+i); + isa_writeb(0, ti->init_srb+i); - writeb(DIR_OPEN_ADAPTER, + isa_writeb(DIR_OPEN_ADAPTER, ti->init_srb + offsetof(struct dir_open_adapter, command)); - writew(htons(OPEN_PASS_BCON_MAC), + isa_writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + offsetof(struct dir_open_adapter, open_options)); if (ti->ring_speed == 16) { - writew(htons(ti->dhb_size16mb), + isa_writew(htons(ti->dhb_size16mb), ti->init_srb + offsetof(struct dir_open_adapter, dhb_length)); - writew(htons(ti->rbuf_cnt16), + isa_writew(htons(ti->rbuf_cnt16), ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf)); - writew(htons(ti->rbuf_len16), + isa_writew(htons(ti->rbuf_len16), ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len)); } else { - writew(htons(ti->dhb_size4mb), + isa_writew(htons(ti->dhb_size4mb), ti->init_srb + offsetof(struct dir_open_adapter, dhb_length)); - writew(htons(ti->rbuf_cnt4), + isa_writew(htons(ti->rbuf_cnt4), ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf)); - writew(htons(ti->rbuf_len4), + isa_writew(htons(ti->rbuf_len4), ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len)); } - writeb(NUM_DHB, /* always 2 */ + isa_writeb(NUM_DHB, /* always 2 */ ti->init_srb + offsetof(struct dir_open_adapter, num_dhb)); - writeb(DLC_MAX_SAP, + isa_writeb(DLC_MAX_SAP, ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sap)); - writeb(DLC_MAX_STA, + isa_writeb(DLC_MAX_STA, ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sta)); ti->srb=ti->init_srb; /* We use this one in the interrupt handler */ - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); } @@ 
-1452,7 +1440,7 @@ static void tr_tx(struct net_device *dev) int i; struct trllc *llc; - if (readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF) + if (isa_readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF) DPRINTK("ASB not free !!!\n"); /* in providing the transmit interrupts, @@ -1461,7 +1449,7 @@ static void tr_tx(struct net_device *dev) to stuff with data. Here we compute the effective address where we will place data.*/ dhb=ti->sram - +ntohs(readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address))); + +ntohs(isa_readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address))); /* Figure out the size of the 802.5 header */ if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */ @@ -1472,28 +1460,28 @@ static void tr_tx(struct net_device *dev) llc = (struct trllc *)(ti->current_skb->data + hdr_len); - xmit_command = readb(ti->srb + offsetof(struct srb_xmit, command)); + xmit_command = isa_readb(ti->srb + offsetof(struct srb_xmit, command)); - writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command)); - writew(readb(ti->srb + offsetof(struct srb_xmit, station_id)), + isa_writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command)); + isa_writew(isa_readb(ti->srb + offsetof(struct srb_xmit, station_id)), ti->asb + offsetof(struct asb_xmit_resp, station_id)); - writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value)); - writeb(readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)), + isa_writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value)); + isa_writeb(isa_readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)), ti->asb + offsetof(struct asb_xmit_resp, cmd_corr)); - writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code)); + isa_writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code)); if ((xmit_command==XMIT_XID_CMD) || (xmit_command==XMIT_TEST_CMD)) { - writew(htons(0x11), + isa_writew(htons(0x11), ti->asb + offsetof(struct asb_xmit_resp, frame_length)); - writeb(0x0e, ti->asb + 
offsetof(struct asb_xmit_resp, hdr_length)); - writeb(AC, dhb); - writeb(LLC_FRAME, dhb+1); + isa_writeb(0x0e, ti->asb + offsetof(struct asb_xmit_resp, hdr_length)); + isa_writeb(AC, dhb); + isa_writeb(LLC_FRAME, dhb+1); - for (i=0; immio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); return; } @@ -1502,13 +1490,13 @@ static void tr_tx(struct net_device *dev) * the token ring packet is copied from sk_buff to the adapter * buffer identified in the command data received with the interrupt. */ - writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length)); - writew(htons(ti->current_skb->len), + isa_writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length)); + isa_writew(htons(ti->current_skb->len), ti->asb + offsetof(struct asb_xmit_resp, frame_length)); - memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len); + isa_memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); ti->tr_stats.tx_bytes+=ti->current_skb->len; dev->tbusy=0; dev_kfree_skb(ti->current_skb); @@ -1531,19 +1519,19 @@ static void tr_rx(struct net_device *dev) struct iphdr *iph; rbuffer=(ti->sram - +ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2; + +ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2; - if(readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF) + if(isa_readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF) DPRINTK("ASB not free !!!\n"); - writeb(REC_DATA, + isa_writeb(REC_DATA, ti->asb + offsetof(struct asb_rec, command)); - writew(readw(ti->arb + offsetof(struct arb_rec_req, station_id)), + isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, station_id)), ti->asb + offsetof(struct asb_rec, station_id)); - writew(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)), + 
isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)), ti->asb + offsetof(struct asb_rec, rec_buf_addr)); - lan_hdr_len=readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len)); + lan_hdr_len=isa_readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len)); hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr); llc=(rbuffer + offsetof(struct rec_buf, data) + lan_hdr_len); @@ -1552,28 +1540,28 @@ static void tr_rx(struct net_device *dev) DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n", (unsigned int)offsetof(struct rec_buf,data), (unsigned int)lan_hdr_len); DPRINTK("llc: %08X rec_buf_addr: %04X ti->sram: %p\n", llc, - ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))), + ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))), ti->sram); DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, " "ethertype: %04X\n", - (int)readb(llc + offsetof(struct trllc, dsap)), - (int)readb(llc + offsetof(struct trllc, ssap)), - (int)readb(llc + offsetof(struct trllc, llc)), - (int)readb(llc + offsetof(struct trllc, protid)), - (int)readb(llc + offsetof(struct trllc, protid)+1), - (int)readb(llc + offsetof(struct trllc, protid)+2), - (int)readw(llc + offsetof(struct trllc, ethertype))); + (int)isa_readb(llc + offsetof(struct trllc, dsap)), + (int)isa_readb(llc + offsetof(struct trllc, ssap)), + (int)isa_readb(llc + offsetof(struct trllc, llc)), + (int)isa_readb(llc + offsetof(struct trllc, protid)), + (int)isa_readb(llc + offsetof(struct trllc, protid)+1), + (int)isa_readb(llc + offsetof(struct trllc, protid)+2), + (int)isa_readw(llc + offsetof(struct trllc, ethertype))); #endif - if (readb(llc + offsetof(struct trllc, llc))!=UI_CMD) { - writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); + if (isa_readb(llc + offsetof(struct trllc, llc))!=UI_CMD) { + isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); ti->tr_stats.rx_dropped++; - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + 
ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); return; } - length = ntohs(readw(ti->arb+offsetof(struct arb_rec_req, frame_len))); - if ((readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) && - (readb(llc + offsetof(struct trllc, ssap))==EXTENDED_SAP) && + length = ntohs(isa_readw(ti->arb+offsetof(struct arb_rec_req, frame_len))); + if ((isa_readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) && + (isa_readb(llc + offsetof(struct trllc, ssap))==EXTENDED_SAP) && (length>=hdr_len)) { IPv4_p = 1; } @@ -1588,20 +1576,20 @@ static void tr_rx(struct net_device *dev) DPRINTK("Probably non-IP frame received.\n"); DPRINTK("ssap: %02X dsap: %02X saddr: %02X:%02X:%02X:%02X:%02X:%02X " "daddr: %02X:%02X:%02X:%02X:%02X:%02X\n", - (int)readb(llc + offsetof(struct trllc, ssap)), - (int)readb(llc + offsetof(struct trllc, dsap)), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+1), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+2), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+3), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+4), - (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+5), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+1), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+2), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+3), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+4), - (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+5)); + (int)isa_readb(llc + offsetof(struct trllc, ssap)), + (int)isa_readb(llc + offsetof(struct trllc, dsap)), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+1), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+2), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+3), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+4), + 
(int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+5), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+1), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+2), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+3), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+4), + (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+5)); } #endif @@ -1610,8 +1598,8 @@ static void tr_rx(struct net_device *dev) if (!(skb=dev_alloc_skb(skb_size))) { DPRINTK("out of memory. frame dropped.\n"); ti->tr_stats.rx_dropped++; - writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); return; } @@ -1619,12 +1607,12 @@ static void tr_rx(struct net_device *dev) skb_reserve(skb, sizeof(struct trh_hdr)-lan_hdr_len+sizeof(struct trllc)); skb->dev=dev; data=skb->data; - rbuffer_len=ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len))); + rbuffer_len=ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len))); rbufdata = rbuffer + offsetof(struct rec_buf,data); if (IPv4_p) { /* Copy the headers without checksumming */ - memcpy_fromio(data, rbufdata, hdr_len); + isa_memcpy_fromio(data, rbufdata, hdr_len); /* Watch for padded packets and bogons */ iph=(struct iphdr*)(data + lan_hdr_len + sizeof(struct trllc)); @@ -1644,20 +1632,20 @@ static void tr_rx(struct net_device *dev) length < rbuffer_len ? 
length : rbuffer_len, chksum); else - memcpy_fromio(data, rbufdata, rbuffer_len); - rbuffer = ntohs(readw(rbuffer)); + isa_memcpy_fromio(data, rbufdata, rbuffer_len); + rbuffer = ntohs(isa_readw(rbuffer)); if (!rbuffer) break; length -= rbuffer_len; data += rbuffer_len; rbuffer += ti->sram; - rbuffer_len = ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len))); + rbuffer_len = ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len))); rbufdata = rbuffer + offsetof(struct rec_buf, data); } - writeb(0, ti->asb + offsetof(struct asb_rec, ret_code)); + isa_writeb(0, ti->asb + offsetof(struct asb_rec, ret_code)); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); ti->tr_stats.rx_bytes += skb->len; ti->tr_stats.rx_packets++; @@ -1696,10 +1684,10 @@ static int tok_send_packet(struct sk_buff *skb, struct net_device *dev) /* Save skb; we'll need it when the adapter asks for the data */ ti->current_skb=skb; - writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command)); - writew(ti->exsap_station_id, ti->srb + isa_writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command)); + isa_writew(ti->exsap_station_id, ti->srb +offsetof(struct srb_xmit, station_id)); - writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD)); + isa_writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD)); spin_unlock_irqrestore(&(ti->lock), flags); dev->trans_start=jiffies; @@ -1721,9 +1709,9 @@ void ibmtr_readlog(struct net_device *dev) { ti=(struct tok_info *) dev->priv; ti->readlog_pending = 0; - writeb(DIR_READ_LOG, ti->srb); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + isa_writeb(DIR_READ_LOG, ti->srb); + isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); dev->tbusy=1; /* really srb busy... 
*/ } diff --git a/drivers/net/tokenring/ibmtr.h b/drivers/net/tokenring/ibmtr.h index 3f3fa6aedd02..769af277ab10 100644 --- a/drivers/net/tokenring/ibmtr.h +++ b/drivers/net/tokenring/ibmtr.h @@ -162,7 +162,7 @@ #define ACA_RW 0x00 #ifdef ENABLE_PAGING -#define SET_PAGE(x) (writeb(((x>>8)&ti.page_mask), \ +#define SET_PAGE(x) (isa_writeb(((x>>8)&ti.page_mask), \ ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN)) #else #define SET_PAGE(x) diff --git a/drivers/pci/names.c b/drivers/pci/names.c index ec85e07a8d7c..9018e8976630 100644 --- a/drivers/pci/names.c +++ b/drivers/pci/names.c @@ -5,6 +5,7 @@ * David Mosberger-Tang, Martin Mares */ +#include #include #include #include diff --git a/drivers/scsi/Config.in b/drivers/scsi/Config.in index 621045989c3c..5939548ba28b 100644 --- a/drivers/scsi/Config.in +++ b/drivers/scsi/Config.in @@ -10,8 +10,12 @@ dep_tristate ' SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs' -bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + bool 'Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES +fi +bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN + bool ' Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS bool ' SCSI logging facility' CONFIG_SCSI_LOGGING diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index ed78f8cf9340..e6d51509ba73 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -40,8 +40,8 @@ ifeq ($(CONFIG_SCSI),y) OX_OBJS := scsi_syms.o endif L_OBJS += scsi_n_syms.o hosts.o scsi_ioctl.o constants.o scsicam.o - L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o - L_OBJS += scsi_proc.o + L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o + L_OBJS += scsi_merge.o scsi_proc.o else ifeq ($(CONFIG_SCSI),m) MIX_OBJS += scsi_syms.o @@ -721,10 +721,11 @@ megaraid.o: megaraid.c $(CC) $(CFLAGS) -c megaraid.c scsi_mod.o: $(MIX_OBJS) 
hosts.o scsi.o scsi_ioctl.o constants.o \ - scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o scsi_queue.o + scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o \ + scsi_queue.o scsi_lib.o scsi_merge.o $(LD) $(LD_RFLAG) -r -o $@ $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o \ - constants.o scsicam.o scsi_proc.o \ - scsi_error.o scsi_obsolete.o scsi_queue.o \ + constants.o scsicam.o scsi_proc.o scsi_merge.o \ + scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o sr_mod.o: sr.o sr_ioctl.o sr_vendor.o $(LD) $(LD_RFLAG) -r -o $@ sr.o sr_ioctl.o sr_vendor.o diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index ef18e4f9aa1e..afcc4d1a5a4c 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -1,5 +1,5 @@ -/* $Id: advansys.c,v 1.67 1999/11/18 20:13:15 bobf Exp bobf $ */ -#define ASC_VERSION "3.2K" /* AdvanSys Driver Version */ +/* $Id: advansys.c,v 1.68 1999/11/19 01:57:47 bobf Exp bobf $ */ +#define ASC_VERSION "3.2L" /* AdvanSys Driver Version */ /* * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters @@ -668,6 +668,11 @@ 4. Increase Wide board scatter-gather list maximum length to 255 when the driver is compiled into the kernel. + 3.2L (11/18/99): + 1. Fix bug in adv_get_sglist() that caused an assertion failure + at line 7475. The reqp->sgblkp pointer must be initialized + to NULL in adv_get_sglist(). + J. Known Problems/Fix List (XXX) 1. Need to add memory mapping workaround. Test the memory mapping. 
@@ -7471,8 +7476,8 @@ adv_get_sglist(asc_board_t *boardp, adv_req_t *reqp, Scsi_Cmnd *scp) slp = (struct scatterlist *) scp->request_buffer; sg_elem_cnt = scp->use_sg; prev_sg_block = NULL; + reqp->sgblkp = NULL; - ASC_ASSERT(reqp->sgblkp == NULL); do { /* diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 6f9b2e480e7a..e3d3f0cb27e6 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -1292,7 +1292,9 @@ int aha1542_bus_reset(Scsi_Cmnd * SCpnt) * check for timeout, and if we are doing something like this * we are pretty desperate anyways. */ + spin_unlock_irq(&io_request_lock); scsi_sleep(4*HZ); + spin_lock_irq(&io_request_lock); WAIT(STATUS(SCpnt->host->io_port), STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF); @@ -1359,7 +1361,9 @@ int aha1542_host_reset(Scsi_Cmnd * SCpnt) * check for timeout, and if we are doing something like this * we are pretty desperate anyways. */ + spin_unlock_irq(&io_request_lock); scsi_sleep(4*HZ); + spin_lock_irq(&io_request_lock); WAIT(STATUS(SCpnt->host->io_port), STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF); diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index a8ff8703ac97..c0b3f3a62a3b 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -36,47 +36,59 @@ void mydlyu(unsigned int); * static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/atp870u.c,v 1.0 1997/05/07 15:22:00 root Exp root $"; */ -static unsigned char admaxu = 1, host_idu[2], chip_veru[2], scam_on[2], global_map[2]; -static unsigned short int active_idu[2], wide_idu[2], sync_idu, ultra_map[2]; -static int workingu[2] = {0, 0}; +static unsigned char admaxu = 1; +static unsigned short int sync_idu; -static Scsi_Cmnd *querequ[2][qcnt], *curr_req[2][16]; - -static unsigned char devspu[2][16] = { - {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}, - {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20} -}; - -static unsigned char dirctu[2][16], last_cmd[2], in_snd[2], in_int[2]; -static unsigned char ata_cdbu[2][16]; -static unsigned int ioportu[2] = {0, 0}; static unsigned int irqnumu[2] = {0, 0}; -static unsigned short int pciportu[2]; -static unsigned long prdaddru[2][16], tran_lenu[2][16], last_lenu[2][16]; -static unsigned char prd_tableu[2][16][1024]; -static unsigned char *prd_posu[2][16]; -static unsigned char quhdu[2], quendu[2]; -static unsigned char devtypeu[2][16] = +struct atp_unit { - {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + unsigned long ioport; + unsigned long irq; + unsigned long pciport; + unsigned char last_cmd; + unsigned char in_snd; + unsigned char in_int; + unsigned char quhdu; + unsigned char quendu; + unsigned char scam_on; + unsigned char global_map; + unsigned char chip_veru; + unsigned char host_idu; + int working; + unsigned short wide_idu; + unsigned short active_idu; + unsigned short ultra_map; + unsigned char ata_cdbu[16]; + Scsi_Cmnd *querequ[qcnt]; + struct atp_id + { + unsigned char dirctu; + unsigned char devspu; + unsigned char devtypeu; + unsigned long prdaddru; + unsigned long tran_lenu; + unsigned long last_lenu; + unsigned char *prd_posu; + unsigned char *prd_tableu; + Scsi_Cmnd *curr_req; + } id[16]; }; static struct Scsi_Host *atp_host[2] = {NULL, NULL}; +static struct atp_unit atp_unit[2]; static void atp870u_intr_handle(int irq, void *dev_id, struct pt_regs *regs) { unsigned long flags; unsigned short int tmpcip, id; - unsigned char i, j, h, tarid, lun; + unsigned char i, j, h, target_id, lun; unsigned char *prd; Scsi_Cmnd *workrequ; unsigned int workportu, tmport; unsigned long adrcntu, k; int errstus; + struct atp_unit *dev = dev_id; for (h = 0; h < 2; h++) { if (irq == irqnumu[h]) { @@ -85,59 +97,84 @@ static void atp870u_intr_handle(int irq, void *dev_id, struct pt_regs *regs) } return; irq_numok: - in_int[h] = 1; - workportu = ioportu[h]; + 
dev->in_int = 1; + workportu = dev->ioport; tmport = workportu; - - if (workingu[h] != 0) + + if (dev->working != 0) { tmport += 0x1f; j = inb(tmport); - tmpcip = pciportu[h]; + tmpcip = dev->pciport; if ((inb(tmpcip) & 0x08) != 0) { tmpcip += 0x2; - while ((inb(tmpcip) & 0x08) != 0); + for (k=0; k < 1000; k++) + { + if ((inb(tmpcip) & 0x08) == 0) + { + goto stop_dma; + } + if ((inb(tmpcip) & 0x01) == 0) + { + goto stop_dma; + } + } } - tmpcip = pciportu[h]; +stop_dma: + tmpcip = dev->pciport; outb(0x00, tmpcip); tmport -= 0x08; i = inb(tmport); if ((j & 0x40) == 0) { - if ((last_cmd[h] & 0x40) == 0) + if ((dev->last_cmd & 0x40) == 0) { - last_cmd[h] = 0xff; + dev->last_cmd = 0xff; } } - else last_cmd[h] |= 0x40; + else dev->last_cmd |= 0x40; tmport -= 0x02; - tarid = inb(tmport); + target_id = inb(tmport); tmport += 0x02; - if ((tarid & 0x40) != 0) { - tarid = (tarid & 0x07) | 0x08; + /* + * Remap wide devices onto id numbers + */ + + if ((target_id & 0x40) != 0) { + target_id = (target_id & 0x07) | 0x08; } else { - tarid &= 0x07; + target_id &= 0x07; } + if (i == 0x85) { - if (wide_idu[h] != 0) + /* + * Flip wide + */ + if (dev->wide_idu != 0) { tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; j |= 0x01; outb(j, tmport); } - if (((quhdu[h] != quendu[h]) || (last_cmd[h] != 0xff)) && - (in_snd[h] == 0)) + /* + * Issue more commands + */ + if (((dev->quhdu != dev->quendu) || (dev->last_cmd != 0xff)) && + (dev->in_snd == 0)) { send_s870(h); } - in_int[h] = 0; + /* + * Done + */ + dev->in_int = 0; return; } if (i == 0x21) @@ -147,15 +184,15 @@ irq_numok: ((unsigned char *) &adrcntu)[2] = inb(tmport++); ((unsigned char *) &adrcntu)[1] = inb(tmport++); ((unsigned char *) &adrcntu)[0] = inb(tmport); - k = last_lenu[h][tarid]; + k = dev->id[target_id].last_lenu; k -= adrcntu; - tran_lenu[h][tarid] = k; - last_lenu[h][tarid] = adrcntu; + dev->id[target_id].tran_lenu = k; + dev->id[target_id].last_lenu = adrcntu; tmport -= 0x04; outb(0x41, tmport); tmport += 0x08; 
outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } if ((i == 0x80) || (i == 0x8f)) @@ -163,7 +200,7 @@ irq_numok: lun = 0; tmport -= 0x07; j = inb(tmport); - if (j == 0x44) { + if (j == 0x44 || i==0x80) { tmport += 0x0d; lun = inb(tmport) & 0x07; } else { @@ -174,71 +211,80 @@ irq_numok: ((unsigned char *) &adrcntu)[2] = inb(tmport++); ((unsigned char *) &adrcntu)[1] = inb(tmport++); ((unsigned char *) &adrcntu)[0] = inb(tmport); - k = last_lenu[h][tarid]; + k = dev->id[target_id].last_lenu; k -= adrcntu; - tran_lenu[h][tarid] = k; - last_lenu[h][tarid] = adrcntu; + dev->id[target_id].tran_lenu = k; + dev->id[target_id].last_lenu = adrcntu; tmport += 0x04; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } else { outb(0x46, tmport); - dirctu[h][tarid] = 0x00; + dev->id[target_id].dirctu = 0x00; tmport += 0x02; outb(0x00, tmport++); outb(0x00, tmport++); outb(0x00, tmport++); tmport += 0x03; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } } tmport = workportu + 0x10; outb(0x45, tmport); tmport += 0x06; - tarid = inb(tmport); - if ((tarid & 0x10) != 0) + target_id = inb(tmport); + /* + * Remap wide identifiers + */ + if ((target_id & 0x10) != 0) { - tarid = (tarid & 0x07) | 0x08; + target_id = (target_id & 0x07) | 0x08; } else { - tarid &= 0x07; + target_id &= 0x07; } - workrequ = curr_req[h][tarid]; + workrequ = dev->id[target_id].curr_req; tmport = workportu + 0x0f; outb(lun, tmport); tmport += 0x02; - outb(devspu[h][tarid], tmport++); - adrcntu = tran_lenu[h][tarid]; - k = last_lenu[h][tarid]; + outb(dev->id[target_id].devspu, tmport++); + adrcntu = dev->id[target_id].tran_lenu; + k = dev->id[target_id].last_lenu; outb(((unsigned char *) &k)[2], tmport++); outb(((unsigned char *) &k)[1], tmport++); outb(((unsigned char *) &k)[0], tmport++); - j = tarid; - if (tarid > 7) { + /* Remap wide */ + j = target_id; + if (target_id > 7) { j = (j & 0x07) | 0x40; } - j |= dirctu[h][tarid]; + /* Add direction */ + j |= 
dev->id[target_id].dirctu; outb(j, tmport++); outb(0x80, tmport); tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; id = 1; - id = id << tarid; - if ((id & wide_idu[h]) != 0) { + id = id << target_id; + /* + * Is this a wide device + */ + if ((id & dev->wide_idu) != 0) { j |= 0x01; } outb(j, tmport); - if (last_lenu[h][tarid] == 0) { + + if (dev->id[target_id].last_lenu == 0) { tmport = workportu + 0x18; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } - prd = prd_posu[h][tarid]; + prd = dev->id[target_id].prd_posu; while (adrcntu != 0) { id = ((unsigned short int *) (prd))[2]; @@ -252,35 +298,44 @@ irq_numok: (k - adrcntu); ((unsigned long *) (prd))[0] += adrcntu; adrcntu = 0; - prd_posu[h][tarid] = prd; + dev->id[target_id].prd_posu = prd; } else { adrcntu -= k; - prdaddru[h][tarid] += 0x08; + dev->id[target_id].prdaddru += 0x08; prd += 0x08; if (adrcntu == 0) { - prd_posu[h][tarid] = prd; + dev->id[target_id].prd_posu = prd; } } } - tmpcip = pciportu[h] + 0x04; - outl(prdaddru[h][tarid], tmpcip); + tmpcip = dev->pciport + 0x04; + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip -= 0x02; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip -= 0x02; tmport = workportu + 0x18; - if (dirctu[h][tarid] != 0) { + /* + * Check transfer direction + */ + if (dev->id[target_id].dirctu != 0) { outb(0x08, tmport); outb(0x01, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } outb(0x08, tmport); outb(0x09, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } - workrequ = curr_req[h][tarid]; + + /* + * Current scsi request on this target + */ + + workrequ = dev->id[target_id].curr_req; + if (i == 0x42) { errstus = 0x02; workrequ->result = errstus; @@ -293,24 +348,36 @@ irq_numok: errstus = inb(tmport); workrequ->result = errstus; go_42: + /* + * Complete the command + */ spin_lock_irqsave(&io_request_lock, flags); (*workrequ->scsi_done) (workrequ); spin_unlock_irqrestore(&io_request_lock, flags); - curr_req[h][tarid] = 0; - workingu[h]--; - if (wide_idu[h] 
!= 0) { + /* + * Clear it off the queue + */ + dev->id[target_id].curr_req = 0; + dev->working--; + /* + * Take it back wide + */ + if (dev->wide_idu != 0) { tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; j |= 0x01; outb(j, tmport); } - if (((last_cmd[h] != 0xff) || (quhdu[h] != quendu[h])) && - (in_snd[h] == 0)) + /* + * If there is stuff to send and nothing going then send it + */ + if (((dev->last_cmd != 0xff) || (dev->quhdu != dev->quendu)) && + (dev->in_snd == 0)) { send_s870(h); } - in_int[h] = 0; + dev->in_int = 0; return; } if (i == 0x4f) { @@ -319,23 +386,23 @@ go_42: i &= 0x0f; if (i == 0x09) { tmpcip = tmpcip + 4; - outl(prdaddru[h][tarid], tmpcip); + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip = tmpcip - 2; tmport = workportu + 0x10; outb(0x41, tmport); - dirctu[h][tarid] = 0x00; + dev->id[target_id].dirctu = 0x00; tmport += 0x08; outb(0x08, tmport); outb(0x09, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } if (i == 0x08) { tmpcip = tmpcip + 4; - outl(prdaddru[h][tarid], tmpcip); + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); @@ -344,11 +411,11 @@ go_42: outb(0x41, tmport); tmport += 0x05; outb((unsigned char) (inb(tmport) | 0x20), tmport); - dirctu[h][tarid] = 0x20; + dev->id[target_id].dirctu = 0x20; tmport += 0x03; outb(0x08, tmport); outb(0x01, tmpcip); - in_int[h] = 0; + dev->in_int = 0; return; } tmport -= 0x07; @@ -357,20 +424,20 @@ go_42: } else { outb(0x46, tmport); } - dirctu[h][tarid] = 0x00; + dev->id[target_id].dirctu = 0x00; tmport += 0x02; outb(0x00, tmport++); outb(0x00, tmport++); outb(0x00, tmport++); tmport += 0x03; outb(0x08, tmport); - in_int[h] = 0; + dev->in_int = 0; return; } else { tmport = workportu + 0x17; inb(tmport); - workingu[h] = 0; - in_int[h] = 0; + dev->working = 0; + dev->in_int = 0; return; } } @@ -381,6 +448,7 @@ int atp870u_queuecommand(Scsi_Cmnd * req_p, void (*done) (Scsi_Cmnd 
*)) unsigned long flags; unsigned short int m; unsigned int tmport; + struct atp_unit *dev; for (h = 0; h <= admaxu; h++) { if (req_p->host == atp_host[h]) { @@ -394,9 +462,15 @@ host_ok: done(req_p); return 0; } + dev = &atp_unit[h]; m = 1; m = m << req_p->target; - if ((m & active_idu[h]) == 0) { + + /* + * Fake a timeout for missing targets + */ + + if ((m & dev->active_idu) == 0) { req_p->result = 0x00040000; done(req_p); return 0; @@ -404,30 +478,36 @@ host_ok: if (done) { req_p->scsi_done = done; } else { - printk("atp870u_queuecommand: done can't be NULL\n"); + printk(KERN_WARNING "atp870u_queuecommand: done can't be NULL\n"); req_p->result = 0; done(req_p); return 0; } - quendu[h]++; - if (quendu[h] >= qcnt) { - quendu[h] = 0; + /* + * Count new command + */ + dev->quendu++; + if (dev->quendu >= qcnt) { + dev->quendu = 0; } + /* + * Check queue state + */ wait_que_empty: - if (quhdu[h] == quendu[h]) { + if (dev->quhdu == dev->quendu) { goto wait_que_empty; } save_flags(flags); cli(); - querequ[h][quendu[h]] = req_p; - if (quendu[h] == 0) { + dev->querequ[dev->quendu] = req_p; + if (dev->quendu == 0) { i = qcnt - 1; } else { - i = quendu[h] - 1; + i = dev->quendu - 1; } - tmport = ioportu[h] + 0x1c; + tmport = dev->ioport + 0x1c; restore_flags(flags); - if ((inb(tmport) == 0) && (in_int[h] == 0) && (in_snd[h] == 0)) { + if ((inb(tmport) == 0) && (dev->in_int == 0) && (dev->in_snd == 0)) { send_s870(h); } return 0; @@ -447,44 +527,45 @@ void send_s870(unsigned char h) Scsi_Cmnd *workrequ; unsigned long flags; unsigned int i; - unsigned char j, tarid; + unsigned char j, target_id; unsigned char *prd; unsigned short int tmpcip, w; unsigned long l, bttl; unsigned int workportu; struct scatterlist *sgpnt; + struct atp_unit *dev = &atp_unit[h]; save_flags(flags); cli(); - if (in_snd[h] != 0) { + if (dev->in_snd != 0) { restore_flags(flags); return; } - in_snd[h] = 1; - if ((last_cmd[h] != 0xff) && ((last_cmd[h] & 0x40) != 0)) { - last_cmd[h] &= 0x0f; - workrequ = 
curr_req[h][last_cmd[h]]; + dev->in_snd = 1; + if ((dev->last_cmd != 0xff) && ((dev->last_cmd & 0x40) != 0)) { + dev->last_cmd &= 0x0f; + workrequ = dev->id[dev->last_cmd].curr_req; goto cmd_subp; } - workingu[h]++; - j = quhdu[h]; - quhdu[h]++; - if (quhdu[h] >= qcnt) { - quhdu[h] = 0; + dev->working++; + j = dev->quhdu; + dev->quhdu++; + if (dev->quhdu >= qcnt) { + dev->quhdu = 0; } - workrequ = querequ[h][quhdu[h]]; - if (curr_req[h][workrequ->target] == 0) { - curr_req[h][workrequ->target] = workrequ; - last_cmd[h] = workrequ->target; + workrequ = dev->querequ[dev->quhdu]; + if (dev->id[workrequ->target].curr_req == 0) { + dev->id[workrequ->target].curr_req = workrequ; + dev->last_cmd = workrequ->target; goto cmd_subp; } - quhdu[h] = j; - workingu[h]--; - in_snd[h] = 0; + dev->quhdu = j; + dev->working--; + dev->in_snd = 0; restore_flags(flags); return; cmd_subp: - workportu = ioportu[h]; + workportu = dev->ioport; tmport = workportu + 0x1f; if ((inb(tmport) & 0xb0) != 0) { goto abortsnd; @@ -494,43 +575,63 @@ cmd_subp: goto oktosend; } abortsnd: - last_cmd[h] |= 0x40; - in_snd[h] = 0; + dev->last_cmd |= 0x40; + dev->in_snd = 0; restore_flags(flags); return; oktosend: - memcpy(&ata_cdbu[h][0], &workrequ->cmnd[0], workrequ->cmd_len); - if (ata_cdbu[h][0] == 0x25) { + memcpy(&dev->ata_cdbu[0], &workrequ->cmnd[0], workrequ->cmd_len); + if (dev->ata_cdbu[0] == READ_CAPACITY) { if (workrequ->request_bufflen > 8) { workrequ->request_bufflen = 0x08; } } - if (ata_cdbu[h][0] == 0x12) { + /* + * Why limit this ???? + */ + if (dev->ata_cdbu[0] == INQUIRY) { if (workrequ->request_bufflen > 0x24) { workrequ->request_bufflen = 0x24; - ata_cdbu[h][4] = 0x24; + dev->ata_cdbu[4] = 0x24; } } + tmport = workportu + 0x1b; j = inb(tmport) & 0x0e; - tarid = workrequ->target; + target_id = workrequ->target; + + /* + * Wide ? 
+ */ w = 1; - w = w << tarid; - if ((w & wide_idu[h]) != 0) { + w = w << target_id; + if ((w & dev->wide_idu) != 0) { j |= 0x01; - } + } outb(j, tmport); + + /* + * Write the command + */ + tmport = workportu; outb(workrequ->cmd_len, tmport++); outb(0x2c, tmport++); outb(0xcf, tmport++); for (i = 0; i < workrequ->cmd_len; i++) { - outb(ata_cdbu[h][i], tmport++); + outb(dev->ata_cdbu[i], tmport++); } tmport = workportu + 0x0f; - outb(0x00, tmport); + outb(workrequ->lun, tmport); tmport += 0x02; - outb(devspu[h][tarid], tmport++); + /* + * Write the target + */ + outb(dev->id[target_id].devspu, tmport++); + + /* + * Figure out the transfer size + */ if (workrequ->use_sg) { l = 0; @@ -546,38 +647,54 @@ oktosend: } else { l = workrequ->request_bufflen; } + /* + * Write transfer size + */ outb((unsigned char) (((unsigned char *) (&l))[2]), tmport++); outb((unsigned char) (((unsigned char *) (&l))[1]), tmport++); outb((unsigned char) (((unsigned char *) (&l))[0]), tmport++); - j = tarid; - last_lenu[h][j] = l; - tran_lenu[h][j] = 0; + j = target_id; + dev->id[j].last_lenu = l; + dev->id[j].tran_lenu = 0; + /* + * Flip the wide bits + */ if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } - if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) || - (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15)) { + /* + * Check transfer direction + */ + if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) || + (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT)) { outb((unsigned char) (j | 0x20), tmport++); } else { outb(j, tmport++); } + outb((unsigned char)(inb(tmport) | 0x80),tmport); outb(0x80, tmport); tmport = workportu + 0x1c; - dirctu[h][tarid] = 0; + dev->id[target_id].dirctu = 0; if (l == 0) { if (inb(tmport) == 0) { tmport = workportu + 0x18; outb(0x08, tmport); } else { - last_cmd[h] |= 0x40; + dev->last_cmd |= 0x40; } - in_snd[h] = 0; + dev->in_snd = 0; restore_flags(flags); return; } - tmpcip = pciportu[h]; - prd = 
&prd_tableu[h][tarid][0]; - prd_posu[h][tarid] = prd; + tmpcip = dev->pciport; + prd = dev->id[target_id].prd_tableu; + dev->id[target_id].prd_posu = prd; + + /* + * Now write the request list. Either as scatter/gather or as + * a linear chain. + */ + if (workrequ->use_sg) { sgpnt = (struct scatterlist *) workrequ->request_buffer; @@ -590,6 +707,9 @@ oktosend: } (unsigned short int) (((unsigned short int *) (prd))[i - 1]) = 0x8000; } else { + /* + * For a linear request write a chain of blocks + */ bttl = virt_to_bus(workrequ->request_buffer); l = workrequ->request_bufflen; i = 0; @@ -606,24 +726,24 @@ oktosend: (unsigned long) (((unsigned long *) (prd))[i >> 1]) = bttl; } tmpcip = tmpcip + 4; - prdaddru[h][tarid] = virt_to_bus(&prd_tableu[h][tarid][0]); - outl(prdaddru[h][tarid], tmpcip); + dev->id[target_id].prdaddru = virt_to_bus(dev->id[target_id].prd_tableu); + outl(dev->id[target_id].prdaddru, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip = tmpcip - 2; - if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) || - (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15)) + if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) || + (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT)) { - dirctu[h][tarid] = 0x20; + dev->id[target_id].dirctu = 0x20; if (inb(tmport) == 0) { tmport = workportu + 0x18; outb(0x08, tmport); outb(0x01, tmpcip); } else { - last_cmd[h] |= 0x40; + dev->last_cmd |= 0x40; } - in_snd[h] = 0; + dev->in_snd = 0; restore_flags(flags); return; } @@ -633,9 +753,9 @@ oktosend: outb(0x08, tmport); outb(0x09, tmpcip); } else { - last_cmd[h] |= 0x40; + dev->last_cmd |= 0x40; } - in_snd[h] = 0; + dev->in_snd = 0; restore_flags(flags); return; @@ -657,13 +777,13 @@ int atp870u_command(Scsi_Cmnd * SCpnt) return SCpnt->result; } -unsigned char fun_scam(unsigned char host, unsigned short int *val) +unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val) { unsigned int tmport; 
unsigned short int i, k; unsigned char j; - tmport = ioportu[host] + 0x1c; + tmport = dev->ioport + 0x1c; outw(*val, tmport); FUN_D7: for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ @@ -706,32 +826,34 @@ void tscam(unsigned char host) unsigned long n; unsigned short int m, assignid_map, val; unsigned char mbuf[33], quintet[2]; - static unsigned char g2q_tab[8] = - {0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27}; + struct atp_unit *dev = &atp_unit[host]; + static unsigned char g2q_tab[8] = { + 0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27 + }; for (i = 0; i < 0x10; i++) { mydlyu(0xffff); } - tmport = ioportu[host] + 1; + tmport = dev->ioport + 1; outb(0x08, tmport++); outb(0x7f, tmport); - tmport = ioportu[host] + 0x11; + tmport = dev->ioport + 0x11; outb(0x20, tmport); - if ((scam_on[host] & 0x40) == 0) { + if ((dev->scam_on & 0x40) == 0) { return; } m = 1; - m <<= host_idu[host]; + m <<= dev->host_idu; j = 16; - if (chip_veru[host] < 4) { + if (dev->chip_veru < 4) { m |= 0xff00; j = 8; } assignid_map = m; - tmport = ioportu[host] + 0x02; + tmport = dev->ioport + 0x02; outb(0x02, tmport++); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ outb(0, tmport++); outb(0, tmport++); @@ -746,7 +868,7 @@ void tscam(unsigned char host) if ((m & assignid_map) != 0) { continue; } - tmport = ioportu[host] + 0x0f; + tmport = dev->ioport + 0x0f; outb(0, tmport++); tmport += 0x02; outb(0, tmport++); @@ -758,14 +880,14 @@ void tscam(unsigned char host) k = i; } outb(k, tmport++); - tmport = ioportu[host] + 0x1b; - if (chip_veru[host] == 4) { + tmport = dev->ioport + 0x1b; + if (dev->chip_veru == 4) { outb((unsigned char) ((inb(tmport) & 0x0e) | 0x01), tmport); } else { outb((unsigned char) (inb(tmport) & 0x0e), tmport); } wait_rdyok: - tmport = ioportu[host] + 0x18; + tmport = dev->ioport + 0x18; outb(0x09, tmport); tmport += 0x07; @@ -776,22 +898,22 @@ wait_rdyok: if ((k == 0x85) || (k == 0x42)) { continue; } - tmport = ioportu[host] + 0x10; + tmport = dev->ioport + 0x10; 
outb(0x41, tmport); goto wait_rdyok; } assignid_map |= m; } - tmport = ioportu[host] + 0x02; + tmport = dev->ioport + 0x02; outb(0x7f, tmport); - tmport = ioportu[host] + 0x1b; + tmport = dev->ioport + 0x1b; outb(0x02, tmport); outb(0, 0x80); val = 0x0080; /* bsy */ - tmport = ioportu[host] + 0x1c; + tmport = dev->ioport + 0x1c; outw(val, tmport); val |= 0x0040; /* sel */ outw(val, tmport); @@ -836,7 +958,7 @@ TCM_SYNC: if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */ outw(0, tmport--); outb(0, tmport); - tmport = ioportu[host] + 0x15; + tmport = dev->ioport + 0x15; outb(0, tmport); tmport += 0x03; outb(0x09, tmport); @@ -848,11 +970,11 @@ TCM_SYNC: } val &= 0x00ff; /* synchronization */ val |= 0x3f00; - fun_scam(host, &val); + fun_scam(dev, &val); outb(3, 0x80); val &= 0x00ff; /* isolation */ val |= 0x2000; - fun_scam(host, &val); + fun_scam(dev, &val); outb(4, 0x80); i = 8; j = 0; @@ -863,7 +985,7 @@ TCM_ID: outb(5, 0x80); val &= 0x00ff; /* get ID_STRING */ val |= 0x2000; - k = fun_scam(host, &val); + k = fun_scam(dev, &val); if ((k & 0x03) == 0) { goto TCM_5; } @@ -927,11 +1049,11 @@ G2Q_QUIN: /* k=binID#, */ val &= 0x00ff; /* AssignID 1stQuintet,AH=001xxxxx */ m = quintet[0] << 8; val |= m; - fun_scam(host, &val); + fun_scam(dev, &val); val &= 0x00ff; /* AssignID 2ndQuintet,AH=001xxxxx */ m = quintet[1] << 8; val |= m; - fun_scam(host, &val); + fun_scam(dev, &val); goto TCM_SYNC; @@ -949,25 +1071,26 @@ void is870(unsigned long host, unsigned int wkport) static unsigned char synu[6] = {0x80, 1, 3, 1, 0x0c, 0x0e}; static unsigned char synw[6] = {0x80, 1, 3, 1, 0x0c, 0x07}; static unsigned char wide[6] = {0x80, 1, 2, 3, 1, 0}; + struct atp_unit *dev = &atp_unit[host]; sync_idu = 0; tmport = wkport + 0x3a; outb((unsigned char) (inb(tmport) | 0x10), tmport); for (i = 0; i < 16; i++) { - if ((chip_veru[host] != 4) && (i > 7)) { + if ((dev->chip_veru != 4) && (i > 7)) { break; } m = 1; m = m << i; - if ((m & active_idu[host]) != 0) { + if ((m & dev->active_idu) != 0) { 
continue; } - if (i == host_idu[host]) { - printk(" ID: %2d Host Adapter\n", host_idu[host]); + if (i == dev->host_idu) { + printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_idu); continue; } - if (chip_veru[host] == 4) { + if (dev->chip_veru == 4) { tmport = wkport + 0x1b; j = (inb(tmport) & 0x0e) | 0x01; outb(j, tmport); @@ -984,7 +1107,7 @@ void is870(unsigned long host, unsigned int wkport) tmport += 0x06; outb(0, tmport); tmport += 0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); @@ -1003,7 +1126,7 @@ void is870(unsigned long host, unsigned int wkport) continue; } while (inb(tmport) != 0x8e); - active_idu[host] |= m; + dev->active_idu |= m; tmport = wkport + 0x10; outb(0x30, tmport); @@ -1033,7 +1156,7 @@ sel_ok: tmport += 0x07; outb(0, tmport); tmport += 0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(inqd[6], tmport++); outb(inqd[7], tmport++); @@ -1046,7 +1169,7 @@ sel_ok: continue; } while (inb(tmport) != 0x8e); - if (chip_veru[host] == 4) { + if (dev->chip_veru == 4) { tmport = wkport + 0x1b; j = inb(tmport) & 0x0e; outb(j, tmport); @@ -1087,16 +1210,16 @@ rd_inq_data: } inq_ok: mbuf[36] = 0; - printk(" ID: %2d %s\n", i, &mbuf[8]); - devtypeu[host][i] = mbuf[0]; + printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); + dev->id[i].devtypeu = mbuf[0]; rmb = mbuf[1]; - if (chip_veru[host] != 4) { + if (dev->chip_veru != 4) { goto not_wide; } if ((mbuf[7] & 0x60) == 0) { goto not_wide; } - if ((global_map[host] & 0x20) == 0) { + if ((dev->global_map & 0x20) == 0) { goto not_wide; } tmport = wkport + 0x1b; @@ -1112,7 +1235,7 @@ inq_ok: tmport += 0x06; outb(0, tmport); tmport += 0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); @@ -1238,16 +1361,16 @@ widep_cmd: } m = 1; m = m << i; - wide_idu[host] |= m; + 
dev->wide_idu |= m; not_wide: - if ((devtypeu[host][i] == 0x00) || (devtypeu[host][i] == 0x07)) { + if ((dev->id[i].devtypeu == 0x00) || (dev->id[i].devtypeu == 0x07)) { goto set_sync; } continue; set_sync: tmport = wkport + 0x1b; j = inb(tmport) & 0x0e; - if ((m & wide_idu[host]) != 0) { + if ((m & dev->wide_idu) != 0) { j |= 0x01; } outb(j, tmport); @@ -1261,7 +1384,7 @@ set_sync: tmport += 0x06; outb(0, tmport); tmport += 0x02; - outb(devspu[host][i], tmport++); + outb(dev->id[i].devspu, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); @@ -1289,10 +1412,10 @@ try_sync: if (rmb != 0) { outb(synn[j++], tmport); } else { - if ((m & wide_idu[host]) != 0) { + if ((m & dev->wide_idu) != 0) { outb(synw[j++], tmport); } else { - if ((m & ultra_map[host]) != 0) { + if ((m & dev->ultra_map) != 0) { outb(synu[j++], tmport); } else { outb(synn[j++], tmport); @@ -1407,7 +1530,7 @@ tar_dcons: if (mbuf[4] > 0x0c) { mbuf[4] = 0x0c; } - devspu[host][i] = mbuf[4]; + dev->id[i].devspu = mbuf[4]; if ((mbuf[3] < 0x0d) && (rmb == 0)) { j = 0xa0; goto set_syn_ok; @@ -1426,7 +1549,7 @@ tar_dcons: } j = 0x60; set_syn_ok: - devspu[host][i] = (devspu[host][i] & 0x0f) | j; + dev->id[i].devspu = (dev->id[i].devspu & 0x0f) | j; } tmport = wkport + 0x3a; outb((unsigned char) (inb(tmport) & 0xef), tmport); @@ -1439,124 +1562,124 @@ int atp870u_detect(Scsi_Host_Template * tpnt) unsigned long flags; unsigned int base_io, error, tmport; unsigned short index = 0; - unsigned char pci_bus[3], pci_device_fn[3], chip_ver[3], host_id; + struct pci_dev *pdev[3]; + unsigned char chip_ver[3], host_id; struct Scsi_Host *shpnt = NULL; + int tmpcnt = 0; int count = 0; - static unsigned short devid[7] = - {0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0}; - static struct pci_dev *pdev = NULL, *acard_pdev[3]; + int result; + + static unsigned short devid[7] = { + 0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0 + }; - printk("aec671x_detect: \n"); + printk(KERN_INFO 
"aec671x_detect: \n"); if (!pci_present()) { - printk(" NO BIOS32 SUPPORT.\n"); + printk(KERN_INFO" NO PCI SUPPORT.\n"); return count; } tpnt->proc_name = "atp870u"; for (h = 0; h < 2; h++) { - active_idu[h] = 0; - wide_idu[h] = 0; - host_idu[h] = 0x07; - quhdu[h] = 0; - quendu[h] = 0; - pci_bus[h] = 0; - pci_device_fn[h] = 0xff; - chip_ver[h] = 0; - last_cmd[h] = 0xff; - in_snd[h] = 0; - in_int[h] = 0; + struct atp_unit *dev = &atp_unit[h]; + for(k=0;k<16;k++) + { + dev->id[k].prd_tableu = kmalloc(1024, GFP_KERNEL); + dev->id[k].devspu=0x20; + dev->id[k].devtypeu = 0; + dev->id[k].curr_req = NULL; + } + dev->active_idu = 0; + dev->wide_idu = 0; + dev->host_idu = 0x07; + dev->quhdu = 0; + dev->quendu = 0; + pdev[h]=NULL; + pdev[2]=NULL; + dev->chip_veru = 0; + dev->last_cmd = 0xff; + dev->in_snd = 0; + dev->in_int = 0; for (k = 0; k < qcnt; k++) { - querequ[h][k] = 0; + dev->querequ[k] = 0; } for (k = 0; k < 16; k++) { - curr_req[h][k] = 0; + dev->id[k].curr_req = 0; } } h = 0; while (devid[h] != 0) { - pdev = pci_find_device(0x1191, devid[h], pdev); - if (pdev == NULL) { + pdev[2] = pci_find_device(0x1191, devid[h], pdev[2]); + if (pdev[2] == NULL) { h++; index = 0; continue; } chip_ver[2] = 0; - /* To avoid messing with the things below... 
*/ - acard_pdev[2] = pdev; - pci_device_fn[2] = pdev->devfn; - pci_bus[2] = pdev->bus->number; - if (devid[h] == 0x8002) { - error = pci_read_config_byte(pdev, 0x08, &chip_ver[2]); + error = pci_read_config_byte(pdev[2], 0x08, &chip_ver[2]); if (chip_ver[2] < 2) { goto nxt_devfn; } } - if (devid[h] == 0x8010) { + if (devid[h] == 0x8010 || devid[h] == 0x8050) { chip_ver[2] = 0x04; } - if (pci_device_fn[2] < pci_device_fn[0]) { - acard_pdev[1] = acard_pdev[0]; - pci_bus[1] = pci_bus[0]; - pci_device_fn[1] = pci_device_fn[0]; - chip_ver[1] = chip_ver[0]; - acard_pdev[0] = acard_pdev[2]; - pci_bus[0] = pci_bus[2]; - pci_device_fn[0] = pci_device_fn[2]; - chip_ver[0] = chip_ver[2]; - } else if (pci_device_fn[2] < pci_device_fn[1]) { - acard_pdev[1] = acard_pdev[2]; - pci_bus[1] = pci_bus[2]; - pci_device_fn[1] = pci_device_fn[2]; - chip_ver[1] = chip_ver[2]; - } + pdev[tmpcnt] = pdev[2]; + chip_ver[tmpcnt] = chip_ver[2]; + tmpcnt++; nxt_devfn: index++; if (index > 3) { index = 0; h++; } + if(tmpcnt>1) + break; } for (h = 0; h < 2; h++) { - if (pci_device_fn[h] == 0xff) { + struct atp_unit *dev=&atp_unit[h]; + if (pdev[h]==NULL) { return count; } - pdev = acard_pdev[h]; - pdev->devfn = pci_device_fn[h]; - pdev->bus->number = pci_bus[h]; /* Found an atp870u/w. 
*/ - error = pci_read_config_dword(pdev, 0x10, &base_io); - error += pci_read_config_byte(pdev, 0x3c, &irq); - error += pci_read_config_byte(pdev, 0x49, &host_id); + base_io = pdev[h]->resource[0].start; + irq = pdev[h]->irq; + error = pci_read_config_byte(pdev[h],0x49,&host_id); base_io &= 0xfffffff8; - printk(" ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d IO:%x, IRQ:%d.\n" + + if (check_region(base_io,0x40) != 0) + { + return 0; + } + printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d IO:%x, IRQ:%d.\n" ,h, base_io, irq); - ioportu[h] = base_io; - pciportu[h] = base_io + 0x20; + dev->ioport = base_io; + dev->pciport = base_io + 0x20; irqnumu[h] = irq; host_id &= 0x07; - host_idu[h] = host_id; - chip_veru[h] = chip_ver[h]; + dev->host_idu = host_id; + dev->chip_veru = chip_ver[h]; tmport = base_io + 0x22; - scam_on[h] = inb(tmport); + dev->scam_on = inb(tmport); tmport += 0x0b; - global_map[h] = inb(tmport++); - ultra_map[h] = inw(tmport); - if (ultra_map[h] == 0) { - scam_on[h] = 0x00; - global_map[h] = 0x20; - ultra_map[h] = 0xffff; + dev->global_map = inb(tmport++); + dev->ultra_map = inw(tmport); + if (dev->ultra_map == 0) { + dev->scam_on = 0x00; + dev->global_map = 0x20; + dev->ultra_map = 0xffff; } shpnt = scsi_register(tpnt, 4); save_flags(flags); cli(); - if (request_irq(irq, atp870u_intr_handle, 0, "atp870u", NULL)) { - printk("Unable to allocate IRQ for Acard controller.\n"); + if (request_irq(irq, atp870u_intr_handle, SA_SHIRQ, "atp870u", dev)) { + printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n"); goto unregister; } tmport = base_io + 0x3a; @@ -1584,9 +1707,11 @@ int atp870u_detect(Scsi_Host_Template * tpnt) is870(h, base_io); tmport = base_io + 0x3a; outb((inb(tmport) & 0xef), tmport); + tmport++; + outb((inb(tmport) | 0x20),tmport); atp_host[h] = shpnt; - if (chip_ver[h] == 4) { + if (dev->chip_veru == 4) { shpnt->max_id = 16; } shpnt->this_id = host_id; @@ -1617,7 +1742,7 @@ int atp870u_abort(Scsi_Cmnd * 
SCpnt) { unsigned char h, j; unsigned int tmport; -/* printk(" atp870u_abort: \n"); */ + struct atp_unit *dev; for (h = 0; h <= admaxu; h++) { if (SCpnt->host == atp_host[h]) { goto find_adp; @@ -1625,20 +1750,23 @@ int atp870u_abort(Scsi_Cmnd * SCpnt) } panic("Abort host not found !"); find_adp: - printk(" workingu=%x last_cmd=%x ", workingu[h], last_cmd[h]); - printk(" quhdu=%x quendu=%x ", quhdu[h], quendu[h]); - tmport = ioportu[h]; + dev=&atp_unit[h]; + printk(KERN_DEBUG "working=%x last_cmd=%x ", dev->working, dev->last_cmd); + printk(" quhdu=%x quendu=%x ", dev->quhdu, dev->quendu); + tmport = dev->ioport; for (j = 0; j < 0x17; j++) { printk(" r%2x=%2x", j, inb(tmport++)); } tmport += 0x05; printk(" r1c=%2x", inb(tmport)); tmport += 0x03; - printk(" r1f=%2x in_snd=%2x ", inb(tmport), in_snd[h]); + printk(" r1f=%2x in_snd=%2x ", inb(tmport), dev->in_snd); tmport++; printk(" r20=%2x", inb(tmport)); tmport += 0x02; - printk(" r22=%2x \n", inb(tmport)); + printk(" r22=%2x", inb(tmport)); + tmport += 0x18; + printk(" r3a=%2x \n",inb(tmport)); return (SCSI_ABORT_SNOOZE); } @@ -1648,7 +1776,6 @@ int atp870u_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags) /* * See if a bus reset was suggested. 
*/ -/* printk("atp870u_reset: \n"); */ for (h = 0; h <= admaxu; h++) { if (SCpnt->host == atp_host[h]) { goto find_host; @@ -1658,9 +1785,9 @@ int atp870u_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags) find_host: /* SCpnt->result = 0x00080000; SCpnt->scsi_done(SCpnt); - workingu[h]=0; - quhdu[h]=0; - quendu[h]=0; + dev->working=0; + dev->quhdu=0; + dev->quendu=0; return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET); */ return (SCSI_RESET_SNOOZE); } @@ -1669,14 +1796,14 @@ const char *atp870u_info(struct Scsi_Host *notused) { static char buffer[128]; - strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V1.0 "); + strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V2.0+ac "); return buffer; } int atp870u_set_info(char *buffer, int length, struct Scsi_Host *HBAptr) { - return (-ENOSYS); /* Currently this is a no-op */ + return -ENOSYS; /* Currently this is a no-op */ } #define BLS buffer + len + size @@ -1714,7 +1841,7 @@ int atp870u_proc_info(char *buffer, char **start, off_t offset, int length, if (offset == 0) { memset(buff, 0, sizeof(buff)); } - size += sprintf(BLS, "ACARD AEC-671X Driver Version: 1.0\n"); + size += sprintf(BLS, "ACARD AEC-671X Driver Version: 2.0+ac\n"); len += size; pos = begin + len; size = 0; @@ -1727,7 +1854,7 @@ int atp870u_proc_info(char *buffer, char **start, off_t offset, int length, pos = begin + len; size = 0; - stop_output: +stop_output: *start = buffer + (offset - begin); /* Start of wanted data */ len -= (offset - begin); /* Start slop */ if (len > length) { @@ -1758,6 +1885,26 @@ int atp870u_biosparam(Scsi_Disk * disk, kdev_t dev, int *ip) return 0; } + +int atp870u_release (struct Scsi_Host *pshost) +{ + int h; + for (h = 0; h <= admaxu; h++) + { + if (pshost == atp_host[h]) { + int k; + free_irq (pshost->irq, &atp_unit[h]); + release_region (pshost->io_port, pshost->n_io_port); + scsi_unregister(pshost); + for(k=0;k<16;k++) + kfree(atp_unit[h].id[k].prd_tableu); + return 0; + } + } + 
panic("atp870u: bad scsi host passed.\n"); + +} + #ifdef MODULE Scsi_Host_Template driver_template = ATP870U; diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h index 13639945a2a9..39644040a568 100644 --- a/drivers/scsi/atp870u.h +++ b/drivers/scsi/atp870u.h @@ -24,10 +24,11 @@ int atp870u_queuecommand(Scsi_Cmnd *, void (*done) (Scsi_Cmnd *)); int atp870u_abort(Scsi_Cmnd *); int atp870u_reset(Scsi_Cmnd *, unsigned int); int atp870u_biosparam(Disk *, kdev_t, int *); +int atp870u_release(struct Scsi_Host *); void send_s870(unsigned char); -#define qcnt 32 -#define ATP870U_SCATTER 127 +#define qcnt 32 +#define ATP870U_SCATTER 128 #define ATP870U_CMDLUN 1 #ifndef NULL @@ -38,31 +39,33 @@ extern const char *atp870u_info(struct Scsi_Host *); extern int atp870u_proc_info(char *, char **, off_t, int, int, int); -#define ATP870U { \ - proc_name: "atp870u", \ - proc_info: atp870u_proc_info, \ - name: NULL, \ - detect: atp870u_detect, \ - release: NULL, \ - info: atp870u_info, \ - command: atp870u_command, \ - queuecommand: atp870u_queuecommand, \ - eh_strategy_handler: NULL, \ - eh_abort_handler: NULL, \ - eh_device_reset_handler: NULL, \ - eh_bus_reset_handler: NULL, \ - eh_host_reset_handler: NULL, \ - abort: atp870u_abort, \ - reset: atp870u_reset, \ - slave_attach: NULL, \ - bios_param: atp870u_biosparam, \ - can_queue: qcnt, \ - this_id: 1, \ - sg_tablesize: ATP870U_SCATTER, \ - cmd_per_lun: ATP870U_CMDLUN, \ - present: 0, \ - unchecked_isa_dma: 0, \ - use_clustering: ENABLE_CLUSTERING, \ - use_new_eh_code: 0 \ +#define ATP870U { \ + next: NULL, \ + module: NULL, \ + proc_info: atp870u_proc_info, \ + name: NULL, \ + detect: atp870u_detect, \ + release: atp870u_release, \ + info: atp870u_info, \ + command: atp870u_command, \ + queuecommand: atp870u_queuecommand, \ + eh_strategy_handler: NULL, \ + eh_abort_handler: NULL, \ + eh_device_reset_handler: NULL, \ + eh_bus_reset_handler: NULL, \ + eh_host_reset_handler: NULL, \ + abort: atp870u_abort, \ + reset: 
atp870u_reset, \ + slave_attach: NULL, \ + bios_param: atp870u_biosparam, \ + can_queue: qcnt, /* max simultaneous cmds */\ + this_id: 7, /* scsi id of host adapter */\ + sg_tablesize: ATP870U_SCATTER, /* max scatter-gather cmds */\ + cmd_per_lun: ATP870U_CMDLUN, /* cmds per lun (linked cmds) */\ + present: 0, /* number of 7xxx's present */\ + unchecked_isa_dma: 0, /* no memory DMA restrictions */\ + use_clustering: ENABLE_CLUSTERING, \ + use_new_eh_code: 0 \ } + #endif diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index 4abd2241186a..be0f0a5f2f6b 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -1049,7 +1049,7 @@ static inline int port_detect \ sh[j]->unchecked_isa_dma = FALSE; else { unsigned long flags; - sh[j]->wish_block = TRUE; +//FIXME// sh[j]->wish_block = TRUE; sh[j]->unchecked_isa_dma = TRUE; flags=claim_dma_lock(); diff --git a/drivers/scsi/eata_dma.c b/drivers/scsi/eata_dma.c index 2ca322c0e688..4e45fc849d9d 100644 --- a/drivers/scsi/eata_dma.c +++ b/drivers/scsi/eata_dma.c @@ -1297,7 +1297,7 @@ short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt, else hd->primary = TRUE; - sh->wish_block = FALSE; +//FIXME// sh->wish_block = FALSE; if (hd->bustype != IS_ISA) { sh->unchecked_isa_dma = FALSE; diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 6b427ddbc14c..5a13b0e483ed 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -16,6 +16,9 @@ * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl * + * Added ISAPNP support for DTC436 adapters, + * Thomas Sailer, sailer@ife.ee.ethz.ch + * * ALPHA RELEASE 1. 
* * For more information, please consult @@ -117,7 +120,8 @@ #include "sd.h" #include #include -#include +#include +#include #define NCR_NOT_SET 0 static int ncr_irq=NCR_NOT_SET; @@ -280,6 +284,36 @@ int __init generic_NCR5380_detect(Scsi_Host_Template * tpnt){ else if (dtc_3181e != NCR_NOT_SET) overrides[0].board=BOARD_DTC3181E; + if (!current_override && isapnp_present()) { + struct pci_dev *dev = NULL; + count = 0; + while ((dev = isapnp_find_dev(NULL, ISAPNP_VENDOR('D','T','C'), ISAPNP_FUNCTION(0x436e), dev))) { + if (count >= NO_OVERRIDES) + break; + if (!dev->active && dev->prepare(dev) < 0) { + printk(KERN_ERR "dtc436e probe: prepare failed\n"); + continue; + } + if (!(dev->resource[0].flags & IORESOURCE_IO)) + continue; + if (!dev->active && dev->activate(dev) < 0) { + printk(KERN_ERR "dtc436e probe: activate failed\n"); + continue; + } + if (dev->irq_resource[0].flags & IORESOURCE_IRQ) + overrides[count].irq=dev->irq_resource[0].start; + else + overrides[count].irq=IRQ_NONE; + if (dev->dma_resource[0].flags & IORESOURCE_DMA) + overrides[count].dma=dev->dma_resource[0].start; + else + overrides[count].dma=DMA_NONE; + overrides[count].NCR5380_map_name=(NCR5380_map_type)dev->resource[0].start; + overrides[count].board=BOARD_DTC3181E; + count++; + } + } + tpnt->proc_name = "g_NCR5380"; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index e8042a888f94..6d0112abab73 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c @@ -934,10 +934,8 @@ static void gdth_do_cmd(Scsi_Cmnd *scp,gdth_cmd_str *gdtcmd,int timeout) scp->request.rq_status = RQ_SCSI_BUSY; scp->request.sem = &sem; scp->SCp.this_residual = IOCTL_PRI; - GDTH_LOCK_SCSI_DOCMD(); scsi_do_cmd(scp, cmnd, gdtcmd, sizeof(gdth_cmd_str), gdth_scsi_done, timeout*HZ, 1); - GDTH_UNLOCK_SCSI_DOCMD(); down(&sem); } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 59a6bee98f9e..5c9fdd941473 100644 --- 
a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -696,8 +696,6 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){ atomic_set(&retval->host_active,0); retval->host_busy = 0; retval->host_failed = 0; - retval->block = NULL; - retval->wish_block = 0; if(j > 0xffff) panic("Too many extra bytes requested\n"); retval->extra_bytes = j; retval->loaded_as_module = scsi_loadable_module_flag; @@ -723,11 +721,8 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){ retval->ehandler = NULL; /* Initial value until the thing starts up. */ retval->eh_notify = NULL; /* Who we notify when we exit. */ - /* - * Initialize the fields used for mid-level queueing. - */ - retval->pending_commands = NULL; - retval->host_busy = FALSE; + + retval->host_blocked = FALSE; #ifdef DEBUG printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j); @@ -783,6 +778,7 @@ static void launch_error_handler_thread(struct Scsi_Host * shpnt) kernel_thread((int (*)(void *))scsi_error_handler, (void *) shpnt, 0); + /* * Now wait for the kernel error thread to initialize itself * as it might be needed when we scan the bus. @@ -873,7 +869,6 @@ unsigned int __init scsi_init(void) printk ("scsi : %d host%s.\n", next_scsi_host, (next_scsi_host == 1) ? "" : "s"); - scsi_make_blocked_list(); /* Now attach the high level drivers */ #ifdef CONFIG_BLK_DEV_SD diff --git a/drivers/scsi/hosts.h b/drivers/scsi/hosts.h index fa6ac5f6c53a..18a9bf45c87e 100644 --- a/drivers/scsi/hosts.h +++ b/drivers/scsi/hosts.h @@ -1,6 +1,6 @@ /* * hosts.h Copyright (C) 1992 Drew Eckhardt - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale * * mid to low-level SCSI driver interface header * Initial versions: Drew Eckhardt @@ -8,7 +8,7 @@ * * * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. 
* @@ -301,13 +301,7 @@ struct Scsi_Host */ struct Scsi_Host * next; Scsi_Device * host_queue; - /* - * List of commands that have been rejected because either the host - * or the device was busy. These need to be retried relatively quickly, - * but we need to hold onto it for a short period until the host/device - * is available. - */ - Scsi_Cmnd * pending_commands; + struct task_struct * ehandler; /* Error recovery thread. */ struct semaphore * eh_wait; /* The error recovery thread waits on @@ -340,13 +334,6 @@ struct Scsi_Host unsigned int max_lun; unsigned int max_channel; - /* - * Pointer to a circularly linked list - this indicates the hosts - * that should be locked out of performing I/O while we have an active - * command on this host. - */ - struct Scsi_Host * block; - unsigned wish_block:1; /* These parameters should be set by the detect routine */ unsigned long base; @@ -391,9 +378,14 @@ struct Scsi_Host * Host uses correct SCSI ordering not PC ordering. The bit is * set for the minority of drivers whose authors actually read the spec ;) */ - unsigned reverse_ordering:1; - + + /* + * Indicates that one or more devices on this host were starved, and + * when the device becomes less busy that we need to feed them. + */ + unsigned some_device_starved:1; + void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *); /* @@ -412,7 +404,6 @@ extern Scsi_Host_Template * scsi_hosts; extern void build_proc_dir_entries(Scsi_Host_Template *); - /* * scsi_init initializes the scsi hosts. */ @@ -456,6 +447,7 @@ struct Scsi_Device_Template void (*finish)(void); /* Perform initialization after attachment */ int (*attach)(Scsi_Device *); /* Attach devices to arrays */ void (*detach)(Scsi_Device *); + int (*init_command)(Scsi_Cmnd *); /* Used by new queueing code. 
*/ }; extern struct Scsi_Device_Template sd_template; diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c index 1deec84e4ed2..393cf909e409 100644 --- a/drivers/scsi/ibmmca.c +++ b/drivers/scsi/ibmmca.c @@ -747,7 +747,7 @@ static void internal_done (Scsi_Cmnd * cmd) static int device_inquiry(int host_index, int ldn) { int retries; - Scsi_Cmnd cmd; + Scsi_Cmnd *cmd; struct im_scb *scb; struct im_tsb *tsb; unsigned char *buf; @@ -757,12 +757,18 @@ static int device_inquiry(int host_index, int ldn) buf = (unsigned char *)(&(ld(host_index)[ldn].buf)); ld(host_index)[ldn].tsb.dev_status = 0; /* prepare stusblock */ + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL|GFP_DMA); + if(cmd==NULL) + { + printk(KERN_ERR "ibmmca: out of memory for inquiry.\n"); + return 0; + } if (bypass_controller) { /* fill the commonly known field for device-inquiry SCSI cmnd */ - cmd.cmd_len = 6; - memset (&(cmd.cmnd), 0x0, sizeof(char) * cmd.cmd_len); - cmd.cmnd[0] = INQUIRY; /* device inquiry */ - cmd.cmnd[4] = 0xff; /* return buffer size = 255 */ + cmd->cmd_len = 6; + memset (&(cmd->cmnd), 0x0, sizeof(char) * cmd->cmd_len); + cmd->cmnd[0] = INQUIRY; /* device inquiry */ + cmd->cmnd[4] = 0xff; /* return buffer size = 255 */ } for (retries = 0; retries < 3; retries++) { @@ -770,8 +776,8 @@ static int device_inquiry(int host_index, int ldn) { /* bypass the hardware integrated command set */ scb->command = IM_OTHER_SCSI_CMD_CMD; scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT; - scb->u1.scsi_cmd_length = cmd.cmd_len; - memcpy (scb->u2.scsi_command, &(cmd.cmnd), cmd.cmd_len); + scb->u1.scsi_cmd_length = cmd->cmd_len; + memcpy (scb->u2.scsi_command, &(cmd->cmnd), cmd->cmd_len); last_scsi_command(host_index)[ldn] = INQUIRY; last_scsi_type(host_index)[ldn] = IM_SCB; } @@ -800,6 +806,7 @@ static int device_inquiry(int host_index, int ldn) return 1; } } + kfree(cmd); /*if all three retries failed, return "no device at this ldn" */ if (retries >= 3) @@ -1406,7 +1413,7 @@ static int 
ibmmca_getinfo (char *buf, int slot, void *dev) } else if (special == INTEGRATED_SCSI) { /* if the integrated subsystem has been found automatically: */ - len += sprintf (buf + len, "Adapter cathegory: integrated\n"); + len += sprintf (buf + len, "Adapter category: integrated\n"); len += sprintf (buf + len, "Chip revision level: %d\n", ((pos2 & 0xf0) >> 4)); len += sprintf (buf + len, "Chip status: %s\n", @@ -1417,7 +1424,7 @@ static int ibmmca_getinfo (char *buf, int slot, void *dev) else if ((special>=0)&& (special<(sizeof(subsys_list)/sizeof(struct subsys_list_struct)))) { /* if the subsystem is a slot adapter */ - len += sprintf (buf + len, "Adapter cathegory: slot-card\n"); + len += sprintf (buf + len, "Adapter category: slot-card\n"); len += sprintf (buf + len, "Chip revision level: %d\n", ((pos2 & 0xf0) >> 4)); len += sprintf (buf + len, "Chip status: %s\n", @@ -1427,14 +1434,14 @@ static int ibmmca_getinfo (char *buf, int slot, void *dev) } else { - len += sprintf (buf + len, "Adapter cathegory: unknown\n"); + len += sprintf (buf + len, "Adapter category: unknown\n"); } /* common subsystem information to write to the slotn file */ len += sprintf (buf + len, "Subsystem PUN: %d\n", shpnt->this_id); len += sprintf (buf + len, "I/O base address range: 0x%x-0x%x", (unsigned int)(shpnt->io_port), (unsigned int)(shpnt->io_port+7)); - /* Now make sure, the bufferlength is devideable by 4 to avoid + /* Now make sure, the bufferlength is divisible by 4 to avoid * paging problems of the buffer. 
*/ while ( len % sizeof( int ) != ( sizeof ( int ) - 1 ) ) { @@ -1483,7 +1490,7 @@ int ibmmca_detect (Scsi_Host_Template * scsi_template) ((struct ibmmca_hostdata *)shpnt->hostdata)->_pos3 = 0; ((struct ibmmca_hostdata *)shpnt->hostdata)->_special = FORCED_DETECTION; - mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter"); + mca_set_adapter_name(MCA_INTEGSCSI, "forcibly detected SCSI Adapter"); mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt); mca_mark_as_used(MCA_INTEGSCSI); diff --git a/drivers/scsi/inia100.c b/drivers/scsi/inia100.c index decbb8af72dc..7dfc22fc1fe4 100644 --- a/drivers/scsi/inia100.c +++ b/drivers/scsi/inia100.c @@ -351,6 +351,7 @@ int inia100_detect(Scsi_Host_Template * tpnt) pHCB->pSRB_head = NULL; /* Initial SRB save queue */ pHCB->pSRB_tail = NULL; /* Initial SRB save queue */ pHCB->pSRB_lock = SPIN_LOCK_UNLOCKED; /* SRB save queue lock */ + pHCB->BitAllocFlagLock = SPIN_LOCK_UNLOCKED; /* Get total memory needed for SCB */ sz = orc_num_scb * sizeof(ORC_SCB); if ((pHCB->HCS_virScbArray = (PVOID) kmalloc(sz, GFP_ATOMIC | GFP_DMA)) == NULL) { diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 7c0b9e6b9651..f9fa60742452 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -396,7 +396,7 @@ ips_detect(Scsi_Host_Template *SHT) { sh->cmd_per_lun = sh->hostt->cmd_per_lun; sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; sh->use_clustering = sh->hostt->use_clustering; - sh->wish_block = FALSE; +//FIXME// sh->wish_block = FALSE; /* Store info in HA structure */ ha->io_addr = io_addr; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index e5ca5c740fc1..209d88091715 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -287,9 +287,9 @@ static mega_scb *pLastScb = NULL; static Scsi_Cmnd *qCompleted = NULL; #if SERDEBUG -volatile static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED; #endif -volatile static spinlock_t 
mega_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t mega_lock = SPIN_LOCK_UNLOCKED; #if SERDEBUG static char strbuf[MAX_SERBUF + 1]; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index e556c07233c0..e70b2d75e5e6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -132,7 +132,8 @@ typedef unsigned int FreeSectorBitmap; unsigned long scsi_pid = 0; Scsi_Cmnd *last_cmnd = NULL; /* Command groups 3 and 4 are reserved and should never be used. */ -const unsigned char scsi_command_size[8] = { +const unsigned char scsi_command_size[8] = +{ 6, 10, 10, 12, 12, 12, 10, 10 }; @@ -278,9 +279,9 @@ static struct dev_info device_list[] = {"REGAL", "CDC-4X", "*", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"NAKAMICH", "MJ-4.8S", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, {"NAKAMICH", "MJ-5.16S", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, - {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, - {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, - {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, {"EMULEX", "MD21/S2 ESDI", "*", BLIST_SINGLELUN}, {"CANON", "IPUBJD", "*", BLIST_SPARSELUN}, {"nCipher", "Fastness Crypto", "*", BLIST_FORCELUN}, @@ -322,87 +323,6 @@ static int get_device_flags(unsigned char *response_data) return 0; } -/* - * Function: scsi_make_blocked_list - * - * Purpose: Build linked list of hosts that require blocking. - * - * Arguments: None. - * - * Returns: Nothing - * - * Notes: Blocking is sort of a hack that is used to prevent more than one - * host adapter from being active at one time. This is used in cases - * where the ISA bus becomes unreliable if you have more than one - * host adapter really pumping data through. 
- * - * We spent a lot of time examining the problem, and I *believe* that - * the problem is bus related as opposed to being a driver bug. - * - * The blocked list is used as part of the synchronization object - * that we use to ensure that only one host is active at one time. - * I (ERY) would like to make this go away someday, but this would - * require that we have a recursive mutex object. - */ - -void scsi_make_blocked_list(void) -{ - int block_count = 0, index; - struct Scsi_Host *sh[128], *shpnt; - - /* - * Create a circular linked list from the scsi hosts which have - * the "wish_block" field in the Scsi_Host structure set. - * The blocked list should include all the scsi hosts using ISA DMA. - * In some systems, using two dma channels simultaneously causes - * unpredictable results. - * Among the scsi hosts in the blocked list, only one host at a time - * is allowed to have active commands queued. The transition from - * one active host to the next one is allowed only when host_busy == 0 - * for the active host (which implies host_busy == 0 for all the hosts - * in the list). Moreover for block devices the transition to a new - * active host is allowed only when a request is completed, since a - * block device request can be divided into multiple scsi commands - * (when there are few sg lists or clustering is disabled). - * - * (DB, 4 Feb 1995) - */ - - - host_active = NULL; - - for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) { - -#if 0 - /* - * Is this is a candidate for the blocked list? - * Useful to put into the blocked list all the hosts whose driver - * does not know about the host->block feature. 
- */ - if (shpnt->unchecked_isa_dma) - shpnt->wish_block = 1; -#endif - - if (shpnt->wish_block) - sh[block_count++] = shpnt; - } - - if (block_count == 1) - sh[0]->block = NULL; - - else if (block_count > 1) { - - for (index = 0; index < block_count - 1; index++) { - sh[index]->block = sh[index + 1]; - printk("scsi%d : added to blocked host list.\n", - sh[index]->host_no); - } - - sh[block_count - 1]->block = sh[0]; - printk("scsi%d : added to blocked host list.\n", - sh[index]->host_no); - } -} static void scan_scsis_done(Scsi_Cmnd * SCpnt) { @@ -414,10 +334,11 @@ static void scan_scsis_done(Scsi_Cmnd * SCpnt) up(SCpnt->request.sem); } +#ifdef MODULE MODULE_PARM(scsi_logging_level, "i"); MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero"); -#ifndef MODULE +#else static int __init scsi_logging_setup(char *str) { @@ -443,10 +364,12 @@ static int max_scsi_luns = 8; static int max_scsi_luns = 1; #endif +#ifdef MODULE + MODULE_PARM(max_scsi_luns, "i"); MODULE_PARM_DESC(max_scsi_luns, "last scsi LUN (should be between 1 and 8)"); -#ifndef MODULE +#else static int __init scsi_luns_setup(char *str) { @@ -474,15 +397,12 @@ void scsi_wait_cmd (Scsi_Cmnd * SCpnt, const void *cmnd , void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *), int timeout, int retries) { - unsigned long flags; DECLARE_MUTEX_LOCKED(sem); SCpnt->request.sem = &sem; SCpnt->request.rq_status = RQ_SCSI_BUSY; - spin_lock_irqsave(&io_request_lock, flags); scsi_do_cmd (SCpnt, (void *) cmnd, buffer, bufflen, done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); down (&sem); SCpnt->request.sem = NULL; } @@ -519,6 +439,16 @@ static void scan_scsis(struct Scsi_Host *shpnt, SDpnt = (Scsi_Device *) scsi_init_malloc(sizeof(Scsi_Device), GFP_ATOMIC); if (SDpnt) { + /* + * Register the queue for the device. All I/O requests will come + * in through here. 
We also need to register a pointer to + * ourselves, since the queue handler won't know what device + * the queue actually represents. We could look it up, but it + * is pointless work. + */ + blk_init_queue(&SDpnt->request_queue, scsi_request_fn); + blk_queue_headactive(&SDpnt->request_queue, 0); + SDpnt->request_queue.queuedata = (void *) SDpnt; /* Make sure we have something that is valid for DMA purposes */ scsi_result = ((!shpnt->unchecked_isa_dma) ? &scsi_result0[0] : scsi_init_malloc(512, GFP_DMA)); @@ -536,6 +466,8 @@ static void scan_scsis(struct Scsi_Host *shpnt, SDpnt->host = shpnt; SDpnt->online = TRUE; + initialize_merge_fn(SDpnt); + init_waitqueue_head(&SDpnt->device_wait); /* @@ -581,7 +513,6 @@ static void scan_scsis(struct Scsi_Host *shpnt, if (sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init) (); - oldSDpnt->scsi_request_fn = NULL; for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) { if (sdtpnt->attach) { (*sdtpnt->attach) (oldSDpnt); @@ -727,6 +658,7 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, SDpnt->borken = 1; SDpnt->was_reset = 0; SDpnt->expecting_cc_ua = 0; + SDpnt->starved = 0; scsi_cmd[0] = TEST_UNIT_READY; scsi_cmd[1] = lun << 5; @@ -947,6 +879,19 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, printk("scsi: scan_scsis_single: Cannot malloc\n"); return 0; } + /* + * Register the queue for the device. All I/O requests will come + * in through here. We also need to register a pointer to + * ourselves, since the queue handler won't know what device + * the queue actually represents. We could look it up, but it + * is pointless work. + */ + blk_init_queue(&SDpnt->request_queue, scsi_request_fn); + blk_queue_headactive(&SDpnt->request_queue, 0); + SDpnt->request_queue.queuedata = (void *) SDpnt; + SDpnt->host = shpnt; + initialize_merge_fn(SDpnt); + /* * And hook up our command block to the new device we will be testing * for. 
@@ -1044,127 +989,6 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, * of the calling code to ensure that this is the case. */ -Scsi_Cmnd *scsi_request_queueable(struct request * req, Scsi_Device * device) -{ - Scsi_Cmnd *SCpnt = NULL; - int tablesize; - Scsi_Cmnd *found = NULL; - struct buffer_head *bh, *bhp; - - if (!device) - panic("No device passed to scsi_request_queueable().\n"); - - if (req && req->rq_status == RQ_INACTIVE) - panic("Inactive in scsi_request_queueable"); - - /* - * Look for a free command block. If we have been instructed not to queue - * multiple commands to multi-lun devices, then check to see what else is - * going for this device first. - */ - - if (!device->single_lun) { - SCpnt = device->device_queue; - while (SCpnt) { - if (SCpnt->request.rq_status == RQ_INACTIVE) - break; - SCpnt = SCpnt->next; - } - } else { - SCpnt = device->device_queue; - while (SCpnt) { - if (SCpnt->channel == device->channel - && SCpnt->target == device->id) { - if (SCpnt->lun == device->lun) { - if (found == NULL - && SCpnt->request.rq_status == RQ_INACTIVE) { - found = SCpnt; - } - } - if (SCpnt->request.rq_status != RQ_INACTIVE) { - /* - * I think that we should really limit things to one - * outstanding command per device - this is what tends - * to trip up buggy firmware. - */ - return NULL; - } - } - SCpnt = SCpnt->next; - } - SCpnt = found; - } - - if (!SCpnt) - return NULL; - - if (SCSI_BLOCK(device, device->host)) - return NULL; - - if (req) { - memcpy(&SCpnt->request, req, sizeof(struct request)); - tablesize = device->host->sg_tablesize; - bhp = bh = req->bh; - if (!tablesize) - bh = NULL; - /* Take a quick look through the table to see how big it is. - * We already have our copy of req, so we can mess with that - * if we want to. 
- */ - while (req->nr_sectors && bh) { - bhp = bhp->b_reqnext; - if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp)) - tablesize--; - req->nr_sectors -= bh->b_size >> 9; - req->sector += bh->b_size >> 9; - if (!tablesize) - break; - bh = bhp; - } - if (req->nr_sectors && bh && bh->b_reqnext) { /* Any leftovers? */ - SCpnt->request.bhtail = bh; - req->bh = bh->b_reqnext; /* Divide request */ - bh->b_reqnext = NULL; - bh = req->bh; - - /* Now reset things so that req looks OK */ - SCpnt->request.nr_sectors -= req->nr_sectors; - req->current_nr_sectors = bh->b_size >> 9; - req->buffer = bh->b_data; - SCpnt->request.sem = NULL; /* Wait until whole thing done */ - } else { - req->rq_status = RQ_INACTIVE; - wake_up(&wait_for_request); - } - } else { - SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Busy, but no request */ - SCpnt->request.sem = NULL; /* And no one is waiting for the device - * either */ - } - - atomic_inc(&SCpnt->host->host_active); - SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", SCpnt->target, - atomic_read(&SCpnt->host->host_active))); - SCpnt->use_sg = 0; /* Reset the scatter-gather flag */ - SCpnt->old_use_sg = 0; - SCpnt->transfersize = 0; - SCpnt->resid = 0; - SCpnt->underflow = 0; - SCpnt->cmd_len = 0; - - /* - * Since not everyone seems to set the device info correctly - * before Scsi_Cmnd gets send out to scsi_do_command, we do it here. - */ - - SCpnt->channel = device->channel; - SCpnt->lun = device->lun; - SCpnt->target = device->id; - SCpnt->state = SCSI_STATE_INITIALIZING; - SCpnt->owner = SCSI_OWNER_HIGHLEVEL; - - return SCpnt; -} /* This function returns a structure pointer that will be valid for * the device. 
The wait parameter tells us whether we should wait for @@ -1176,168 +1000,158 @@ Scsi_Cmnd *scsi_request_queueable(struct request * req, Scsi_Device * device) * of the packets for each device */ -Scsi_Cmnd *scsi_allocate_device(struct request ** reqp, Scsi_Device * device, - int wait) -{ - kdev_t dev; - struct request *req = NULL; - int tablesize; - struct buffer_head *bh, *bhp; - struct Scsi_Host *host; - Scsi_Cmnd *SCpnt = NULL; - Scsi_Cmnd *SCwait = NULL; - Scsi_Cmnd *found = NULL; - - if (!device) - panic("No device passed to scsi_allocate_device().\n"); - - if (reqp) - req = *reqp; +/* + * This lock protects the freelist for all devices on the system. + * We could make this finer grained by having a single lock per + * device if it is ever found that there is excessive contention + * on this lock. + */ +static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED; - /* - * See if this request has already been queued by an - * interrupt routine - */ - - if (req) { - if (req->rq_status == RQ_INACTIVE) - return NULL; - dev = req->rq_dev; - } else - dev = 0; /* unused */ +/* + * Used for access to internal allocator used for DMA safe buffers. + */ +static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED; - host = device->host; +/* + * Used to protect insertion into and removal from the queue of + * commands to be processed by the bottom half handler. + */ +static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED; - if (in_interrupt() && SCSI_BLOCK(device, host)) - return NULL; +/* + * Function: scsi_allocate_device + * + * Purpose: Allocate a command descriptor. + * + * Arguments: device - device for which we want a command descriptor + * wait - 1 if we should wait in the event that none + * are available. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. + * + * Returns: Pointer to command descriptor. + * + * Notes: Prior to the new queue code, this function was not SMP-safe. 
+ */ +Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait) +{ + struct Scsi_Host *host; + Scsi_Cmnd *SCpnt = NULL; + Scsi_Device *SDpnt; + unsigned long flags; + + if (!device) + panic("No device passed to scsi_allocate_device().\n"); + + host = device->host; + + spin_lock_irqsave(&device_request_lock, flags); + while (1 == 1) { - if (!device->single_lun) { - SCpnt = device->device_queue; - while (SCpnt) { - SCwait = SCpnt; - if (SCpnt->request.rq_status == RQ_INACTIVE) - break; - SCpnt = SCpnt->next; - } - } else { - SCpnt = device->device_queue; - while (SCpnt) { - if (SCpnt->channel == device->channel - && SCpnt->target == device->id) { - if (SCpnt->lun == device->lun) { - SCwait = SCpnt; - if (found == NULL - && SCpnt->request.rq_status == RQ_INACTIVE) { - found = SCpnt; + SCpnt = NULL; + if (!device->device_blocked) { + if (device->single_lun) { + /* + * FIXME(eric) - this is not at all optimal. Given that + * single lun devices are rare and usually slow + * (i.e. CD changers), this is good enough for now, but + * we may want to come back and optimize this later. + * + * Scan through all of the devices attached to this + * host, and see if any are active or not. If so, + * we need to defer this command. + * + * We really need a busy counter per device. This would + * allow us to more easily figure out whether we should + * do anything here or not. + */ + for (SDpnt = host->host_queue; + SDpnt; + SDpnt = SDpnt->next) { + /* + * Only look for other devices on the same bus + * with the same target ID. + */ + if (SDpnt->channel != device->channel + || SDpnt->id != device->id + || SDpnt == device) { + continue; + } + for (SCpnt = SDpnt->device_queue; + SCpnt; + SCpnt = SCpnt->next) { + if (SCpnt->request.rq_status != RQ_INACTIVE) { + break; } } - if (SCpnt->request.rq_status != RQ_INACTIVE) { - /* - * I think that we should really limit things to one - * outstanding command per device - this is what tends - * to trip up buggy firmware. 
- */ - found = NULL; + if (SCpnt) { break; } } - SCpnt = SCpnt->next; + if (SDpnt) { + /* + * Some other device in this cluster is busy. + * If asked to wait, we need to wait, otherwise + * return NULL. + */ + SCpnt = NULL; + break; + } + } + /* + * Now we can check for a free command block for this device. + */ + for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) { + if (SCpnt->request.rq_status == RQ_INACTIVE) + break; } - SCpnt = found; } - - /* See if this request has already been queued by an interrupt routine + /* + * If we couldn't find a free command block, and we have been + * asked to wait, then do so. */ - if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) { - return NULL; + if (SCpnt) { + break; } - if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) { /* Might have changed */ - if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE) { - DECLARE_WAITQUEUE(wait,current); - add_wait_queue(&device->device_wait,&wait); - current->state=TASK_UNINTERRUPTIBLE; - spin_unlock(&io_request_lock); - schedule(); - current->state=TASK_RUNNING; - remove_wait_queue(&device->device_wait,&wait); - spin_lock_irq(&io_request_lock); - } else { - if (!wait) - return NULL; - if (!SCwait) { - printk("Attempt to allocate device channel %d," - " target %d, lun %d\n", device->channel, - device->id, device->lun); - panic("No device found in scsi_allocate_device\n"); - } - } + /* + * If we have been asked to wait for a free block, then + * wait here. + */ + spin_unlock_irqrestore(&device_request_lock, flags); + if (wait) { + /* + * This should block until a device command block + * becomes available. + */ + sleep_on(&device->device_wait); + spin_lock_irqsave(&device_request_lock, flags); } else { - if (req) { - memcpy(&SCpnt->request, req, sizeof(struct request)); - tablesize = device->host->sg_tablesize; - bhp = bh = req->bh; - if (!tablesize) - bh = NULL; - /* Take a quick look through the table to see how big it is. 
- * We already have our copy of req, so we can mess with that - * if we want to. - */ - while (req->nr_sectors && bh) { - bhp = bhp->b_reqnext; - if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp)) - tablesize--; - req->nr_sectors -= bh->b_size >> 9; - req->sector += bh->b_size >> 9; - if (!tablesize) - break; - bh = bhp; - } - if (req->nr_sectors && bh && bh->b_reqnext) { /* Any leftovers? */ - SCpnt->request.bhtail = bh; - req->bh = bh->b_reqnext; /* Divide request */ - bh->b_reqnext = NULL; - bh = req->bh; - /* Now reset things so that req looks OK */ - SCpnt->request.nr_sectors -= req->nr_sectors; - req->current_nr_sectors = bh->b_size >> 9; - req->buffer = bh->b_data; - SCpnt->request.sem = NULL; /* Wait until whole thing done */ - } else { - req->rq_status = RQ_INACTIVE; - *reqp = req->next; - wake_up(&wait_for_request); - } - } else { - SCpnt->request.rq_status = RQ_SCSI_BUSY; - SCpnt->request.sem = NULL; /* And no one is waiting for this - * to complete */ - } - atomic_inc(&SCpnt->host->host_active); - SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", - SCpnt->target, - atomic_read(&SCpnt->host->host_active))); - break; + return NULL; } } + SCpnt->request.rq_status = RQ_SCSI_BUSY; + SCpnt->request.sem = NULL; /* And no one is waiting for this + * to complete */ + atomic_inc(&SCpnt->host->host_active); + SCpnt->use_sg = 0; /* Reset the scatter-gather flag */ SCpnt->old_use_sg = 0; SCpnt->transfersize = 0; /* No default transfer size */ SCpnt->cmd_len = 0; - SCpnt->resid = 0; - SCpnt->underflow = 0; /* Do not flag underflow conditions */ - /* Since not everyone seems to set the device info correctly - * before Scsi_Cmnd gets send out to scsi_do_command, we do it here. - * FIXME(eric) This doesn't make any sense. 
- */ - SCpnt->channel = device->channel; - SCpnt->lun = device->lun; - SCpnt->target = device->id; + SCpnt->underflow = 0; /* Do not flag underflow conditions */ SCpnt->state = SCSI_STATE_INITIALIZING; SCpnt->owner = SCSI_OWNER_HIGHLEVEL; + spin_unlock_irqrestore(&device_request_lock, flags); + + SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", + SCpnt->target, + atomic_read(&SCpnt->host->host_active))); + return SCpnt; } @@ -1354,6 +1168,9 @@ Scsi_Cmnd *scsi_allocate_device(struct request ** reqp, Scsi_Device * device, */ void scsi_release_command(Scsi_Cmnd * SCpnt) { + unsigned long flags; + spin_lock_irqsave(&device_request_lock, flags); + SCpnt->request.rq_status = RQ_INACTIVE; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; @@ -1379,21 +1196,25 @@ void scsi_release_command(Scsi_Cmnd * SCpnt) atomic_read(&SCpnt->host->eh_wait->count))); up(SCpnt->host->eh_wait); } + spin_unlock_irqrestore(&device_request_lock, flags); } /* * This is inline because we have stack problemes if we recurse to deeply. */ -inline int internal_cmnd(Scsi_Cmnd * SCpnt) +int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt) { #ifdef DEBUG_DELAY unsigned long clock; #endif struct Scsi_Host *host; int rtn = 0; + unsigned long flags; unsigned long timeout; + ASSERT_LOCK(&io_request_lock, 0); + #if DEBUG unsigned long *ret = 0; #ifdef __mips__ @@ -1427,11 +1248,9 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt) * interrupt handler (assuming there is one irq-level per * host). */ - spin_unlock_irq(&io_request_lock); while (--ticks_remaining >= 0) mdelay(1 + 999 / HZ); host->resetting = 0; - spin_lock_irq(&io_request_lock); } if (host->hostt->use_new_eh_code) { scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out); @@ -1444,7 +1263,7 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt) * We will use a queued command if possible, otherwise we will emulate the * queuing and calling of completion function ourselves. 
*/ - SCSI_LOG_MLQUEUE(3, printk("internal_cmnd (host = %d, channel = %d, target = %d, " + SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, " "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n", SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd, SCpnt->buffer, SCpnt->bufflen, SCpnt->done)); @@ -1460,35 +1279,42 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt) * passes a meaningful return value. */ if (host->hostt->use_new_eh_code) { + spin_lock_irqsave(&io_request_lock, flags); rtn = host->hostt->queuecommand(SCpnt, scsi_done); + spin_unlock_irqrestore(&io_request_lock, flags); if (rtn != 0) { + scsi_delete_timer(SCpnt); scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY); } } else { + spin_lock_irqsave(&io_request_lock, flags); host->hostt->queuecommand(SCpnt, scsi_old_done); + spin_unlock_irqrestore(&io_request_lock, flags); } } else { int temp; SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command)); + spin_lock_irqsave(&io_request_lock, flags); temp = host->hostt->command(SCpnt); SCpnt->result = temp; #ifdef DEBUG_DELAY + spin_unlock_irqrestore(&io_request_lock, flags); clock = jiffies + 4 * HZ; - spin_unlock_irq(&io_request_lock); while (time_before(jiffies, clock)) barrier(); - spin_lock_irq(&io_request_lock); printk("done(host = %d, result = %04x) : routine at %p\n", host->host_no, temp, host->hostt->command); + spin_lock_irqsave(&io_request_lock, flags); #endif if (host->hostt->use_new_eh_code) { scsi_done(SCpnt); } else { scsi_old_done(SCpnt); } + spin_unlock_irqrestore(&io_request_lock, flags); } - SCSI_LOG_MLQUEUE(3, printk("leaving internal_cmnd()\n")); + SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n")); return rtn; } @@ -1499,6 +1325,32 @@ inline int internal_cmnd(Scsi_Cmnd * SCpnt) * drivers go for the same host at the same time. */ +/* + * Function: scsi_do_cmd + * + * Purpose: Queue a SCSI command + * + * Arguments: SCpnt - command descriptor. 
+ * cmnd - actual SCSI command to be performed. + * buffer - data buffer. + * bufflen - size of data buffer. + * done - completion function to be run. + * timeout - how long to let it run before timeout. + * retries - number of retries we allow. + * + * Lock status: With the new queueing code, this is SMP-safe, and no locks + * need be held upon entry. The old queueing code the lock was + * assumed to be held upon entry. + * + * Returns: Pointer to command descriptor. + * + * Notes: Prior to the new queue code, this function was not SMP-safe. + * Also, this function is now only used for queueing requests + * for things like ioctls and character device requests - this + * is because we essentially just inject a request into the + * queue for the device. Normal block device handling manipulates + * the queue directly. + */ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd, void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *), int timeout, int retries) @@ -1506,6 +1358,8 @@ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd, struct Scsi_Host *host = SCpnt->host; Scsi_Device *device = SCpnt->device; + ASSERT_LOCK(&io_request_lock, 0); + SCpnt->owner = SCSI_OWNER_MIDLEVEL; SCSI_LOG_MLQUEUE(4, @@ -1533,16 +1387,6 @@ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd, * ourselves. */ - SCpnt->pid = scsi_pid++; - - while (SCSI_BLOCK((Scsi_Device *) NULL, host)) { - spin_unlock(&io_request_lock); /* FIXME!!! */ - SCSI_SLEEP(&host->host_wait, SCSI_BLOCK((Scsi_Device *) NULL, host)); - spin_lock_irq(&io_request_lock); /* FIXME!!! */ - } - - if (host->block) - host_active = host; host->host_busy++; device->device_busy++; @@ -1583,39 +1427,61 @@ void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd, SCpnt->internal_timeout = NORMAL_TIMEOUT; SCpnt->abort_reason = 0; SCpnt->result = 0; - internal_cmnd(SCpnt); + + /* + * At this point, we merely set up the command, stick it in the normal + * request queue, and return. 
Eventually that request will come to the + * top of the list, and will be dispatched. + */ + scsi_insert_special_cmd(SCpnt, 0); SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n")); } -/* This function is the mid-level interrupt routine, which decides how +/* + * This function is the mid-level interrupt routine, which decides how * to handle error conditions. Each invocation of this function must * do one and *only* one of the following: * * 1) Insert command in BH queue. * 2) Activate error handler for host. * - * FIXME(eric) - I am concerned about stack overflow (still). An interrupt could - * come while we are processing the bottom queue, which would cause another command - * to be stuffed onto the bottom queue, and it would in turn be processed as that - * interrupt handler is returning. Given a sufficiently steady rate of returning - * commands, this could cause the stack to overflow. I am not sure what is the most - * appropriate solution here - we should probably keep a depth count, and not process - * any commands while we still have a bottom handler active higher in the stack. + * FIXME(eric) - I am concerned about stack overflow (still). An + * interrupt could come while we are processing the bottom queue, + * which would cause another command to be stuffed onto the bottom + * queue, and it would in turn be processed as that interrupt handler + * is returning. Given a sufficiently steady rate of returning + * commands, this could cause the stack to overflow. I am not sure + * what is the most appropriate solution here - we should probably + * keep a depth count, and not process any commands while we still + * have a bottom handler active higher in the stack. * - * There is currently code in the bottom half handler to monitor recursion in the bottom - * handler and report if it ever happens. If this becomes a problem, it won't be hard to - * engineer something to deal with it so that only the outer layer ever does any real - * processing. 
+ * There is currently code in the bottom half handler to monitor + * recursion in the bottom handler and report if it ever happens. If + * this becomes a problem, it won't be hard to engineer something to + * deal with it so that only the outer layer ever does any real + * processing. */ void scsi_done(Scsi_Cmnd * SCpnt) { + unsigned long flags; + int tstatus; /* * We don't have to worry about this one timing out any more. */ - scsi_delete_timer(SCpnt); + tstatus = scsi_delete_timer(SCpnt); + /* + * If we are unable to remove the timer, it means that the command + * has already timed out. In this case, we have no choice but to + * let the timeout function run, as we have no idea where in fact + * that function could really be. It might be on another processor, + * etc, etc. + */ + if (!tstatus) { + return; + } /* Set the serial numbers back to zero */ SCpnt->serial_number = 0; @@ -1631,6 +1497,8 @@ void scsi_done(Scsi_Cmnd * SCpnt) SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt)); return; } + spin_lock_irqsave(&scsi_bhqueue_lock, flags); + SCpnt->serial_number_at_timeout = 0; SCpnt->state = SCSI_STATE_BHQUEUE; SCpnt->owner = SCSI_OWNER_BH_HANDLER; @@ -1646,6 +1514,10 @@ void scsi_done(Scsi_Cmnd * SCpnt) * We already have the io_request_lock here, since we are called from the * interrupt handler or the error handler. (DB) * + * This may be true at the moment, but I would like to wean all of the low + * level drivers away from using io_request_lock. Technically they should + * all use their own locking. I am adding a small spinlock to protect + * this datastructure to make it safe for that day. (ERY) */ if (!scsi_bh_queue_head) { scsi_bh_queue_head = SCpnt; @@ -1655,6 +1527,7 @@ void scsi_done(Scsi_Cmnd * SCpnt) scsi_bh_queue_tail = SCpnt; } + spin_unlock_irqrestore(&scsi_bhqueue_lock, flags); /* * Mark the bottom half handler to be run. 
*/ @@ -1676,6 +1549,13 @@ void scsi_done(Scsi_Cmnd * SCpnt) * race condition when scsi_done is called after a command has already * timed out but before the time out is processed by the error handler. * (DB) + * + * I believe I have corrected this. We simply monitor the return status of + * del_timer() - if this comes back as 0, it means that the timer has fired + * and that a timeout is in progress. I have modified scsi_done() such + * that in this instance the command is never inserted in the bottom + * half queue. Thus the only time we hold the lock here is when + * we wish to atomically remove the contents of the queue. */ void scsi_bottom_half_handler(void) { @@ -1683,14 +1563,14 @@ void scsi_bottom_half_handler(void) Scsi_Cmnd *SCnext; unsigned long flags; - spin_lock_irqsave(&io_request_lock, flags); while (1 == 1) { + spin_lock_irqsave(&scsi_bhqueue_lock, flags); SCpnt = scsi_bh_queue_head; scsi_bh_queue_head = NULL; + spin_unlock_irqrestore(&scsi_bhqueue_lock, flags); if (SCpnt == NULL) { - spin_unlock_irqrestore(&io_request_lock, flags); return; } SCnext = SCpnt->bh_next; @@ -1774,8 +1654,6 @@ void scsi_bottom_half_handler(void) } /* while(1==1) */ - spin_unlock_irqrestore(&io_request_lock, flags); - } /* @@ -1796,9 +1674,7 @@ int scsi_retry_command(Scsi_Cmnd * SCpnt) SCpnt->request_bufflen = SCpnt->bufflen; SCpnt->use_sg = SCpnt->old_use_sg; SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->result = 0; - memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer); - return internal_cmnd(SCpnt); + return scsi_dispatch_cmd(SCpnt); } /* @@ -1813,32 +1689,14 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt) struct Scsi_Host *host; Scsi_Device *device; + ASSERT_LOCK(&io_request_lock, 0); + host = SCpnt->host; device = SCpnt->device; host->host_busy--; /* Indicate that we are free */ device->device_busy--; /* Decrement device usage counter. 
*/ - if (host->block && host->host_busy == 0) { - host_active = NULL; - - /* For block devices "wake_up" is done in end_scsi_request */ - if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) { - struct Scsi_Host *next; - - for (next = host->block; next != host; next = next->block) - wake_up(&next->host_wait); - } - } - /* - * Now try and drain the mid-level queue if any commands have been - * inserted. Check to see whether the queue even has anything in - * it first, as otherwise this is useless overhead. - */ - if (SCpnt->host->pending_commands != NULL) { - scsi_mlqueue_finish(SCpnt->host, SCpnt->device); - } - wake_up(&host->host_wait); /* * If we have valid sense information, then some kind of recovery @@ -1864,9 +1722,34 @@ static int scsi_register_host(Scsi_Host_Template *); static void scsi_unregister_host(Scsi_Host_Template *); #endif +/* + * Function: scsi_malloc + * + * Purpose: Allocate memory from the DMA-safe pool. + * + * Arguments: len - amount of memory we need. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. + * + * Returns: Pointer to memory block. + * + * Notes: Prior to the new queue code, this function was not SMP-safe. + * This function can only allocate in units of sectors + * (i.e. 512 bytes). + * + * We cannot use the normal system allocator becuase we need + * to be able to guarantee that we can process a complete disk + * I/O request without touching the system allocator. Think + * about it - if the system were heavily swapping, and tried to + * write out a block of memory to disk, and the SCSI code needed + * to allocate more memory in order to be able to write the + * data to disk, you would wedge the system. 
+ */ void *scsi_malloc(unsigned int len) { unsigned int nbits, mask; + unsigned long flags; + int i, j; if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE) return NULL; @@ -1874,6 +1757,8 @@ void *scsi_malloc(unsigned int len) nbits = len >> 9; mask = (1 << nbits) - 1; + spin_lock_irqsave(&allocator_request_lock, flags); + for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++) for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) { if ((dma_malloc_freelist[i] & (mask << j)) == 0) { @@ -1883,15 +1768,37 @@ void *scsi_malloc(unsigned int len) SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9))); printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)); #endif + spin_unlock_irqrestore(&allocator_request_lock, flags); return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9)); } } + spin_unlock_irqrestore(&allocator_request_lock, flags); return NULL; /* Nope. No more */ } +/* + * Function: scsi_free + * + * Purpose: Free memory into the DMA-safe pool. + * + * Arguments: ptr - data block we are freeing. + * len - size of block we are freeing. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. + * + * Returns: Nothing + * + * Notes: This function *must* only be used to free memory + * allocated from scsi_malloc(). + * + * Prior to the new queue code, this function was not SMP-safe. + * This function can only allocate in units of sectors + * (i.e. 512 bytes). 
+ */ int scsi_free(void *obj, unsigned int len) { unsigned int page, sector, nbits, mask; + unsigned long flags; #ifdef DEBUG unsigned long ret = 0; @@ -1905,6 +1812,8 @@ int scsi_free(void *obj, unsigned int len) SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len)); #endif + spin_lock_irqsave(&allocator_request_lock, flags); + for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) { unsigned long page_addr = (unsigned long) dma_malloc_pages[page]; if ((unsigned long) obj >= page_addr && @@ -1927,6 +1836,7 @@ int scsi_free(void *obj, unsigned int len) } scsi_dma_free_sectors += nbits; dma_malloc_freelist[page] &= ~(mask << sector); + spin_unlock_irqrestore(&allocator_request_lock, flags); return 0; } } @@ -1977,10 +1887,13 @@ void scsi_init_free(char *ptr, unsigned int size) void scsi_build_commandblocks(Scsi_Device * SDpnt) { + unsigned long flags; struct Scsi_Host *host = SDpnt->host; int j; Scsi_Cmnd *SCpnt; + spin_lock_irqsave(&device_request_lock, flags); + if (SDpnt->queue_depth == 0) SDpnt->queue_depth = host->cmd_per_lun; SDpnt->device_queue = NULL; @@ -2020,8 +1933,10 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt) SDpnt->queue_depth, j); SDpnt->queue_depth = j; SDpnt->has_cmdblocks = (0 != j); - } else + } else { SDpnt->has_cmdblocks = 1; + } + spin_unlock_irqrestore(&device_request_lock, flags); } static ssize_t proc_scsi_gen_write(struct file * file, const char * buf, @@ -2450,6 +2365,7 @@ static ssize_t proc_scsi_gen_write(struct file * file, const char * buf, if (HBA_ptr->host_queue == scd) { HBA_ptr->host_queue = scd->next; } + blk_cleanup_queue(&scd->request_queue); scsi_init_free((char *) scd, sizeof(Scsi_Device)); } else { goto out; @@ -2464,13 +2380,27 @@ out: #endif /* - * Go through the device list and recompute the most appropriate size - * for the dma pool. Then grab more memory (as required). 
+ * Function: resize_dma_pool + * + * Purpose: Ensure that the DMA pool is sufficiently large to be + * able to guarantee that we can always process I/O requests + * without calling the system allocator. + * + * Arguments: None. + * + * Lock status: No locks assumed to be held. This function is SMP-safe. + * + * Returns: Nothing + * + * Notes: Prior to the new queue code, this function was not SMP-safe. + * Go through the device list and recompute the most appropriate + * size for the dma pool. Then grab more memory (as required). */ static void resize_dma_pool(void) { int i, k; unsigned long size; + unsigned long flags; struct Scsi_Host *shpnt; struct Scsi_Host *host = NULL; Scsi_Device *SDpnt; @@ -2480,6 +2410,8 @@ static void resize_dma_pool(void) unsigned char **new_dma_malloc_pages = NULL; int out_of_space = 0; + spin_lock_irqsave(&allocator_request_lock, flags); + if (!scsi_hostlist) { /* * Free up the DMA pool. @@ -2499,6 +2431,7 @@ static void resize_dma_pool(void) dma_malloc_freelist = NULL; dma_sectors = 0; scsi_dma_free_sectors = 0; + spin_unlock_irqrestore(&allocator_request_lock, flags); return; } /* Next, check to see if we need to extend the DMA buffer pool */ @@ -2569,8 +2502,10 @@ static void resize_dma_pool(void) if (new_dma_sectors < dma_sectors) new_dma_sectors = dma_sectors; #endif - if (new_dma_sectors <= dma_sectors) + if (new_dma_sectors <= dma_sectors) { + spin_unlock_irqrestore(&allocator_request_lock, flags); return; /* best to quit while we are in front */ + } for (k = 0; k < 20; ++k) { /* just in case */ out_of_space = 0; @@ -2621,6 +2556,7 @@ static void resize_dma_pool(void) break; /* found space ... 
*/ } /* end of for loop */ if (out_of_space) { + spin_unlock_irqrestore(&allocator_request_lock, flags); scsi_need_isa_buffer = new_need_isa_buffer; /* some useful info */ printk(" WARNING, not enough memory, pool not expanded\n"); return; @@ -2645,6 +2581,8 @@ static void resize_dma_pool(void) dma_sectors = new_dma_sectors; scsi_need_isa_buffer = new_need_isa_buffer; + spin_unlock_irqrestore(&allocator_request_lock, flags); + #ifdef DEBUG_INIT printk("resize_dma_pool: dma free sectors = %d\n", scsi_dma_free_sectors); printk("resize_dma_pool: dma sectors = %d\n", dma_sectors); @@ -2747,8 +2685,6 @@ static int scsi_register_host(Scsi_Host_Template * tpnt) printk("scsi : %d host%s.\n", next_scsi_host, (next_scsi_host == 1) ? "" : "s"); - scsi_make_blocked_list(); - /* The next step is to call scan_scsis here. This generates the * Scsi_Devices entries */ @@ -2961,6 +2897,7 @@ static void scsi_unregister_host(Scsi_Host_Template * tpnt) } SDpnt->has_cmdblocks = 0; + blk_cleanup_queue(&SDpnt->request_queue); /* Next free up the Scsi_Device structures for this host */ shpnt->host_queue = SDpnt->next; scsi_init_free((char *) SDpnt, sizeof(Scsi_Device)); @@ -3016,7 +2953,6 @@ static void scsi_unregister_host(Scsi_Host_Template * tpnt) (scsi_memory_upper_value - scsi_init_memory_start) / 1024); #endif - scsi_make_blocked_list(); /* There were some hosts that were loaded at boot time, so we cannot do any more than this */ @@ -3249,12 +3185,11 @@ static void scsi_dump_status(int level) printk("Dump of scsi host parameters:\n"); i = 0; for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) { - printk(" %d %d %d : %d %p\n", + printk(" %d %d %d : %d\n", shpnt->host_failed, shpnt->host_busy, atomic_read(&shpnt->host_active), - shpnt->host_blocked, - shpnt->pending_commands); + shpnt->host_blocked); } @@ -3300,10 +3235,10 @@ static void scsi_dump_status(int level) /* Now dump the request lists for each block device */ printk("Dump of pending block device requests\n"); for (i = 0; 
i < MAX_BLKDEV; i++) { - if (blk_dev[i].current_request) { + if (blk_dev[i].request_queue.current_request) { struct request *req; printk("%d: ", i); - req = blk_dev[i].current_request; + req = blk_dev[i].request_queue.current_request; while (req) { printk("(%s %d %ld %ld %ld) ", kdevname(req->rq_dev), @@ -3318,7 +3253,7 @@ static void scsi_dump_status(int level) } } } - /* printk("wait_for_request = %p\n", &wait_for_request); */ + printk("wait_for_request = %p\n", &wait_for_request); #endif /* CONFIG_SCSI_LOGGING */ /* } */ } #endif /* CONFIG_PROC_FS */ diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h index 3921cf8e23a6..eba0c14b7255 100644 --- a/drivers/scsi/scsi.h +++ b/drivers/scsi/scsi.h @@ -1,13 +1,13 @@ /* * scsi.h Copyright (C) 1992 Drew Eckhardt - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale * generic SCSI package header file by * Initial versions: Drew Eckhardt * Subsequent revisions: Eric Youngdale * * * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. */ @@ -49,6 +49,21 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE]; #define SCSI_TIMEOUT (2*HZ) #endif +/* + * Used for debugging the new queueing code. We want to make sure + * that the lock state is consistent with design. Only do this in + * the user space simulator. 
+ */ +#define ASSERT_LOCK(_LOCK, _COUNT) + +#if defined(__SMP__) && defined(CONFIG_USER_DEBUG) +#undef ASSERT_LOCK +#define ASSERT_LOCK(_LOCK,_COUNT) \ + { if( (_LOCK)->lock != _COUNT ) \ + panic("Lock count inconsistent %s %d\n", __FILE__, __LINE__); \ + } +#endif + /* * Use these to separate status msg and our bytes * @@ -378,6 +393,18 @@ extern int scsi_sense_valid(Scsi_Cmnd *); extern int scsi_decide_disposition(Scsi_Cmnd * SCpnt); extern int scsi_block_when_processing_errors(Scsi_Device *); extern void scsi_sleep(int); +extern int scsi_partsize(struct buffer_head *bh, unsigned long capacity, + unsigned int *cyls, unsigned int *hds, + unsigned int *secs); + +/* + * Prototypes for functions in scsi_lib.c + */ +extern void initialize_merge_fn(Scsi_Device * SDpnt); +extern void scsi_request_fn(request_queue_t * q); + +extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int); +extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt); /* * scsi_abort aborts the current command that is executing on host host. 
@@ -386,17 +413,18 @@ extern void scsi_sleep(int); */ extern void scsi_do_cmd(Scsi_Cmnd *, const void *cmnd, - void *buffer, unsigned bufflen, - void (*done)(struct scsi_cmnd *), - int timeout, int retries); - -extern void scsi_wait_cmd (Scsi_Cmnd *, const void *cmnd , void *buffer, unsigned bufflen, void (*done) (struct scsi_cmnd *), int timeout, int retries); +extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd, + void *buffer, unsigned bufflen, + void (*done) (struct scsi_cmnd *), + int timeout, int retries); -extern Scsi_Cmnd *scsi_allocate_device(struct request **, Scsi_Device *, int); +extern void scsi_request_fn(request_queue_t * q); + +extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int); extern Scsi_Cmnd *scsi_request_queueable(struct request *, Scsi_Device *); @@ -428,9 +456,10 @@ struct scsi_device { wait_queue_head_t device_wait; /* Used to wait if device is busy */ struct Scsi_Host *host; + request_queue_t request_queue; volatile unsigned short device_busy; /* commands actually active on low-level */ - void (*scsi_request_fn) (void); /* Used to jumpstart things after an - * ioctl */ + int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize + new request */ Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */ /* public: */ @@ -438,6 +467,8 @@ struct scsi_device { unsigned int manufacturer; /* Manufacturer of device, for using * vendor-specific cmd's */ + unsigned sector_size; /* size in bytes */ + int attached; /* # of high level drivers attached to * this */ int access_count; /* Count of open channels/mounts */ @@ -475,6 +506,10 @@ struct scsi_device { unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN * because we did a bus reset. */ unsigned device_blocked:1; /* Device returned QUEUE_FULL. 
*/ + unsigned ten:1; /* support ten byte read / write */ + unsigned remap:1; /* support remapping */ + unsigned starved:1; /* unable to process commands because + host busy */ }; @@ -577,16 +612,16 @@ struct scsi_cmnd { reconnects. Probably == sector size */ - int resid; /* Number of bytes requested to be + int resid; /* Number of bytes requested to be transferred less actual number transferred (0 if not supported) */ struct request request; /* A copy of the command we are working on */ - unsigned char sense_buffer[64]; /* obtained by REQUEST SENSE when - CHECK CONDITION is received on - original command (auto-sense) */ + unsigned char sense_buffer[64]; /* obtained by REQUEST SENSE when + CHECK CONDITION is received on + original command (auto-sense) */ unsigned flags; @@ -630,6 +665,14 @@ struct scsi_cmnd { unsigned long pid; /* Process ID, starts at 0 */ }; +/* + * Flag bits for the internal_timeout array + */ +#define NORMAL_TIMEOUT 0 +#define IN_ABORT 1 +#define IN_RESET 2 +#define IN_RESET2 4 +#define IN_RESET3 8 /* * Definitions and prototypes used for scsi mid-level queue. 
@@ -640,61 +683,16 @@ struct scsi_cmnd { extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason); extern int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device); +extern Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, + int sectors); + +extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, + int block_sectors); + #if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR) #include "hosts.h" -static Scsi_Cmnd *end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors) -{ - struct request *req; - struct buffer_head *bh; - - req = &SCpnt->request; - req->errors = 0; - if (!uptodate) { - printk(DEVICE_NAME " I/O error: dev %s, sector %lu\n", - kdevname(req->rq_dev), req->sector); - } - do { - if ((bh = req->bh) != NULL) { - req->bh = bh->b_reqnext; - req->nr_sectors -= bh->b_size >> 9; - req->sector += bh->b_size >> 9; - bh->b_reqnext = NULL; - bh->b_end_io(bh, uptodate); - sectors -= bh->b_size >> 9; - if ((bh = req->bh) != NULL) { - req->current_nr_sectors = bh->b_size >> 9; - if (req->nr_sectors < req->current_nr_sectors) { - req->nr_sectors = req->current_nr_sectors; - printk("end_scsi_request: buffer-list destroyed\n"); - } - } - } - } while (sectors && bh); - if (req->bh) { - req->buffer = bh->b_data; - return SCpnt; - } - DEVICE_OFF(req->rq_dev); - if (req->sem != NULL) { - up(req->sem); - } - add_blkdev_randomness(MAJOR(req->rq_dev)); - - if (SCpnt->host->block) { - struct Scsi_Host *next; - - for (next = SCpnt->host->block; next != SCpnt->host; - next = next->block) - wake_up(&next->host_wait); - } - wake_up(&wait_for_request); - wake_up(&SCpnt->device->device_wait); - scsi_release_command(SCpnt); - return NULL; -} - /* This is just like INIT_REQUEST, but we need to be aware of the fact * that an interrupt may start another request, so we run this with interrupts diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 2b23854b3fce..1e5eb00c1d9a 100644 --- a/drivers/scsi/scsi_debug.c +++ 
b/drivers/scsi/scsi_debug.c @@ -32,16 +32,18 @@ /* A few options that we want selected */ -#define NR_HOSTS_PRESENT 20 -#define NR_FAKE_DISKS 6 -#define N_HEAD 32 -#define N_SECTOR 64 -#define DISK_READONLY(TGT) (1) +#define NR_HOSTS_PRESENT 1 +#define NR_FAKE_DISKS 3 +#define N_HEAD 255 +#define N_SECTOR 63 +#define N_CYLINDER 524 +#define DISK_READONLY(TGT) (0) #define DISK_REMOVEABLE(TGT) (1) +#define DEVICE_TYPE(TGT) (TGT == 2 ? TYPE_TAPE : TYPE_DISK); /* Do not attempt to use a timer to simulate a real disk with latency */ /* Only use this in the actual kernel, not in the simulator. */ -/* #define IMMEDIATE */ +#define IMMEDIATE /* Skip some consistency checking. Good for benchmarking */ #define SPEEDY @@ -58,11 +60,15 @@ static int NR_REAL = -1; #define START_PARTITION 4 /* Time to wait before completing a command */ -#define DISK_SPEED (HZ/10) /* 100ms */ -#define CAPACITY (0x80000) +#define DISK_SPEED (HZ/10) /* 100ms */ +#define CAPACITY (N_HEAD * N_SECTOR * N_CYLINDER) +#define SIZE(TGT) (TGT == 2 ? 2248 : 512) static int starts[] = -{N_HEAD, N_HEAD * N_SECTOR, 50000, CAPACITY, 0}; +{N_SECTOR, + N_HEAD * N_SECTOR, /* Single cylinder */ + N_HEAD * N_SECTOR * 4, + CAPACITY, 0}; static int npart = 0; #include "scsi_debug.h" @@ -112,21 +118,25 @@ static int npart = 0; typedef void (*done_fct_t) (Scsi_Cmnd *); -static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] = {NULL,}; +static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] = +{NULL,}; static void scsi_debug_intr_handle(unsigned long); static struct timer_list timeout[SCSI_DEBUG_MAILBOXES]; -Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES] = {NULL,}; -static char SCrst[SCSI_DEBUG_MAILBOXES] = {0,}; +Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES] = +{NULL,}; +static char SCrst[SCSI_DEBUG_MAILBOXES] = +{0,}; /* * Semaphore used to simulate bus lockups. 
*/ static int scsi_debug_lockup = 0; -static char sense_buffer[128] = {0,}; +static char sense_buffer[128] = +{0,}; static void scsi_dump(Scsi_Cmnd * SCpnt, int flag) { @@ -197,6 +207,14 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) sgcount = 0; sgpnt = NULL; + /* + * The io_request_lock *must* be held at this point. + */ + if( io_request_lock.lock == 0 ) + { + printk("Warning - io_request_lock is not held in queuecommand\n"); + } + /* * If we are being notified of the mid-level reposessing a command due to timeout, * just return. @@ -242,6 +260,10 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) SCpnt->result = 0; done(SCpnt); return 0; + case START_STOP: + SCSI_LOG_LLQUEUE(3, printk("START_STOP\n")); + scsi_debug_errsts = 0; + break; case ALLOW_MEDIUM_REMOVAL: if (cmd[4]) { SCSI_LOG_LLQUEUE(2, printk("Medium removal inhibited...")); @@ -253,7 +275,7 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) case INQUIRY: SCSI_LOG_LLQUEUE(3, printk("Inquiry...(%p %d)\n", buff, bufflen)); memset(buff, 0, bufflen); - buff[0] = TYPE_DISK; + buff[0] = DEVICE_TYPE(target); buff[1] = DISK_REMOVEABLE(target) ? 
0x80 : 0; /* Removable disk */ buff[2] = 1; buff[4] = 33 - 5; @@ -277,7 +299,10 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) buff[1] = (CAPACITY >> 16) & 0xff; buff[2] = (CAPACITY >> 8) & 0xff; buff[3] = CAPACITY & 0xff; - buff[6] = 2; /* 512 byte sectors */ + buff[4] = 0; + buff[5] = 0; + buff[6] = (SIZE(target) >> 8) & 0xff; /* 512 byte sectors */ + buff[7] = SIZE(target) & 0xff; scsi_debug_errsts = 0; break; case READ_10: @@ -327,15 +352,23 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) p = (struct partition *) (buff + 0x1be); i = 0; while (starts[i + 1]) { + int start_cyl, end_cyl; + + start_cyl = starts[i] / N_HEAD / N_SECTOR; + end_cyl = (starts[i + 1] - 1) / N_HEAD / N_SECTOR; + p->boot_ind = 0; + + p->head = (i == 0 ? 1 : 0); + p->sector = 1 | ((start_cyl >> 8) << 6); + p->cyl = (start_cyl & 0xff); + + p->end_head = N_HEAD - 1; + p->end_sector = N_SECTOR | ((end_cyl >> 8) << 6); + p->end_cyl = (end_cyl & 0xff); + p->start_sect = starts[i]; p->nr_sects = starts[i + 1] - starts[i]; p->sys_ind = 0x81; /* Linux partition */ - p->head = (i == 0 ? 
1 : 0); - p->sector = 1; - p->cyl = starts[i] / N_HEAD / N_SECTOR; - p->end_head = N_HEAD - 1; - p->end_sector = N_SECTOR; - p->end_cyl = starts[i + 1] / N_HEAD / N_SECTOR; p++; i++; }; @@ -465,6 +498,8 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) #ifdef IMMEDIATE if (!scsi_debug_lockup) { SCpnt->result = scsi_debug_errsts; + SCint[i] = SCpnt; + do_done[i] = done; scsi_debug_intr_handle(i); /* No timer - do this one right away */ } restore_flags(flags); @@ -490,24 +525,6 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) return 0; } -volatile static int internal_done_flag = 0; -volatile static int internal_done_errcode = 0; -static void internal_done(Scsi_Cmnd * SCpnt) -{ - internal_done_errcode = SCpnt->result; - ++internal_done_flag; -} - -int scsi_debug_command(Scsi_Cmnd * SCpnt) -{ - DEB(printk("scsi_debug_command: ..calling scsi_debug_queuecommand\n")); - scsi_debug_queuecommand(SCpnt, internal_done); - - while (!internal_done_flag); - internal_done_flag = 0; - return internal_done_errcode; -} - /* A "high" level interrupt handler. This should be called once per jiffy * to simulate a regular scsi disk. We use a timer to do this. */ @@ -589,7 +606,7 @@ int scsi_debug_biosparam(Disk * disk, kdev_t dev, int *info) int size = disk->capacity; info[0] = N_HEAD; info[1] = N_SECTOR; - info[2] = (size + 2047) >> 11; + info[2] = N_CYLINDER; if (info[2] >= 1024) info[2] = 1024; return 0; @@ -684,6 +701,21 @@ int scsi_debug_proc_info(char *buffer, char **start, off_t offset, return (len); } +#ifdef CONFIG_USER_DEBUG +/* + * This is a hack for the user space emulator. It allows us to + * "insert" arbitrary numbers of additional drivers. 
+ */ +void *scsi_debug_get_handle(void) +{ + static Scsi_Host_Template driver_copy = SCSI_DEBUG; + void *rtn; + rtn = kmalloc(sizeof(driver_copy), GFP_ATOMIC); + memcpy(rtn, (void *) &driver_copy, sizeof(driver_copy)); + return rtn; +} +#endif + #ifdef MODULE /* Eventually this will go into an include file, but this will be later */ Scsi_Host_Template driver_template = SCSI_DEBUG; diff --git a/drivers/scsi/scsi_debug.h b/drivers/scsi/scsi_debug.h index 6f22616b7535..357b3f5fc22c 100644 --- a/drivers/scsi/scsi_debug.h +++ b/drivers/scsi/scsi_debug.h @@ -27,16 +27,15 @@ int scsi_debug_proc_info(char *, char **, off_t, int, int, int); #define SCSI_DEBUG {proc_info: scsi_debug_proc_info, \ name: "SCSI DEBUG", \ detect: scsi_debug_detect, \ - command: scsi_debug_command, \ queuecommand: scsi_debug_queuecommand, \ abort: scsi_debug_abort, \ reset: scsi_debug_reset, \ bios_param: scsi_debug_biosparam, \ can_queue: SCSI_DEBUG_CANQUEUE, \ this_id: 7, \ - sg_tablesize: SG_ALL, \ + sg_tablesize: 16, \ cmd_per_lun: 3, \ - unchecked_isa_dma: 1, \ + unchecked_isa_dma: 0, \ use_clustering: ENABLE_CLUSTERING, \ use_new_eh_code: 1, \ } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 4663d694b1cf..08deecf39f59 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -35,11 +35,13 @@ #include "hosts.h" #include "constants.h" -#ifdef MODULE +/* + * We must always allow SHUTDOWN_SIGS. Even if we are not a module, + * the host drivers that we are using may be loaded as modules, and + * when we unload these, we need to ensure that the error handler thread + * can be shut down. + */ #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM)) -#else -#define SHUTDOWN_SIGS (0UL) -#endif #ifdef DEBUG #define SENSE_TIMEOUT SCSI_TIMEOUT @@ -128,7 +130,9 @@ void scsi_add_timer(Scsi_Cmnd * SCset, * * Arguments: SCset - command that we are canceling timer for. * - * Returns: Amount of time remaining before command would have timed out. 
+ * Returns: 1 if we were able to detach the timer. 0 if we + * blew it, and the timer function has already started + * to run. * * Notes: This should be turned into an inline function. */ @@ -136,8 +140,7 @@ int scsi_delete_timer(Scsi_Cmnd * SCset) { int rtn; - rtn = jiffies - SCset->eh_timeout.expires; - del_timer(&SCset->eh_timeout); + rtn = del_timer(&SCset->eh_timeout); SCSI_LOG_ERROR_RECOVERY(5, printk("Clearing timer for command %p\n", SCset)); @@ -415,6 +418,7 @@ STATIC int scsi_request_sense(Scsi_Cmnd * SCpnt) {REQUEST_SENSE, 0, 0, 0, 255, 0}; unsigned char scsi_result0[256], *scsi_result = NULL; + ASSERT_LOCK(&io_request_lock, 1); memcpy((void *) SCpnt->cmnd, (void *) generic_sense, sizeof(generic_sense)); @@ -563,10 +567,7 @@ void scsi_sleep(int timeout) add_timer(&timer); - spin_unlock_irq(&io_request_lock); down(&sem); - spin_lock_irq(&io_request_lock); - del_timer(&timer); } @@ -583,6 +584,8 @@ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * SCpnt, int timeout) { struct Scsi_Host *host; + ASSERT_LOCK(&io_request_lock, 1); + host = SCpnt->host; retry: @@ -811,7 +814,9 @@ STATIC int scsi_try_bus_reset(Scsi_Cmnd * SCpnt) * If we had a successful bus reset, mark the command blocks to expect * a condition code of unit attention. */ + spin_unlock_irq(&io_request_lock); scsi_sleep(BUS_RESET_SETTLE_TIME); + spin_lock_irq(&io_request_lock); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { @@ -854,7 +859,9 @@ STATIC int scsi_try_host_reset(Scsi_Cmnd * SCpnt) * If we had a successful host reset, mark the command blocks to expect * a condition code of unit attention. 
*/ + spin_unlock_irq(&io_request_lock); scsi_sleep(HOST_RESET_SETTLE_TIME); + spin_lock_irq(&io_request_lock); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { @@ -1164,6 +1171,8 @@ STATIC int scsi_check_sense(Scsi_Cmnd * SCpnt) * * Arguments: host - host that we are restarting * + * Lock status: Assumed that locks are not held upon entry. + * * Returns: Nothing * * Notes: When we entered the error handler, we blocked all further @@ -1172,6 +1181,9 @@ STATIC int scsi_check_sense(Scsi_Cmnd * SCpnt) STATIC void scsi_restart_operations(struct Scsi_Host *host) { Scsi_Device *SDpnt; + unsigned long flags; + + ASSERT_LOCK(&io_request_lock, 0); /* * Next free up anything directly waiting upon the host. This will be @@ -1183,18 +1195,23 @@ STATIC void scsi_restart_operations(struct Scsi_Host *host) wake_up(&host->host_wait); /* - * Finally, block devices need an extra kick in the pants. This is because - * the request queueing mechanism may have queued lots of pending requests - * and there won't be a process waiting in a place where we can simply wake - * it up. Thus we simply go through and call the request function to goose - * the various top level drivers and get things moving again. + * Finally we need to re-initiate requests that may be pending. We will + * have had everything blocked while error handling is taking place, and + * now that error recovery is done, we will need to ensure that these + * requests are started. 
*/ + spin_lock_irqsave(&io_request_lock, flags); for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) { - SCSI_LOG_ERROR_RECOVERY(5, printk("Calling request function to restart things...\n")); - - if (SDpnt->scsi_request_fn != NULL) - (*SDpnt->scsi_request_fn) (); + request_queue_t *q; + if ((host->can_queue > 0 && (host->host_busy >= host->can_queue)) + || (host->host_blocked) + || (SDpnt->device_blocked)) { + break; + } + q = &SDpnt->request_queue; + q->request_fn(q); } + spin_unlock_irqrestore(&io_request_lock, flags); } /* @@ -1241,6 +1258,8 @@ STATIC int scsi_unjam_host(struct Scsi_Host *host) Scsi_Cmnd *SCdone; int timed_out; + ASSERT_LOCK(&io_request_lock, 1); + SCdone = NULL; /* @@ -1524,7 +1543,9 @@ STATIC int scsi_unjam_host(struct Scsi_Host *host) * Due to the spinlock, we will never get out of this * loop without a proper wait (DB) */ + spin_unlock_irq(&io_request_lock); scsi_sleep(1 * HZ); + spin_lock_irq(&io_request_lock); goto next_device; } @@ -1617,7 +1638,9 @@ STATIC int scsi_unjam_host(struct Scsi_Host *host) * Due to the spinlock, we will never get out of this * loop without a proper wait. (DB) */ + spin_unlock_irq(&io_request_lock); scsi_sleep(1 * HZ); + spin_lock_irq(&io_request_lock); goto next_device2; } @@ -1768,11 +1791,11 @@ void scsi_error_handler(void *data) lock_kernel(); /* - * Flush resources + * Flush resources */ - + daemonize(); - + /* * Set the name of this process. */ @@ -1821,6 +1844,9 @@ void scsi_error_handler(void *data) host->eh_active = 0; + /* The spinlock is really needed up to this point. (DB) */ + spin_unlock_irqrestore(&io_request_lock, flags); + /* * Note - if the above fails completely, the action is to take * individual devices offline and flush the queue of any @@ -1830,8 +1856,6 @@ void scsi_error_handler(void *data) */ scsi_restart_operations(host); - /* The spinlock is really needed up to this point. 
(DB) */ - spin_unlock_irqrestore(&io_request_lock, flags); } SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler exiting\n")); diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 1671bd16cf38..3b471d52a6a3 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -19,7 +19,7 @@ #include #define NORMAL_RETRIES 5 -#define NORMAL_TIMEOUT (10 * HZ) +#define IOCTL_NORMAL_TIMEOUT (10 * HZ) #define FORMAT_UNIT_TIMEOUT (2 * 60 * 60 * HZ) #define START_STOP_TIMEOUT (60 * HZ) #define MOVE_MEDIUM_TIMEOUT (5 * 60 * HZ) @@ -69,7 +69,7 @@ static int ioctl_probe(struct Scsi_Host *host, void *buffer) /* * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host. - * The NORMAL_TIMEOUT and NORMAL_RETRIES variables are used. + * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used. * * dev is the SCSI device struct ptr, *(int *) arg is the length of the * input data, if any, not including the command string & counts, @@ -105,22 +105,18 @@ static void scsi_ioctl_done(Scsi_Cmnd * SCpnt) static int ioctl_internal_command(Scsi_Device * dev, char *cmd, int timeout, int retries) { - unsigned long flags; int result; Scsi_Cmnd *SCpnt; Scsi_Device *SDpnt; - spin_lock_irqsave(&io_request_lock, flags); SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", cmd[0])); - SCpnt = scsi_allocate_device(NULL, dev, 1); + SCpnt = scsi_allocate_device(dev, 1); { DECLARE_MUTEX_LOCKED(sem); SCpnt->request.sem = &sem; scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); down(&sem); - spin_lock_irqsave(&io_request_lock, flags); SCpnt->request.sem = NULL; } @@ -167,11 +163,8 @@ static int ioctl_internal_command(Scsi_Device * dev, char *cmd, scsi_release_command(SCpnt); SCpnt = NULL; - if (!SDpnt->was_reset && SDpnt->scsi_request_fn) - (*SDpnt->scsi_request_fn) (); wake_up(&SDpnt->device_wait); - spin_unlock_irqrestore(&io_request_lock, flags); return result; } @@ -183,34 
+176,33 @@ static int ioctl_internal_command(Scsi_Device * dev, char *cmd, * The structure that we are passed should look like: * * struct sdata { - * unsigned int inlen; [i] Length of data to be written to device + * unsigned int inlen; [i] Length of data to be written to device * unsigned int outlen; [i] Length of data to be read from device * unsigned char cmd[x]; [i] SCSI command (6 <= x <= 12). - * [o] Data read from device starts here. - * [o] On error, sense buffer starts here. + * [o] Data read from device starts here. + * [o] On error, sense buffer starts here. * unsigned char wdata[y]; [i] Data written to device starts here. * }; * Notes: - * - The SCSI command length is determined by examining the 1st byte - * of the given command. There is no way to override this. - * - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha). - * - The length (x + y) must be at least OMAX_SB_LEN bytes long to - * accomodate the sense buffer when an error occurs. - * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that - * old code will not be surprised. - * - If a Unix error occurs (e.g. ENOMEM) then the user will receive - * a negative return and the Unix error code in 'errno'. - * If the SCSI command succeeds then 0 is returned. - * Positive numbers returned are the compacted SCSI error codes (4 - * bytes in one int) where the lowest byte is the SCSI status. - * See the drivers/scsi/scsi.h file for more information on this. + * - The SCSI command length is determined by examining the 1st byte + * of the given command. There is no way to override this. + * - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha). + * - The length (x + y) must be at least OMAX_SB_LEN bytes long to + * accomodate the sense buffer when an error occurs. + * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that + * old code will not be surprised. + * - If a Unix error occurs (e.g. 
ENOMEM) then the user will receive + * a negative return and the Unix error code in 'errno'. + * If the SCSI command succeeds then 0 is returned. + * Positive numbers returned are the compacted SCSI error codes (4 + * bytes in one int) where the lowest byte is the SCSI status. + * See the drivers/scsi/scsi.h file for more information on this. * */ -#define OMAX_SB_LEN 16 /* Old sense buffer length */ +#define OMAX_SB_LEN 16 /* Old sense buffer length */ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic) { - unsigned long flags; char *buf; unsigned char cmd[12]; char *cmd_in; @@ -251,9 +243,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic) buf_needed = (buf_needed + 511) & ~511; if (buf_needed > MAX_BUF) buf_needed = MAX_BUF; - spin_lock_irqsave(&io_request_lock, flags); buf = (char *) scsi_malloc(buf_needed); - spin_unlock_irqrestore(&io_request_lock, flags); if (!buf) return -ENOMEM; memset(buf, 0, buf_needed); @@ -299,23 +289,21 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic) retries = NORMAL_RETRIES; break; default: - timeout = NORMAL_TIMEOUT; + timeout = IOCTL_NORMAL_TIMEOUT; retries = NORMAL_RETRIES; break; } #ifndef DEBUG_NO_CMD - spin_lock_irqsave(&io_request_lock, flags); - SCpnt = scsi_allocate_device(NULL, dev, 1); + SCpnt = scsi_allocate_device(dev, 1); { DECLARE_MUTEX_LOCKED(sem); SCpnt->request.sem = &sem; scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); down(&sem); SCpnt->request.sem = NULL; } @@ -339,7 +327,6 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic) } result = SCpnt->result; - spin_lock_irqsave(&io_request_lock, flags); wake_up(&SCpnt->device->device_wait); SDpnt = SCpnt->device; @@ -349,10 +336,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic) if (buf) scsi_free(buf, buf_needed); - if (SDpnt->scsi_request_fn) - (*SDpnt->scsi_request_fn) 
(); - spin_unlock_irqrestore(&io_request_lock, flags); return result; #else { @@ -445,7 +429,7 @@ int scsi_ioctl(Scsi_Device * dev, int cmd, void *arg) scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; scsi_cmd[4] = SCSI_REMOVAL_PREVENT; return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd, - NORMAL_TIMEOUT, NORMAL_RETRIES); + IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); break; case SCSI_IOCTL_DOORUNLOCK: if (!dev->removable || !dev->lockable) @@ -455,14 +439,14 @@ int scsi_ioctl(Scsi_Device * dev, int cmd, void *arg) scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; scsi_cmd[4] = SCSI_REMOVAL_ALLOW; return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd, - NORMAL_TIMEOUT, NORMAL_RETRIES); + IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); case SCSI_IOCTL_TEST_UNIT_READY: scsi_cmd[0] = TEST_UNIT_READY; scsi_cmd[1] = dev->lun << 5; scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; scsi_cmd[4] = 0; return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd, - NORMAL_TIMEOUT, NORMAL_RETRIES); + IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); break; case SCSI_IOCTL_START_UNIT: scsi_cmd[0] = START_STOP; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c new file mode 100644 index 000000000000..f53383d15a93 --- /dev/null +++ b/drivers/scsi/scsi_lib.c @@ -0,0 +1,781 @@ +/* + * scsi_lib.c Copyright (C) 1999 Eric Youngdale + * + * SCSI queueing library. + * Initial versions: Eric Youngdale (eric@andante.org). + * Based upon conversations with large numbers + * of people at Linux Expo. + */ + +/* + * The fundamental purpose of this file is to contain a library of utility + * routines that can be used by low-level drivers. Ultimately the idea + * is that there should be a sufficiently rich number of functions that it + * would be possible for a driver author to fashion a queueing function for + * a low-level driver if they wished. 
Note however that this file also + * contains the "default" versions of these functions, as we don't want to + * go through and retrofit queueing functions into all 30 some-odd drivers. + */ + +#define __NO_VERSION__ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define __KERNEL_SYSCALLS__ + +#include + +#include +#include +#include + +#include "scsi.h" +#include "hosts.h" +#include "constants.h" +#include + +/* + * This entire source file deals with the new queueing code. + */ + +/* + * Function: scsi_insert_special_cmd() + * + * Purpose: Insert pre-formed command into request queue. + * + * Arguments: SCpnt - command that is ready to be queued. + * at_head - boolean. True if we should insert at head + * of queue, false if we should insert at tail. + * + * Lock status: Assumed that lock is not held upon entry. + * + * Returns: Nothing + * + * Notes: This function is called from character device and from + * ioctl types of functions where the caller knows exactly + * what SCSI command needs to be issued. The idea is that + * we merely inject the command into the queue (at the head + * for now), and then call the queue request function to actually + * process it. + */ +int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head) +{ + unsigned long flags; + request_queue_t *q; + + ASSERT_LOCK(&io_request_lock, 0); + + /* + * The SCpnt already contains a request structure - we will doctor the + * thing up with the appropriate values and use that in the actual + * request queue. + */ + q = &SCpnt->device->request_queue; + SCpnt->request.cmd = SPECIAL; + SCpnt->request.special = (void *) SCpnt; + + /* + * For the moment, we insert at the head of the queue. This may turn + * out to be a bad idea, but we will see about that when we get there. 
+ */ + spin_lock_irqsave(&io_request_lock, flags); + + if (at_head) { + SCpnt->request.next = q->current_request; + q->current_request = &SCpnt->request; + } else { + /* + * FIXME(eric) - we always insert at the tail of the list. Otherwise + * ioctl commands would always take precedence over normal I/O. + */ + SCpnt->request.next = NULL; + if (q->current_request == NULL) { + q->current_request = &SCpnt->request; + } else { + struct request *req; + + for (req = q->current_request; req; req = req->next) { + if (req->next == NULL) { + req->next = &SCpnt->request; + } + } + } + } + + /* + * Now hit the requeue function for the queue. If the host is already + * busy, so be it - we have nothing special to do. If the host can queue + * it, then send it off. + */ + q->request_fn(q); + spin_unlock_irqrestore(&io_request_lock, flags); + return 0; +} + +/* + * Function: scsi_init_cmd_errh() + * + * Purpose: Initialize SCpnt fields related to error handling. + * + * Arguments: SCpnt - command that is ready to be queued. + * + * Returns: Nothing + * + * Notes: This function has the job of initializing a number of + * fields related to error handling. Typically this will + * be called once for each command, as required. + */ +int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt) +{ + ASSERT_LOCK(&io_request_lock, 0); + + SCpnt->owner = SCSI_OWNER_MIDLEVEL; + SCpnt->reset_chain = NULL; + SCpnt->serial_number = 0; + SCpnt->serial_number_at_timeout = 0; + SCpnt->flags = 0; + SCpnt->retries = 0; + + SCpnt->abort_reason = 0; + + memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer); + + if (SCpnt->cmd_len == 0) + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + + /* + * We need saved copies of a number of fields - this is because + * error handling may need to overwrite these with different values + * to run different commands, and once error handling is complete, + * we will need to restore these values prior to running the actual + * command. 
+ */ + SCpnt->old_use_sg = SCpnt->use_sg; + SCpnt->old_cmd_len = SCpnt->cmd_len; + memcpy((void *) SCpnt->data_cmnd, + (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd)); + SCpnt->buffer = SCpnt->request_buffer; + SCpnt->bufflen = SCpnt->request_bufflen; + + SCpnt->reset_chain = NULL; + + SCpnt->internal_timeout = NORMAL_TIMEOUT; + SCpnt->abort_reason = 0; + + return 1; +} + +/* + * Function: scsi_queue_next_request() + * + * Purpose: Handle post-processing of completed commands. + * + * Arguments: SCpnt - command that may need to be requeued. + * + * Returns: Nothing + * + * Notes: After command completion, there may be blocks left + * over which weren't finished by the previous command + * this can be for a number of reasons - the main one is + * that a medium error occurred, and the sectors after + * the bad block need to be re-read. + * + * If SCpnt is NULL, it means that the previous command + * was completely finished, and we should simply start + * a new command, if possible. + */ +void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt) +{ + int all_clear; + unsigned long flags; + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + + ASSERT_LOCK(&io_request_lock, 0); + + spin_lock_irqsave(&io_request_lock, flags); + if (SCpnt != NULL) { + + /* + * For some reason, we are not done with this request. + * This happens for I/O errors in the middle of the request, + * in which case we need to request the blocks that come after + * the bad sector. + */ + SCpnt->request.next = q->current_request; + q->current_request = &SCpnt->request; + SCpnt->request.special = (void *) SCpnt; + } + /* + * Just hit the requeue function for the queue. + * FIXME - if this queue is empty, check to see if we might need to + * start requests for other devices attached to the same host. + */ + q->request_fn(q); + + /* + * Now see whether there are other devices on the bus which + * might be starved. If so, hit the request function. 
If we + * don't find any, then it is safe to reset the flag. If we + * find any device that it is starved, it isn't safe to reset the + * flag as the queue function releases the lock and thus some + * other device might have become starved along the way. + */ + SDpnt = (Scsi_Device *) q->queuedata; + SHpnt = SDpnt->host; + all_clear = 1; + if (SHpnt->some_device_starved) { + for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) { + request_queue_t *q; + if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) + || (SHpnt->host_blocked)) { + break; + } + if (SDpnt->device_blocked || !SDpnt->starved) { + continue; + } + q = &SDpnt->request_queue; + q->request_fn(q); + all_clear = 0; + } + if (SDpnt == NULL && all_clear) { + SHpnt->some_device_starved = 0; + } + } + spin_unlock_irqrestore(&io_request_lock, flags); +} + +/* + * Function: scsi_end_request() + * + * Purpose: Post-processing of completed commands called from interrupt + * handler. + * + * Arguments: SCpnt - command that is complete. + * uptodate - 1 if I/O indicates success, 0 for I/O error. + * sectors - number of sectors we want to mark. + * + * Lock status: Assumed that lock is not held upon entry. + * + * Returns: Nothing + * + * Notes: This is called for block device requests in order to + * mark some number of sectors as complete. 
+ */ +Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors) +{ + struct request *req; + struct buffer_head *bh; + + ASSERT_LOCK(&io_request_lock, 0); + + req = &SCpnt->request; + req->errors = 0; + if (!uptodate) { + printk(" I/O error: dev %s, sector %lu\n", + kdevname(req->rq_dev), req->sector); + } + do { + if ((bh = req->bh) != NULL) { + req->bh = bh->b_reqnext; + req->nr_sectors -= bh->b_size >> 9; + req->sector += bh->b_size >> 9; + bh->b_reqnext = NULL; + sectors -= bh->b_size >> 9; + bh->b_end_io(bh, uptodate); + if ((bh = req->bh) != NULL) { + req->current_nr_sectors = bh->b_size >> 9; + if (req->nr_sectors < req->current_nr_sectors) { + req->nr_sectors = req->current_nr_sectors; + printk("scsi_end_request: buffer-list destroyed\n"); + } + } + } + } while (sectors && bh); + + /* + * If there are blocks left over at the end, set up the command + * to queue the remainder of them. + */ + if (req->bh) { + req->buffer = bh->b_data; + return SCpnt; + } + /* + * This request is done. If there is someone blocked waiting for this + * request, wake them up. Typically used to wake up processes trying + * to swap a page into memory. + */ + if (req->sem != NULL) { + up(req->sem); + } + add_blkdev_randomness(MAJOR(req->rq_dev)); + scsi_release_command(SCpnt); + return NULL; +} + +/* + * Function: scsi_io_completion() + * + * Purpose: Completion processing for block device I/O requests. + * + * Arguments: SCpnt - command that is finished. + * + * Lock status: Assumed that no lock is held upon entry. + * + * Returns: Nothing + * + * Notes: This function is matched in terms of capabilities to + * the function that created the scatter-gather list. + * In other words, if there are no bounce buffers + * (the normal case for most drivers), we don't need + * the logic to deal with cleaning up afterwards. 
+ */ +void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, + int block_sectors) +{ + int result = SCpnt->result; + int this_count = SCpnt->bufflen >> 9; + request_queue_t *q = &SCpnt->device->request_queue; + + ASSERT_LOCK(&io_request_lock, 0); + + /* + * Free up any indirection buffers we allocated for DMA purposes. + * For the case of a READ, we need to copy the data out of the + * bounce buffer and into the real buffer. + */ + if (SCpnt->use_sg) { + struct scatterlist *sgpnt; + int i; + + sgpnt = (struct scatterlist *) SCpnt->buffer; + + for (i = 0; i < SCpnt->use_sg; i++) { + if (sgpnt[i].alt_address) { + if (SCpnt->request.cmd == READ) { + memcpy(sgpnt[i].alt_address, + sgpnt[i].address, + sgpnt[i].length); + } + scsi_free(sgpnt[i].address, sgpnt[i].length); + } + } + scsi_free(SCpnt->buffer, SCpnt->sglist_len); + } else { + if (SCpnt->buffer != SCpnt->request.buffer) { + if (SCpnt->request.cmd == READ) { + memcpy(SCpnt->request.buffer, SCpnt->buffer, + SCpnt->bufflen); + } + scsi_free(SCpnt->buffer, SCpnt->bufflen); + } + } + /* + * Next deal with any sectors which we were able to correctly + * handle. + */ + if (good_sectors > 0) { + SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n", + SCpnt->request.nr_sectors, + good_sectors)); + SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg)); + + SCpnt->request.errors = 0; + /* + * If multiple sectors are requested in one buffer, then + * they will have been finished off by the first command. + * If not, then we have a multi-buffer command. + */ + SCpnt = scsi_end_request(SCpnt, 1, good_sectors); + + /* + * If the command completed without error, then either finish off the + * rest of the command, or start a new one. + */ + if (result == 0) { + scsi_queue_next_request(q, SCpnt); + return; + } + } + /* + * Now, if we were good little boys and girls, Santa left us a request + * sense buffer. We can extract information from this, so we + * can choose a block to remap, etc. 
+ */ + if (driver_byte(result) != 0) { + if (suggestion(result) == SUGGEST_REMAP) { +#ifdef REMAP + /* + * Not yet implemented. A read will fail after being remapped, + * a write will call the strategy routine again. + */ + if (SCpnt->device->remap) { + result = 0; + } +#endif + } + if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70 + && (SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) { + if (SCpnt->device->removable) { + /* detected disc change. set a bit and quietly refuse + * further access. + */ + SCpnt->device->changed = 1; + SCpnt = scsi_end_request(SCpnt, 0, this_count); + scsi_queue_next_request(q, SCpnt); + return; + } else { + /* + * Must have been a power glitch, or a bus reset. + * Could not have been a media change, so we just retry + * the request and see what happens. + */ + scsi_queue_next_request(q, SCpnt); + return; + } + } + /* If we had an ILLEGAL REQUEST returned, then we may have + * performed an unsupported command. The only thing this should be + * would be a ten byte read where only a six byte read was supported. + * Also, on a system where READ CAPACITY failed, we have have read + * past the end of the disk. 
+ */ + + switch (SCpnt->sense_buffer[2]) { + case ILLEGAL_REQUEST: + if (SCpnt->device->ten) { + SCpnt->device->ten = 0; + scsi_queue_next_request(q, SCpnt); + result = 0; + } else { + SCpnt = scsi_end_request(SCpnt, 0, this_count); + scsi_queue_next_request(q, SCpnt); + return; + } + break; + case NOT_READY: + printk(KERN_INFO "Device %x not ready.\n", + SCpnt->request.rq_dev); + SCpnt = scsi_end_request(SCpnt, 0, this_count); + scsi_queue_next_request(q, SCpnt); + return; + break; + case MEDIUM_ERROR: + case VOLUME_OVERFLOW: + printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ", + SCpnt->host->host_no, (int) SCpnt->channel, + (int) SCpnt->target, (int) SCpnt->lun); + print_command(SCpnt->cmnd); + print_sense("sd", SCpnt); + SCpnt = scsi_end_request(SCpnt, 0, block_sectors); + scsi_queue_next_request(q, SCpnt); + return; + default: + break; + } + } /* driver byte != 0 */ + if (result) { + printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n", + SCpnt->device->host->host_no, + SCpnt->device->channel, + SCpnt->device->id, + SCpnt->device->lun, result); + + if (driver_byte(result) & DRIVER_SENSE) + print_sense("sd", SCpnt); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.current_nr_sectors); + scsi_queue_next_request(q, SCpnt); + return; + } +} + +/* + * Function: scsi_get_request_dev() + * + * Purpose: Find the upper-level driver that is responsible for this + * request + * + * Arguments: request - I/O request we are preparing to queue. + * + * Lock status: No locks assumed to be held, but as it happens the + * io_request_lock is held when this is called. + * + * Returns: Nothing + * + * Notes: The requests in the request queue may have originated + * from any block device driver. We need to find out which + * one so that we can later form the appropriate command. 
+ */ +struct Scsi_Device_Template *scsi_get_request_dev(struct request *req) +{ + struct Scsi_Device_Template *spnt; + kdev_t dev = req->rq_dev; + int major = MAJOR(dev); + + ASSERT_LOCK(&io_request_lock, 1); + + for (spnt = scsi_devicelist; spnt; spnt = spnt->next) { + /* + * Search for a block device driver that supports this + * major. + */ + if (spnt->blk && spnt->major == major) { + return spnt; + } + } + return NULL; +} + +/* + * Function: scsi_request_fn() + * + * Purpose: Generic version of request function for SCSI hosts. + * + * Arguments: q - Pointer to actual queue. + * + * Returns: Nothing + * + * Lock status: IO request lock assumed to be held when called. + * + * Notes: The theory is that this function is something which individual + * drivers could also supply if they wished to. The problem + * is that we have 30 some odd low-level drivers in the kernel + * tree already, and it would be most difficult to retrofit + * this crap into all of them. Thus this function has the job + * of acting as a generic queue manager for all of those existing + * drivers. + */ +void scsi_request_fn(request_queue_t * q) +{ + struct request *req; + Scsi_Cmnd *SCpnt; + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + struct Scsi_Device_Template *STpnt; + + ASSERT_LOCK(&io_request_lock, 1); + + SDpnt = (Scsi_Device *) q->queuedata; + if (!SDpnt) { + panic("Missing device"); + } + SHpnt = SDpnt->host; + + /* + * If the host for this device is in error recovery mode, don't + * do anything at all here. When the host leaves error recovery + * mode, it will automatically restart things and start queueing + * commands again. Same goes if the queue is actually plugged, + * if the device itself is blocked, or if the host is fully + * occupied. + */ + if (SHpnt->in_recovery + || q->plugged) { + return; + } + /* + * To start with, we keep looping until the queue is empty, or until + * the host is no longer able to accept any more requests. 
+ */ + while (1 == 1) { + /* + * If the host cannot accept another request, then quit. + */ + if (SDpnt->device_blocked) { + break; + } + if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) + || (SHpnt->host_blocked)) { + /* + * If we are unable to process any commands at all for this + * device, then we consider it to be starved. What this means + * is that there are no outstanding commands for this device + * and hence we need a little help getting it started again + * once the host isn't quite so busy. + */ + if (SDpnt->device_busy == 0) { + SDpnt->starved = 1; + SHpnt->some_device_starved = 1; + } + break; + } else { + SDpnt->starved = 0; + } + /* + * Loop through all of the requests in this queue, and find + * one that is queueable. + */ + req = q->current_request; + + /* + * If we couldn't find a request that could be queued, then we + * can also quit. + */ + if (!req) { + break; + } + /* + * Find the actual device driver associated with this command. + * The SPECIAL requests are things like character device or + * ioctls, which did not originate from ll_rw_blk. + */ + if (req->special != NULL) { + STpnt = NULL; + SCpnt = (Scsi_Cmnd *) req->special; + } else { + STpnt = scsi_get_request_dev(req); + if (!STpnt) { + panic("Unable to find device associated with request"); + } + /* + * Now try and find a command block that we can use. + */ + SCpnt = scsi_allocate_device(SDpnt, FALSE); + /* + * If so, we are ready to do something. Bump the count + * while the queue is locked and then break out of the loop. + * Otherwise loop around and try another request. + */ + if (!SCpnt) { + break; + } + SHpnt->host_busy++; + SDpnt->device_busy++; + } + + /* + * FIXME(eric) + * I am not sure where the best place to do this is. We need + * to hook in a place where we are likely to come if in user + * space. Technically the error handling thread should be + * doing this crap, but the error handler isn't used by + * most hosts. 
+ */ + if (SDpnt->was_reset) { + /* + * We need to relock the door, but we might + * be in an interrupt handler. Only do this + * from user space, since we do not want to + * sleep from an interrupt. + */ + if (SDpnt->removable && !in_interrupt()) { + spin_unlock_irq(&io_request_lock); + scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0); + SDpnt->was_reset = 0; + spin_lock_irq(&io_request_lock); + continue; + } + SDpnt->was_reset = 0; + } + /* + * Finally, before we release the lock, we copy the + * request to the command block, and remove the + * request from the request list. Note that we always + * operate on the queue head - there is absolutely no + * reason to search the list, because all of the commands + * in this queue are for the same device. + */ + q->current_request = req->next; + + if (req->special == NULL) { + memcpy(&SCpnt->request, req, sizeof(struct request)); + + /* + * We have copied the data out of the request block - it is now in + * a field in SCpnt. Release the request block. + */ + req->next = NULL; + req->rq_status = RQ_INACTIVE; + wake_up(&wait_for_request); + } + /* + * Now it is finally safe to release the lock. We are not going + * to noodle the request list until this request has been queued + * and we loop back to queue another. + */ + spin_unlock_irq(&io_request_lock); + + if (req->special == NULL) { + /* + * This will do a couple of things: + * 1) Fill in the actual SCSI command. + * 2) Fill in any other upper-level specific fields (timeout). + * + * If this returns 0, it means that the request failed (reading + * past end of disk, reading offline device, etc). This won't + * actually talk to the device, but some kinds of consistency + * checking may cause the request to be rejected immediately. + */ + if (STpnt == NULL) { + STpnt = scsi_get_request_dev(req); + } + /* + * This sets up the scatter-gather table (allocating if + * required). Hosts that need bounce buffers will also + * get those allocated here. 
+ */ + if (!SDpnt->scsi_init_io_fn(SCpnt)) { + continue; + } + /* + * Initialize the actual SCSI command for this request. + */ + if (!STpnt->init_command(SCpnt)) { + continue; + } + } + /* + * Finally, initialize any error handling parameters, and set up + * the timers for timeouts. + */ + scsi_init_cmd_errh(SCpnt); + + /* + * Dispatch the command to the low-level driver. + */ + scsi_dispatch_cmd(SCpnt); + + /* + * Now we need to grab the lock again. We are about to mess with + * the request queue and try to find another command. + */ + spin_lock_irq(&io_request_lock); + } + + /* + * If this is a single-lun device, and we are currently finished + * with this device, then see if we need to get another device + * started. + */ + if (SDpnt->single_lun + && q->current_request == NULL + && SDpnt->device_busy == 0) { + request_queue_t *q; + + for (SDpnt = SHpnt->host_queue; + SDpnt; + SDpnt = SDpnt->next) { + if (((SHpnt->can_queue > 0) + && (SHpnt->host_busy >= SHpnt->can_queue)) + || (SHpnt->host_blocked) + || (SDpnt->device_blocked)) { + break; + } + q = &SDpnt->request_queue; + q->request_fn(q); + } + } +} diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c new file mode 100644 index 000000000000..e31d1a7a436f --- /dev/null +++ b/drivers/scsi/scsi_merge.c @@ -0,0 +1,770 @@ +/* + * scsi_merge.c Copyright (C) 1999 Eric Youngdale + * + * SCSI queueing library. + * Initial versions: Eric Youngdale (eric@andante.org). + * Based upon conversations with large numbers + * of people at Linux Expo. + */ + +/* + * This file contains queue management functions that are used by SCSI. + * Typically this is used for several purposes. First, we need to ensure + * that commands do not grow so large that they cannot be handled all at + * once by a host adapter. The various flavors of merge functions included + * here serve this purpose. 
+ * + * Note that it would be quite trivial to allow the low-level driver the + * flexibility to define it's own queue handling functions. For the time + * being, the hooks are not present. Right now we are just using the + * data in the host template as an indicator of how we should be handling + * queues, and we select routines that are optimized for that purpose. + * + * Some hosts do not impose any restrictions on the size of a request. + * In such cases none of the merge functions in this file are called, + * and we allow ll_rw_blk to merge requests in the default manner. + * This isn't guaranteed to be optimal, but it should be pretty darned + * good. If someone comes up with ideas of better ways of managing queues + * to improve on the default behavior, then certainly fit it into this + * scheme in whatever manner makes the most sense. Please note that + * since each device has it's own queue, we have considerable flexibility + * in queue management. + */ + +#define __NO_VERSION__ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define __KERNEL_SYSCALLS__ + +#include + +#include +#include +#include + +#include "scsi.h" +#include "hosts.h" +#include "constants.h" +#include + +#ifdef CONFIG_SCSI_DEBUG_QUEUES +/* + * Enable a bunch of additional consistency checking. Turn this off + * if you are benchmarking. + */ + +static int dump_stats(struct request *req, + int use_clustering, + int dma_host, + int segments) +{ + struct buffer_head *bh; + + /* + * Dump the information that we have. We know we have an + * inconsistency. 
+ */ + printk("nr_segments is %lx\n", req->nr_segments); + printk("counted segments is %x\n", segments); + printk("Flags %d %d\n", use_clustering, dma_host); + for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) + { + printk("Segment 0x%p, blocks %d, addr 0x%lx\n", + bh, + bh->b_size >> 9, + virt_to_phys(bh->b_data - 1)); + } + panic("Ththththaats all folks. Too dangerous to continue.\n"); +} + + +/* + * Simple sanity check that we will use for the first go around + * in order to ensure that we are doing the counting correctly. + * This can be removed for optimization. + */ +#define SANITY_CHECK(req, _CLUSTER, _DMA) \ + if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA) ) \ + { \ + __label__ here; \ +here: \ + printk("Incorrect segment count at 0x%p", &&here); \ + dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA)); \ + } +#else +#define SANITY_CHECK(req, _CLUSTER, _DMA) +#endif + +/* + * FIXME(eric) - the original disk code disabled clustering for MOD + * devices. I have no idea why we thought this was a good idea - my + * guess is that it was an attempt to limit the size of requests to MOD + * devices. + */ +#define CLUSTERABLE_DEVICE(SH,SD) (SH->use_clustering && \ + SD->type != TYPE_MOD) + +/* + * This entire source file deals with the new queueing code. + */ + +/* + * Function: __count_segments() + * + * Purpose: Prototype for queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: Count of the number of SG segments for the request. + * + * Lock status: + * + * Notes: This is only used for diagnostic purposes. 
+ */ +__inline static int __count_segments(struct request *req, + int use_clustering, + int dma_host) +{ + int ret = 1; + struct buffer_head *bh; + + for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) { + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) { + ret++; + } else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) { + /* + * This one is OK. Let it go. + */ + continue; + } + ret++; + } else { + ret++; + } + } + return ret; +} + +/* + * Function: __scsi_merge_fn() + * + * Purpose: Prototype for queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * bh - Block which we may wish to merge into request + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: 1 if it is OK to merge the block into the request. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Some drivers have limited scatter-gather table sizes, and + * thus they cannot queue an infinitely large command. This + * function is called from ll_rw_blk before it attempts to merge + * a new block into a request to make sure that the request will + * not become too large. + * + * This function is not designed to be directly called. Instead + * it should be referenced from other functions where the + * use_clustering and dma_host parameters should be integer + * constants. The compiler should thus be able to properly + * optimize the code, eliminating stuff that is irrelevant. 
+ * It is more maintainable to do this way with a single function + * than to have 4 separate functions all doing roughly the + * same thing. + */ +__inline static int __scsi_merge_fn(request_queue_t * q, + struct request *req, + struct buffer_head *bh, + int use_clustering, + int dma_host) +{ + unsigned int sector, count; + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + + SDpnt = (Scsi_Device *) q->queuedata; + SHpnt = SDpnt->host; + + count = bh->b_size >> 9; + sector = bh->b_rsector; + + /* + * We come in here in one of two cases. The first is that we + * are checking to see if we can add the buffer to the end of the + * request, the other is to see if we should add the request to the + * start. + */ + if (req->sector + req->nr_sectors == sector) { + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) { + goto new_segment; + } + if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) { + /* + * This one is OK. Let it go. + */ + return 1; + } + } + goto new_segment; + } else if (req->sector - count == sector) { + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) { + goto new_segment; + } + if (CONTIGUOUS_BUFFERS(bh, req->bh)) { + /* + * This one is OK. Let it go. + */ + return 1; + } + } + goto new_segment; + } else { + panic("Attempt to merge sector that doesn't belong"); + } + new_segment: + if (req->nr_segments < SHpnt->sg_tablesize) { + /* + * This will form the start of a new segment. Bump the + * counter. 
+ */ + req->nr_segments++; + return 1; + } else { + return 0; + } +} + +/* + * Function: scsi_merge_fn_() + * + * Purpose: queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * bh - Block which we may wish to merge into request + * + * Returns: 1 if it is OK to merge the block into the request. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Optimized for different cases depending upon whether + * ISA DMA is in use and whether clustering should be used. + */ +#define MERGEFCT(_FUNCTION, _CLUSTER, _DMA) \ +static int _FUNCTION(request_queue_t * q, \ + struct request * req, \ + struct buffer_head * bh) \ +{ \ + int ret; \ + SANITY_CHECK(req, _CLUSTER, _DMA); \ + ret = __scsi_merge_fn(q, req, bh, _CLUSTER, _DMA); \ + return ret; \ +} + +MERGEFCT(scsi_merge_fn_, 0, 0) +MERGEFCT(scsi_merge_fn_d, 0, 1) +MERGEFCT(scsi_merge_fn_c, 1, 0) +MERGEFCT(scsi_merge_fn_dc, 1, 1) +/* + * Function: __scsi_merge_requests_fn() + * + * Purpose: Prototype for queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * next - 2nd request that we might want to combine with req + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: 1 if it is OK to merge the two requests. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Some drivers have limited scatter-gather table sizes, and + * thus they cannot queue an infinitely large command. This + * function is called from ll_rw_blk before it attempts to merge + * a new block into a request to make sure that the request will + * not become too large. + * + * This function is not designed to be directly called. 
Instead + * it should be referenced from other functions where the + * use_clustering and dma_host parameters should be integer + * constants. The compiler should thus be able to properly + * optimize the code, eliminating stuff that is irrelevant. + * It is more maintainable to do this way with a single function + * than to have 4 separate functions all doing roughly the + * same thing. + */ +__inline static int __scsi_merge_requests_fn(request_queue_t * q, + struct request *req, + struct request *next, + int use_clustering, + int dma_host) +{ + Scsi_Device *SDpnt; + struct Scsi_Host *SHpnt; + + SDpnt = (Scsi_Device *) q->queuedata; + SHpnt = SDpnt->host; + + /* + * If the two requests together are too large (even assuming that we + * can merge the boundary requests into one segment, then don't + * allow the merge. + */ + if (req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) { + return 0; + } + /* + * The main question is whether the two segments at the boundaries + * would be considered one or two. + */ + if (use_clustering) { + /* + * See if we can do this without creating another + * scatter-gather segment. In the event that this is a + * DMA capable host, make sure that a segment doesn't span + * the DMA threshold boundary. + */ + if (dma_host && + virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) { + goto dont_combine; + } + if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) { + /* + * This one is OK. Let it go. + */ + req->nr_segments += next->nr_segments - 1; + return 1; + } + } + dont_combine: + /* + * We know that the two requests at the boundary should not be combined. + * Make sure we can fix something that is the sum of the two. + * A slightly stricter test than we had above. + */ + if (req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) { + return 0; + } else { + /* + * This will form the start of a new segment. Bump the + * counter. 
+ */ + req->nr_segments += next->nr_segments; + return 1; + } +} + +/* + * Function: scsi_merge_requests_fn_() + * + * Purpose: queue merge function. + * + * Arguments: q - Queue for which we are merging request. + * req - request into which we wish to merge. + * bh - Block which we may wish to merge into request + * + * Returns: 1 if it is OK to merge the block into the request. 0 + * if it is not OK. + * + * Lock status: io_request_lock is assumed to be held here. + * + * Notes: Optimized for different cases depending upon whether + * ISA DMA is in use and whether clustering should be used. + */ +#define MERGEREQFCT(_FUNCTION, _CLUSTER, _DMA) \ +static int _FUNCTION(request_queue_t * q, \ + struct request * req, \ + struct request * next) \ +{ \ + int ret; \ + SANITY_CHECK(req, _CLUSTER, _DMA); \ + ret = __scsi_merge_requests_fn(q, req, next, _CLUSTER, _DMA); \ + return ret; \ +} + +MERGEREQFCT(scsi_merge_requests_fn_, 0, 0) +MERGEREQFCT(scsi_merge_requests_fn_d, 0, 1) +MERGEREQFCT(scsi_merge_requests_fn_c, 1, 0) +MERGEREQFCT(scsi_merge_requests_fn_dc, 1, 1) +/* + * Function: __init_io() + * + * Purpose: Prototype for io initialize function. + * + * Arguments: SCpnt - Command descriptor we wish to initialize + * sg_count_valid - 1 if the sg count in the req is valid. + * use_clustering - 1 if this host wishes to use clustering + * dma_host - 1 if this host has ISA DMA issues (bus doesn't + * expose all of the address lines, so that DMA cannot + * be done from an arbitrary address). + * + * Returns: 1 on success. + * + * Lock status: + * + * Notes: Only the SCpnt argument should be a non-constant variable. + * This function is designed in such a way that it will be + * invoked from a series of small stubs, each of which would + * be optimized for specific circumstances. + * + * The advantage of this is that hosts that don't do DMA + * get versions of the function that essentially don't have + * any of the DMA code. 
Same goes for clustering - in the + * case of hosts with no need for clustering, there is no point + * in a whole bunch of overhead. + * + * Finally, in the event that a host has set can_queue to SG_ALL + * implying that there is no limit to the length of a scatter + * gather list, the sg count in the request won't be valid + * (mainly because we don't need queue management functions + * which keep the tally uptodate. + */ +__inline static int __init_io(Scsi_Cmnd * SCpnt, + int sg_count_valid, + int use_clustering, + int dma_host) +{ + struct buffer_head *bh; + struct buffer_head *bhprev; + char *buff; + int count; + int i; + struct request *req; + struct scatterlist *sgpnt; + int this_count; + + /* + * FIXME(eric) - don't inline this - it doesn't depend on the + * integer flags. Come to think of it, I don't think this is even + * needed any more. Need to play with it and see if we hit the + * panic. If not, then don't bother. + */ + if (!SCpnt->request.bh) { + /* + * Case of page request (i.e. raw device), or unlinked buffer + * Typically used for swapping, but this isn't how we do + * swapping any more. + */ + panic("I believe this is dead code. If we hit this, I was wrong"); +#if 0 + SCpnt->request_bufflen = SCpnt->request.nr_sectors << 9; + SCpnt->request_buffer = SCpnt->request.buffer; + SCpnt->use_sg = 0; + /* + * FIXME(eric) - need to handle DMA here. + */ +#endif + return 1; + } + req = &SCpnt->request; + /* + * First we need to know how many scatter gather segments are needed. + */ + if (!sg_count_valid) { + count = __count_segments(req, use_clustering, dma_host); + } else { + count = req->nr_segments; + } + + /* + * If the dma pool is nearly empty, then queue a minimal request + * with a single segment. Typically this will satisfy a single + * buffer. + */ + if (dma_host && scsi_dma_free_sectors <= 10) { + this_count = SCpnt->request.current_nr_sectors; + goto single_segment; + } + /* + * Don't bother with scatter-gather if there is only one segment. 
+ */ + if (count == 1) { + this_count = SCpnt->request.nr_sectors; + goto single_segment; + } + SCpnt->use_sg = count; + + /* + * Allocate the actual scatter-gather table itself. + * scsi_malloc can only allocate in chunks of 512 bytes + */ + SCpnt->sglist_len = (SCpnt->use_sg + * sizeof(struct scatterlist) + 511) & ~511; + + sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len); + + /* + * Now fill the scatter-gather table. + */ + if (!sgpnt) { + /* + * If we cannot allocate the scatter-gather table, then + * simply write the first buffer all by itself. + */ + printk("Warning - running *really* short on DMA buffers\n"); + this_count = SCpnt->request.current_nr_sectors; + goto single_segment; + } + /* + * Next, walk the list, and fill in the addresses and sizes of + * each segment. + */ + memset(sgpnt, 0, SCpnt->sglist_len); + SCpnt->request_buffer = (char *) sgpnt; + SCpnt->request_bufflen = 0; + bhprev = NULL; + + for (count = 0, bh = SCpnt->request.bh; + bh; bh = bh->b_reqnext) { + if (use_clustering && bhprev != NULL) { + if (dma_host && + virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) { + /* Nothing - fall through */ + } else if (CONTIGUOUS_BUFFERS(bhprev, bh)) { + /* + * This one is OK. Let it go. + */ + sgpnt[count - 1].length += bh->b_size; + if (!dma_host) { + SCpnt->request_bufflen += bh->b_size; + } + bhprev = bh; + continue; + } + } + count++; + sgpnt[count - 1].address = bh->b_data; + sgpnt[count - 1].length += bh->b_size; + if (!dma_host) { + SCpnt->request_bufflen += bh->b_size; + } + bhprev = bh; + } + + /* + * Verify that the count is correct. + */ + if (count != SCpnt->use_sg) { + panic("Incorrect sg segment count"); + } + if (!dma_host) { + return 1; + } + /* + * Now allocate bounce buffers, if needed. 
+ */ + SCpnt->request_bufflen = 0; + for (i = 0; i < count; i++) { + SCpnt->request_bufflen += sgpnt[i].length; + if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 > + ISA_DMA_THRESHOLD && !sgpnt[count].alt_address) { + sgpnt[i].alt_address = sgpnt[i].address; + sgpnt[i].address = + (char *) scsi_malloc(sgpnt[i].length); + /* + * If we cannot allocate memory for this DMA bounce + * buffer, then queue just what we have done so far. + */ + if (sgpnt[i].address == NULL) { + printk("Warning - running low on DMA memory\n"); + SCpnt->request_bufflen -= sgpnt[i].length; + SCpnt->use_sg = i; + if (i == 0) { + panic("DMA pool exhausted"); + } + break; + } + if (SCpnt->request.cmd == WRITE) { + memcpy(sgpnt[i].address, sgpnt[i].alt_address, + sgpnt[i].length); + } + } + } + return 1; + + single_segment: + /* + * Come here if for any reason we choose to do this as a single + * segment. Possibly the entire request, or possibly a small + * chunk of the entire request. + */ + bh = SCpnt->request.bh; + buff = SCpnt->request.buffer; + + if (dma_host) { + /* + * Allocate a DMA bounce buffer. If the allocation fails, fall + * back and allocate a really small one - enough to satisfy + * the first buffer. 
+ */ + if (virt_to_phys(SCpnt->request.bh->b_data) + + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) { + buff = (char *) scsi_malloc(this_count << 9); + if (!buff) { + printk("Warning - running low on DMA memory\n"); + this_count = SCpnt->request.current_nr_sectors; + buff = (char *) scsi_malloc(this_count << 9); + if (!buff) { + panic("Unable to allocate DMA buffer\n"); + } + } + if (SCpnt->request.cmd == WRITE) + memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9); + } + } + SCpnt->request_bufflen = this_count << 9; + SCpnt->request_buffer = buff; + SCpnt->use_sg = 0; + return 1; +} + +#define INITIO(_FUNCTION, _VALID, _CLUSTER, _DMA) \ +static int _FUNCTION(Scsi_Cmnd * SCpnt) \ +{ \ + return __init_io(SCpnt, _VALID, _CLUSTER, _DMA); \ +} + +/* + * ll_rw_blk.c now keeps track of the number of segments in + * a request. Thus we don't have to do it any more here. + * We always force "_VALID" to 1. Eventually clean this up + * and get rid of the extra argument. + */ +#if 0 +/* Old definitions */ +INITIO(scsi_init_io_, 0, 0, 0) +INITIO(scsi_init_io_d, 0, 0, 1) +INITIO(scsi_init_io_c, 0, 1, 0) +INITIO(scsi_init_io_dc, 0, 1, 1) + +/* Newer redundant definitions. */ +INITIO(scsi_init_io_, 1, 0, 0) +INITIO(scsi_init_io_d, 1, 0, 1) +INITIO(scsi_init_io_c, 1, 1, 0) +INITIO(scsi_init_io_dc, 1, 1, 1) +#endif + +INITIO(scsi_init_io_v, 1, 0, 0) +INITIO(scsi_init_io_vd, 1, 0, 1) +INITIO(scsi_init_io_vc, 1, 1, 0) +INITIO(scsi_init_io_vdc, 1, 1, 1) +/* + * Function: initialize_merge_fn() + * + * Purpose: Initialize merge function for a host + * + * Arguments: SHpnt - Host descriptor. + * + * Returns: Nothing. + * + * Lock status: + * + * Notes: + */ +void initialize_merge_fn(Scsi_Device * SDpnt) +{ + request_queue_t *q; + struct Scsi_Host *SHpnt; + SHpnt = SDpnt->host; + + q = &SDpnt->request_queue; + + /* + * If the host has already selected a merge manager, then don't + * pick a new one. 
+ */ + if (q->merge_fn != NULL) { + return; + } + /* + * If this host has an unlimited tablesize, then don't bother with a + * merge manager. The whole point of the operation is to make sure + * that requests don't grow too large, and this host isn't picky. + */ + if (SHpnt->sg_tablesize == SG_ALL) { + if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_v; + } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_vd; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_vc; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + SDpnt->scsi_init_io_fn = scsi_init_io_vdc; + } + return; + } + /* + * Now pick out the correct function. + */ + if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + q->merge_fn = scsi_merge_fn_; + q->merge_requests_fn = scsi_merge_requests_fn_; + SDpnt->scsi_init_io_fn = scsi_init_io_v; + } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + q->merge_fn = scsi_merge_fn_d; + q->merge_requests_fn = scsi_merge_requests_fn_d; + SDpnt->scsi_init_io_fn = scsi_init_io_vd; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) { + q->merge_fn = scsi_merge_fn_c; + q->merge_requests_fn = scsi_merge_requests_fn_c; + SDpnt->scsi_init_io_fn = scsi_init_io_vc; + } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) { + q->merge_fn = scsi_merge_fn_dc; + q->merge_requests_fn = scsi_merge_requests_fn_dc; + SDpnt->scsi_init_io_fn = scsi_init_io_vdc; + } +} diff --git a/drivers/scsi/scsi_obsolete.c b/drivers/scsi/scsi_obsolete.c index b431c3849197..ccfb0b34f8dc 100644 --- a/drivers/scsi/scsi_obsolete.c +++ b/drivers/scsi/scsi_obsolete.c @@ -13,7 +13,7 @@ * Tommy Thorn * Thomas Wuensche * - * Modified by Eric Youngdale eric@aib.com to + * 
Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. * @@ -84,13 +84,15 @@ static int scsi_reset(Scsi_Cmnd *, unsigned int); extern void scsi_old_done(Scsi_Cmnd * SCpnt); int update_timeout(Scsi_Cmnd *, int); extern void scsi_old_times_out(Scsi_Cmnd * SCpnt); -extern void internal_cmnd(Scsi_Cmnd * SCpnt); + +extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt); extern volatile struct Scsi_Host *host_active; #define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \ || (HOST->can_queue && HOST->host_busy >= HOST->can_queue)) -static unsigned char generic_sense[6] = {REQUEST_SENSE, 0, 0, 0, 255, 0}; +static unsigned char generic_sense[6] = +{REQUEST_SENSE, 0, 0, 0, 255, 0}; /* * This is the number of clock ticks we should wait before we time out @@ -232,7 +234,13 @@ static void scsi_request_sense(Scsi_Cmnd * SCpnt) SCpnt->use_sg = 0; SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); SCpnt->result = 0; - internal_cmnd(SCpnt); + /* + * Ugly, ugly. The newer interfaces all assume that the lock + * isn't held. Mustn't disappoint, or we deadlock the system. + */ + spin_unlock_irq(&io_request_lock); + scsi_dispatch_cmd(SCpnt); + spin_lock_irq(&io_request_lock); } @@ -443,7 +451,7 @@ void scsi_old_done(Scsi_Cmnd * SCpnt) __LINE__); } } - /* end WAS_SENSE */ + /* end WAS_SENSE */ else { #ifdef DEBUG printk("COMMAND COMPLETE message returned, " @@ -628,7 +636,14 @@ void scsi_old_done(Scsi_Cmnd * SCpnt) SCpnt->use_sg = SCpnt->old_use_sg; SCpnt->cmd_len = SCpnt->old_cmd_len; SCpnt->result = 0; - internal_cmnd(SCpnt); + /* + * Ugly, ugly. The newer interfaces all + * assume that the lock isn't held. Mustn't + * disappoint, or we deadlock the system. 
+ */ + spin_unlock_irq(&io_request_lock); + scsi_dispatch_cmd(SCpnt); + spin_lock_irq(&io_request_lock); } break; default: @@ -641,22 +656,18 @@ void scsi_old_done(Scsi_Cmnd * SCpnt) #endif host->host_busy--; /* Indicate that we are free */ - if (host->block && host->host_busy == 0) { - host_active = NULL; - - /* For block devices "wake_up" is done in end_scsi_request */ - if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) { - struct Scsi_Host *next; - - for (next = host->block; next != host; next = next->block) - wake_up(&next->host_wait); - } - } - wake_up(&host->host_wait); SCpnt->result = result | ((exit & 0xff) << 24); SCpnt->use_sg = SCpnt->old_use_sg; SCpnt->cmd_len = SCpnt->old_cmd_len; + /* + * The upper layers assume the lock isn't held. We mustn't + * disappoint them. When the new error handling code is in + * use, the upper code is run from a bottom half handler, so + * it isn't an issue. + */ + spin_unlock_irq(&io_request_lock); SCpnt->done(SCpnt); + spin_lock_irq(&io_request_lock); } #undef CMD_FINISHED #undef REDO @@ -925,8 +936,7 @@ static int scsi_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags) if (host->last_reset - jiffies > 20UL * HZ) host->last_reset = jiffies; } else { - if (!host->block) - host->host_busy++; + host->host_busy++; host->last_reset = jiffies; host->resetting = 1; SCpnt->flags |= (WAS_RESET | IS_RESETTING); @@ -939,8 +949,7 @@ static int scsi_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags) if (time_before(host->last_reset, jiffies) || (time_after(host->last_reset, jiffies + 20 * HZ))) host->last_reset = jiffies; - if (!host->block) - host->host_busy--; + host->host_busy--; } if (reset_flags & SCSI_RESET_SYNCHRONOUS) SCpnt->flags &= ~SYNC_RESET; diff --git a/drivers/scsi/scsi_queue.c b/drivers/scsi/scsi_queue.c index 95c1cac69598..1c64977e9da7 100644 --- a/drivers/scsi/scsi_queue.c +++ b/drivers/scsi/scsi_queue.c @@ -56,14 +56,6 @@ static const char RCSid[] = "$Header: 
/mnt/ide/home/eric/CVSROOT/linux/drivers/scsi/scsi_queue.c,v 1.1 1997/10/21 11:16:38 eric Exp $"; -/* - * Lock used to prevent more than one process from frobbing the list at the - * same time. FIXME(eric) - there should be separate spinlocks for each host. - * This will reduce contention. - */ - -spinlock_t scsi_mlqueue_lock = SPIN_LOCK_UNLOCKED; -spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED; /* * Function: scsi_mlqueue_insert() @@ -73,6 +65,8 @@ spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED; * Arguments: cmd - command that we are adding to queue. * reason - why we are inserting command to queue. * + * Lock status: Assumed that lock is not held upon entry. + * * Returns: Nothing. * * Notes: We do this for one of two cases. Either the host is busy @@ -84,8 +78,6 @@ spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED; */ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason) { - Scsi_Cmnd *cpnt; - unsigned long flags; struct Scsi_Host *host; SCSI_LOG_MLQUEUE(1, printk("Inserting command %p into mlqueue\n", cmd)); @@ -126,12 +118,12 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason) * If a host is inactive and cannot queue any commands, I don't see * how things could possibly work anyways. */ - if (cmd->device->device_busy == 0) { + if (cmd->device->device_blocked == 0) { if (scsi_retry_command(cmd) == 0) { return 0; } } - cmd->device->device_busy = TRUE; + cmd->device->device_blocked = TRUE; cmd->device_wait = TRUE; } @@ -143,142 +135,9 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason) cmd->bh_next = NULL; /* - * As a performance enhancement, look to see whether the list is - * empty. If it is, then we can just atomicly insert the command - * in the list and return without locking. + * Insert this command at the head of the queue for it's device. + * It will go before all other commands that are already in the queue. 
*/ - if (host->pending_commands == NULL) { - cpnt = xchg(&host->pending_commands, cmd); - if (cpnt == NULL) { - return 0; - } - /* - * Rats. Something slipped in while we were exchanging. - * Swap it back and fall through to do it the hard way. - */ - cmd = xchg(&host->pending_commands, cpnt); - - } - /* - * Next append the command to the list of pending commands. - */ - spin_lock_irqsave(&scsi_mlqueue_lock, flags); - for (cpnt = host->pending_commands; cpnt && cpnt->bh_next; - cpnt = cpnt->bh_next) { - continue; - } - if (cpnt != NULL) { - cpnt->bh_next = cmd; - } else { - host->pending_commands = cmd; - } - - spin_unlock_irqrestore(&scsi_mlqueue_lock, flags); - return 0; -} - -/* - * Function: scsi_mlqueue_finish() - * - * Purpose: Try and queue commands from the midlevel queue. - * - * Arguments: host - host that just finished a command. - * device - device that just finished a command. - * - * Returns: Nothing. - * - * Notes: This could be called either from an interrupt context or a - * normal process context. - */ -int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device) -{ - Scsi_Cmnd *cpnt; - unsigned long flags; - Scsi_Cmnd *next; - Scsi_Cmnd *prev; - int reason = 0; - int rtn; - - SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish starting\n")); - /* - * First, clear the flag for the host/device. We will then start - * pushing commands through until either something else blocks, or - * the queue is empty. - */ - if (host->host_blocked) { - reason = SCSI_MLQUEUE_HOST_BUSY; - host->host_blocked = FALSE; - } - if (device->device_busy) { - reason = SCSI_MLQUEUE_DEVICE_BUSY; - device->device_busy = FALSE; - } - /* - * Walk the list of commands to see if there is anything we can - * queue. This probably needs to be optimized for performance at - * some point. 
- */ - prev = NULL; - spin_lock_irqsave(&scsi_mlqueue_remove_lock, flags); - for (cpnt = host->pending_commands; cpnt; cpnt = next) { - next = cpnt->bh_next; - /* - * First, see if this command is suitable for being retried now. - */ - if (reason == SCSI_MLQUEUE_HOST_BUSY) { - /* - * The host was busy, but isn't any more. Thus we may be - * able to queue the command now, but we were waiting for - * the device, then we should keep waiting. Similarily, if - * the device is now busy, we should also keep waiting. - */ - if ((cpnt->host_wait == FALSE) - || (device->device_busy == TRUE)) { - prev = cpnt; - continue; - } - } - if (reason == SCSI_MLQUEUE_DEVICE_BUSY) { - /* - * The device was busy, but isn't any more. Thus we may be - * able to queue the command now, but we were waiting for - * the host, then we should keep waiting. Similarily, if - * the host is now busy, we should also keep waiting. - */ - if ((cpnt->device_wait == FALSE) - || (host->host_blocked == TRUE)) { - prev = cpnt; - continue; - } - } - /* - * First, remove the command from the list. - */ - if (prev == NULL) { - host->pending_commands = next; - } else { - prev->bh_next = next; - } - cpnt->bh_next = NULL; - - rtn = scsi_retry_command(cpnt); - - /* - * If we got a non-zero return value, it means that the host rejected - * the command. The internal_cmnd function will have added the - * command back to the end of the list, so we don't have anything - * more to do here except return. 
- */ - if (rtn) { - spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags); - SCSI_LOG_MLQUEUE(1, printk("Unable to remove command %p from mlqueue\n", cpnt)); - goto finish; - } - SCSI_LOG_MLQUEUE(1, printk("Removed command %p from mlqueue\n", cpnt)); - } - - spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags); -finish: - SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish returning\n")); + scsi_insert_special_cmd(cmd, 1); return 0; } diff --git a/drivers/scsi/scsi_syms.c b/drivers/scsi/scsi_syms.c index d3cb4a1031dc..3c94212fd71c 100644 --- a/drivers/scsi/scsi_syms.c +++ b/drivers/scsi/scsi_syms.c @@ -33,8 +33,8 @@ * modules. */ -extern void print_command (unsigned char *command); -extern void print_sense(const char * devclass, Scsi_Cmnd * SCpnt); +extern void print_command(unsigned char *command); +extern void print_sense(const char *devclass, Scsi_Cmnd * SCpnt); extern const char *const scsi_device_types[]; @@ -60,13 +60,12 @@ EXPORT_SYMBOL(print_status); EXPORT_SYMBOL(scsi_dma_free_sectors); EXPORT_SYMBOL(kernel_scsi_ioctl); EXPORT_SYMBOL(scsi_need_isa_buffer); -EXPORT_SYMBOL(scsi_request_queueable); EXPORT_SYMBOL(scsi_release_command); EXPORT_SYMBOL(print_Scsi_Cmnd); EXPORT_SYMBOL(scsi_block_when_processing_errors); EXPORT_SYMBOL(scsi_mark_host_reset); EXPORT_SYMBOL(scsi_ioctl_send_command); -#if defined(CONFIG_SCSI_LOGGING) /* { */ +#if defined(CONFIG_SCSI_LOGGING) /* { */ EXPORT_SYMBOL(scsi_logging_level); #endif @@ -75,6 +74,9 @@ EXPORT_SYMBOL(scsi_sleep); EXPORT_SYMBOL(proc_print_scsidevice); EXPORT_SYMBOL(proc_scsi); +EXPORT_SYMBOL(scsi_io_completion); +EXPORT_SYMBOL(scsi_end_request); + /* * These are here only while I debug the rest of the scsi stuff. 
*/ @@ -83,5 +85,4 @@ EXPORT_SYMBOL(scsi_hosts); EXPORT_SYMBOL(scsi_devicelist); EXPORT_SYMBOL(scsi_device_types); - -#endif /* CONFIG_MODULES */ +#endif /* CONFIG_MODULES */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 240453144db6..ce9e28a41f1b 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1,6 +1,6 @@ /* * sd.c Copyright (C) 1992 Drew Eckhardt - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * * Linux scsi disk driver * Initial versions: Drew Eckhardt @@ -8,11 +8,11 @@ * * * - * Modified by Eric Youngdale ericy@cais.com to + * Modified by Eric Youngdale ericy@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. * - * Modified by Eric Youngdale eric@aib.com to support loadable + * Modified by Eric Youngdale eric@andante.org to support loadable * low-level scsi drivers. * * Modified by Jirka Hanika geo@ff.cuni.cz to support more @@ -96,13 +96,15 @@ static int fop_revalidate_scsidisk(kdev_t); static int sd_init_onedisk(int); -static void requeue_sd_request(Scsi_Cmnd * SCpnt); static int sd_init(void); static void sd_finish(void); static int sd_attach(Scsi_Device *); static int sd_detect(Scsi_Device *); static void sd_detach(Scsi_Device *); +static void rw_intr(Scsi_Cmnd * SCpnt); + +static int sd_init_command(Scsi_Cmnd *); static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg) { @@ -196,12 +198,170 @@ static void sd_devname(unsigned int disknum, char *buffer) } struct Scsi_Device_Template sd_template = { - NULL, "disk", "sd", NULL, TYPE_DISK, - SCSI_DISK0_MAJOR, 0, 0, 0, 1, - sd_detect, sd_init, - sd_finish, sd_attach, sd_detach + name:"disk", + tag:"sd", + scsi_type:TYPE_DISK, + major:SCSI_DISK0_MAJOR, + blk:1, + detect:sd_detect, + init:sd_init, + finish:sd_finish, + attach:sd_attach, + detach:sd_detach, + init_command:sd_init_command, }; +static request_queue_t *sd_find_queue(kdev_t dev) +{ + 
Scsi_Disk *dpnt; + int target; + target = DEVICE_NR(dev); + + dpnt = &rscsi_disks[target]; + if (!dpnt) + return NULL; /* No such device */ + return &dpnt->device->request_queue; +} + +static int sd_init_command(Scsi_Cmnd * SCpnt) +{ + int dev, devm, block, this_count; + Scsi_Disk *dpnt; + char nbuff[6]; + + devm = MINOR(SCpnt->request.rq_dev); + dev = DEVICE_NR(SCpnt->request.rq_dev); + + block = SCpnt->request.sector; + this_count = SCpnt->request_bufflen >> 9; + + SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block)); + + dpnt = &rscsi_disks[dev]; + if (devm >= (sd_template.dev_max << 4) || + !dpnt || + !dpnt->device->online || + block + SCpnt->request.nr_sectors > sd[devm].nr_sects) { + SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors)); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); + return 0; + } + block += sd[devm].start_sect; + if (dpnt->device->changed) { + /* + * quietly refuse to do anything to a changed disc until the changed + * bit has been reset + */ + /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + SCSI_LOG_HLQUEUE(2, sd_devname(devm, nbuff)); + SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n", + nbuff, dev, block)); + + /* + * If we have a 1K hardware sectorsize, prevent access to single + * 512 byte sectors. In theory we could handle this - in fact + * the scsi cdrom driver must be able to handle this because + * we typically use 1K blocksizes, and cdroms typically have + * 2K hardware sectorsizes. Of course, things are simpler + * with the cdrom, since it is read-only. For performance + * reasons, the filesystems should be able to handle this + * and not force the scsi disk driver to use bounce buffers + * for this. 
+ */ + if (dpnt->device->sector_size == 1024) { + if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { + printk("sd.c:Bad block number requested"); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } else { + block = block >> 1; + this_count = this_count >> 1; + } + } + if (dpnt->device->sector_size == 2048) { + if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { + printk("sd.c:Bad block number requested"); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } else { + block = block >> 2; + this_count = this_count >> 2; + } + } + switch (SCpnt->request.cmd) { + case WRITE: + if (!dpnt->device->writeable) { + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + SCpnt->cmnd[0] = WRITE_6; + break; + case READ: + SCpnt->cmnd[0] = READ_6; + break; + default: + panic("Unknown sd command %d\n", SCpnt->request.cmd); + } + + SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", + nbuff, + (SCpnt->request.cmd == WRITE) ? 
"writing" : "reading", + this_count, SCpnt->request.nr_sectors)); + + SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0; + + if (((this_count > 0xff) || (block > 0x1fffff)) && SCpnt->device->ten) { + if (this_count > 0xffff) + this_count = 0xffff; + + SCpnt->cmnd[0] += READ_10 - READ_6; + SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; + SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; + SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; + SCpnt->cmnd[5] = (unsigned char) block & 0xff; + SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; + SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; + SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; + } else { + if (this_count > 0xff) + this_count = 0xff; + + SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); + SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); + SCpnt->cmnd[3] = (unsigned char) block & 0xff; + SCpnt->cmnd[4] = (unsigned char) this_count; + SCpnt->cmnd[5] = 0; + } + + /* + * We shouldn't disconnect in the middle of a sector, so with a dumb + * host adapter, it's safe to assume that we can at least transfer + * this many bytes between each connect / disconnect. + */ + SCpnt->transfersize = dpnt->device->sector_size; + SCpnt->underflow = this_count << 9; + + SCpnt->allowed = MAX_RETRIES; + SCpnt->timeout_per_command = (SCpnt->device->type == TYPE_DISK ? + SD_TIMEOUT : SD_MOD_TIMEOUT); + + /* + * This is the completion routine we use. This is matched in terms + * of capability to this function. + */ + SCpnt->done = rw_intr; + + /* + * This indicates that the command is ready from our end to be + * queued. + */ + return 1; +} + static int sd_open(struct inode *inode, struct file *filp) { int target; @@ -359,7 +519,7 @@ static void rw_intr(Scsi_Cmnd * SCpnt) int good_sectors = (result == 0 ? 
this_count : 0); int block_sectors = 1; - sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff); + SCSI_LOG_HLCOMPLETE(1, sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff)); SCSI_LOG_HLCOMPLETE(1, printk("%s : rw_intr(%d, %x [%x %x])\n", nbuff, SCpnt->host->host_no, @@ -380,202 +540,37 @@ static void rw_intr(Scsi_Cmnd * SCpnt) (SCpnt->sense_buffer[4] << 16) | (SCpnt->sense_buffer[5] << 8) | SCpnt->sense_buffer[6]; - int sector_size = - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size; if (SCpnt->request.bh != NULL) block_sectors = SCpnt->request.bh->b_size >> 9; - if (sector_size == 1024) { + switch (SCpnt->device->sector_size) { + case 1024: error_sector <<= 1; if (block_sectors < 2) block_sectors = 2; - } else if (sector_size == 2048) { + break; + case 2048: error_sector <<= 2; if (block_sectors < 4) block_sectors = 4; - } else if (sector_size == 256) + break; + case 256: error_sector >>= 1; - error_sector -= sd[SD_PARTITION(SCpnt->request.rq_dev)].start_sect; + break; + default: + break; + } + error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect; error_sector &= ~(block_sectors - 1); good_sectors = error_sector - SCpnt->request.sector; if (good_sectors < 0 || good_sectors >= this_count) good_sectors = 0; } /* - * First case : we assume that the command succeeded. One of two things - * will happen here. Either we will be finished, or there will be more - * sectors that we were unable to read last time. + * This calls the generic completion function, now that we know + * how many actual sectors finished, and how many sectors we need + * to say have failed. 
*/ - - if (good_sectors > 0) { - - SCSI_LOG_HLCOMPLETE(1, printk("%s : %ld sectors remain.\n", nbuff, - SCpnt->request.nr_sectors)); - SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg)); - - if (SCpnt->use_sg) { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - -#if 0 - SCSI_LOG_HLCOMPLETE(3, printk(":%p %p %d\n", sgpnt[i].alt_address, sgpnt[i].address, - sgpnt[i].length)); -#endif - - if (sgpnt[i].alt_address) { - if (SCpnt->request.cmd == READ) - memcpy(sgpnt[i].alt_address, sgpnt[i].address, - sgpnt[i].length); - scsi_free(sgpnt[i].address, sgpnt[i].length); - } - } - - /* Free list of scatter-gather pointers */ - scsi_free(SCpnt->buffer, SCpnt->sglist_len); - } else { - if (SCpnt->buffer != SCpnt->request.buffer) { - SCSI_LOG_HLCOMPLETE(3, printk("nosg: %p %p %d\n", - SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen)); - - if (SCpnt->request.cmd == READ) - memcpy(SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen); - scsi_free(SCpnt->buffer, SCpnt->bufflen); - } - } - /* - * If multiple sectors are requested in one buffer, then - * they will have been finished off by the first command. - * If not, then we have a multi-buffer command. - */ - if (SCpnt->request.nr_sectors > this_count) { - SCpnt->request.errors = 0; - - if (!SCpnt->request.bh) { - SCSI_LOG_HLCOMPLETE(2, printk("%s : handling page request, no buffer\n", - nbuff)); - - /* - * The SCpnt->request.nr_sectors field is always done in - * 512 byte sectors, even if this really isn't the case. - */ - panic("sd.c: linked page request (%lx %x)", - SCpnt->request.sector, this_count); - } - } - SCpnt = end_scsi_request(SCpnt, 1, good_sectors); - if (result == 0) { - requeue_sd_request(SCpnt); - return; - } - } - if (good_sectors == 0) { - - /* Free up any indirection buffers we allocated for DMA purposes. 
*/ - if (SCpnt->use_sg) { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - SCSI_LOG_HLCOMPLETE(3, printk("err: %p %p %d\n", - SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen)); - if (sgpnt[i].alt_address) { - scsi_free(sgpnt[i].address, sgpnt[i].length); - } - } - scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */ - } else { - SCSI_LOG_HLCOMPLETE(2, printk("nosgerr: %p %p %d\n", - SCpnt->request.buffer, SCpnt->buffer, - SCpnt->bufflen)); - if (SCpnt->buffer != SCpnt->request.buffer) - scsi_free(SCpnt->buffer, SCpnt->bufflen); - } - } - /* - * Now, if we were good little boys and girls, Santa left us a request - * sense buffer. We can extract information from this, so we - * can choose a block to remap, etc. - */ - - if (driver_byte(result) != 0) { - if (suggestion(result) == SUGGEST_REMAP) { -#ifdef REMAP - /* - * Not yet implemented. A read will fail after being remapped, - * a write will call the strategy routine again. - */ - if rscsi_disks - [DEVICE_NR(SCpnt->request.rq_dev)].remap - { - result = 0; - } -#endif - } - if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) { - if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) { - if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) { - /* detected disc change. set a bit and quietly refuse - * further access. - */ - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1; - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sd_request(SCpnt); - return; - } else { - /* - * Must have been a power glitch, or a bus reset. - * Could not have been a media change, so we just retry - * the request and see what happens. - */ - requeue_sd_request(SCpnt); - return; - } - } - } - /* If we had an ILLEGAL REQUEST returned, then we may have - * performed an unsupported command. The only thing this should be - * would be a ten byte read where only a six byte read was supported. 
- * Also, on a system where READ CAPACITY failed, we have have read - * past the end of the disk. - */ - - if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) { - if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) { - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0; - requeue_sd_request(SCpnt); - result = 0; - } else { - /* ???? */ - } - } - if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) { - printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ", - SCpnt->host->host_no, (int) SCpnt->channel, - (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); - print_sense("sd", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, block_sectors); - requeue_sd_request(SCpnt); - return; - } - } /* driver byte != 0 */ - if (result) { - printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n", - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no, - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel, - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id, - rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result); - - if (driver_byte(result) & DRIVER_SENSE) - print_sense("sd", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors); - requeue_sd_request(SCpnt); - return; - } + scsi_io_completion(SCpnt, good_sectors, block_sectors); } /* * requeue_sd_request() is the request handler function for the sd driver. @@ -583,532 +578,6 @@ static void rw_intr(Scsi_Cmnd * SCpnt) * them to SCSI commands. */ -static void do_sd_request(void) -{ - Scsi_Cmnd *SCpnt = NULL; - Scsi_Device *SDev; - struct request *req = NULL; - int flag = 0; - - while (1 == 1) { - if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) { - return; - } - INIT_SCSI_REQUEST; - SDev = rscsi_disks[CURRENT_DEV].device; - - /* - * If the host for this device is in error recovery mode, don't - * do anything at all here. 
When the host leaves error recovery - * mode, it will automatically restart things and start queueing - * commands again. - */ - if (SDev->host->in_recovery) { - return; - } - /* - * I am not sure where the best place to do this is. We need - * to hook in a place where we are likely to come if in user - * space. - */ - if (SDev->was_reset) { - /* - * We need to relock the door, but we might - * be in an interrupt handler. Only do this - * from user space, since we do not want to - * sleep from an interrupt. FIXME(eric) - do this - * from the kernel error handling thred. - */ - if (SDev->removable && !in_interrupt()) { - spin_unlock_irq(&io_request_lock); /* FIXME!!!! */ - scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0); - /* scsi_ioctl may allow CURRENT to change, so start over. */ - SDev->was_reset = 0; - spin_lock_irq(&io_request_lock); /* FIXME!!!! */ - continue; - } - SDev->was_reset = 0; - } - /* We have to be careful here. scsi_allocate_device will get a free pointer, - * but there is no guarantee that it is queueable. In normal usage, - * we want to call this, because other types of devices may have the - * host all tied up, and we want to make sure that we have at least - * one request pending for this type of device. We can also come - * through here while servicing an interrupt, because of the need to - * start another command. If we call scsi_allocate_device more than once, - * then the system can wedge if the command is not queueable. The - * scsi_request_queueable function is safe because it checks to make sure - * that the host is able to take another command before it returns - * a pointer. - */ - - if (flag++ == 0) - SCpnt = scsi_allocate_device(&CURRENT, - rscsi_disks[CURRENT_DEV].device, 0); - else - SCpnt = NULL; - - /* - * The following restore_flags leads to latency problems. FIXME. - * Using a "sti()" gets rid of the latency problems but causes - * race conditions and crashes. - */ - - /* This is a performance enhancement. 
We dig down into the request - * list and try to find a queueable request (i.e. device not busy, - * and host able to accept another command. If we find one, then we - * queue it. This can make a big difference on systems with more than - * one disk drive. We want to have the interrupts off when monkeying - * with the request list, because otherwise the kernel might try to - * slip in a request in between somewhere. - * - * FIXME(eric) - this doesn't belong at this level. The device code in - * ll_rw_blk.c should know how to dig down into the device queue to - * figure out what it can deal with, and what it can't. Consider - * possibility of pulling entire queue down into scsi layer. - */ - if (!SCpnt && sd_template.nr_dev > 1) { - struct request *req1; - req1 = NULL; - req = CURRENT; - while (req) { - SCpnt = scsi_request_queueable(req, - rscsi_disks[DEVICE_NR(req->rq_dev)].device); - if (SCpnt) - break; - req1 = req; - req = req->next; - } - if (SCpnt && req->rq_status == RQ_INACTIVE) { - if (req == CURRENT) - CURRENT = CURRENT->next; - else - req1->next = req->next; - } - } - if (!SCpnt) - return; /* Could not find anything to do */ - - /* Queue command */ - requeue_sd_request(SCpnt); - } /* While */ -} - -static void requeue_sd_request(Scsi_Cmnd * SCpnt) -{ - int dev, devm, block, this_count; - unsigned char cmd[10]; - char nbuff[6]; - int bounce_size, contiguous; - int max_sg; - struct buffer_head *bh, *bhp; - char *buff, *bounce_buffer; - -repeat: - - if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) { - do_sd_request(); - return; - } - devm = SD_PARTITION(SCpnt->request.rq_dev); - dev = DEVICE_NR(SCpnt->request.rq_dev); - - block = SCpnt->request.sector; - this_count = 0; - - SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block)); - - if (devm >= (sd_template.dev_max << 4) || - !rscsi_disks[dev].device || - !rscsi_disks[dev].device->online || - block + SCpnt->request.nr_sectors > sd[devm].nr_sects) { - SCSI_LOG_HLQUEUE(2, 
printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors)); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); - goto repeat; - } - block += sd[devm].start_sect; - - if (rscsi_disks[dev].device->changed) { - /* - * quietly refuse to do anything to a changed disc until the changed - * bit has been reset - */ - /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - sd_devname(devm >> 4, nbuff); - SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n", - nbuff, dev, block)); - - /* - * If we have a 1K hardware sectorsize, prevent access to single - * 512 byte sectors. In theory we could handle this - in fact - * the scsi cdrom driver must be able to handle this because - * we typically use 1K blocksizes, and cdroms typically have - * 2K hardware sectorsizes. Of course, things are simpler - * with the cdrom, since it is read-only. For performance - * reasons, the filesystems should be able to handle this - * and not force the scsi disk driver to use bounce buffers - * for this. 
- */ - if (rscsi_disks[dev].sector_size == 1024) - if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { - printk("sd.c:Bad block number/count requested"); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - if (rscsi_disks[dev].sector_size == 2048) - if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { - printk("sd.c:Bad block number/count requested"); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - if (rscsi_disks[dev].sector_size == 4096) - if ((block & 7) || (SCpnt->request.nr_sectors & 7)) { - printk("sd.cBad block number/count requested"); - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - switch (SCpnt->request.cmd) { - case WRITE: - if (!rscsi_disks[dev].device->writeable) { - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - } - cmd[0] = WRITE_6; - break; - case READ: - cmd[0] = READ_6; - break; - default: - panic("Unknown sd command %d\n", SCpnt->request.cmd); - } - - SCpnt->this_count = 0; - - /* If the host adapter can deal with very large scatter-gather - * requests, it is a waste of time to cluster - */ - contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 : 1); - bounce_buffer = NULL; - bounce_size = (SCpnt->request.nr_sectors << 9); - - /* First see if we need a bounce buffer for this request. If we do, make - * sure that we can allocate a buffer. 
Do not waste space by allocating - * a bounce buffer if we are straddling the 16Mb line - */ - if (contiguous && SCpnt->request.bh && - virt_to_phys(SCpnt->request.bh->b_data) - + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD - && SCpnt->host->unchecked_isa_dma) { - if (virt_to_phys(SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD) - bounce_buffer = (char *) scsi_malloc(bounce_size); - if (!bounce_buffer) - contiguous = 0; - } - if (contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext) - for (bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, - bhp = bhp->b_reqnext) { - if (!CONTIGUOUS_BUFFERS(bh, bhp)) { - if (bounce_buffer) - scsi_free(bounce_buffer, bounce_size); - contiguous = 0; - break; - } - } - if (!SCpnt->request.bh || contiguous) { - - /* case of page request (i.e. raw device), or unlinked buffer */ - this_count = SCpnt->request.nr_sectors; - buff = SCpnt->request.buffer; - SCpnt->use_sg = 0; - - } else if (SCpnt->host->sg_tablesize == 0 || - (scsi_need_isa_buffer && scsi_dma_free_sectors <= 10)) { - - /* Case of host adapter that cannot scatter-gather. We also - * come here if we are running low on DMA buffer memory. We set - * a threshold higher than that we would need for this request so - * we leave room for other requests. Even though we would not need - * it all, we need to be conservative, because if we run low enough - * we have no choice but to panic. - */ - if (SCpnt->host->sg_tablesize != 0 && - scsi_need_isa_buffer && - scsi_dma_free_sectors <= 10) - printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n"); - - this_count = SCpnt->request.current_nr_sectors; - buff = SCpnt->request.buffer; - SCpnt->use_sg = 0; - - } else { - - /* Scatter-gather capable host adapter */ - struct scatterlist *sgpnt; - int count, this_count_max; - int counted; - - bh = SCpnt->request.bh; - this_count = 0; - this_count_max = (rscsi_disks[dev].ten ? 
0xffff : 0xff); - count = 0; - bhp = NULL; - while (bh) { - if ((this_count + (bh->b_size >> 9)) > this_count_max) - break; - if (!bhp || !CONTIGUOUS_BUFFERS(bhp, bh) || - !CLUSTERABLE_DEVICE(SCpnt) || - (SCpnt->host->unchecked_isa_dma && - virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD)) { - if (count < SCpnt->host->sg_tablesize) - count++; - else - break; - } - this_count += (bh->b_size >> 9); - bhp = bh; - bh = bh->b_reqnext; - } -#if 0 - if (SCpnt->host->unchecked_isa_dma && - virt_to_phys(SCpnt->request.bh->b_data - 1) == ISA_DMA_THRESHOLD) - count--; -#endif - SCpnt->use_sg = count; /* Number of chains */ - /* scsi_malloc can only allocate in chunks of 512 bytes */ - count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511; - - SCpnt->sglist_len = count; - max_sg = count / sizeof(struct scatterlist); - if (SCpnt->host->sg_tablesize < max_sg) - max_sg = SCpnt->host->sg_tablesize; - sgpnt = (struct scatterlist *) scsi_malloc(count); - if (!sgpnt) { - printk("Warning - running *really* short on DMA buffers\n"); - SCpnt->use_sg = 0; /* No memory left - bail out */ - this_count = SCpnt->request.current_nr_sectors; - buff = SCpnt->request.buffer; - } else { - memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only - * if memory is available - */ - buff = (char *) sgpnt; - counted = 0; - for (count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext; - count < SCpnt->use_sg && bh; - count++, bh = bhp) { - - bhp = bh->b_reqnext; - - if (!sgpnt[count].address) - sgpnt[count].address = bh->b_data; - sgpnt[count].length += bh->b_size; - counted += bh->b_size >> 9; - - if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 > - ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) && - !sgpnt[count].alt_address) { - sgpnt[count].alt_address = sgpnt[count].address; - /* We try to avoid exhausting the DMA pool, since it is - * easier to control usage here. 
In other places we might - * have a more pressing need, and we would be screwed if - * we ran out */ - if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 10) { - sgpnt[count].address = NULL; - } else { - sgpnt[count].address = - (char *) scsi_malloc(sgpnt[count].length); - } - /* If we start running low on DMA buffers, we abort the - * scatter-gather operation, and free all of the memory - * we have allocated. We want to ensure that all scsi - * operations are able to do at least a non-scatter/gather - * operation */ - if (sgpnt[count].address == NULL) { /* Out of dma memory */ -#if 0 - printk("Warning: Running low on SCSI DMA buffers"); - /* Try switching back to a non s-g operation. */ - while (--count >= 0) { - if (sgpnt[count].alt_address) - scsi_free(sgpnt[count].address, - sgpnt[count].length); - } - this_count = SCpnt->request.current_nr_sectors; - buff = SCpnt->request.buffer; - SCpnt->use_sg = 0; - scsi_free(sgpnt, SCpnt->sglist_len); -#endif - SCpnt->use_sg = count; - this_count = counted -= bh->b_size >> 9; - break; - } - } - /* Only cluster buffers if we know that we can supply DMA - * buffers large enough to satisfy the request. Do not cluster - * a new request if this would mean that we suddenly need to - * start using DMA bounce buffers */ - if (bhp && CONTIGUOUS_BUFFERS(bh, bhp) - && CLUSTERABLE_DEVICE(SCpnt)) { - char *tmp; - - if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length + - bhp->b_size - 1 > ISA_DMA_THRESHOLD && - (SCpnt->host->unchecked_isa_dma) && - !sgpnt[count].alt_address) - continue; - - if (!sgpnt[count].alt_address) { - count--; - continue; - } - if (scsi_dma_free_sectors > 10) - tmp = (char *) scsi_malloc(sgpnt[count].length - + bhp->b_size); - else { - tmp = NULL; - max_sg = SCpnt->use_sg; - } - if (tmp) { - scsi_free(sgpnt[count].address, sgpnt[count].length); - sgpnt[count].address = tmp; - count--; - continue; - } - /* If we are allowed another sg chain, then increment - * counter so we can insert it. 
Otherwise we will end - up truncating */ - - if (SCpnt->use_sg < max_sg) - SCpnt->use_sg++; - } /* contiguous buffers */ - } /* for loop */ - - /* This is actually how many we are going to transfer */ - this_count = counted; - - if (count < SCpnt->use_sg || SCpnt->use_sg - > SCpnt->host->sg_tablesize) { - bh = SCpnt->request.bh; - printk("Use sg, count %d %x %d\n", - SCpnt->use_sg, count, scsi_dma_free_sectors); - printk("maxsg = %x, counted = %d this_count = %d\n", - max_sg, counted, this_count); - while (bh) { - printk("[%p %x] ", bh->b_data, bh->b_size); - bh = bh->b_reqnext; - } - if (SCpnt->use_sg < 16) - for (count = 0; count < SCpnt->use_sg; count++) - printk("{%d:%p %p %d} ", count, - sgpnt[count].address, - sgpnt[count].alt_address, - sgpnt[count].length); - panic("Ooops"); - } - if (SCpnt->request.cmd == WRITE) - for (count = 0; count < SCpnt->use_sg; count++) - if (sgpnt[count].alt_address) - memcpy(sgpnt[count].address, sgpnt[count].alt_address, - sgpnt[count].length); - } /* Able to malloc sgpnt */ - } /* Host adapter capable of scatter-gather */ - - /* Now handle the possibility of DMA to addresses > 16Mb */ - - if (SCpnt->use_sg == 0) { - if (virt_to_phys(buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && - (SCpnt->host->unchecked_isa_dma)) { - if (bounce_buffer) - buff = bounce_buffer; - else - buff = (char *) scsi_malloc(this_count << 9); - if (buff == NULL) { /* Try backing off a bit if we are low on mem */ - this_count = SCpnt->request.current_nr_sectors; - buff = (char *) scsi_malloc(this_count << 9); - if (!buff) - panic("Ran out of DMA buffers."); - } - if (SCpnt->request.cmd == WRITE) - memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9); - } - } - SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", - nbuff, - (SCpnt->request.cmd == WRITE) ? 
"writing" : "reading", - this_count, SCpnt->request.nr_sectors)); - - cmd[1] = (SCpnt->lun << 5) & 0xe0; - - if (rscsi_disks[dev].sector_size == 4096) { - if (block & 7) - panic("sd.c:Bad block number requested"); - if (this_count & 7) - panic("sd.c:Bad block number requested"); - block = block >> 3; - this_count = block >> 3; - } - if (rscsi_disks[dev].sector_size == 2048) { - if (block & 3) - panic("sd.c:Bad block number requested"); - if (this_count & 3) - panic("sd.c:Bad block number requested"); - block = block >> 2; - this_count = this_count >> 2; - } - if (rscsi_disks[dev].sector_size == 1024) { - if (block & 1) - panic("sd.c:Bad block number requested"); - if (this_count & 1) - panic("sd.c:Bad block number requested"); - block = block >> 1; - this_count = this_count >> 1; - } - if (rscsi_disks[dev].sector_size == 256) { - block = block << 1; - this_count = this_count << 1; - } - if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten) { - if (this_count > 0xffff) - this_count = 0xffff; - - cmd[0] += READ_10 - READ_6; - cmd[2] = (unsigned char) (block >> 24) & 0xff; - cmd[3] = (unsigned char) (block >> 16) & 0xff; - cmd[4] = (unsigned char) (block >> 8) & 0xff; - cmd[5] = (unsigned char) block & 0xff; - cmd[6] = cmd[9] = 0; - cmd[7] = (unsigned char) (this_count >> 8) & 0xff; - cmd[8] = (unsigned char) this_count & 0xff; - } else { - if (this_count > 0xff) - this_count = 0xff; - - cmd[1] |= (unsigned char) ((block >> 16) & 0x1f); - cmd[2] = (unsigned char) ((block >> 8) & 0xff); - cmd[3] = (unsigned char) block & 0xff; - cmd[4] = (unsigned char) this_count; - cmd[5] = 0; - } - - /* - * We shouldn't disconnect in the middle of a sector, so with a dumb - * host adapter, it's safe to assume that we can at least transfer - * this many bytes between each connect / disconnect. 
- */ - - SCpnt->transfersize = rscsi_disks[dev].sector_size; - SCpnt->underflow = this_count << 9; - SCpnt->cmd_len = 0; - scsi_do_cmd(SCpnt, (void *) cmd, buff, - this_count * rscsi_disks[dev].sector_size, - rw_intr, - (SCpnt->device->type == TYPE_DISK ? - SD_TIMEOUT : SD_MOD_TIMEOUT), - MAX_RETRIES); -} static int check_scsidisk_media_change(kdev_t full_dev) { @@ -1173,19 +642,17 @@ static int check_scsidisk_media_change(kdev_t full_dev) return retval; } -static void sd_wait_cmd (Scsi_Cmnd * SCpnt, const void *cmnd , - void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *), - int timeout, int retries) +static void sd_wait_cmd(Scsi_Cmnd * SCpnt, const void *cmnd, + void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *), + int timeout, int retries) { DECLARE_MUTEX_LOCKED(sem); - + SCpnt->request.sem = &sem; SCpnt->request.rq_status = RQ_SCSI_BUSY; - scsi_do_cmd (SCpnt, (void *) cmnd, - buffer, bufflen, done, timeout, retries); - spin_unlock_irq(&io_request_lock); - down (&sem); - spin_lock_irq(&io_request_lock); + scsi_do_cmd(SCpnt, (void *) cmnd, + buffer, bufflen, done, timeout, retries); + down(&sem); SCpnt->request.sem = NULL; } @@ -1207,6 +674,7 @@ static int sd_init_onedisk(int i) unsigned char *buffer; unsigned long spintime_value = 0; int the_result, retries, spintime; + int sector_size; Scsi_Cmnd *SCpnt; /* @@ -1221,14 +689,13 @@ static int sd_init_onedisk(int i) if (rscsi_disks[i].device->online == FALSE) { return i; } - spin_lock_irq(&io_request_lock); - /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is * considered a fatal error, and many devices report such an error * just after a scsi bus reset. */ - SCpnt = scsi_allocate_device(NULL, rscsi_disks[i].device, 1); + SCpnt = scsi_allocate_device(rscsi_disks[i].device, 1); + buffer = (unsigned char *) scsi_malloc(512); spintime = 0; @@ -1237,7 +704,7 @@ static int sd_init_onedisk(int i) /* Spinup needs to be done for module loads too. 
*/ do { retries = 0; - + while (retries < 3) { cmd[0] = TEST_UNIT_READY; cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0; @@ -1259,11 +726,9 @@ static int sd_init_onedisk(int i) /* Look for non-removable devices that return NOT_READY. * Issue command to spin up drive for these cases. */ if (the_result && !rscsi_disks[i].device->removable && - SCpnt->sense_buffer[2] == NOT_READY) - { + SCpnt->sense_buffer[2] == NOT_READY) { unsigned long time1; - if (!spintime) - { + if (!spintime) { printk("%s: Spinning up disk...", nbuff); cmd[0] = START_STOP; cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0; @@ -1275,19 +740,15 @@ static int sd_init_onedisk(int i) SCpnt->sense_buffer[2] = 0; sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, - 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); + 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); } - spintime = 1; spintime_value = jiffies; time1 = jiffies + HZ; - spin_unlock_irq(&io_request_lock); - while(time_before(jiffies, time1)); /* Wait 1 second for next try */ + while (time_before(jiffies, time1)); /* Wait 1 second for next try */ printk("."); - spin_lock_irq(&io_request_lock); } - } while(the_result && spintime && time_after(spintime_value+100*HZ, jiffies)); - + } while (the_result && spintime && time_after(spintime_value + 100 * HZ, jiffies)); if (spintime) { if (the_result) printk("not responding...\n"); @@ -1305,7 +766,7 @@ static int sd_init_onedisk(int i) SCpnt->sense_buffer[2] = 0; sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, - 8, sd_init_done, SD_TIMEOUT, MAX_RETRIES); + 8, sd_init_done, SD_TIMEOUT, MAX_RETRIES); the_result = SCpnt->result; retries--; @@ -1344,7 +805,7 @@ static int sd_init_onedisk(int i) printk("%s : block size assumed to be 512 bytes, disk size 1GB. \n", nbuff); rscsi_disks[i].capacity = 0x1fffff; - rscsi_disks[i].sector_size = 512; + sector_size = 512; /* Set dirty bit for removable devices if not ready - sometimes drives * will not report this properly. 
*/ @@ -1363,38 +824,29 @@ static int sd_init_onedisk(int i) (buffer[2] << 8) | buffer[3]); - rscsi_disks[i].sector_size = (buffer[4] << 24) | + sector_size = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7]; - if (rscsi_disks[i].sector_size == 0) { - rscsi_disks[i].sector_size = 512; + if (sector_size == 0) { + sector_size = 512; printk("%s : sector size 0 reported, assuming 512.\n", nbuff); } - if (rscsi_disks[i].sector_size != 512 && - rscsi_disks[i].sector_size != 1024 && - rscsi_disks[i].sector_size != 2048 && - rscsi_disks[i].sector_size != 4096 && - rscsi_disks[i].sector_size != 256) { + if (sector_size != 512 && + sector_size != 1024 && + sector_size != 2048 && + sector_size != 4096 && + sector_size != 256) { printk("%s : unsupported sector size %d.\n", - nbuff, rscsi_disks[i].sector_size); - if (rscsi_disks[i].device->removable) { - rscsi_disks[i].capacity = 0; - } else { - printk("scsi : deleting disk entry.\n"); - sd_detach(rscsi_disks[i].device); - rscsi_disks[i].device = NULL; - - /* Wake up a process waiting for device */ - wake_up(&SCpnt->device->device_wait); - scsi_release_command(SCpnt); - SCpnt = NULL; - scsi_free(buffer, 512); - spin_unlock_irq(&io_request_lock); - - return i; - } + nbuff, sector_size); + /* + * The user might want to re-format the drive with + * a supported sectorsize. Once this happens, it + * would be relatively trivial to set the thing up. + * For this reason, we leave the thing in the table. 
+ */ + rscsi_disks[i].capacity = 0; } - if (rscsi_disks[i].sector_size == 2048) { + if (sector_size == 2048) { int m; /* @@ -1414,7 +866,7 @@ static int sd_init_onedisk(int i) */ int m, mb; int sz_quot, sz_rem; - int hard_sector = rscsi_disks[i].sector_size; + int hard_sector = sector_size; /* There are 16 minors allocated for each major device */ for (m = i << 4; m < ((i + 1) << 4); m++) { sd_hardsizes[m] = hard_sector; @@ -1429,13 +881,13 @@ static int sd_init_onedisk(int i) nbuff, hard_sector, rscsi_disks[i].capacity, mb, sz_quot, sz_rem); } - if (rscsi_disks[i].sector_size == 4096) + if (sector_size == 4096) rscsi_disks[i].capacity <<= 3; - if (rscsi_disks[i].sector_size == 2048) + if (sector_size == 2048) rscsi_disks[i].capacity <<= 2; /* Change into 512 byte sectors */ - if (rscsi_disks[i].sector_size == 1024) + if (sector_size == 1024) rscsi_disks[i].capacity <<= 1; /* Change into 512 byte sectors */ - if (rscsi_disks[i].sector_size == 256) + if (sector_size == 256) rscsi_disks[i].capacity >>= 1; /* Change into 512 byte sectors */ } @@ -1465,7 +917,7 @@ static int sd_init_onedisk(int i) /* same code as READCAPA !! */ sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, - 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); + 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES); the_result = SCpnt->result; @@ -1479,15 +931,15 @@ static int sd_init_onedisk(int i) } } /* check for write protect */ + SCpnt->device->ten = 1; + SCpnt->device->remap = 1; + SCpnt->device->sector_size = sector_size; /* Wake up a process waiting for device */ wake_up(&SCpnt->device->device_wait); scsi_release_command(SCpnt); SCpnt = NULL; - rscsi_disks[i].ten = 1; - rscsi_disks[i].remap = 1; scsi_free(buffer, 512); - spin_unlock_irq(&io_request_lock); return i; } @@ -1572,23 +1024,14 @@ static int sd_init() return 0; } -/* - * sd_get_queue() returns the queue which corresponds to a given device. 
- */ -static struct request **sd_get_queue(kdev_t dev) -{ - return &blk_dev[MAJOR_NR].current_request; -} + static void sd_finish() { struct gendisk *gendisk; int i; for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) { - /* FIXME: After 2.2 we should implement multiple sd queues */ - blk_dev[SD_MAJOR(i)].request_fn = DEVICE_REQUEST; - if (i) - blk_dev[SD_MAJOR(i)].queue = sd_get_queue; + blk_dev[SD_MAJOR(i)].queue = sd_find_queue; } for (gendisk = gendisk_head; gendisk != NULL; gendisk = gendisk->next) if (gendisk == sd_gendisks) @@ -1658,7 +1101,6 @@ static int sd_attach(Scsi_Device * SDp) if (i >= sd_template.dev_max) panic("scsi_devices corrupt (sd)"); - SDp->scsi_request_fn = do_sd_request; rscsi_disks[i].device = SDp; rscsi_disks[i].has_part_table = 0; sd_template.nr_dev++; @@ -1713,7 +1155,7 @@ int revalidate_scsidisk(kdev_t dev, int maxusage) * to make sure that everything remains consistent. */ sd_blocksizes[index] = 1024; - if (rscsi_disks[target].sector_size == 2048) + if (rscsi_disks[target].device->sector_size == 2048) sd_blocksizes[index] = 2048; else sd_blocksizes[index] = 1024; @@ -1824,7 +1266,7 @@ void cleanup_module(void) } for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) { - blk_dev[SD_MAJOR(i)].request_fn = NULL; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(SD_MAJOR(i))); blk_size[SD_MAJOR(i)] = NULL; hardsect_size[SD_MAJOR(i)] = NULL; read_ahead[SD_MAJOR(i)] = 0; diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index f893b446f028..9bbfbeb509a4 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -5,7 +5,7 @@ * * * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. 
*/ @@ -27,14 +27,11 @@ extern struct hd_struct *sd; typedef struct scsi_disk { unsigned capacity; /* size in blocks */ - unsigned sector_size; /* size in bytes */ Scsi_Device *device; unsigned char ready; /* flag ready for FLOPTICAL */ unsigned char write_prot; /* flag write_protect for rmvable dev */ unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ - unsigned ten:1; /* support ten byte read / write */ - unsigned remap:1; /* support remapping */ unsigned has_part_table:1; /* has partition table */ } Scsi_Disk; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 4a2258cb53b1..97a911ddb019 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -355,7 +355,6 @@ static ssize_t sg_read(struct file * filp, char * buf, static ssize_t sg_write(struct file * filp, const char * buf, size_t count, loff_t *ppos) { - unsigned long flags; int mxsize, cmd_size, k; unsigned char cmnd[MAX_COMMAND_SIZE]; int input_size; @@ -432,8 +431,9 @@ static ssize_t sg_write(struct file * filp, const char * buf, return k; /* probably out of space --> ENOMEM */ } /* SCSI_LOG_TIMEOUT(7, printk("sg_write: allocating device\n")); */ - if (! (SCpnt = scsi_allocate_device(NULL, sdp->device, - !(filp->f_flags & O_NONBLOCK)))) { + if (! 
(SCpnt = scsi_allocate_device(sdp->device, + !(filp->f_flags & O_NONBLOCK)))) + { sg_finish_rem_req(srp, NULL, 0); return -EAGAIN; /* No available command blocks at the moment */ } @@ -448,7 +448,6 @@ static ssize_t sg_write(struct file * filp, const char * buf, cmnd[1]= (cmnd[1] & 0x1f) | (sdp->device->lun << 5); /* SCSI_LOG_TIMEOUT(7, printk("sg_write: do cmd\n")); */ - spin_lock_irqsave(&io_request_lock, flags); SCpnt->use_sg = srp->data.use_sg; SCpnt->sglist_len = srp->data.sglist_len; SCpnt->bufflen = srp->data.bufflen; @@ -467,7 +466,6 @@ static ssize_t sg_write(struct file * filp, const char * buf, (void *)SCpnt->buffer, mxsize, sg_command_done, sfp->timeout, SG_DEFAULT_RETRIES); /* 'mxsize' overwrites SCpnt->bufflen, hence need for b_malloc_len */ - spin_unlock_irqrestore(&io_request_lock, flags); /* SCSI_LOG_TIMEOUT(6, printk("sg_write: sent scsi cmd to mid-level\n")); */ return count; } @@ -1116,7 +1114,9 @@ static void sg_shorten_timeout(Scsi_Cmnd * scpnt) scsi_add_timer(scpnt, scpnt->timeout_per_command, scsi_old_times_out); #else + spin_unlock_irq(&io_request_lock); scsi_sleep(HZ); /* just sleep 1 second and hope ... */ + spin_lock_irq(&io_request_lock); #endif } diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index f7ad10693371..d482a729478a 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -1,17 +1,17 @@ /* * sr.c Copyright (C) 1992 David Giller - * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * * adapted from: * sd.c Copyright (C) 1992 Drew Eckhardt * Linux scsi disk driver by * Drew Eckhardt * - * Modified by Eric Youngdale ericy@cais.com to + * Modified by Eric Youngdale ericy@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. * - * Modified by Eric Youngdale eric@aib.com to support loadable + * Modified by Eric Youngdale eric@andante.org to support loadable * low-level scsi drivers. 
* * Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to @@ -60,17 +60,28 @@ static int sr_attach(Scsi_Device *); static int sr_detect(Scsi_Device *); static void sr_detach(Scsi_Device *); -struct Scsi_Device_Template sr_template = { - NULL, "cdrom", "sr", NULL, TYPE_ROM, - SCSI_CDROM_MAJOR, 0, 0, 0, 1, - sr_detect, sr_init, - sr_finish, sr_attach, sr_detach +static int sr_init_command(Scsi_Cmnd *); + +struct Scsi_Device_Template sr_template = +{ + name:"cdrom", + tag:"sr", + scsi_type:TYPE_ROM, + major:SCSI_CDROM_MAJOR, + blk:1, + detect:sr_detect, + init:sr_init, + finish:sr_finish, + attach:sr_attach, + detach:sr_detach, + init_command:sr_init_command }; Scsi_CD *scsi_CDs = NULL; static int *sr_sizes = NULL; static int *sr_blocksizes = NULL; +static int *sr_hardsizes = NULL; static int sr_open(struct cdrom_device_info *, int); void get_sectorsize(int); @@ -82,7 +93,7 @@ static int sr_packet(struct cdrom_device_info *, struct cdrom_generic_command *) static void sr_release(struct cdrom_device_info *cdi) { - if (scsi_CDs[MINOR(cdi->dev)].sector_size > 2048) + if (scsi_CDs[MINOR(cdi->dev)].device->sector_size > 2048) sr_set_blocklength(MINOR(cdi->dev), 2048); sync_dev(cdi->dev); scsi_CDs[MINOR(cdi->dev)].device->access_count--; @@ -108,7 +119,7 @@ static struct cdrom_device_ops sr_dops = sr_audio_ioctl, /* audio ioctl */ sr_dev_ioctl, /* device-specific ioctl */ CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | - CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | + CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET, @@ -165,7 +176,7 @@ int sr_media_change(struct cdrom_device_info *cdi, int slot) */ scsi_CDs[MINOR(cdi->dev)].needs_sector_size = 1; - scsi_CDs[MINOR(cdi->dev)].sector_size = 2048; + scsi_CDs[MINOR(cdi->dev)].device->sector_size = 2048; } return retval; } @@ -178,7 
+189,7 @@ int sr_media_change(struct cdrom_device_info *cdi, int slot) static void rw_intr(Scsi_Cmnd * SCpnt) { int result = SCpnt->result; - int this_count = SCpnt->this_count; + int this_count = SCpnt->bufflen >> 9; int good_sectors = (result == 0 ? this_count : 0); int block_sectors = 0; @@ -191,6 +202,7 @@ static void rw_intr(Scsi_Cmnd * SCpnt) avoid unnecessary additional work such as memcpy's that could be avoided. */ + if (driver_byte(result) != 0 && /* An error occurred */ SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */ (SCpnt->sense_buffer[2] == MEDIUM_ERROR || @@ -205,177 +217,178 @@ static void rw_intr(Scsi_Cmnd * SCpnt) block_sectors = SCpnt->request.bh->b_size >> 9; if (block_sectors < 4) block_sectors = 4; - if (scsi_CDs[device_nr].sector_size == 2048) + if (scsi_CDs[device_nr].device->sector_size == 2048) error_sector <<= 2; error_sector &= ~(block_sectors - 1); good_sectors = error_sector - SCpnt->request.sector; if (good_sectors < 0 || good_sectors >= this_count) good_sectors = 0; /* - The SCSI specification allows for the value returned by READ - CAPACITY to be up to 75 2K sectors past the last readable - block. Therefore, if we hit a medium error within the last - 75 2K sectors, we decrease the saved size value. + * The SCSI specification allows for the value returned by READ + * CAPACITY to be up to 75 2K sectors past the last readable + * block. Therefore, if we hit a medium error within the last + * 75 2K sectors, we decrease the saved size value. */ if ((error_sector >> 1) < sr_sizes[device_nr] && scsi_CDs[device_nr].capacity - error_sector < 4 * 75) sr_sizes[device_nr] = error_sector >> 1; } - if (good_sectors > 0) { /* Some sectors were read successfully. 
*/ - if (SCpnt->use_sg == 0) { - if (SCpnt->buffer != SCpnt->request.buffer) { - int offset; - offset = (SCpnt->request.sector % 4) << 9; - memcpy((char *) SCpnt->request.buffer, - (char *) SCpnt->buffer + offset, - good_sectors << 9); - /* Even though we are not using scatter-gather, we look - * ahead and see if there is a linked request for the - * other half of this buffer. If there is, then satisfy - * it. */ - if ((offset == 0) && good_sectors == 2 && - SCpnt->request.nr_sectors > good_sectors && - SCpnt->request.bh && - SCpnt->request.bh->b_reqnext && - SCpnt->request.bh->b_reqnext->b_size == 1024) { - memcpy((char *) SCpnt->request.bh->b_reqnext->b_data, - (char *) SCpnt->buffer + 1024, - 1024); - good_sectors += 2; - }; - - scsi_free(SCpnt->buffer, 2048); - } - } else { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - if (sgpnt[i].alt_address) { - if (sgpnt[i].alt_address != sgpnt[i].address) { - memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length); - }; - scsi_free(sgpnt[i].address, sgpnt[i].length); - }; - }; - scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */ - if (SCpnt->request.sector % 4) - good_sectors -= 2; - /* See if there is a padding record at the end that needs to be removed */ - if (good_sectors > SCpnt->request.nr_sectors) - good_sectors -= 2; - }; + /* + * This calls the generic completion function, now that we know + * how many actual sectors finished, and how many sectors we need + * to say have failed. 
+ */ + scsi_io_completion(SCpnt, good_sectors, block_sectors); +} -#ifdef DEBUG - printk("(%x %x %x) ", SCpnt->request.bh, SCpnt->request.nr_sectors, - good_sectors); -#endif - if (SCpnt->request.nr_sectors > this_count) { - SCpnt->request.errors = 0; - if (!SCpnt->request.bh) - panic("sr.c: linked page request (%lx %x)", - SCpnt->request.sector, this_count); - } - SCpnt = end_scsi_request(SCpnt, 1, good_sectors); /* All done */ - if (result == 0) { - requeue_sr_request(SCpnt); - return; - } + +static request_queue_t *sr_find_queue(kdev_t dev) +{ + if (MINOR(dev) >= sr_template.dev_max + || !scsi_CDs[MINOR(dev)].device) + return NULL; /* No such device */ + return &scsi_CDs[MINOR(dev)].device->request_queue; +} + +static int sr_init_command(Scsi_Cmnd * SCpnt) +{ + int dev, devm, block, this_count; + + devm = MINOR(SCpnt->request.rq_dev); + dev = DEVICE_NR(SCpnt->request.rq_dev); + + block = SCpnt->request.sector; + this_count = SCpnt->request_bufflen >> 9; + + if (!SCpnt->request.bh) { + /* + * Umm, yeah, right. Swapping to a cdrom. Nice try. + */ + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; } - if (good_sectors == 0) { - /* We only come through here if no sectors were read successfully. */ - - /* Free up any indirection buffers we allocated for DMA purposes. 
*/ - if (SCpnt->use_sg) { - struct scatterlist *sgpnt; - int i; - sgpnt = (struct scatterlist *) SCpnt->buffer; - for (i = 0; i < SCpnt->use_sg; i++) { - if (sgpnt[i].alt_address) { - scsi_free(sgpnt[i].address, sgpnt[i].length); - } - } - scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */ + SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %d, block = %d\n", devm, block)); + + if (dev >= sr_template.nr_dev || + !scsi_CDs[dev].device || + !scsi_CDs[dev].device->online) { + SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors)); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); + return 0; + } + if (scsi_CDs[dev].device->changed) { + /* + * quietly refuse to do anything to a changed disc until the changed + * bit has been reset + */ + /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + /* + * we do lazy blocksize switching (when reading XA sectors, + * see CDROMREADMODE2 ioctl) + */ + if (scsi_CDs[dev].device->sector_size > 2048) { + if (!in_interrupt()) + sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048); + else + printk("sr: can't switch blocksize: in interrupt\n"); + } + if (SCpnt->request.cmd == WRITE) { + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } + if (scsi_CDs[dev].device->sector_size == 1024) { + if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { + printk("sr.c:Bad 1K block number requested (%d %ld)", + block, SCpnt->request.nr_sectors); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; } else { - if (SCpnt->buffer != SCpnt->request.buffer) - scsi_free(SCpnt->buffer, SCpnt->bufflen); + block = block >> 1; + this_count = this_count >> 1; } - } - if (driver_byte(result) != 0) { - if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) { - if 
((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) { - /* detected disc change. set a bit and quietly refuse - * further access. */ - - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1; - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sr_request(SCpnt); - return; - } - } - if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) { - printk("sr%d: CD-ROM error: ", - DEVICE_NR(SCpnt->request.rq_dev)); - print_sense("sr", SCpnt); - printk("command was: "); - print_command(SCpnt->cmnd); - if (scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten) { - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0; - requeue_sr_request(SCpnt); - result = 0; - return; - } else { - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sr_request(SCpnt); /* Do next request */ - return; - } - - } - if (SCpnt->sense_buffer[2] == NOT_READY) { - printk(KERN_INFO "sr%d: CD-ROM not ready. Make sure you have a disc in the drive.\n", - DEVICE_NR(SCpnt->request.rq_dev)); - SCpnt = end_scsi_request(SCpnt, 0, this_count); - requeue_sr_request(SCpnt); /* Do next request */ - return; - } - if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) { - printk("scsi%d: MEDIUM ERROR on " - "channel %d, id %d, lun %d, CDB: ", - SCpnt->host->host_no, (int) SCpnt->channel, - (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); - print_sense("sr", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, block_sectors); - requeue_sr_request(SCpnt); - return; + if (scsi_CDs[dev].device->sector_size == 2048) { + if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { + printk("sr.c:Bad 2K block number requested (%d %ld)", + block, SCpnt->request.nr_sectors); + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; + } else { + block = block >> 2; + this_count = this_count >> 2; } - if (SCpnt->sense_buffer[2] == VOLUME_OVERFLOW) { - printk("scsi%d: VOLUME OVERFLOW on " - "channel %d, id %d, lun %d, CDB: ", - SCpnt->host->host_no, (int) SCpnt->channel, - (int) SCpnt->target, (int) 
SCpnt->lun); - print_command(SCpnt->cmnd); - print_sense("sr", SCpnt); - SCpnt = end_scsi_request(SCpnt, 0, block_sectors); - requeue_sr_request(SCpnt); - return; + } + switch (SCpnt->request.cmd) { + case WRITE: + if (!scsi_CDs[dev].device->writeable) { + SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors); + return 0; } + SCpnt->cmnd[0] = WRITE_6; + break; + case READ: + SCpnt->cmnd[0] = READ_6; + break; + default: + panic("Unknown sd command %d\n", SCpnt->request.cmd); } - /* We only get this far if we have an error we have not recognized */ - if (result) { - printk("SCSI CD error : host %d id %d lun %d return code = %03x\n", - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no, - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->id, - scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, - result); - - if (status_byte(result) == CHECK_CONDITION) - print_sense("sr", SCpnt); - - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors); - requeue_sr_request(SCpnt); + + SCSI_LOG_HLQUEUE(2, printk("sr%d : %s %d/%ld 512 byte blocks.\n", + devm, + (SCpnt->request.cmd == WRITE) ? 
"writing" : "reading", + this_count, SCpnt->request.nr_sectors)); + + SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0; + + if (((this_count > 0xff) || (block > 0x1fffff)) && SCpnt->device->ten) { + if (this_count > 0xffff) + this_count = 0xffff; + + SCpnt->cmnd[0] += READ_10 - READ_6; + SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; + SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; + SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; + SCpnt->cmnd[5] = (unsigned char) block & 0xff; + SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; + SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; + SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; + } else { + if (this_count > 0xff) + this_count = 0xff; + + SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); + SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); + SCpnt->cmnd[3] = (unsigned char) block & 0xff; + SCpnt->cmnd[4] = (unsigned char) this_count; + SCpnt->cmnd[5] = 0; } + + /* + * We shouldn't disconnect in the middle of a sector, so with a dumb + * host adapter, it's safe to assume that we can at least transfer + * this many bytes between each connect / disconnect. + */ + SCpnt->transfersize = scsi_CDs[dev].device->sector_size; + SCpnt->underflow = this_count << 9; + + SCpnt->allowed = MAX_RETRIES; + SCpnt->timeout_per_command = SR_TIMEOUT; + + /* + * This is the completion routine we use. This is matched in terms + * of capability to this function. + */ + SCpnt->done = rw_intr; + + /* + * This indicates that the command is ready from our end to be + * queued. + */ + return 1; } static int sr_open(struct cdrom_device_info *cdi, int purpose) @@ -416,390 +429,6 @@ static int sr_open(struct cdrom_device_info *cdi, int purpose) * translate them to SCSI commands. 
*/ -static void do_sr_request(void) -{ - Scsi_Cmnd *SCpnt = NULL; - struct request *req = NULL; - Scsi_Device *SDev; - int flag = 0; - - while (1 == 1) { - if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) { - return; - }; - - INIT_SCSI_REQUEST; - - SDev = scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device; - - /* - * If the host for this device is in error recovery mode, don't - * do anything at all here. When the host leaves error recovery - * mode, it will automatically restart things and start queueing - * commands again. - */ - if (SDev->host->in_recovery) { - return; - } - /* - * I am not sure where the best place to do this is. We need - * to hook in a place where we are likely to come if in user - * space. - */ - if (SDev->was_reset) { - /* - * We need to relock the door, but we might - * be in an interrupt handler. Only do this - * from user space, since we do not want to - * sleep from an interrupt. - */ - if (SDev->removable && !in_interrupt()) { - spin_unlock_irq(&io_request_lock); /* FIXME!!!! */ - scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0); - spin_lock_irq(&io_request_lock); /* FIXME!!!! */ - /* scsi_ioctl may allow CURRENT to change, so start over. */ - SDev->was_reset = 0; - continue; - } - SDev->was_reset = 0; - } - /* we do lazy blocksize switching (when reading XA sectors, - * see CDROMREADMODE2 ioctl) */ - if (scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].sector_size > 2048) { - if (!in_interrupt()) - sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048); -#if 1 - else - printk("sr: can't switch blocksize: in interrupt\n"); -#endif - } - if (flag++ == 0) - SCpnt = scsi_allocate_device(&CURRENT, - scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device, 0); - else - SCpnt = NULL; - - /* This is a performance enhancement. We dig down into the request list and - * try to find a queueable request (i.e. device not busy, and host able to - * accept another command. If we find one, then we queue it. This can - * make a big difference on systems with more than one disk drive. 
We want - * to have the interrupts off when monkeying with the request list, because - * otherwise the kernel might try to slip in a request in between somewhere. */ - - if (!SCpnt && sr_template.nr_dev > 1) { - struct request *req1; - req1 = NULL; - req = CURRENT; - while (req) { - SCpnt = scsi_request_queueable(req, - scsi_CDs[DEVICE_NR(req->rq_dev)].device); - if (SCpnt) - break; - req1 = req; - req = req->next; - } - if (SCpnt && req->rq_status == RQ_INACTIVE) { - if (req == CURRENT) - CURRENT = CURRENT->next; - else - req1->next = req->next; - } - } - if (!SCpnt) - return; /* Could not find anything to do */ - - wake_up(&wait_for_request); - - /* Queue command */ - requeue_sr_request(SCpnt); - } /* While */ -} - -void requeue_sr_request(Scsi_Cmnd * SCpnt) -{ - unsigned int dev, block, realcount; - unsigned char cmd[10], *buffer, tries; - int this_count, start, end_rec; - - tries = 2; - -repeat: - if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) { - do_sr_request(); - return; - } - dev = MINOR(SCpnt->request.rq_dev); - block = SCpnt->request.sector; - buffer = NULL; - this_count = 0; - - if (dev >= sr_template.nr_dev) { - /* printk("CD-ROM request error: invalid device.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - if (!scsi_CDs[dev].use) { - /* printk("CD-ROM request error: device marked not in use.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - if (!scsi_CDs[dev].device->online) { - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - if (scsi_CDs[dev].device->changed) { - /* - * quietly refuse to do anything to a changed disc - * until the changed bit has been reset - */ - /* printk("CD-ROM has been changed. 
Prohibiting further I/O.\n"); */ - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - tries = 2; - goto repeat; - } - switch (SCpnt->request.cmd) { - case WRITE: - SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); - goto repeat; - break; - case READ: - cmd[0] = READ_6; - break; - default: - panic("Unknown sr command %d\n", SCpnt->request.cmd); - } - - cmd[1] = (SCpnt->lun << 5) & 0xe0; - - /* - * Now do the grungy work of figuring out which sectors we need, and - * where in memory we are going to put them. - * - * The variables we need are: - * - * this_count= number of 512 byte sectors being read - * block = starting cdrom sector to read. - * realcount = # of cdrom sectors to read - * - * The major difference between a scsi disk and a scsi cdrom - * is that we will always use scatter-gather if we can, because we can - * work around the fact that the buffer cache has a block size of 1024, - * and we have 2048 byte sectors. This code should work for buffers that - * are any multiple of 512 bytes long. - */ - - SCpnt->use_sg = 0; - - if (SCpnt->host->sg_tablesize > 0 && - (!scsi_need_isa_buffer || - scsi_dma_free_sectors >= 10)) { - struct buffer_head *bh; - struct scatterlist *sgpnt; - int count, this_count_max; - bh = SCpnt->request.bh; - this_count = 0; - count = 0; - this_count_max = (scsi_CDs[dev].ten ? 0xffff : 0xff) << 4; - /* Calculate how many links we can use. 
First see if we need - * a padding record at the start */ - this_count = SCpnt->request.sector % 4; - if (this_count) - count++; - while (bh && count < SCpnt->host->sg_tablesize) { - if ((this_count + (bh->b_size >> 9)) > this_count_max) - break; - this_count += (bh->b_size >> 9); - count++; - bh = bh->b_reqnext; - }; - /* Fix up in case of an odd record at the end */ - end_rec = 0; - if (this_count % 4) { - if (count < SCpnt->host->sg_tablesize) { - count++; - end_rec = (4 - (this_count % 4)) << 9; - this_count += 4 - (this_count % 4); - } else { - count--; - this_count -= (this_count % 4); - }; - }; - SCpnt->use_sg = count; /* Number of chains */ - /* scsi_malloc can only allocate in chunks of 512 bytes */ - count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511; - - SCpnt->sglist_len = count; - sgpnt = (struct scatterlist *) scsi_malloc(count); - if (!sgpnt) { - printk("Warning - running *really* short on DMA buffers\n"); - SCpnt->use_sg = 0; /* No memory left - bail out */ - } else { - buffer = (unsigned char *) sgpnt; - count = 0; - bh = SCpnt->request.bh; - if (SCpnt->request.sector % 4) { - sgpnt[count].length = (SCpnt->request.sector % 4) << 9; - sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length); - if (!sgpnt[count].address) - panic("SCSI DMA pool exhausted."); - sgpnt[count].alt_address = sgpnt[count].address; /* Flag to delete - if needed */ - count++; - }; - for (bh = SCpnt->request.bh; count < SCpnt->use_sg; - count++, bh = bh->b_reqnext) { - if (bh) { /* Need a placeholder at the end of the record? 
*/ - sgpnt[count].address = bh->b_data; - sgpnt[count].length = bh->b_size; - sgpnt[count].alt_address = NULL; - } else { - sgpnt[count].address = (char *) scsi_malloc(end_rec); - if (!sgpnt[count].address) - panic("SCSI DMA pool exhausted."); - sgpnt[count].length = end_rec; - sgpnt[count].alt_address = sgpnt[count].address; - if (count + 1 != SCpnt->use_sg) - panic("Bad sr request list"); - break; - }; - if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 > - ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) { - sgpnt[count].alt_address = sgpnt[count].address; - /* We try to avoid exhausting the DMA pool, since it is easier - * to control usage here. In other places we might have a more - * pressing need, and we would be screwed if we ran out */ - if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 5) { - sgpnt[count].address = NULL; - } else { - sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length); - }; - /* If we start running low on DMA buffers, we abort the scatter-gather - * operation, and free all of the memory we have allocated. We want to - * ensure that all scsi operations are able to do at least a non-scatter/gather - * operation */ - if (sgpnt[count].address == NULL) { /* Out of dma memory */ - printk("Warning: Running low on SCSI DMA buffers\n"); - /* Try switching back to a non scatter-gather operation. 
*/ - while (--count >= 0) { - if (sgpnt[count].alt_address) - scsi_free(sgpnt[count].address, sgpnt[count].length); - }; - SCpnt->use_sg = 0; - scsi_free(buffer, SCpnt->sglist_len); - break; - }; /* if address == NULL */ - }; /* if need DMA fixup */ - }; /* for loop to fill list */ -#ifdef DEBUG - printk("SR: %d %d %d %d %d *** ", SCpnt->use_sg, SCpnt->request.sector, - this_count, - SCpnt->request.current_nr_sectors, - SCpnt->request.nr_sectors); - for (count = 0; count < SCpnt->use_sg; count++) - printk("SGlist: %d %x %x %x\n", count, - sgpnt[count].address, - sgpnt[count].alt_address, - sgpnt[count].length); -#endif - }; /* Able to allocate scatter-gather list */ - }; - - if (SCpnt->use_sg == 0) { - /* We cannot use scatter-gather. Do this the old fashion way */ - if (!SCpnt->request.bh) - this_count = SCpnt->request.nr_sectors; - else - this_count = (SCpnt->request.bh->b_size >> 9); - - start = block % 4; - if (start) { - this_count = ((this_count > 4 - start) ? - (4 - start) : (this_count)); - buffer = (unsigned char *) scsi_malloc(2048); - } else if (this_count < 4) { - buffer = (unsigned char *) scsi_malloc(2048); - } else { - this_count -= this_count % 4; - buffer = (unsigned char *) SCpnt->request.buffer; - if (virt_to_phys(buffer) + (this_count << 9) > ISA_DMA_THRESHOLD && - SCpnt->host->unchecked_isa_dma) - buffer = (unsigned char *) scsi_malloc(this_count << 9); - } - }; - - if (scsi_CDs[dev].sector_size == 2048) - block = block >> 2; /* These are the sectors that the cdrom uses */ - else - block = block & 0xfffffffc; - - realcount = (this_count + 3) / 4; - - if (scsi_CDs[dev].sector_size == 512) - realcount = realcount << 2; - - /* - * Note: The scsi standard says that READ_6 is *optional*, while - * READ_10 is mandatory. Thus there is no point in using - * READ_6. 
- */ - if (scsi_CDs[dev].ten) { - if (realcount > 0xffff) { - realcount = 0xffff; - this_count = realcount * (scsi_CDs[dev].sector_size >> 9); - } - cmd[0] += READ_10 - READ_6; - cmd[2] = (unsigned char) (block >> 24) & 0xff; - cmd[3] = (unsigned char) (block >> 16) & 0xff; - cmd[4] = (unsigned char) (block >> 8) & 0xff; - cmd[5] = (unsigned char) block & 0xff; - cmd[6] = cmd[9] = 0; - cmd[7] = (unsigned char) (realcount >> 8) & 0xff; - cmd[8] = (unsigned char) realcount & 0xff; - } else { - if (realcount > 0xff) { - realcount = 0xff; - this_count = realcount * (scsi_CDs[dev].sector_size >> 9); - } - cmd[1] |= (unsigned char) ((block >> 16) & 0x1f); - cmd[2] = (unsigned char) ((block >> 8) & 0xff); - cmd[3] = (unsigned char) block & 0xff; - cmd[4] = (unsigned char) realcount; - cmd[5] = 0; - } - -#ifdef DEBUG - { - int i; - printk("ReadCD: %d %d %d %d\n", block, realcount, buffer, this_count); - printk("Use sg: %d\n", SCpnt->use_sg); - printk("Dumping command: "); - for (i = 0; i < 12; i++) - printk("%2.2x ", cmd[i]); - printk("\n"); - }; -#endif - - /* Some dumb host adapters can speed transfers by knowing the - * minimum transfersize in advance. - * - * We shouldn't disconnect in the middle of a sector, but the cdrom - * sector size can be larger than the size of a buffer and the - * transfer may be split to the size of a buffer. So it's safe to - * assume that we can at least transfer the minimum of the buffer - * size (1024) and the sector size between each connect / disconnect. - */ - - SCpnt->transfersize = (scsi_CDs[dev].sector_size > 1024) ? 
- 1024 : scsi_CDs[dev].sector_size; - - SCpnt->this_count = this_count; - scsi_do_cmd(SCpnt, (void *) cmd, buffer, - realcount * scsi_CDs[dev].sector_size, - rw_intr, SR_TIMEOUT, MAX_RETRIES); -} static int sr_detect(Scsi_Device * SDp) { @@ -833,7 +462,7 @@ static int sr_attach(Scsi_Device * SDp) if (i >= sr_template.dev_max) panic("scsi_devices corrupt (sr)"); - SDp->scsi_request_fn = do_sr_request; + scsi_CDs[i].device = SDp; sr_template.nr_dev++; @@ -860,12 +489,13 @@ void get_sectorsize(int i) unsigned char cmd[10]; unsigned char *buffer; int the_result, retries; + int sector_size; Scsi_Cmnd *SCpnt; - spin_lock_irq(&io_request_lock); buffer = (unsigned char *) scsi_malloc(512); - SCpnt = scsi_allocate_device(NULL, scsi_CDs[i].device, 1); - spin_unlock_irq(&io_request_lock); + + + SCpnt = scsi_allocate_device(scsi_CDs[i].device, 1); retries = 3; do { @@ -879,8 +509,8 @@ void get_sectorsize(int i) /* Do the command and wait.. */ - scsi_wait_cmd (SCpnt, (void *) cmd, (void *) buffer, - 512, sr_init_done, SR_TIMEOUT, MAX_RETRIES); + scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer, + 512, sr_init_done, SR_TIMEOUT, MAX_RETRIES); the_result = SCpnt->result; retries--; @@ -894,7 +524,7 @@ void get_sectorsize(int i) if (the_result) { scsi_CDs[i].capacity = 0x1fffff; - scsi_CDs[i].sector_size = 2048; /* A guess, just in case */ + sector_size = 2048; /* A guess, just in case */ scsi_CDs[i].needs_sector_size = 1; } else { #if 0 @@ -905,9 +535,9 @@ void get_sectorsize(int i) (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]); - scsi_CDs[i].sector_size = (buffer[4] << 24) | + sector_size = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7]; - switch (scsi_CDs[i].sector_size) { + switch (sector_size) { /* * HP 4020i CD-Recorder reports 2340 byte sectors * Philips CD-Writers report 2352 byte sectors @@ -917,7 +547,7 @@ void get_sectorsize(int i) case 0: case 2340: case 2352: - scsi_CDs[i].sector_size = 2048; + sector_size = 2048; /* fall through */ case 
2048: scsi_CDs[i].capacity *= 4; @@ -926,11 +556,13 @@ void get_sectorsize(int i) break; default: printk("sr%d: unsupported sector size %d.\n", - i, scsi_CDs[i].sector_size); + i, sector_size); scsi_CDs[i].capacity = 0; scsi_CDs[i].needs_sector_size = 1; } + scsi_CDs[i].device->sector_size = sector_size; + /* * Add this so that we have the ability to correctly gauge * what the device is capable of. @@ -959,9 +591,7 @@ void get_capabilities(int i) "" }; - spin_lock_irq(&io_request_lock); buffer = (unsigned char *) scsi_malloc(512); - spin_unlock_irq(&io_request_lock); cmd[0] = MODE_SENSE; cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0; cmd[2] = 0x2a; @@ -1008,19 +638,19 @@ void get_capabilities(int i) if ((buffer[n + 3] & 0x1) == 0) /* can't write CD-R media */ scsi_CDs[i].cdi.mask |= CDC_CD_R; - if ((buffer[n+6] & 0x8) == 0) + if ((buffer[n + 6] & 0x8) == 0) /* can't eject */ scsi_CDs[i].cdi.mask |= CDC_OPEN_TRAY; - if ((buffer[n+6] >> 5) == mechtype_individual_changer || - (buffer[n+6] >> 5) == mechtype_cartridge_changer) - scsi_CDs[i].cdi.capacity = - cdrom_number_of_slots(&(scsi_CDs[i].cdi)); + if ((buffer[n + 6] >> 5) == mechtype_individual_changer || + (buffer[n + 6] >> 5) == mechtype_cartridge_changer) + scsi_CDs[i].cdi.capacity = + cdrom_number_of_slots(&(scsi_CDs[i].cdi)); if (scsi_CDs[i].cdi.capacity <= 1) - /* not a changer */ + /* not a changer */ scsi_CDs[i].cdi.mask |= CDC_SELECT_DISC; /*else I don't think it can close its tray - scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */ + scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */ scsi_free(buffer, 512); @@ -1039,24 +669,21 @@ static int sr_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command int stat; /* get the device */ - SCpnt = scsi_allocate_device(NULL, device, 1); + SCpnt = scsi_allocate_device(device, 1); if (SCpnt == NULL) return -ENODEV; /* this just doesn't seem right /axboe */ /* use buffer for ISA DMA */ buflen = (cgc->buflen + 511) & ~511; if (cgc->buffer && SCpnt->host->unchecked_isa_dma 
&& - (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) { - spin_lock_irq(&io_request_lock); + (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) { buffer = scsi_malloc(buflen); - spin_unlock_irq(&io_request_lock); if (buffer == NULL) { printk("sr: SCSI DMA pool exhausted."); return -ENOMEM; } memcpy(buffer, cgc->buffer, cgc->buflen); } - /* set the LUN */ cgc->cmd[1] |= device->lun << 5; @@ -1065,8 +692,8 @@ static int sr_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command /* scsi_do_cmd sets the command length */ SCpnt->cmd_len = 0; - scsi_wait_cmd (SCpnt, (void *)cgc->cmd, (void *)buffer, cgc->buflen, - sr_init_done, SR_TIMEOUT, MAX_RETRIES); + scsi_wait_cmd(SCpnt, (void *) cgc->cmd, (void *) buffer, cgc->buflen, + sr_init_done, SR_TIMEOUT, MAX_RETRIES); stat = SCpnt->result; @@ -1080,7 +707,6 @@ static int sr_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command memcpy(cgc->buffer, buffer, cgc->buflen); scsi_free(buffer, buflen); } - return stat; } @@ -1113,12 +739,18 @@ static int sr_init() sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC); + sr_hardsizes = (int *) scsi_init_malloc(sr_template.dev_max * + sizeof(int), GFP_ATOMIC); /* * These are good guesses for the time being. 
*/ for (i = 0; i < sr_template.dev_max; i++) + { sr_blocksizes[i] = 2048; + sr_hardsizes[i] = 2048; + } blksize_size[MAJOR_NR] = sr_blocksizes; + hardsect_size[MAJOR_NR] = sr_hardsizes; return 0; } @@ -1127,7 +759,7 @@ void sr_finish() int i; char name[6]; - blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + blk_dev[MAJOR_NR].queue = sr_find_queue; blk_size[MAJOR_NR] = sr_sizes; for (i = 0; i < sr_template.nr_dev; ++i) { @@ -1136,7 +768,7 @@ void sr_finish() if (scsi_CDs[i].capacity) continue; scsi_CDs[i].capacity = 0x1fffff; - scsi_CDs[i].sector_size = 2048; /* A guess, just in case */ + scsi_CDs[i].device->sector_size = 2048; /* A guess, just in case */ scsi_CDs[i].needs_sector_size = 1; scsi_CDs[i].device->changed = 1; /* force recheck CD type */ #if 0 @@ -1145,8 +777,9 @@ void sr_finish() printk("Scd sectorsize = %d bytes.\n", scsi_CDs[i].sector_size); #endif scsi_CDs[i].use = 1; - scsi_CDs[i].ten = 1; - scsi_CDs[i].remap = 1; + + scsi_CDs[i].device->ten = 1; + scsi_CDs[i].device->remap = 1; scsi_CDs[i].readcd_known = 0; scsi_CDs[i].readcd_cdda = 0; sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9); @@ -1234,9 +867,12 @@ void cleanup_module(void) scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int)); sr_blocksizes = NULL; + scsi_init_free((char *) sr_hardsizes, sr_template.dev_max * sizeof(int)); + sr_hardsizes = NULL; } blksize_size[MAJOR_NR] = NULL; - blk_dev[MAJOR_NR].request_fn = NULL; + hardsect_size[MAJOR_NR] = sr_hardsizes; + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); blk_size[MAJOR_NR] = NULL; read_ahead[MAJOR_NR] = 0; diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index defb1d358925..e4aad11ab1aa 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -9,7 +9,7 @@ * * * - * Modified by Eric Youngdale eric@aib.com to + * Modified by Eric Youngdale eric@andante.org to * add scatter-gather, multiple outstanding request, and other * enhancements. 
*/ @@ -21,15 +21,12 @@ typedef struct { unsigned capacity; /* size in blocks */ - unsigned sector_size; /* size in bytes */ Scsi_Device *device; unsigned int vendor; /* vendor code, see sr_vendor.c */ unsigned long ms_offset; /* for reading multisession-CD's */ unsigned char sector_bit_size; /* sector size = 2^sector_bit_size */ unsigned char sector_bit_shift; /* sectors/FS block = 2^sector_bit_shift */ unsigned needs_sector_size:1; /* needs to get sector size */ - unsigned ten:1; /* support ten byte commands */ - unsigned remap:1; /* support remapping */ unsigned use:1; /* is this device still supportable */ unsigned xa_flag:1; /* CD has XA sectors ? */ unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 50d1a19b365c..df963da1f97d 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -16,7 +16,7 @@ #include "sr.h" #if 0 -# define DEBUG +#define DEBUG #endif /* The sr_is_xa() seems to trigger firmware bugs with some drives :-( @@ -32,134 +32,121 @@ extern void get_sectorsize(int); static void sr_ioctl_done(Scsi_Cmnd * SCpnt) { - struct request * req; - - req = &SCpnt->request; - req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ - - if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) { - memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen); - scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511); - SCpnt->buffer = req->buffer; - } - - if (req->sem != NULL) { - up(req->sem); - } + struct request *req; + + req = &SCpnt->request; + req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ + + if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) { + memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen); + scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511); + SCpnt->buffer = req->buffer; + } + if (req->sem != NULL) { + up(req->sem); + } } /* We do our own retries because we want to know what the specific error code is. 
Normally the UNIT_ATTENTION code will automatically clear after one error */ -int sr_do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength, int quiet) +int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet) { - Scsi_Cmnd * SCpnt; - Scsi_Device * SDev; - int result, err = 0, retries = 0; - unsigned long flags; - char * bounce_buffer; - - spin_lock_irqsave(&io_request_lock, flags); - SDev = scsi_CDs[target].device; - SCpnt = scsi_allocate_device(NULL, scsi_CDs[target].device, 1); - spin_unlock_irqrestore(&io_request_lock, flags); - - /* use ISA DMA buffer if necessary */ - SCpnt->request.buffer=buffer; - if (buffer && SCpnt->host->unchecked_isa_dma && - (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) { - bounce_buffer = (char *)scsi_malloc((buflength + 511) & ~511); - if (bounce_buffer == NULL) { - printk("SCSI DMA pool exhausted."); - return -ENOMEM; - } - memcpy(bounce_buffer, (char *)buffer, buflength); - buffer = bounce_buffer; - } - -retry: - if( !scsi_block_when_processing_errors(SDev) ) - return -ENODEV; - - scsi_wait_cmd(SCpnt, (void *)sr_cmd, (void *)buffer, buflength, - sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES); - - result = SCpnt->result; - - /* Minimal error checking. Ignore cases we know about, and report the rest. 
*/ - if(driver_byte(result) != 0) { - switch(SCpnt->sense_buffer[2] & 0xf) { - case UNIT_ATTENTION: - scsi_CDs[target].device->changed = 1; - if (!quiet) - printk(KERN_INFO "sr%d: disc change detected.\n", target); - if (retries++ < 10) - goto retry; - err = -ENOMEDIUM; - break; - case NOT_READY: /* This happens if there is no disc in drive */ - if (SCpnt->sense_buffer[12] == 0x04 && - SCpnt->sense_buffer[13] == 0x01) { - /* sense: Logical unit is in process of becoming ready */ - if (!quiet) - printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target); - if (retries++ < 10) { - /* sleep 2 sec and try again */ - /* - * The spinlock is silly - we should really lock more of this - * function, but the minimal locking required to not lock up - * is around this - scsi_sleep() assumes we hold the spinlock. - */ - spin_lock_irqsave(&io_request_lock, flags); - scsi_sleep(2*HZ); - spin_unlock_irqrestore(&io_request_lock, flags); - goto retry; - } else { - /* 20 secs are enough? */ - err = -ENOMEDIUM; - break; + Scsi_Cmnd *SCpnt; + Scsi_Device *SDev; + int result, err = 0, retries = 0; + unsigned long flags; + char *bounce_buffer; + + SDev = scsi_CDs[target].device; + SCpnt = scsi_allocate_device(scsi_CDs[target].device, 1); + + /* use ISA DMA buffer if necessary */ + SCpnt->request.buffer = buffer; + if (buffer && SCpnt->host->unchecked_isa_dma && + (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) { + bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511); + if (bounce_buffer == NULL) { + printk("SCSI DMA pool exhausted."); + return -ENOMEM; } - } - if (!quiet) - printk(KERN_INFO "sr%d: CDROM not ready. 
Make sure there is a disc in the drive.\n",target); + memcpy(bounce_buffer, (char *) buffer, buflength); + buffer = bounce_buffer; + } + retry: + if (!scsi_block_when_processing_errors(SDev)) + return -ENODEV; + + + scsi_wait_cmd(SCpnt, (void *) sr_cmd, (void *) buffer, buflength, + sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES); + + result = SCpnt->result; + + /* Minimal error checking. Ignore cases we know about, and report the rest. */ + if (driver_byte(result) != 0) { + switch (SCpnt->sense_buffer[2] & 0xf) { + case UNIT_ATTENTION: + scsi_CDs[target].device->changed = 1; + if (!quiet) + printk(KERN_INFO "sr%d: disc change detected.\n", target); + if (retries++ < 10) + goto retry; + err = -ENOMEDIUM; + break; + case NOT_READY: /* This happens if there is no disc in drive */ + if (SCpnt->sense_buffer[12] == 0x04 && + SCpnt->sense_buffer[13] == 0x01) { + /* sense: Logical unit is in process of becoming ready */ + if (!quiet) + printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target); + if (retries++ < 10) { + /* sleep 2 sec and try again */ + scsi_sleep(2 * HZ); + goto retry; + } else { + /* 20 secs are enough? */ + err = -ENOMEDIUM; + break; + } + } + if (!quiet) + printk(KERN_INFO "sr%d: CDROM not ready. 
Make sure there is a disc in the drive.\n", target); #ifdef DEBUG - print_sense("sr", SCpnt); + print_sense("sr", SCpnt); #endif - err = -ENOMEDIUM; - break; - case ILLEGAL_REQUEST: - if (!quiet) - printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL " - "REQUEST.\n", target); - if (SCpnt->sense_buffer[12] == 0x20 && - SCpnt->sense_buffer[13] == 0x00) { - /* sense: Invalid command operation code */ - err = -EDRIVE_CANT_DO_THIS; - } else { - err = -EINVAL; - } + err = -ENOMEDIUM; + break; + case ILLEGAL_REQUEST: + if (!quiet) + printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL " + "REQUEST.\n", target); + if (SCpnt->sense_buffer[12] == 0x20 && + SCpnt->sense_buffer[13] == 0x00) { + /* sense: Invalid command operation code */ + err = -EDRIVE_CANT_DO_THIS; + } else { + err = -EINVAL; + } #ifdef DEBUG - print_command(sr_cmd); - print_sense("sr", SCpnt); + print_command(sr_cmd); + print_sense("sr", SCpnt); #endif - break; - default: - printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target); - print_command(sr_cmd); - print_sense("sr", SCpnt); - err = -EIO; + break; + default: + printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target); + print_command(sr_cmd); + print_sense("sr", SCpnt); + err = -EIO; + } } - } - - spin_lock_irqsave(&io_request_lock, flags); - result = SCpnt->result; - /* Wake up a process waiting for device*/ - wake_up(&SCpnt->device->device_wait); - scsi_release_command(SCpnt); - SCpnt = NULL; - spin_unlock_irqrestore(&io_request_lock, flags); - return err; + result = SCpnt->result; + /* Wake up a process waiting for device */ + wake_up(&SCpnt->device->device_wait); + scsi_release_command(SCpnt); + SCpnt = NULL; + return err; } /* ---------------------------------------------------------------------- */ @@ -167,95 +154,94 @@ retry: static int test_unit_ready(int minor) { - u_char sr_cmd[10]; + u_char sr_cmd[10]; - sr_cmd[0] = GPCMD_TEST_UNIT_READY; - sr_cmd[1] = ((scsi_CDs[minor].device -> lun) << 5); - sr_cmd[2] = sr_cmd[3] = 
sr_cmd[4] = sr_cmd[5] = 0; - return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1); + sr_cmd[0] = GPCMD_TEST_UNIT_READY; + sr_cmd[1] = ((scsi_CDs[minor].device->lun) << 5); + sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; + return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1); } int sr_tray_move(struct cdrom_device_info *cdi, int pos) { - u_char sr_cmd[10]; - - sr_cmd[0] = GPCMD_START_STOP_UNIT; - sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device -> lun) << 5); - sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0; - sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */; - - return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0); + u_char sr_cmd[10]; + + sr_cmd[0] = GPCMD_START_STOP_UNIT; + sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5); + sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0; + sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ; + + return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0); } int sr_lock_door(struct cdrom_device_info *cdi, int lock) { - return scsi_ioctl (scsi_CDs[MINOR(cdi->dev)].device, - lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK, - 0); + return scsi_ioctl(scsi_CDs[MINOR(cdi->dev)].device, + lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK, + 0); } int sr_drive_status(struct cdrom_device_info *cdi, int slot) { - if (CDSL_CURRENT != slot) { - /* we have no changer support */ - return -EINVAL; - } - - if (0 == test_unit_ready(MINOR(cdi->dev))) - return CDS_DISC_OK; + if (CDSL_CURRENT != slot) { + /* we have no changer support */ + return -EINVAL; + } + if (0 == test_unit_ready(MINOR(cdi->dev))) + return CDS_DISC_OK; - return CDS_TRAY_OPEN; + return CDS_TRAY_OPEN; } int sr_disk_status(struct cdrom_device_info *cdi) { - struct cdrom_tochdr toc_h; - struct cdrom_tocentry toc_e; - int i,rc,have_datatracks = 0; - - /* look for data tracks */ - if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h))) - return (rc == -ENOMEDIUM) ? 
CDS_NO_DISC : CDS_NO_INFO; - - for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) { - toc_e.cdte_track = i; - toc_e.cdte_format = CDROM_LBA; - if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e)) - return CDS_NO_INFO; - if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) { - have_datatracks = 1; - break; - } - } - if (!have_datatracks) - return CDS_AUDIO; - - if (scsi_CDs[MINOR(cdi->dev)].xa_flag) - return CDS_XA_2_1; - else - return CDS_DATA_1; + struct cdrom_tochdr toc_h; + struct cdrom_tocentry toc_e; + int i, rc, have_datatracks = 0; + + /* look for data tracks */ + if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h))) + return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO; + + for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) { + toc_e.cdte_track = i; + toc_e.cdte_format = CDROM_LBA; + if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e)) + return CDS_NO_INFO; + if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) { + have_datatracks = 1; + break; + } + } + if (!have_datatracks) + return CDS_AUDIO; + + if (scsi_CDs[MINOR(cdi->dev)].xa_flag) + return CDS_XA_2_1; + else + return CDS_DATA_1; } int sr_get_last_session(struct cdrom_device_info *cdi, - struct cdrom_multisession* ms_info) + struct cdrom_multisession *ms_info) { - ms_info->addr.lba=scsi_CDs[MINOR(cdi->dev)].ms_offset; - ms_info->xa_flag=scsi_CDs[MINOR(cdi->dev)].xa_flag || - (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0); + ms_info->addr.lba = scsi_CDs[MINOR(cdi->dev)].ms_offset; + ms_info->xa_flag = scsi_CDs[MINOR(cdi->dev)].xa_flag || + (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0); return 0; } -int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn) +int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) { - u_char sr_cmd[10]; + u_char sr_cmd[10]; char buffer[32]; - int result; - + int result; + sr_cmd[0] = GPCMD_READ_SUBCHANNEL; sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5); - sr_cmd[2] = 0x40; /* I do want the subchannel info */ - sr_cmd[3] = 0x02; /* Give me medium catalog number 
info */ + sr_cmd[2] = 0x40; /* I do want the subchannel info */ + sr_cmd[3] = 0x02; /* Give me medium catalog number info */ sr_cmd[4] = sr_cmd[5] = 0; sr_cmd[6] = 0; sr_cmd[7] = 0; @@ -263,9 +249,9 @@ int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn) sr_cmd[9] = 0; result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0); - - memcpy (mcn->medium_catalog_number, buffer + 9, 13); - mcn->medium_catalog_number[13] = 0; + + memcpy(mcn->medium_catalog_number, buffer + 9, 13); + mcn->medium_catalog_number[13] = 0; return result; } @@ -273,26 +259,26 @@ int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn) int sr_reset(struct cdrom_device_info *cdi) { invalidate_buffers(cdi->dev); - return 0; + return 0; } int sr_select_speed(struct cdrom_device_info *cdi, int speed) { - u_char sr_cmd[12]; - - if (speed == 0) - speed = 0xffff; /* set to max */ - else - speed *= 177; /* Nx to kbyte/s */ - - memset(sr_cmd,0,12); - sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */ + u_char sr_cmd[12]; + + if (speed == 0) + speed = 0xffff; /* set to max */ + else + speed *= 177; /* Nx to kbyte/s */ + + memset(sr_cmd, 0, 12); + sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */ sr_cmd[1] = (scsi_CDs[MINOR(cdi->dev)].device->lun) << 5; - sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */ - sr_cmd[3] = speed & 0xff; /* LSB */ + sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */ + sr_cmd[3] = speed & 0xff; /* LSB */ - if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0)) - return -EIO; + if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0)) + return -EIO; return 0; } @@ -302,73 +288,72 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed) /* only cdromreadtochdr and cdromreadtocentry are left - for use with the */ /* sr_disk_status interface for the generic cdrom driver. 
*/ -int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void* arg) +int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { - u_char sr_cmd[10]; - int result, target = MINOR(cdi->dev); - unsigned char buffer[32]; - - switch (cmd) - { - case CDROMREADTOCHDR: - { - struct cdrom_tochdr* tochdr = (struct cdrom_tochdr*)arg; - - sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; - sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5); - sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; - sr_cmd[6] = 0; - sr_cmd[7] = 0; /* MSB of length (12) */ - sr_cmd[8] = 12; /* LSB of length */ - sr_cmd[9] = 0; - - result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1); - - tochdr->cdth_trk0 = buffer[2]; - tochdr->cdth_trk1 = buffer[3]; - - break; - } - - case CDROMREADTOCENTRY: - { - struct cdrom_tocentry* tocentry = (struct cdrom_tocentry*)arg; - - sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; - sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | - (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0); - sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; - sr_cmd[6] = tocentry->cdte_track; - sr_cmd[7] = 0; /* MSB of length (12) */ - sr_cmd[8] = 12; /* LSB of length */ - sr_cmd[9] = 0; - - result = sr_do_ioctl (target, sr_cmd, buffer, 12, 0); - - tocentry->cdte_ctrl = buffer[5] & 0xf; - tocentry->cdte_adr = buffer[5] >> 4; - tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 
1 : 0; - if (tocentry->cdte_format == CDROM_MSF) { - tocentry->cdte_addr.msf.minute = buffer[9]; - tocentry->cdte_addr.msf.second = buffer[10]; - tocentry->cdte_addr.msf.frame = buffer[11]; - } else - tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) - + buffer[10]) << 8) + buffer[11]; - - break; - } - - default: - return -EINVAL; - } + u_char sr_cmd[10]; + int result, target = MINOR(cdi->dev); + unsigned char buffer[32]; + + switch (cmd) { + case CDROMREADTOCHDR: + { + struct cdrom_tochdr *tochdr = (struct cdrom_tochdr *) arg; + + sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5); + sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; + sr_cmd[6] = 0; + sr_cmd[7] = 0; /* MSB of length (12) */ + sr_cmd[8] = 12; /* LSB of length */ + sr_cmd[9] = 0; + + result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1); + + tochdr->cdth_trk0 = buffer[2]; + tochdr->cdth_trk1 = buffer[3]; + + break; + } + + case CDROMREADTOCENTRY: + { + struct cdrom_tocentry *tocentry = (struct cdrom_tocentry *) arg; + + sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | + (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0); + sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; + sr_cmd[6] = tocentry->cdte_track; + sr_cmd[7] = 0; /* MSB of length (12) */ + sr_cmd[8] = 12; /* LSB of length */ + sr_cmd[9] = 0; + + result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0); + + tocentry->cdte_ctrl = buffer[5] & 0xf; + tocentry->cdte_adr = buffer[5] >> 4; + tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 
1 : 0; + if (tocentry->cdte_format == CDROM_MSF) { + tocentry->cdte_addr.msf.minute = buffer[9]; + tocentry->cdte_addr.msf.second = buffer[10]; + tocentry->cdte_addr.msf.frame = buffer[11]; + } else + tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) + + buffer[10]) << 8) + buffer[11]; + + break; + } + + default: + return -EINVAL; + } #if 0 - if (result) - printk("DEBUG: sr_audio: result for ioctl %x: %x\n",cmd,result); + if (result) + printk("DEBUG: sr_audio: result for ioctl %x: %x\n", cmd, result); #endif - - return result; + + return result; } /* ----------------------------------------------------------------------- @@ -385,73 +370,78 @@ int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void* arg) * blksize: 2048 | 2336 | 2340 | 2352 */ -int -sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize) +int sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize) { - unsigned char cmd[12]; + unsigned char cmd[12]; #ifdef DEBUG - printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n", - minor,lba,format,blksize); + printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n", + minor, lba, format, blksize); #endif - memset(cmd,0,12); - cmd[0] = GPCMD_READ_CD; /* READ_CD */ - cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2); - cmd[2] = (unsigned char)(lba >> 24) & 0xff; - cmd[3] = (unsigned char)(lba >> 16) & 0xff; - cmd[4] = (unsigned char)(lba >> 8) & 0xff; - cmd[5] = (unsigned char) lba & 0xff; - cmd[8] = 1; - switch (blksize) { - case 2336: cmd[9] = 0x58; break; - case 2340: cmd[9] = 0x78; break; - case 2352: cmd[9] = 0xf8; break; - default: cmd[9] = 0x10; break; - } - return sr_do_ioctl(minor, cmd, dest, blksize, 0); + memset(cmd, 0, 12); + cmd[0] = GPCMD_READ_CD; /* READ_CD */ + cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2); + cmd[2] = (unsigned char) (lba >> 24) & 0xff; + cmd[3] = (unsigned char) (lba >> 16) & 0xff; + cmd[4] = (unsigned char) (lba >> 8) 
& 0xff; + cmd[5] = (unsigned char) lba & 0xff; + cmd[8] = 1; + switch (blksize) { + case 2336: + cmd[9] = 0x58; + break; + case 2340: + cmd[9] = 0x78; + break; + case 2352: + cmd[9] = 0xf8; + break; + default: + cmd[9] = 0x10; + break; + } + return sr_do_ioctl(minor, cmd, dest, blksize, 0); } /* * read sectors with blocksizes other than 2048 */ -int -sr_read_sector(int minor, int lba, int blksize, unsigned char *dest) +int sr_read_sector(int minor, int lba, int blksize, unsigned char *dest) { - unsigned char cmd[12]; /* the scsi-command */ - int rc; - - /* we try the READ CD command first... */ - if (scsi_CDs[minor].readcd_known) { - rc = sr_read_cd(minor, dest, lba, 0, blksize); - if (-EDRIVE_CANT_DO_THIS != rc) - return rc; - scsi_CDs[minor].readcd_known = 0; - printk("CDROM does'nt support READ CD (0xbe) command\n"); - /* fall & retry the other way */ - } - - /* ... if this fails, we switch the blocksize using MODE SELECT */ - if (blksize != scsi_CDs[minor].sector_size) - if (0 != (rc = sr_set_blocklength(minor, blksize))) - return rc; - + unsigned char cmd[12]; /* the scsi-command */ + int rc; + + /* we try the READ CD command first... */ + if (scsi_CDs[minor].readcd_known) { + rc = sr_read_cd(minor, dest, lba, 0, blksize); + if (-EDRIVE_CANT_DO_THIS != rc) + return rc; + scsi_CDs[minor].readcd_known = 0; + printk("CDROM does'nt support READ CD (0xbe) command\n"); + /* fall & retry the other way */ + } + /* ... 
if this fails, we switch the blocksize using MODE SELECT */ + if (blksize != scsi_CDs[minor].device->sector_size) { + if (0 != (rc = sr_set_blocklength(minor, blksize))) + return rc; + } #ifdef DEBUG - printk("sr%d: sr_read_sector lba=%d blksize=%d\n",minor,lba,blksize); + printk("sr%d: sr_read_sector lba=%d blksize=%d\n", minor, lba, blksize); #endif - - memset(cmd,0,12); - cmd[0] = GPCMD_READ_10; - cmd[1] = (scsi_CDs[minor].device->lun << 5); - cmd[2] = (unsigned char)(lba >> 24) & 0xff; - cmd[3] = (unsigned char)(lba >> 16) & 0xff; - cmd[4] = (unsigned char)(lba >> 8) & 0xff; - cmd[5] = (unsigned char) lba & 0xff; - cmd[8] = 1; - rc = sr_do_ioctl(minor, cmd, dest, blksize, 0); - - return rc; + + memset(cmd, 0, 12); + cmd[0] = GPCMD_READ_10; + cmd[1] = (scsi_CDs[minor].device->lun << 5); + cmd[2] = (unsigned char) (lba >> 24) & 0xff; + cmd[3] = (unsigned char) (lba >> 16) & 0xff; + cmd[4] = (unsigned char) (lba >> 8) & 0xff; + cmd[5] = (unsigned char) lba & 0xff; + cmd[8] = 1; + rc = sr_do_ioctl(minor, cmd, dest, blksize, 0); + + return rc; } /* @@ -459,55 +449,50 @@ sr_read_sector(int minor, int lba, int blksize, unsigned char *dest) * ret: 1 == mode2 (XA), 0 == mode1, <0 == error */ -int -sr_is_xa(int minor) +int sr_is_xa(int minor) { - unsigned char *raw_sector; - int is_xa; - unsigned long flags; - - if (!xa_test) - return 0; - - spin_lock_irqsave(&io_request_lock, flags); - raw_sector = (unsigned char *) scsi_malloc(2048+512); - spin_unlock_irqrestore(&io_request_lock, flags); - if (!raw_sector) return -ENOMEM; - if (0 == sr_read_sector(minor,scsi_CDs[minor].ms_offset+16, - CD_FRAMESIZE_RAW1,raw_sector)) { - is_xa = (raw_sector[3] == 0x02) ? 1 : 0; - } else { - /* read a raw sector failed for some reason. 
*/ - is_xa = -1; - } - spin_lock_irqsave(&io_request_lock, flags); - scsi_free(raw_sector, 2048+512); - spin_unlock_irqrestore(&io_request_lock, flags); + unsigned char *raw_sector; + int is_xa; + + if (!xa_test) + return 0; + + raw_sector = (unsigned char *) scsi_malloc(2048 + 512); + if (!raw_sector) + return -ENOMEM; + if (0 == sr_read_sector(minor, scsi_CDs[minor].ms_offset + 16, + CD_FRAMESIZE_RAW1, raw_sector)) { + is_xa = (raw_sector[3] == 0x02) ? 1 : 0; + } else { + /* read a raw sector failed for some reason. */ + is_xa = -1; + } + scsi_free(raw_sector, 2048 + 512); #ifdef DEBUG - printk("sr%d: sr_is_xa: %d\n",minor,is_xa); + printk("sr%d: sr_is_xa: %d\n", minor, is_xa); #endif - return is_xa; + return is_xa; } int sr_dev_ioctl(struct cdrom_device_info *cdi, - unsigned int cmd, unsigned long arg) + unsigned int cmd, unsigned long arg) { - int target; - - target = MINOR(cdi->dev); - - switch (cmd) { - case BLKROSET: - case BLKROGET: - case BLKRASET: - case BLKRAGET: - case BLKFLSBUF: - case BLKSSZGET: - return blk_ioctl(cdi->dev, cmd, arg); - - default: - return scsi_ioctl(scsi_CDs[target].device,cmd,(void *) arg); - } + int target; + + target = MINOR(cdi->dev); + + switch (cmd) { + case BLKROSET: + case BLKROGET: + case BLKRASET: + case BLKRAGET: + case BLKFLSBUF: + case BLKSSZGET: + return blk_ioctl(cdi->dev, cmd, arg); + + default: + return scsi_ioctl(scsi_CDs[target].device, cmd, (void *) arg); + } } /* diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index 0240197dbb2a..56f4f004de18 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -1,5 +1,5 @@ /* -*-linux-c-*- - * + * vendor-specific code for SCSI CD-ROM's goes here. * * This is needed becauce most of the new features (multisession and @@ -23,15 +23,15 @@ * - TOSHIBA: Detection and support of multisession CD's. * Some XA-Sector tweaking, required for older drives. * - * - SONY: Detection and support of multisession CD's. 
+ * - SONY: Detection and support of multisession CD's. * added by Thomas Quinot * * - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to * work with SONY (SCSI3 now) code. * - * - HP: Much like SONY, but a little different... (Thomas) + * - HP: Much like SONY, but a little different... (Thomas) * HP-Writers only ??? Maybe other CD-Writers work with this too ? - * HP 6020 writers now supported. + * HP 6020 writers now supported. */ #include @@ -47,16 +47,16 @@ #include "sr.h" #if 0 -# define DEBUG +#define DEBUG #endif /* here are some constants to sort the vendors into groups */ -#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */ +#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */ #define VENDOR_NEC 2 #define VENDOR_TOSHIBA 3 -#define VENDOR_WRITER 4 /* pre-scsi3 writers */ +#define VENDOR_WRITER 4 /* pre-scsi3 writers */ #define VENDOR_ID (scsi_CDs[minor].vendor) @@ -66,7 +66,7 @@ void sr_vendor_init(int minor) VENDOR_ID = VENDOR_SCSI3; #else char *vendor = scsi_CDs[minor].device->vendor; - char *model = scsi_CDs[minor].device->model; + char *model = scsi_CDs[minor].device->model; /* default */ VENDOR_ID = VENDOR_SCSI3; @@ -77,24 +77,24 @@ void sr_vendor_init(int minor) if (scsi_CDs[minor].device->type == TYPE_WORM) { VENDOR_ID = VENDOR_WRITER; - } else if (!strncmp (vendor, "NEC", 3)) { + } else if (!strncmp(vendor, "NEC", 3)) { VENDOR_ID = VENDOR_NEC; - if (!strncmp (model,"CD-ROM DRIVE:25", 15) || - !strncmp (model,"CD-ROM DRIVE:36", 15) || - !strncmp (model,"CD-ROM DRIVE:83", 15) || - !strncmp (model,"CD-ROM DRIVE:84 ",16) + if (!strncmp(model, "CD-ROM DRIVE:25", 15) || + !strncmp(model, "CD-ROM DRIVE:36", 15) || + !strncmp(model, "CD-ROM DRIVE:83", 15) || + !strncmp(model, "CD-ROM DRIVE:84 ", 16) #if 0 - /* my NEC 3x returns the read-raw data if a read-raw - is followed by a read for the same sector - aeb */ - || !strncmp (model,"CD-ROM DRIVE:500",16) + /* my NEC 3x returns the read-raw data if a read-raw + is followed by a read for the same sector - 
aeb */ + || !strncmp(model, "CD-ROM DRIVE:500", 16) #endif - ) + ) /* these can't handle multisession, may hang */ scsi_CDs[minor].cdi.mask |= CDC_MULTI_SESSION; - } else if (!strncmp (vendor, "TOSHIBA", 7)) { + } else if (!strncmp(vendor, "TOSHIBA", 7)) { VENDOR_ID = VENDOR_TOSHIBA; - + } #endif } @@ -105,10 +105,10 @@ void sr_vendor_init(int minor) int sr_set_blocklength(int minor, int blocklength) { - unsigned char *buffer; /* the buffer for the ioctl */ - unsigned char cmd[12]; /* the scsi-command */ - struct ccs_modesel_head *modesel; - int rc,density = 0; + unsigned char *buffer; /* the buffer for the ioctl */ + unsigned char cmd[12]; /* the scsi-command */ + struct ccs_modesel_head *modesel; + int rc, density = 0; #ifdef CONFIG_BLK_DEV_SR_VENDOR if (VENDOR_ID == VENDOR_TOSHIBA) @@ -116,27 +116,29 @@ int sr_set_blocklength(int minor, int blocklength) #endif buffer = (unsigned char *) scsi_malloc(512); - if (!buffer) return -ENOMEM; + if (!buffer) + return -ENOMEM; #ifdef DEBUG - printk("sr%d: MODE SELECT 0x%x/%d\n",minor,density,blocklength); + printk("sr%d: MODE SELECT 0x%x/%d\n", minor, density, blocklength); #endif - memset(cmd,0,12); + memset(cmd, 0, 12); cmd[0] = MODE_SELECT; cmd[1] = (scsi_CDs[minor].device->lun << 5) | (1 << 4); cmd[4] = 12; - modesel = (struct ccs_modesel_head*)buffer; - memset(modesel,0,sizeof(*modesel)); + modesel = (struct ccs_modesel_head *) buffer; + memset(modesel, 0, sizeof(*modesel)); modesel->block_desc_length = 0x08; - modesel->density = density; - modesel->block_length_med = (blocklength >> 8 ) & 0xff; - modesel->block_length_lo = blocklength & 0xff; - if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0))) - scsi_CDs[minor].sector_size = blocklength; + modesel->density = density; + modesel->block_length_med = (blocklength >> 8) & 0xff; + modesel->block_length_lo = blocklength & 0xff; + if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0))) { + scsi_CDs[minor].device->sector_size = blocklength; 
+ } #ifdef DEBUG else printk("sr%d: switching blocklength to %d bytes failed\n", - minor,blocklength); + minor, blocklength); #endif scsi_free(buffer, 512); return rc; @@ -149,28 +151,27 @@ int sr_set_blocklength(int minor, int blocklength) int sr_cd_check(struct cdrom_device_info *cdi) { - unsigned long sector; - unsigned char *buffer; /* the buffer for the ioctl */ - unsigned char cmd[12]; /* the scsi-command */ - int rc,no_multi,minor; + unsigned long sector; + unsigned char *buffer; /* the buffer for the ioctl */ + unsigned char cmd[12]; /* the scsi-command */ + int rc, no_multi, minor; minor = MINOR(cdi->dev); if (scsi_CDs[minor].cdi.mask & CDC_MULTI_SESSION) return 0; - - spin_lock_irq(&io_request_lock); + buffer = (unsigned char *) scsi_malloc(512); - spin_unlock_irq(&io_request_lock); - if(!buffer) return -ENOMEM; - - sector = 0; /* the multisession sector offset goes here */ - no_multi = 0; /* flag: the drive can't handle multisession */ - rc = 0; - - switch(VENDOR_ID) { - + if (!buffer) + return -ENOMEM; + + sector = 0; /* the multisession sector offset goes here */ + no_multi = 0; /* flag: the drive can't handle multisession */ + rc = 0; + + switch (VENDOR_ID) { + case VENDOR_SCSI3: - memset(cmd,0,12); + memset(cmd, 0, 12); cmd[0] = READ_TOC; cmd[1] = (scsi_CDs[minor].device->lun << 5); cmd[8] = 12; @@ -180,70 +181,70 @@ int sr_cd_check(struct cdrom_device_info *cdi) break; if ((buffer[0] << 8) + buffer[1] < 0x0a) { printk(KERN_INFO "sr%d: Hmm, seems the drive " - "doesn't support multisession CD's\n",minor); + "doesn't support multisession CD's\n", minor); no_multi = 1; break; } sector = buffer[11] + (buffer[10] << 8) + - (buffer[9] << 16) + (buffer[8] << 24); + (buffer[9] << 16) + (buffer[8] << 24); if (buffer[6] <= 1) { /* ignore sector offsets from first track */ sector = 0; } break; - + #ifdef CONFIG_BLK_DEV_SR_VENDOR - case VENDOR_NEC: { - unsigned long min,sec,frame; - memset(cmd,0,12); - cmd[0] = 0xde; - cmd[1] = (scsi_CDs[minor].device->lun << 
5) | 0x03; - cmd[2] = 0xb0; - rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1); - if (rc != 0) - break; - if (buffer[14] != 0 && buffer[14] != 0xb0) { - printk(KERN_INFO "sr%d: Hmm, seems the cdrom " - "doesn't support multisession CD's\n",minor); - no_multi = 1; + case VENDOR_NEC:{ + unsigned long min, sec, frame; + memset(cmd, 0, 12); + cmd[0] = 0xde; + cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03; + cmd[2] = 0xb0; + rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1); + if (rc != 0) + break; + if (buffer[14] != 0 && buffer[14] != 0xb0) { + printk(KERN_INFO "sr%d: Hmm, seems the cdrom " + "doesn't support multisession CD's\n", minor); + no_multi = 1; + break; + } + min = BCD_TO_BIN(buffer[15]); + sec = BCD_TO_BIN(buffer[16]); + frame = BCD_TO_BIN(buffer[17]); + sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; break; } - min = BCD_TO_BIN(buffer[15]); - sec = BCD_TO_BIN(buffer[16]); - frame = BCD_TO_BIN(buffer[17]); - sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame; - break; - } - case VENDOR_TOSHIBA: { - unsigned long min,sec,frame; + case VENDOR_TOSHIBA:{ + unsigned long min, sec, frame; - /* we request some disc information (is it a XA-CD ?, - * where starts the last session ?) */ - memset(cmd,0,12); - cmd[0] = 0xc7; - cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3; - rc = sr_do_ioctl(minor, cmd, buffer, 4, 1); - if (rc == -EINVAL) { - printk(KERN_INFO "sr%d: Hmm, seems the drive " - "doesn't support multisession CD's\n",minor); - no_multi = 1; + /* we request some disc information (is it a XA-CD ?, + * where starts the last session ?) 
*/ + memset(cmd, 0, 12); + cmd[0] = 0xc7; + cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3; + rc = sr_do_ioctl(minor, cmd, buffer, 4, 1); + if (rc == -EINVAL) { + printk(KERN_INFO "sr%d: Hmm, seems the drive " + "doesn't support multisession CD's\n", minor); + no_multi = 1; + break; + } + if (rc != 0) + break; + min = BCD_TO_BIN(buffer[1]); + sec = BCD_TO_BIN(buffer[2]); + frame = BCD_TO_BIN(buffer[3]); + sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; + if (sector) + sector -= CD_MSF_OFFSET; + sr_set_blocklength(minor, 2048); break; } - if (rc != 0) - break; - min = BCD_TO_BIN(buffer[1]); - sec = BCD_TO_BIN(buffer[2]); - frame = BCD_TO_BIN(buffer[3]); - sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame; - if (sector) - sector -= CD_MSF_OFFSET; - sr_set_blocklength(minor,2048); - break; - } case VENDOR_WRITER: - memset(cmd,0,12); + memset(cmd, 0, 12); cmd[0] = READ_TOC; cmd[1] = (scsi_CDs[minor].device->lun << 5); cmd[8] = 0x04; @@ -253,31 +254,29 @@ int sr_cd_check(struct cdrom_device_info *cdi) break; } if ((rc = buffer[2]) == 0) { - printk (KERN_WARNING - "sr%d: No finished session\n",minor); + printk(KERN_WARNING + "sr%d: No finished session\n", minor); break; } - - cmd[0] = READ_TOC; /* Read TOC */ + cmd[0] = READ_TOC; /* Read TOC */ cmd[1] = (scsi_CDs[minor].device->lun << 5); - cmd[6] = rc & 0x7f; /* number of last session */ + cmd[6] = rc & 0x7f; /* number of last session */ cmd[8] = 0x0c; cmd[9] = 0x40; - rc = sr_do_ioctl(minor, cmd, buffer, 12, 1); + rc = sr_do_ioctl(minor, cmd, buffer, 12, 1); if (rc != 0) { break; } - sector = buffer[11] + (buffer[10] << 8) + - (buffer[9] << 16) + (buffer[8] << 24); + (buffer[9] << 16) + (buffer[8] << 24); break; -#endif /* CONFIG_BLK_DEV_SR_VENDOR */ +#endif /* CONFIG_BLK_DEV_SR_VENDOR */ default: /* should not happen */ printk(KERN_WARNING - "sr%d: unknown vendor code (%i), not initialized ?\n", - minor,VENDOR_ID); + "sr%d: unknown vendor code (%i), not initialized ?\n", + minor, VENDOR_ID); sector 
= 0; no_multi = 1; break; @@ -286,16 +285,17 @@ int sr_cd_check(struct cdrom_device_info *cdi) scsi_CDs[minor].xa_flag = 0; if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(minor)) scsi_CDs[minor].xa_flag = 1; - - if (2048 != scsi_CDs[minor].sector_size) - sr_set_blocklength(minor,2048); + + if (2048 != scsi_CDs[minor].device->sector_size) { + sr_set_blocklength(minor, 2048); + } if (no_multi) cdi->mask |= CDC_MULTI_SESSION; #ifdef DEBUG if (sector) printk(KERN_DEBUG "sr%d: multisession offset=%lu\n", - minor,sector); + minor, sector); #endif scsi_free(buffer, 512); return rc; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 057ed9e03332..dde365229aa7 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -286,15 +286,13 @@ static Scsi_Cmnd * st_do_scsi(Scsi_Cmnd * SCpnt, Scsi_Tape * STp, unsigned char *cmd, int bytes, int timeout, int retries, int do_wait) { - unsigned long flags; unsigned char *bp; - spin_lock_irqsave(&io_request_lock, flags); if (SCpnt == NULL) - if ((SCpnt = scsi_allocate_device(NULL, STp->device, 1)) == NULL) { + SCpnt = scsi_allocate_device(STp->device, 1); + if (SCpnt == NULL) { printk(KERN_ERR "st%d: Can't get SCSI request.\n", TAPE_NR(STp->devt)); - spin_unlock_irqrestore(&io_request_lock, flags); return NULL; } @@ -315,7 +313,6 @@ static Scsi_Cmnd * scsi_do_cmd(SCpnt, (void *) cmd, bp, bytes, st_sleep_done, timeout, retries); - spin_unlock_irqrestore(&io_request_lock, flags); if (do_wait) { down(SCpnt->request.sem); diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 91f511dea0b9..6f37bc11e3b0 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -826,7 +826,7 @@ static inline int port_detect \ } else { unsigned long flags; - sh[j]->wish_block = TRUE; +//FIXME// sh[j]->wish_block = TRUE; sh[j]->unchecked_isa_dma = TRUE; flags=claim_dma_lock(); diff --git a/drivers/sound/sb_card.c b/drivers/sound/sb_card.c index 46a1d6acefae..5afbf1e3e307 100644 --- a/drivers/sound/sb_card.c +++ 
b/drivers/sound/sb_card.c @@ -233,38 +233,42 @@ int init_module(void) if (mad16 == 0 && trix == 0 && pas2 == 0 && support == 0) { #ifdef CONFIG_ISAPNP - if (sb_probe_isapnp(&config, &config_mpu)<0) + if (isapnp == 1 && sb_probe_isapnp(&config, &config_mpu)<0) { printk(KERN_ERR "sb_card: No ISAPnP cards found\n"); return -EINVAL; } + else + { +#endif + if (io == -1 || dma == -1 || irq == -1) + { + printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n"); + return -EINVAL; + } + config.io_base = io; + config.irq = irq; + config.dma = dma; + config.dma2 = dma16; + config.card_subtype = type; +#ifdef CONFIG_ISAPNP + } #endif - } - if (io == -1 || dma == -1 || irq == -1) - { - printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n"); - return -EINVAL; - } - config.io_base = io; - config.irq = irq; - config.dma = dma; - config.dma2 = dma16; - config.card_subtype = type; - - if (!probe_sb(&config)) - return -ENODEV; - attach_sb_card(&config); + if (!probe_sb(&config)) + return -ENODEV; + attach_sb_card(&config); - if(config.slots[0]==-1) - return -ENODEV; + if(config.slots[0]==-1) + return -ENODEV; #ifdef CONFIG_MIDI - if (isapnp == 0) - config_mpu.io_base = mpu_io; - if (probe_sbmpu(&config_mpu)) - sbmpu = 1; - if (sbmpu) - attach_sbmpu(&config_mpu); + if (isapnp == 0) + config_mpu.io_base = mpu_io; + if (probe_sbmpu(&config_mpu)) + sbmpu = 1; + if (sbmpu) + attach_sbmpu(&config_mpu); #endif + } SOUND_LOCK; return 0; } diff --git a/drivers/video/fbgen.c b/drivers/video/fbgen.c index ff14807a0909..7cf44a5ec588 100644 --- a/drivers/video/fbgen.c +++ b/drivers/video/fbgen.c @@ -17,6 +17,7 @@ #include #include +#include