Documentation/scsi.txt. The module will be called sg.o. If unsure,
say N.
+Debug new queueing code for SCSI
+CONFIG_SCSI_DEBUG_QUEUES
+ This option turns on a lot of additional consistency checking for the new
+ queueing code. This will adversely affect performance, but it is likely
+ that bugs will be caught sooner if this is turned on. This will typically
+ cause the kernel to panic if an error is detected, but it would have probably
+ crashed if the panic weren't there. Comments/questions/problems to
+ linux-scsi mailing list please. See http://www.andante.org/scsi_queue.html
+ for more up-to-date information.
+
Probe all LUNs on each SCSI device
CONFIG_SCSI_MULTI_LUN
If you have a SCSI device that supports more than one LUN (Logical
MSDOS floppies. You will need a program called umssync in order to
make use of umsdos; read Documentation/filesystems/umsdos.txt.
+ To get utilities for initializing/checking the UMSDOS filesystem, or
+ the latest patches and/or information, visit the UMSDOS homepage at
+ http://www.voyager.hr/~mnalis/umsdos/ .
+
This option enlarges your kernel by about 28 KB and it only works if
you said Y to both "fat fs support" and "msdos fs support" above. If
you want to compile this as a module ( = code which can be inserted
The module is called nfsd.o. If you want to compile it as a module,
say M here and read Documentation/modules.txt. If unsure, say N.
-Emulate SUN NFS server
-CONFIG_NFSD_SUN
- If you would like for the server to allow clients to access
- directories that are mount points on the local filesystem (this is
- how nfsd behaves on Sun systems), say Y here.
- If you use Tru64 clients, say Y.
- If unsure, say N.
-
Provide NFSv3 server support (EXPERIMENTAL)
CONFIG_NFSD_V3
If you would like to include the NFSv3 server was well as the NFSv2
S: Maintained
TLAN NETWORK DRIVER
+P: Torben Mathiasen
+M: torben.mathiasen@compaq.com
L: tlan@vuser.vu.union.edu
-S: Orphan
+S: Maintained
TOKEN-RING NETWORK DRIVER
P: Paul Norton
UMSDOS FILESYSTEM
P: Matija Nalis
-M: mnalis@jagor.srce.hr
+M: Matija Nalis <mnalis-umsdos@voyager.hr>
L: linux-kernel@vger.rutgers.edu
+W: http://www.voyager.hr/~mnalis/umsdos/
S: Maintained
UNIFORM CDROM DRIVER
DRIVERS += drivers/char/drm/drm.o
endif
+ifdef CONFIG_AGP
+DRIVERS += drivers/char/agp/agp.o
+endif
+
ifdef CONFIG_NUBUS
DRIVERS := $(DRIVERS) drivers/nubus/nubus.a
endif
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
# Ftape, the floppy tape device driver
#
# CONFIG_FTAPE is not set
+CONFIG_DRM=y
+CONFIG_DRM_TDFX=y
+# CONFIG_DRM_GAMMA is not set
#
# PCMCIA character device support
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
- "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+ "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "; cld\n\t"
"setc %%al\n\t"
"popl %%ebp\n\t"
"popl %%edi\n\t"
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
- "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+ "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry)"; cld\n\t"
"setc %%bl\n\t"
"popl %%ebp\n\t"
"popl %%edi\n\t"
unsigned long flags;
__save_flags(flags); __cli();
- __asm__("lcall (%%edi)"
+ __asm__("lcall (%%edi); cld"
: "=a" (return_code),
"=b" (address),
"=c" (length),
__save_flags(flags); __cli();
__asm__(
- "lcall (%%edi)\n\t"
+ "lcall (%%edi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
unsigned short bx;
unsigned short ret;
- __asm__("lcall (%%edi)\n\t"
+ __asm__("lcall (%%edi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
unsigned long ret;
unsigned long bx = (dev->bus->number << 8) | dev->devfn;
- __asm__("lcall (%%esi)\n\t"
+ __asm__("lcall (%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
unsigned long ret;
unsigned long bx = (dev->bus->number << 8) | dev->devfn;
- __asm__("lcall (%%esi)\n\t"
+ __asm__("lcall (%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
unsigned long ret;
unsigned long bx = (dev->bus->number << 8) | dev->devfn;
- __asm__("lcall (%%esi)\n\t"
+ __asm__("lcall (%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
unsigned long ret;
unsigned long bx = (dev->bus->number << 8) | dev->devfn;
- __asm__("lcall (%%esi)\n\t"
+ __asm__("lcall (%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
unsigned long ret;
unsigned long bx = (dev->bus->number << 8) | dev->devfn;
- __asm__("lcall (%%esi)\n\t"
+ __asm__("lcall (%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
unsigned long ret;
unsigned long bx = (dev->bus->number << 8) | dev->devfn;
- __asm__("lcall (%%esi)\n\t"
+ __asm__("lcall (%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
__asm__("push %%es\n\t"
"push %%ds\n\t"
"pop %%es\n\t"
- "lcall (%%esi)\n\t"
+ "lcall (%%esi); cld\n\t"
"pop %%es\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
struct pt_regs * childregs;
childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
- *childregs = *regs;
+ struct_cpy(childregs, regs);
childregs->eax = 0;
childregs->esp = esp;
savesegment(gs,p->thread.gs);
unlazy_fpu(current);
- p->thread.i387 = current->thread.i387;
+ struct_cpy(&p->thread.i387, &current->thread.i387);
return 0;
}
#include <asm/e820.h>
unsigned long highstart_pfn, highend_pfn;
+unsigned long *pgd_quicklist = (unsigned long *)0;
static unsigned long totalram_pages = 0;
static unsigned long totalhigh_pages = 0;
if(pgtable_cache_size > high) {
do {
if(pgd_quicklist)
- free_pgd_slow(get_pgd_fast()), freed++;
+ mmlist_modify_lock(), \
+ free_pgd_slow(get_pgd_fast()), \
+ mmlist_modify_unlock(), \
+ freed++;
if(pmd_quicklist)
free_pmd_slow(get_pmd_fast()), freed++;
if(pte_quicklist)
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_SGI_PARTITION is not set
CONFIG_SUN_PARTITION=y
CONFIG_NLS=y
if (mod->next == NULL)
return -EINVAL;
- if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
- if (put_user(0, ret))
- return -EFAULT;
- else
- return 0;
+ if (!MOD_CAN_QUERY(mod))
+ return put_user(0, ret);
space = 0;
for (i = 0; i < mod->ndeps; ++i) {
space += len;
}
- if (put_user(i, ret))
- return -EFAULT;
- else
- return 0;
+ return put_user(i, ret);
calc_space_needed:
space += len;
if (mod->next == NULL)
return -EINVAL;
- if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+ if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
char *strings;
unsigned *vals;
- if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+ if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
MOD_DEC_USE_COUNT;
}
-static void ap_request(void)
+static void ap_request(request_queue_t * q)
{
struct cap_request creq;
unsigned int minor;
#endif
end_request(1);
request_count--;
- ap_request();
+ ap_request(NULL);
}
return -1;
}
printk("ap_init: register dev %d\n", MAJOR_NR);
- blk_dev[MAJOR_NR].request_fn = &ap_request;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &ap_request);
for (i=0;i<NUM_APDEVS;i++) {
ap_blocksizes[i] = AP_BLOCK_SIZE;
invalidate_buffers(MKDEV(MAJOR_NR, i));
unregister_blkdev( MAJOR_NR, "apblock" );
- blk_dev[MAJOR_NR].request_fn = 0;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
}
#endif /* MODULE */
}
-static void ddv_request(void)
+static void ddv_request(request_queue_t * q)
{
cli();
ddv_request1();
}
printk("ddv_init: register dev %d\n", MAJOR_NR);
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = DDV_READ_AHEAD;
bif_add_debug_key('d',ddv_status,"DDV status");
if (*gdp)
*gdp = (*gdp)->next;
free_irq(APOPT0_IRQ, NULL);
- blk_dev[MAJOR_NR].request_fn = 0;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
}
#endif /* MODULE */
static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
{
- static void (*RequestFunctions[DAC960_MaxControllers])(void) =
+ static void (*RequestFunctions[DAC960_MaxControllers])(request_queue_t *) =
{ DAC960_RequestFunction0, DAC960_RequestFunction1,
DAC960_RequestFunction2, DAC960_RequestFunction3,
DAC960_RequestFunction4, DAC960_RequestFunction5,
/*
Initialize the I/O Request Function.
*/
- blk_dev[MajorNumber].request_fn =
- RequestFunctions[Controller->ControllerNumber];
+ blk_init_queue(BLK_DEFAULT_QUEUE(MajorNumber),
+ RequestFunctions[Controller->ControllerNumber]);
/*
Initialize the Disk Partitions array, Partition Sizes array, Block Sizes
array, Max Sectors per Request array, and Max Segments per Request array.
/*
Remove the I/O Request Function.
*/
- blk_dev[MajorNumber].request_fn = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MajorNumber));
/*
Remove the Disk Partitions array, Partition Sizes array, Block Sizes
array, Max Sectors per Request array, and Max Segments per Request array.
boolean WaitForCommand)
{
IO_Request_T **RequestQueuePointer =
- &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].current_request;
+ &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.current_request;
IO_Request_T *Request;
DAC960_Command_T *Command;
char *RequestBuffer;
DAC960_RequestFunction0 is the I/O Request Function for DAC960 Controller 0.
*/
-static void DAC960_RequestFunction0(void)
+static void DAC960_RequestFunction0(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[0];
ProcessorFlags_T ProcessorFlags;
DAC960_RequestFunction1 is the I/O Request Function for DAC960 Controller 1.
*/
-static void DAC960_RequestFunction1(void)
+static void DAC960_RequestFunction1(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[1];
ProcessorFlags_T ProcessorFlags;
DAC960_RequestFunction2 is the I/O Request Function for DAC960 Controller 2.
*/
-static void DAC960_RequestFunction2(void)
+static void DAC960_RequestFunction2(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[2];
ProcessorFlags_T ProcessorFlags;
DAC960_RequestFunction3 is the I/O Request Function for DAC960 Controller 3.
*/
-static void DAC960_RequestFunction3(void)
+static void DAC960_RequestFunction3(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[3];
ProcessorFlags_T ProcessorFlags;
DAC960_RequestFunction4 is the I/O Request Function for DAC960 Controller 4.
*/
-static void DAC960_RequestFunction4(void)
+static void DAC960_RequestFunction4(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[4];
ProcessorFlags_T ProcessorFlags;
DAC960_RequestFunction5 is the I/O Request Function for DAC960 Controller 5.
*/
-static void DAC960_RequestFunction5(void)
+static void DAC960_RequestFunction5(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[5];
ProcessorFlags_T ProcessorFlags;
DAC960_RequestFunction6 is the I/O Request Function for DAC960 Controller 6.
*/
-static void DAC960_RequestFunction6(void)
+static void DAC960_RequestFunction6(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[6];
ProcessorFlags_T ProcessorFlags;
DAC960_RequestFunction7 is the I/O Request Function for DAC960 Controller 7.
*/
-static void DAC960_RequestFunction7(void)
+static void DAC960_RequestFunction7(request_queue_t * q)
{
DAC960_Controller_T *Controller = DAC960_Controllers[7];
ProcessorFlags_T ProcessorFlags;
static void DAC960_FinalizeController(DAC960_Controller_T *);
static int DAC960_Finalize(NotifierBlock_T *, unsigned long, void *);
-static void DAC960_RequestFunction0(void);
-static void DAC960_RequestFunction1(void);
-static void DAC960_RequestFunction2(void);
-static void DAC960_RequestFunction3(void);
-static void DAC960_RequestFunction4(void);
-static void DAC960_RequestFunction5(void);
-static void DAC960_RequestFunction6(void);
-static void DAC960_RequestFunction7(void);
+static void DAC960_RequestFunction0(request_queue_t *);
+static void DAC960_RequestFunction1(request_queue_t *);
+static void DAC960_RequestFunction2(request_queue_t *);
+static void DAC960_RequestFunction3(request_queue_t *);
+static void DAC960_RequestFunction4(request_queue_t *);
+static void DAC960_RequestFunction5(request_queue_t *);
+static void DAC960_RequestFunction6(request_queue_t *);
+static void DAC960_RequestFunction7(request_queue_t *);
static void DAC960_InterruptHandler(int, void *, Registers_T *);
static void DAC960_QueueMonitoringCommand(DAC960_Command_T *);
static void DAC960_MonitoringTimerFunction(unsigned long);
static void copy_to_acsibuffer( void );
static void copy_from_acsibuffer( void );
static void do_end_requests( void );
-static void do_acsi_request( void );
+static void do_acsi_request( request_queue_t * );
static void redo_acsi_request( void );
static int acsi_ioctl( struct inode *inode, struct file *file, unsigned int
cmd, unsigned long arg );
*
***********************************************************************/
-static void do_acsi_request( void )
+static void do_acsi_request( request_queue_t * q )
{
stdma_lock( acsi_interrupt, NULL );
phys_acsi_buffer = virt_to_phys( acsi_buffer );
STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */
acsi_gendisk.next = gendisk_head;
gendisk_head = &acsi_gendisk;
struct gendisk ** gdp;
del_timer( &acsi_timer );
- blk_dev[MAJOR_NR].request_fn = 0;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
atari_stram_free( acsi_buffer );
if (unregister_blkdev( MAJOR_NR, "ad" ) != 0)
goto repeat;
}
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
{
redo_fd_request();
}
post_write_timer.data = 0;
post_write_timer.function = post_write;
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
amiga_chip_free(raw_buf);
blk_size[MAJOR_NR] = NULL;
blksize_size[MAJOR_NR] = NULL;
- blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
unregister_blkdev(MAJOR_NR, "fd");
}
#endif
}
-void do_fd_request(void)
+void do_fd_request(request_queue_t * q)
{
unsigned long flags;
blk_size[MAJOR_NR] = floppy_sizes;
blksize_size[MAJOR_NR] = floppy_blocksizes;
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
{
unregister_blkdev(MAJOR_NR, "fd");
- blk_dev[MAJOR_NR].request_fn = 0;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
timer_active &= ~(1 << FLOPPY_TIMER);
timer_table[FLOPPY_TIMER].fn = 0;
atari_stram_free( DMABuffer );
*/
#define DO_IDA_REQUEST(x) { do_ida_request(x); }
-static void do_ida_request0(void) DO_IDA_REQUEST(0);
-static void do_ida_request1(void) DO_IDA_REQUEST(1);
-static void do_ida_request2(void) DO_IDA_REQUEST(2);
-static void do_ida_request3(void) DO_IDA_REQUEST(3);
-static void do_ida_request4(void) DO_IDA_REQUEST(4);
-static void do_ida_request5(void) DO_IDA_REQUEST(5);
-static void do_ida_request6(void) DO_IDA_REQUEST(6);
-static void do_ida_request7(void) DO_IDA_REQUEST(7);
+static void do_ida_request0(request_queue_t * q) DO_IDA_REQUEST(0);
+static void do_ida_request1(request_queue_t * q) DO_IDA_REQUEST(1);
+static void do_ida_request2(request_queue_t * q) DO_IDA_REQUEST(2);
+static void do_ida_request3(request_queue_t * q) DO_IDA_REQUEST(3);
+static void do_ida_request4(request_queue_t * q) DO_IDA_REQUEST(4);
+static void do_ida_request5(request_queue_t * q) DO_IDA_REQUEST(5);
+static void do_ida_request6(request_queue_t * q) DO_IDA_REQUEST(6);
+static void do_ida_request7(request_queue_t * q) DO_IDA_REQUEST(7);
static void start_io(ctlr_info_t *h);
*/
void __init cpqarray_init(void)
{
- void (*request_fns[MAX_CTLR])(void) = {
+ void (*request_fns[MAX_CTLR])(request_queue_t *) = {
do_ida_request0, do_ida_request1,
do_ida_request2, do_ida_request3,
do_ida_request4, do_ida_request5,
ida_gendisk[i].sizes = ida_sizes + (i*256);
/* ida_gendisk[i].nr_real is handled by getgeometry */
- blk_dev[MAJOR_NR+i].request_fn = request_fns[i];
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i), request_fns[i]);
+ blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR + i), 0);
+
blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
hardsect_size[MAJOR_NR+i] = ida_hardsizes + (i*256);
read_ahead[MAJOR_NR+i] = READ_AHEAD;
cmdlist_t *c;
int seg, sect;
char *lastdataend;
+ request_queue_t * q;
struct buffer_head *bh;
struct request *creq;
- creq = blk_dev[MAJOR_NR+ctlr].current_request;
+ q = &blk_dev[MAJOR_NR+ctlr].request_queue;
+
+ creq = q->current_request;
if (creq == NULL || creq->rq_status == RQ_INACTIVE)
goto doreq_done;
} else {
DBGPX( printk("Done with %p, queueing %p\n", creq, creq->next); );
creq->rq_status = RQ_INACTIVE;
- blk_dev[MAJOR_NR+ctlr].current_request = creq->next;
+ q->current_request = creq->next;
wake_up(&wait_for_request);
}
schedule_bh( (void *)(void *) redo_fd_request);
}
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
{
if(usage_count == 0) {
printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT);
blk_size[MAJOR_NR] = floppy_sizes;
blksize_size[MAJOR_NR] = floppy_blocksizes;
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
config_types();
fdc = 0; /* reset fdc in case of unexpected interrupt */
if (floppy_grab_irq_and_dma()){
del_timer(&fd_timeout);
- blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
unregister_blkdev(MAJOR_NR,"fd");
del_timer(&fd_timeout);
return -EBUSY;
schedule();
if (usage_count)
floppy_release_irq_and_dma();
- blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
unregister_blkdev(MAJOR_NR,"fd");
}
return have_no_fdc;
unregister_blkdev(MAJOR_NR, "fd");
- blk_dev[MAJOR_NR].request_fn = 0;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
/* eject disk, if any */
dummy = fd_eject(0);
}
panic("unknown hd-command");
}
-static void do_hd_request (void)
+static void do_hd_request (request_queue_t * q)
{
disable_irq(HD_IRQ);
hd_request();
printk("hd: unable to get major %d for hard disk\n",MAJOR_NR);
return -1;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */
hd_gendisk.next = gendisk_head;
gendisk_head = &hd_gendisk;
rq->sector += nsect;
#endif
if ((rq->nr_sectors -= nsect) <= 0)
+ {
+ spin_unlock_irqrestore(&io_request_lock, flags);
break;
+ }
if ((rq->current_nr_sectors -= nsect) == 0) {
if ((rq->bh = rq->bh->b_reqnext) != NULL) {
rq->current_nr_sectors = rq->bh->b_size>>9;
static int hwif_init (ide_hwif_t *hwif)
{
- void (*rfn)(void);
+ ide_drive_t *drive;
+ void (*rfn)(request_queue_t *);
if (!hwif->present)
return 0;
init_gendisk(hwif);
blk_dev[hwif->major].data = hwif;
- blk_dev[hwif->major].request_fn = rfn;
blk_dev[hwif->major].queue = ide_get_queue;
read_ahead[hwif->major] = 8; /* (4kB) */
hwif->present = 1; /* success */
+ /*
+ * FIXME(eric) - This needs to be tested. I *think* that this
+ * is correct. Also, I believe that there is no longer any
+ * reason to have multiple (do_ide[0-7]_request) request
+ * functions - the queuedata field could be used to indicate
+ * the correct hardware group - either this, or we could add
+ * a new field to request_queue_t to hold this information.
+ */
+ drive = &hwif->drives[0];
+ blk_init_queue(&drive->queue, rfn);
+
+ drive = &hwif->drives[1];
+ blk_init_queue(&drive->queue, rfn);
+
#if (DEBUG_SPINLOCK > 0)
{
static int done = 0;
if (!end_that_request_first(rq, uptodate, hwgroup->drive->name)) {
add_blkdev_randomness(MAJOR(rq->rq_dev));
- hwgroup->drive->queue = rq->next;
- blk_dev[MAJOR(rq->rq_dev)].current_request = NULL;
+ hwgroup->drive->queue.current_request = rq->next;
+ blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
hwgroup->rq = NULL;
end_that_request_last(rq);
}
}
}
spin_lock_irqsave(&io_request_lock, flags);
- drive->queue = rq->next;
- blk_dev[MAJOR(rq->rq_dev)].current_request = NULL;
+ drive->queue.current_request = rq->next;
+ blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
HWGROUP(drive)->rq = NULL;
rq->rq_status = RQ_INACTIVE;
spin_unlock_irqrestore(&io_request_lock, flags);
{
ide_startstop_t startstop;
unsigned long block, blockend;
- struct request *rq = drive->queue;
+ struct request *rq = drive->queue.current_request;
unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
ide_hwif_t *hwif = HWIF(drive);
best = NULL;
drive = hwgroup->drive;
do {
- if (drive->queue && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
+ if (drive->queue.current_request && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
if (!best
|| (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
|| (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
{
struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major];
- if (bdev->current_request != &bdev->plug)
+ if( !bdev->request_queue.plugged )
best = drive;
}
}
drive = hwgroup->drive;
do {
bdev = &blk_dev[HWIF(drive)->major];
- if (bdev->current_request != &bdev->plug) /* FIXME: this will do for now */
- bdev->current_request = NULL; /* (broken since patch-2.1.15) */
+ if( !bdev->request_queue.plugged )
+ bdev->request_queue.current_request = NULL; /* (broken since patch-2.1.15) */
if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
sleep = drive->sleep;
} while ((drive = drive->next) != hwgroup->drive);
drive->service_start = jiffies;
bdev = &blk_dev[hwif->major];
- if (bdev->current_request == &bdev->plug) /* FIXME: paranoia */
+ if( bdev->request_queue.plugged ) /* FIXME: paranoia */
printk("%s: Huh? nuking plugged queue\n", drive->name);
- bdev->current_request = hwgroup->rq = drive->queue;
+ bdev->request_queue.current_request = hwgroup->rq = drive->queue.current_request;
spin_unlock(&io_request_lock);
if (!hwif->serialized) /* play it safe with buggy hardware */
ide__sti();
/*
* ide_get_queue() returns the queue which corresponds to a given device.
*/
-struct request **ide_get_queue (kdev_t dev)
+request_queue_t *ide_get_queue (kdev_t dev)
{
ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data;
return &hwif->drives[DEVICE_NR(dev) & 1].queue;
}
-void do_ide0_request (void)
+void do_ide0_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[0].hwgroup);
}
#if MAX_HWIFS > 1
-void do_ide1_request (void)
+void do_ide1_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[1].hwgroup);
}
#endif /* MAX_HWIFS > 1 */
#if MAX_HWIFS > 2
-void do_ide2_request (void)
+void do_ide2_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[2].hwgroup);
}
#endif /* MAX_HWIFS > 2 */
#if MAX_HWIFS > 3
-void do_ide3_request (void)
+void do_ide3_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[3].hwgroup);
}
#endif /* MAX_HWIFS > 3 */
#if MAX_HWIFS > 4
-void do_ide4_request (void)
+void do_ide4_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[4].hwgroup);
}
#endif /* MAX_HWIFS > 4 */
#if MAX_HWIFS > 5
-void do_ide5_request (void)
+void do_ide5_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[5].hwgroup);
}
#endif /* MAX_HWIFS > 5 */
#if MAX_HWIFS > 6
-void do_ide6_request (void)
+void do_ide6_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[6].hwgroup);
}
#endif /* MAX_HWIFS > 6 */
#if MAX_HWIFS > 7
-void do_ide7_request (void)
+void do_ide7_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[7].hwgroup);
}
#endif /* MAX_HWIFS > 7 */
#if MAX_HWIFS > 8
-void do_ide8_request (void)
+void do_ide8_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[8].hwgroup);
}
#endif /* MAX_HWIFS > 8 */
#if MAX_HWIFS > 9
-void do_ide9_request (void)
+void do_ide9_request (request_queue_t *q)
{
ide_do_request (ide_hwifs[9].hwgroup);
}
hwgroup->handler = NULL;
del_timer(&hwgroup->timer);
spin_unlock(&io_request_lock);
+
if (drive->unmask)
ide__sti(); /* local CPU only */
startstop = handler(drive); /* service this interrupt, may set handler for next interrupt */
spin_lock_irq(&io_request_lock);
+
/*
* Note that handler() may have set things up for another
* interrupt to occur soon, but it cannot happen until
if (action == ide_wait)
rq->sem = &sem;
spin_lock_irqsave(&io_request_lock, flags);
- cur_rq = drive->queue;
+ cur_rq = drive->queue.current_request;
if (cur_rq == NULL || action == ide_preempt) {
rq->next = cur_rq;
- drive->queue = rq;
+ drive->queue.current_request = rq;
if (action == ide_preempt)
hwgroup->rq = NULL;
} else {
kfree(blksize_size[hwif->major]);
kfree(max_sectors[hwif->major]);
kfree(max_readahead[hwif->major]);
- blk_dev[hwif->major].request_fn = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(hwif->major));
blk_dev[hwif->major].data = NULL;
blk_dev[hwif->major].queue = NULL;
blksize_size[hwif->major] = NULL;
* NOTE: the device-specific queue() functions
* have to be atomic!
*/
-static inline struct request **get_queue(kdev_t dev)
+static inline request_queue_t *get_queue(kdev_t dev)
{
int major = MAJOR(dev);
struct blk_dev_struct *bdev = blk_dev + major;
if (bdev->queue)
return bdev->queue(dev);
- return &blk_dev[major].current_request;
+ return &blk_dev[major].request_queue;
+}
+
+void blk_cleanup_queue(request_queue_t * q)
+{
+ memset(q, 0, sizeof(*q));
+}
+
+void blk_queue_headactive(request_queue_t * q, int active)
+{
+ q->head_active = active;
+}
+
+void blk_queue_pluggable(request_queue_t * q, int use_plug)
+{
+ q->use_plug = use_plug;
+}
+
+void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
+{
+ q->request_fn = rfn;
+ q->current_request = NULL;
+ q->merge_fn = NULL;
+ q->merge_requests_fn = NULL;
+ q->plug_tq.sync = 0;
+ q->plug_tq.routine = &unplug_device;
+ q->plug_tq.data = q;
+ q->plugged = 0;
+ /*
+ * These booleans describe the queue properties. We set the
+ * default (and most common) values here. Other drivers can
+ * use the appropriate functions to alter the queue properties
+ * as appropriate.
+ */
+ q->use_plug = 1;
+ q->head_active = 1;
}
/*
*/
void unplug_device(void * data)
{
- struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
- int queue_new_request=0;
+ request_queue_t * q = (request_queue_t *) data;
unsigned long flags;
spin_lock_irqsave(&io_request_lock,flags);
- if (dev->current_request == &dev->plug) {
- struct request * next = dev->plug.next;
- dev->current_request = next;
- if (next || dev->queue) {
- dev->plug.next = NULL;
- queue_new_request = 1;
+ if( q->plugged )
+ {
+ q->plugged = 0;
+ if( q->current_request != NULL )
+ {
+ (q->request_fn)(q);
}
}
- if (queue_new_request)
- (dev->request_fn)();
-
spin_unlock_irqrestore(&io_request_lock,flags);
}
* This is called with interrupts off and no requests on the queue.
* (and with the request spinlock aquired)
*/
-static inline void plug_device(struct blk_dev_struct * dev)
+static inline void plug_device(request_queue_t * q)
{
- if (dev->current_request)
+ if (q->current_request)
return;
- dev->current_request = &dev->plug;
- queue_task(&dev->plug_tq, &tq_disk);
+
+ q->plugged = 1;
+ queue_task(&q->plug_tq, &tq_disk);
}
/*
prev_found = req;
req->rq_status = RQ_ACTIVE;
req->rq_dev = dev;
+ req->special = NULL;
return req;
}
* which is important for drive_stat_acct() above.
*/
-void add_request(struct blk_dev_struct * dev, struct request * req)
+static void add_request(request_queue_t * q, struct request * req)
{
int major = MAJOR(req->rq_dev);
- struct request * tmp, **current_request;
+ struct request * tmp;
unsigned long flags;
- int queue_new_request = 0;
drive_stat_acct(req, req->nr_sectors, 1);
req->next = NULL;
* We use the goto to reduce locking complexity
*/
spin_lock_irqsave(&io_request_lock,flags);
- current_request = get_queue(req->rq_dev);
- if (!(tmp = *current_request)) {
- *current_request = req;
- if (dev->current_request != &dev->plug)
- queue_new_request = 1;
+ if (!(tmp = q->current_request)) {
+ q->current_request = req;
goto out;
}
for ( ; tmp->next ; tmp = tmp->next) {
req->next = tmp->next;
tmp->next = req;
-/* for SCSI devices, call request_fn unconditionally */
- if (scsi_blk_major(major))
- queue_new_request = 1;
- if (major >= COMPAQ_SMART2_MAJOR+0 &&
- major <= COMPAQ_SMART2_MAJOR+7)
- queue_new_request = 1;
+ /*
+ * FIXME(eric) I don't understand why there is a need for this
+ * special case code. It clearly doesn't fit any more with
+ * the new queueing architecture, and it got added in 2.3.10.
+ * I am leaving this in here until I hear back from the COMPAQ
+ * people.
+ */
+ if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
+ {
+ (q->request_fn)(q);
+ }
+
if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
- queue_new_request = 1;
+ {
+ (q->request_fn)(q);
+ }
+
out:
- if (queue_new_request)
- (dev->request_fn)();
spin_unlock_irqrestore(&io_request_lock,flags);
}
/*
* Has to be called with the request spinlock aquired
*/
-static inline void attempt_merge (struct request *req,
- int max_sectors,
- int max_segments)
+static inline void attempt_merge (request_queue_t * q,
+ struct request *req,
+ int max_sectors,
+ int max_segments)
{
struct request *next = req->next;
int total_segments;
total_segments--;
if (total_segments > max_segments)
return;
+
+ if( q->merge_requests_fn != NULL )
+ {
+ /*
+ * If we are not allowed to merge these requests, then
+ * return. If we are allowed to merge, then the count
+ * will have been updated to the appropriate number,
+ * and we shouldn't do it here too.
+ */
+ if( !(q->merge_requests_fn)(q, req, next) )
+ {
+ return;
+ }
+ }
+ else
+ {
+ req->nr_segments = total_segments;
+ }
+
req->bhtail->b_reqnext = next->bh;
req->bhtail = next->bhtail;
req->nr_sectors += next->nr_sectors;
- req->nr_segments = total_segments;
next->rq_status = RQ_INACTIVE;
req->next = next->next;
wake_up (&wait_for_request);
}
-void make_request(int major,int rw, struct buffer_head * bh)
+static void __make_request(request_queue_t * q,
+ int major,
+ int rw,
+ struct buffer_head * bh)
{
unsigned int sector, count;
struct request * req;
* not to schedule or do something nonatomic
*/
spin_lock_irqsave(&io_request_lock,flags);
- req = *get_queue(bh->b_rdev);
+ req = q->current_request;
if (!req) {
/* MD and loop can't handle plugging without deadlocking */
if (major != MD_MAJOR && major != LOOP_MAJOR &&
- major != DDV_MAJOR && major != NBD_MAJOR)
- plug_device(blk_dev + major); /* is atomic */
+ major != DDV_MAJOR && major != NBD_MAJOR
+ && q->use_plug)
+ plug_device(q); /* is atomic */
} else switch (major) {
+ /*
+ * FIXME(eric) - this entire switch statement is going away
+ * soon, and we will instead key off of q->head_active to decide
+ * whether the top request in the queue is active on the device
+ * or not.
+ */
case IDE0_MAJOR: /* same as HD_MAJOR */
case IDE1_MAJOR:
case FLOPPY_MAJOR:
* All other drivers need to jump over the first entry, as that
* entry may be busy being processed and we thus can't change it.
*/
- if (req == blk_dev[major].current_request)
+ if (req == q->current_request)
req = req->next;
if (!req)
break;
continue;
/* Can we add it to the end of this request? */
if (req->sector + req->nr_sectors == sector) {
- if (req->bhtail->b_data + req->bhtail->b_size
- != bh->b_data) {
- if (req->nr_segments < max_segments)
- req->nr_segments++;
- else continue;
+ /*
+ * The merge_fn is a more advanced way
+ * of accomplishing the same task. Instead
+ * of applying a fixed limit of some sort,
+ * we define a function which can
+ * determine whether or not it is safe to
+ * merge the request.
+ */
+ if( q->merge_fn == NULL )
+ {
+ if (req->bhtail->b_data + req->bhtail->b_size
+ != bh->b_data) {
+ if (req->nr_segments < max_segments)
+ req->nr_segments++;
+ else continue;
+ }
+ }
+ else
+ {
+ /*
+ * See if this queue has rules that
+ * may suggest that we shouldn't merge
+ * this request.
+ */
+ if( !(q->merge_fn)(q, req, bh) )
+ {
+ continue;
+ }
}
req->bhtail->b_reqnext = bh;
req->bhtail = bh;
req->nr_sectors += count;
drive_stat_acct(req, count, 0);
/* Can we now merge this req with the next? */
- attempt_merge(req, max_sectors, max_segments);
+ attempt_merge(q, req, max_sectors, max_segments);
/* or to the beginning? */
} else if (req->sector - count == sector) {
- if (bh->b_data + bh->b_size
- != req->bh->b_data) {
- if (req->nr_segments < max_segments)
- req->nr_segments++;
- else continue;
+ /*
+ * The merge_fn is a more advanced way
+ * of accomplishing the same task. Instead
+ * of applying a fixed limit of some sort,
+ * we define a function which can
+ * determine whether or not it is safe to
+ * merge the request.
+ */
+ if( q->merge_fn == NULL )
+ {
+ if (bh->b_data + bh->b_size
+ != req->bh->b_data) {
+ if (req->nr_segments < max_segments)
+ req->nr_segments++;
+ else continue;
+ }
+ }
+ else
+ {
+ /*
+ * See if this queue has rules that
+ * may suggest that we shouldn't merge
+ * this request.
+ */
+ if( !(q->merge_fn)(q, req, bh) )
+ {
+ continue;
+ }
}
bh->b_reqnext = req->bh;
req->bh = bh;
req->errors = 0;
req->sector = sector;
req->nr_sectors = count;
- req->nr_segments = 1;
req->current_nr_sectors = count;
+ req->nr_segments = 1; /* Always 1 for a new request. */
req->buffer = bh->b_data;
req->sem = NULL;
req->bh = bh;
req->bhtail = bh;
req->next = NULL;
- add_request(major+blk_dev,req);
+ add_request(q, req);
return;
end_io:
bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
}
+void make_request(int major,int rw, struct buffer_head * bh)
+{
+ request_queue_t * q;
+ unsigned long flags;
+
+ q = get_queue(bh->b_dev);
+
+ __make_request(q, major, rw, bh);
+
+ spin_lock_irqsave(&io_request_lock,flags);
+ if( !q->plugged )
+ (q->request_fn)(q);
+ spin_unlock_irqrestore(&io_request_lock,flags);
+}
+
+
+
/* This function can be used to request a number of buffers from a block
device. Currently the only restriction is that all buffers must belong to
the same device */
{
unsigned int major;
int correct_size;
- struct blk_dev_struct * dev;
+ request_queue_t * q;
+ unsigned long flags;
int i;
- dev = NULL;
- if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
- dev = blk_dev + major;
- if (!dev || !dev->request_fn) {
+
+ major = MAJOR(bh[0]->b_dev);
+ if (!(q = get_queue(bh[0]->b_dev))) {
printk(KERN_ERR
"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
continue;
}
#endif
- make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
+ __make_request(q, MAJOR(bh[i]->b_rdev), rw, bh[i]);
+ }
+
+ spin_lock_irqsave(&io_request_lock,flags);
+ if( !q->plugged )
+ {
+ (q->request_fn)(q);
}
+ spin_unlock_irqrestore(&io_request_lock,flags);
return;
sorry:
struct blk_dev_struct *dev;
for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
- dev->request_fn = NULL;
dev->queue = NULL;
- dev->current_request = NULL;
- dev->plug.rq_status = RQ_INACTIVE;
- dev->plug.cmd = -1;
- dev->plug.next = NULL;
- dev->plug_tq.sync = 0;
- dev->plug_tq.routine = &unplug_device;
- dev->plug_tq.data = dev;
+ blk_init_queue(&dev->request_queue, NULL);
}
req = all_requests + NR_REQUEST;
EXPORT_SYMBOL(io_request_lock);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
+EXPORT_SYMBOL(blk_init_queue);
+EXPORT_SYMBOL(blk_cleanup_queue);
+EXPORT_SYMBOL(blk_queue_headactive);
loop_sizes[lo->lo_number] = size;
}
-static void do_lo_request(void)
+static void do_lo_request(request_queue_t * q)
{
int real_block, block, offset, len, blksize, size;
char *dest_addr;
return -ENOMEM;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
for (i=0; i < max_loop; i++) {
memset(&loop_dev[i], 0, sizeof(struct loop_device));
loop_dev[i].lo_number = i;
}
}
-static void do_md_request (void)
+static void do_md_request (request_queue_t * q)
{
printk ("Got md request, not good...");
return;
return (-1);
}
- blk_dev[MD_MAJOR].request_fn=DEVICE_REQUEST;
- blk_dev[MD_MAJOR].current_request=NULL;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MD_MAJOR]=INT_MAX;
memset(md_dev, 0, MAX_MD_DEV * sizeof (struct md_dev));
md_gendisk.next=gendisk_head;
#undef FAIL
#define FAIL( s ) { printk( KERN_ERR "NBD, minor %d: " s "\n", dev ); goto error_out; }
-static void do_nbd_request(void)
+static void do_nbd_request(request_queue_t * q)
{
struct request *req;
int dev;
#endif
blksize_size[MAJOR_NR] = nbd_blksizes;
blk_size[MAJOR_NR] = nbd_sizes;
- blk_dev[MAJOR_NR].request_fn = do_nbd_request;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_nbd_request);
for (i = 0; i < MAX_NBD; i++) {
nbd_dev[i].refcnt = 0;
nbd_dev[i].file = NULL;
static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
-static void do_pcd_request(void);
+static void do_pcd_request(request_queue_t * q);
static void do_pcd_read(void);
static int pcd_blocksizes[PCD_UNITS];
for (unit=0;unit<PCD_UNITS;unit++)
if (PCD.present) register_cdrom(&PCD.info);
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
for (i=0;i<PCD_UNITS;i++) pcd_blocksizes[i] = 1024;
/* I/O request processing */
-static void do_pcd_request (void)
+static void do_pcd_request (request_queue_t * q)
{ int unit;
spin_lock_irqsave(&io_request_lock,saved_flags);
pcd_busy = 0;
end_request(0);
- do_pcd_request();
+ do_pcd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(1);
pcd_busy = 0;
- do_pcd_request();
+ do_pcd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
pcd_busy = 0;
pcd_bufblk = -1;
end_request(0);
- do_pcd_request();
+ do_pcd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
do_pcd_read();
spin_lock_irqsave(&io_request_lock,saved_flags);
- do_pcd_request();
+ do_pcd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
#endif
static void pd_geninit(struct gendisk *ignored);
static int pd_open(struct inode *inode, struct file *file);
-static void do_pd_request(void);
+static void do_pd_request(request_queue_t * q);
static int pd_ioctl(struct inode *inode,struct file *file,
unsigned int cmd, unsigned long arg);
static int pd_release (struct inode *inode, struct file *file);
name,major);
return -1;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
pd_gendisk.major = major;
return (!(RR(1,6) & STAT_BUSY)) ;
}
-static void do_pd_request (void)
+static void do_pd_request (request_queue_t * q)
{ struct buffer_head * bh;
struct request * req;
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pd_busy = 0;
- do_pd_request();
+ do_pd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pd_busy = 0;
- do_pd_request();
+ do_pd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(1);
pd_busy = 0;
- do_pd_request();
+ do_pd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pd_busy = 0;
- do_pd_request();
+ do_pd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pd_busy = 0;
- do_pd_request();
+ do_pd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pd_busy = 0;
- do_pd_request();
+ do_pd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(1);
pd_busy = 0;
- do_pd_request();
+ do_pd_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
void cleanup_module( void );
#endif
static int pf_open(struct inode *inode, struct file *file);
-static void do_pf_request(void);
+static void do_pf_request(request_queue_t * q);
static int pf_ioctl(struct inode *inode,struct file *file,
unsigned int cmd, unsigned long arg);
major);
return -1;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
return (((RR(1,6)&(STAT_BUSY|pf_mask)) == pf_mask));
}
-static void do_pf_request (void)
+static void do_pf_request (request_queue_t * q)
{ struct buffer_head * bh;
struct request * req;
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pf_busy = 0;
- do_pf_request();
+ do_pf_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pf_busy = 0;
- do_pf_request();
+ do_pf_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(1);
pf_busy = 0;
- do_pf_request();
+ do_pf_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pf_busy = 0;
- do_pf_request();
+ do_pf_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pf_busy = 0;
- do_pf_request();
+ do_pf_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(0);
pf_busy = 0;
- do_pf_request();
+ do_pf_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
return;
}
spin_lock_irqsave(&io_request_lock,saved_flags);
end_request(1);
pf_busy = 0;
- do_pf_request();
+ do_pf_request(NULL);
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
static void ps2esdi_geninit(struct gendisk *ignored);
-static void do_ps2esdi_request(void);
+static void do_ps2esdi_request(request_queue_t * q);
static void ps2esdi_readwrite(int cmd, u_char drive, u_int block, u_int count);
return -1;
}
/* set up some global information - indicating device specific info */
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
/* some minor housekeeping - setup the global gendisk structure */
}
/* strategy routine that handles most of the IO requests */
-static void do_ps2esdi_request(void)
+static void do_ps2esdi_request(request_queue_t * q)
{
u_int block, count;
/* since, this routine is called with interrupts cleared - they
printk("%s: DMA above 16MB not supported\n", DEVICE_NAME);
end_request(FAIL);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(q);
return;
} /* check for above 16Mb dmas */
if ((CURRENT_DEV < ps2esdi_drives) &&
printk("%s: Unknown command\n", DEVICE_NAME);
end_request(FAIL);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(q);
break;
} /* handle different commands */
}
CURRENT->sector, ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects);
end_request(FAIL);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(q);
}
} /* main strategy routine */
if (ps2esdi_out_cmd_blk(cmd_blk)) {
printk("%s: Controller failed\n", DEVICE_NAME);
if ((++CURRENT->errors) < MAX_RETRIES)
- return do_ps2esdi_request();
+ return do_ps2esdi_request(NULL);
else {
end_request(FAIL);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
}
}
/* check for failure to put out the command block */
outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
if ((++CURRENT->errors) < MAX_RETRIES)
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
else {
end_request(FAIL);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
}
break;
}
outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
if ((++CURRENT->errors) < MAX_RETRIES)
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
else {
end_request(FAIL);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
}
break;
outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
end_request(FAIL);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
break;
case INT_CMD_FORMAT:
if (CURRENT->nr_sectors -= CURRENT->current_nr_sectors) {
CURRENT->buffer += CURRENT->current_nr_sectors * SECT_SIZE;
CURRENT->sector += CURRENT->current_nr_sectors;
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
} else {
end_request(SUCCES);
if (CURRENT)
- do_ps2esdi_request();
+ do_ps2esdi_request(NULL);
}
}
* allocated size, we must get rid of it...
*
*/
-static void rd_request(void)
+static void rd_request(request_queue_t * q)
{
unsigned int minor;
unsigned long offset, len;
invalidate_buffers(MKDEV(MAJOR_NR, i));
unregister_blkdev( MAJOR_NR, "ramdisk" );
- blk_dev[MAJOR_NR].request_fn = 0;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
}
/* This is the registration and initialization section of the RAM disk driver */
return -EIO;
}
- blk_dev[MAJOR_NR].request_fn = &rd_request;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &rd_request);
for (i = 0; i < NUM_RAMDISKS; i++) {
/* rd_size is given in kB */
static void swim3_select(struct floppy_state *fs, int sel);
static void swim3_action(struct floppy_state *fs, int action);
static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(void);
+static void do_fd_request(request_queue_t * q);
static void start_request(struct floppy_state *fs);
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long));
return (stat & DATA) == 0;
}
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
{
int i;
for(i=0;i<floppy_count;i++)
MAJOR_NR);
return -EBUSY;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
}
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long));
static void fd_request_timeout(unsigned long);
-static void do_fd_request(void);
+static void do_fd_request(request_queue_t * q);
static void start_request(struct floppy_state *fs);
static struct file_operations floppy_fops = {
MAJOR_NR);
return -EBUSY;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
restore_flags(flags);
}
-static void do_fd_request(void)
+static void do_fd_request(request_queue_t * q)
{
int i;
printk("xd: Unable to get major number %d\n",MAJOR_NR);
return -1;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
xd_gendisk.next = gendisk_head;
gendisk_head = &xd_gendisk;
}
/* do_xd_request: handle an incoming request */
-static void do_xd_request (void)
+static void do_xd_request (request_queue_t * q)
{
u_int block,count,retry;
int code;
struct gendisk ** gdp;
blksize_size[MAJOR_NR] = NULL;
- blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
blk_size[MAJOR_NR] = NULL;
hardsect_size[MAJOR_NR] = NULL;
read_ahead[MAJOR_NR] = 0;
static void xd_geninit (struct gendisk *);
static int xd_open (struct inode *inode,struct file *file);
-static void do_xd_request (void);
+static void do_xd_request (request_queue_t * q);
static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
static int xd_release (struct inode *inode,struct file *file);
static int xd_reread_partitions (kdev_t dev);
static int current_device = -1;
static void
-do_z2_request( void )
+do_z2_request( request_queue_t * q )
{
u_long start, len, addr, size;
}
}
- blk_dev[ MAJOR_NR ].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blksize_size[ MAJOR_NR ] = z2_blocksizes;
blk_size[ MAJOR_NR ] = z2_sizes;
static int check_aztcd_media_change(kdev_t full_dev);
static int aztcd_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
static void azt_transfer(void);
-static void do_aztcd_request(void);
+static void do_aztcd_request(request_queue_t *);
static void azt_invalidate_buffers(void);
int aztcd_open(struct inode *ip, struct file *fp);
}
}
-static void do_aztcd_request(void)
+static void do_aztcd_request(request_queue_t * q)
{
#ifdef AZT_TEST
printk(" do_aztcd_request(%ld+%ld) Time:%li\n", CURRENT -> sector, CURRENT -> nr_sectors,jiffies);
MAJOR_NR);
return -EIO;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
#ifndef AZT_KERNEL_PRIOR_2_1
blksize_size[MAJOR_NR] = aztcd_blocksizes;
#endif
* data access on a CD is done sequentially, this saves a lot of operations.
*/
static void
-do_cdu31a_request(void)
+do_cdu31a_request(request_queue_t * q)
{
int block;
int nblock;
is_a_cdu31a = strcmp("CD-ROM CDU31A", drive_config.product_id) == 0;
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = CDU31A_READAHEAD;
cdu31a_block_size = 1024; /* 1kB default block size */
/* use 'mount -o block=2048' */
static int cm206_base = CM206_BASE;
static int cm206_irq = CM206_IRQ;
+static int cm206[2] = {0,0}; /* for compatible `insmod' parameter passing */
+
MODULE_PARM(cm206_base, "i"); /* base */
MODULE_PARM(cm206_irq, "i"); /* irq */
MODULE_PARM(cm206, "1-2i"); /* base,irq or irq,base */
/* This is not a very smart implementation. We could optimize for
consecutive block numbers. I'm not convinced this would really
bring down the processor load. */
-static void do_cm206_request(void)
+static void do_cm206_request(request_queue_t * q)
{
long int i, cd_sec_no;
int quarter, error;
cleanup(3);
return -EIO;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blksize_size[MAJOR_NR] = cm206_blocksizes;
read_ahead[MAJOR_NR] = 16; /* reads ahead what? */
init_bh(CM206_BH, cm206_bh);
#ifdef MODULE
-static int cm206[2] = {0,0}; /* for compatible `insmod' parameter passing */
void __init parse_options(void)
{
/* Schnittstellen zum Kern/FS */
-static void do_gscd_request (void);
+static void do_gscd_request (request_queue_t *);
+static void __do_gscd_request (void);
static int gscd_ioctl (struct inode *, struct file *, unsigned int, unsigned long);
static int gscd_open (struct inode *, struct file *);
static int gscd_release (struct inode *, struct file *);
* I/O request routine called from Linux kernel.
*/
-static void do_gscd_request (void)
+static void do_gscd_request (request_queue_t * q)
+{
+ __do_gscd_request();
+}
+
+static void __do_gscd_request (void)
{
unsigned int block,dev;
unsigned int nsect;
end_request(1);
}
}
- SET_TIMER(do_gscd_request, 1);
+ SET_TIMER(__do_gscd_request, 1);
}
return -EIO;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blksize_size[MAJOR_NR] = gscd_blocksizes;
read_ahead[MAJOR_NR] = 4;
static void
-do_mcd_request(void)
+do_mcd_request(request_queue_t * q)
{
#ifdef TEST2
printk(" do_mcd_request(%ld+%ld)\n", CURRENT -> sector, CURRENT -> nr_sectors);
}
blksize_size[MAJOR_NR] = mcd_blocksizes;
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 4;
/* check for card */
/* declared in blk.h */
int mcdx_init(void);
-void do_mcdx_request(void);
+void do_mcdx_request(request_queue_t * q);
/* already declared in init/main */
void mcdx_setup(char *, int *);
}
}
-void do_mcdx_request()
+void do_mcdx_request(request_queue_t * q)
{
int dev;
struct s_drive_stuff *stuffp;
return 1;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = READ_AHEAD;
blksize_size[MAJOR_NR] = mcdx_blocksizes;
}
-static void do_optcd_request(void)
+static void do_optcd_request(request_queue_t * q)
{
DEBUG((DEBUG_REQUEST, "do_optcd_request(%ld+%ld)",
CURRENT -> sector, CURRENT -> nr_sectors));
hardsect_size[MAJOR_NR] = &hsecsize;
blksize_size[MAJOR_NR] = &blksize;
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = 4;
request_region(optcd_port, 4, "optcd");
/*
* I/O request routine, called from Linux kernel.
*/
-static void DO_SBPCD_REQUEST(void)
+static void DO_SBPCD_REQUEST(request_queue_t * q)
{
u_int block;
u_int nsect;
goto init_done;
#endif MODULE
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[MAJOR_NR] = buffers * (CD_FRAMESIZE / 512);
request_region(CDo_command,4,major_name);
SJCD_SET_TIMER( sjcd_poll, 1 );
}
-static void do_sjcd_request( void ){
+static void do_sjcd_request( request_queue_t * q ){
#if defined( SJCD_TRACE )
printk( "SJCD: do_sjcd_request(%ld+%ld)\n",
CURRENT->sector, CURRENT->nr_sectors );
return( -EIO );
}
- blk_dev[ MAJOR_NR ].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
read_ahead[ MAJOR_NR ] = 4;
if( check_region( sjcd_base, 4 ) ){
* data access on a CD is done sequentially, this saves a lot of operations.
*/
static void
-do_cdu535_request(void)
+do_cdu535_request(request_queue_t * q)
{
unsigned int dev;
unsigned int read_size;
MAJOR_NR, CDU535_MESSAGE_NAME);
return -EIO;
}
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blksize_size[MAJOR_NR] = &sonycd535_block_size;
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */
fi
endmenu
-if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- bool 'Direct Rendering Manager (XFree86 DRI support) (EXPERIMENTAL)' CONFIG_DRM
- dep_tristate ' 3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM
- if [ "$CONFIG_DRM" = "y" ]; then
- dep_tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA m
- fi
+bool 'Direct Rendering Manager (XFree86 DRI support) (EXPERIMENTAL)' CONFIG_DRM
+dep_tristate ' 3dfx Banshee/Voodoo3' CONFIG_DRM_TDFX $CONFIG_DRM
+if [ "$CONFIG_DRM" = "y" ]; then
+ dep_tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA m
fi
if [ "$CONFIG_PCMCIA" != "n" ]; then
fi
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- dep_tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP m
- if [ "$CONFIG_AGP" = "m" ]; then
+ tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP
+ if [ "$CONFIG_AGP" != "n" ]; then
bool ' Intel 440LX/BX/GX support' CONFIG_AGP_INTEL
bool ' Intel I810/I810 DC100/I810e support' CONFIG_AGP_I810
bool ' VIA VP3/MVP3/Apollo Pro support' CONFIG_AGP_VIA
endif
endif
-ifeq ($(CONFIG_AGP), m)
+ifeq ($(CONFIG_AGP), y)
+ SUB_DIRS += agp
ALL_SUB_DIRS += agp
MOD_SUB_DIRS += agp
+else
+ ifeq ($(CONFIG_AGP), m)
+ ALL_SUB_DIRS += agp
+ MOD_SUB_DIRS += agp
+ endif
endif
include $(TOPDIR)/Rules.make
# space ioctl interface to use agp memory. It also adds a kernel interface
# that other drivers could use to manipulate agp memory.
-M_OBJS := agpgart.o
+O_TARGET := agp.o
-CFLAGS_agp_backend.o :=
-
-ifdef CONFIG_AGP_I810
-CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_I810
-endif
-ifdef CONFIG_AGP_INTEL
-CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_GENERIC
-endif
-ifdef CONFIG_AGP_VIA
-CFLAGS_agp_backend.o += -DAGP_BUILD_VIA_GENERIC
-endif
-ifdef CONFIG_AGP_AMD
-CFLAGS_agp_backend.o += -DAGP_BUILD_AMD_IRONGATE
-endif
-ifdef CONFIG_AGP_SIS
-CFLAGS_agp_backend.o += -DAGP_BUILD_SIS_GENERIC
-endif
-ifdef CONFIG_AGP_ALI
-CFLAGS_agp_backend.o += -DAGP_BUILD_ALI_M1541
+ifeq ($(CONFIG_AGP),y)
+ O_OBJS += agpgart.o
+else
+ ifeq ($(CONFIG_AGP), m)
+ M_OBJS += agpgart.o
+ endif
endif
include $(TOPDIR)/Rules.make
-agpgart.o: agp_backend.o agpgart_fe.o
- $(LD) $(LD_RFLAG) -r -o $@ agp_backend.o agpgart_fe.o
+agpgart.o: agpgart_be.o agpgart_fe.o
+ $(LD) $(LD_RFLAG) -r -o $@ agpgart_be.o agpgart_fe.o
--- /dev/null
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+enum aper_size_type {
+ U8_APER_SIZE,
+ U16_APER_SIZE,
+ U32_APER_SIZE,
+ FIXED_APER_SIZE
+};
+
+typedef struct _gatt_mask {
+ unsigned long mask;
+ u32 type;
+ /* totally device specific; integrated chipsets
+ * might have different types of memory masks. For other
+ * devices this will probably be ignored. */
+} gatt_mask;
+
+typedef struct _aper_size_info_8 {
+ int size;
+ int num_entries;
+ int page_order;
+ u8 size_value;
+} aper_size_info_8;
+
+typedef struct _aper_size_info_16 {
+ int size;
+ int num_entries;
+ int page_order;
+ u16 size_value;
+} aper_size_info_16;
+
+typedef struct _aper_size_info_32 {
+ int size;
+ int num_entries;
+ int page_order;
+ u32 size_value;
+} aper_size_info_32;
+
+typedef struct _aper_size_info_fixed {
+ int size;
+ int num_entries;
+ int page_order;
+} aper_size_info_fixed;
+
+struct agp_bridge_data {
+ agp_version *version;
+ void *aperture_sizes;
+ void *previous_size;
+ void *current_size;
+ void *dev_private_data;
+ struct pci_dev *dev;
+ gatt_mask *masks;
+ unsigned long *gatt_table;
+ unsigned long *gatt_table_real;
+ unsigned long scratch_page;
+ unsigned long gart_bus_addr;
+ unsigned long gatt_bus_addr;
+ u32 mode;
+ enum chipset_type type;
+ enum aper_size_type size_type;
+ u32 *key_list;
+ atomic_t current_memory_agp;
+ atomic_t agp_in_use;
+ int max_memory_agp; /* in number of pages */
+ int needs_scratch_page;
+ int aperture_size_idx;
+ int num_aperture_sizes;
+ int num_of_masks;
+ int capndx;
+
+ /* Links to driver specific functions */
+
+ int (*fetch_size) (void);
+ int (*configure) (void);
+ void (*agp_enable) (u32);
+ void (*cleanup) (void);
+ void (*tlb_flush) (agp_memory *);
+ unsigned long (*mask_memory) (unsigned long, int);
+ void (*cache_flush) (void);
+ int (*create_gatt_table) (void);
+ int (*free_gatt_table) (void);
+ int (*insert_memory) (agp_memory *, off_t, int);
+ int (*remove_memory) (agp_memory *, off_t, int);
+ agp_memory *(*alloc_by_type) (size_t, int);
+ void (*free_by_type) (agp_memory *);
+
+ /* Links to vendor/device specific setup functions */
+#ifdef CONFIG_AGP_INTEL
+ void (*intel_generic_setup) (void);
+#endif
+#ifdef CONFIG_AGP_I810
+ void (*intel_i810_setup) (struct pci_dev *);
+#endif
+#ifdef CONFIG_AGP_VIA
+ void (*via_generic_setup) (void);
+#endif
+#ifdef CONFIG_AGP_SIS
+ void (*sis_generic_setup) (void);
+#endif
+#ifdef CONFIG_AGP_AMD
+ void (*amd_irongate_setup) (void);
+#endif
+#ifdef CONFIG_AGP_ALI
+ void (*ali_generic_setup) (void);
+#endif
+};
+
+#define OUTREG32(mmap, addr, val) *(volatile u32 *)(mmap + (addr)) = (val)
+#define OUTREG16(mmap, addr, val) *(volatile u16 *)(mmap + (addr)) = (val)
+#define OUTREG8 (mmap, addr, val) *(volatile u8 *) (mmap + (addr)) = (val)
+
+#define INREG32(mmap, addr) *(volatile u32 *)(mmap + (addr))
+#define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr))
+#define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr))
+
+#define CACHE_FLUSH agp_bridge.cache_flush
+#define A_SIZE_8(x) ((aper_size_info_8 *) x)
+#define A_SIZE_16(x) ((aper_size_info_16 *) x)
+#define A_SIZE_32(x) ((aper_size_info_32 *) x)
+#define A_SIZE_FIX(x) ((aper_size_info_fixed *) x)
+#define A_IDX8() (A_SIZE_8(agp_bridge.aperture_sizes) + i)
+#define A_IDX16() (A_SIZE_16(agp_bridge.aperture_sizes) + i)
+#define A_IDX32() (A_SIZE_32(agp_bridge.aperture_sizes) + i)
+#define A_IDXFIX() (A_SIZE_FIX(agp_bridge.aperture_sizes) + i)
+#define MAXKEY (4096 * 32)
+
+#ifndef min
+#define min(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page)
+
+#ifndef PCI_DEVICE_ID_VIA_82C691_0
+#define PCI_DEVICE_ID_VIA_82C691_0 0x0691
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C691_1
+#define PCI_DEVICE_ID_VIA_82C691_1 0x8691
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_0
+#define PCI_DEVICE_ID_INTEL_810_0 0x7120
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_DC100_0
+#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_E_0
+#define PCI_DEVICE_ID_INTEL_810_E_0 0x7124
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82443GX_0
+#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_1
+#define PCI_DEVICE_ID_INTEL_810_1 0x7121
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_DC100_1
+#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_810_E_1
+#define PCI_DEVICE_ID_INTEL_810_E_1 0x7125
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82443GX_1
+#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1
+#endif
+#ifndef PCI_DEVICE_ID_AMD_IRONGATE_0
+#define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006
+#endif
+#ifndef PCI_VENDOR_ID_AL
+#define PCI_VENDOR_ID_AL 0x10b9
+#endif
+#ifndef PCI_DEVICE_ID_AL_M1541_0
+#define PCI_DEVICE_ID_AL_M1541_0 0x1541
+#endif
+
+/* intel register */
+#define INTEL_APBASE 0x10
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+/* VIA register */
+#define VIA_APBASE 0x10
+#define VIA_GARTCTRL 0x80
+#define VIA_APSIZE 0x84
+#define VIA_ATTBASE 0x88
+
+/* SiS registers */
+#define SIS_APBASE 0x10
+#define SIS_ATTBASE 0x90
+#define SIS_APSIZE 0x94
+#define SIS_TLBCNTRL 0x97
+#define SIS_TLBFLUSH 0x98
+
+/* AMD registers */
+#define AMD_APBASE 0x10
+#define AMD_MMBASE 0x14
+#define AMD_APSIZE 0xac
+#define AMD_MODECNTL 0xb0
+#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
+#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
+#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
+#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
+
+/* ALi registers */
+#define ALI_APBASE 0x10
+#define ALI_AGPCTRL 0xb8
+#define ALI_ATTBASE 0xbc
+#define ALI_TLBCTRL 0xc0
+
+#endif /* _AGP_BACKEND_PRIV_H */
+++ /dev/null
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#define EXPORT_SYMTAB
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/malloc.h>
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/miscdevice.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/page.h>
-
-#include <linux/agp_backend.h>
-#include "agp_backendP.h"
-
-static struct agp_bridge_data agp_bridge;
-
-#define CACHE_FLUSH agp_bridge.cache_flush
-
-MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
-MODULE_PARM(agp_try_unsupported, "1i");
-EXPORT_SYMBOL(agp_free_memory);
-EXPORT_SYMBOL(agp_allocate_memory);
-EXPORT_SYMBOL(agp_copy_info);
-EXPORT_SYMBOL(agp_bind_memory);
-EXPORT_SYMBOL(agp_unbind_memory);
-EXPORT_SYMBOL(agp_enable);
-EXPORT_SYMBOL(agp_backend_acquire);
-EXPORT_SYMBOL(agp_backend_release);
-
-static int agp_try_unsupported __initdata = 0;
-
-#ifdef __SMP__
-static atomic_t cpus_waiting;
-#endif
-
-int agp_backend_acquire(void)
-{
- atomic_inc(&(agp_bridge.agp_in_use));
-
- if (atomic_read(&(agp_bridge.agp_in_use)) != 1) {
- atomic_dec(&(agp_bridge.agp_in_use));
- return -EBUSY;
- }
- MOD_INC_USE_COUNT;
- return 0;
-}
-
-void agp_backend_release(void)
-{
- atomic_dec(&(agp_bridge.agp_in_use));
- MOD_DEC_USE_COUNT;
-}
-
-static void flush_cache(void)
-{
- asm volatile ("wbinvd":::"memory");
-}
-
-#ifdef __SMP__
-static void ipi_handler(void *null)
-{
- flush_cache();
- atomic_dec(&cpus_waiting);
- while (atomic_read(&cpus_waiting) > 0)
- barrier();
-}
-
-static void smp_flush_cache(void)
-{
- atomic_set(&cpus_waiting, smp_num_cpus - 1);
- if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
- panic("agpgart: timed out waiting for the other CPUs!\n");
- flush_cache();
- while (atomic_read(&cpus_waiting) > 0)
- barrier();
-}
-#endif
-
-/*
- * Basic Page Allocation Routines -
- * These routines handle page allocation
- * and by default they reserve the allocated
- * memory. They also handle incrementing the
- * current_memory_agp value, Which is checked
- * against a maximum value.
- */
-
-static void *agp_alloc_page(void)
-{
- void *pt;
-
- pt = (void *) __get_free_page(GFP_KERNEL);
- if (pt == NULL) {
- return NULL;
- }
- atomic_inc(&(mem_map[MAP_NR(pt)].count));
- set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
- atomic_inc(&(agp_bridge.current_memory_agp));
- return pt;
-}
-
-static void agp_destroy_page(void *pt)
-{
- if (pt == NULL)
- return;
-
- atomic_dec(&(mem_map[MAP_NR(pt)].count));
- clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
- wake_up(&mem_map[MAP_NR(pt)].wait);
- free_page((unsigned long) pt);
- atomic_dec(&(agp_bridge.current_memory_agp));
-}
-
-/* End Basic Page Allocation Routines */
-
-/*
- * Generic routines for handling agp_memory structures -
- * They use the basic page allocation routines to do the
- * brunt of the work.
- */
-
-#define MAXKEY (4096 * 32)
-
-static void agp_free_key(int key)
-{
-
- if (key < 0) {
- return;
- }
- if (key < MAXKEY) {
- clear_bit(key, agp_bridge.key_list);
- }
-}
-
-static int agp_get_key(void)
-{
- int bit;
-
- bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
- if (bit < MAXKEY) {
- set_bit(bit, agp_bridge.key_list);
- return bit;
- }
- return -1;
-}
-
-static agp_memory *agp_create_memory(int scratch_pages)
-{
- agp_memory *new;
-
- new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
-
- if (new == NULL) {
- return NULL;
- }
- memset(new, 0, sizeof(agp_memory));
- new->key = agp_get_key();
-
- if (new->key < 0) {
- kfree(new);
- return NULL;
- }
- new->memory = vmalloc(PAGE_SIZE * scratch_pages);
-
- if (new->memory == NULL) {
- agp_free_key(new->key);
- kfree(new);
- return NULL;
- }
- new->num_scratch_pages = scratch_pages;
- return new;
-}
-
-void agp_free_memory(agp_memory * curr)
-{
- int i;
-
- if (curr == NULL) {
- return;
- }
- if (curr->is_bound == TRUE) {
- agp_unbind_memory(curr);
- }
- if (curr->type != 0) {
- agp_bridge.free_by_type(curr);
- MOD_DEC_USE_COUNT;
- return;
- }
- if (curr->page_count != 0) {
- for (i = 0; i < curr->page_count; i++) {
- curr->memory[i] &= ~(0x00000fff);
- agp_destroy_page((void *) phys_to_virt(curr->memory[i]));
- }
- }
- agp_free_key(curr->key);
- vfree(curr->memory);
- kfree(curr);
- MOD_DEC_USE_COUNT;
-}
-
-#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
-
-agp_memory *agp_allocate_memory(size_t page_count, u32 type)
-{
- int scratch_pages;
- agp_memory *new;
- int i;
-
- if ((atomic_read(&(agp_bridge.current_memory_agp)) + page_count) >
- agp_bridge.max_memory_agp) {
- return NULL;
- }
- if (type != 0) {
- new = agp_bridge.alloc_by_type(page_count, type);
- return new;
- }
- scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
-
- new = agp_create_memory(scratch_pages);
-
- if (new == NULL) {
- return NULL;
- }
- for (i = 0; i < page_count; i++) {
- new->memory[i] = (unsigned long) agp_alloc_page();
-
- if ((void *) new->memory[i] == NULL) {
- /* Free this structure */
- agp_free_memory(new);
- return NULL;
- }
- new->memory[i] =
- agp_bridge.mask_memory(virt_to_phys((void *) new->memory[i]), type);
- new->page_count++;
- }
-
- MOD_INC_USE_COUNT;
- return new;
-}
-
-/* End - Generic routines for handling agp_memory structures */
-
-static int agp_return_size(void)
-{
- int current_size;
- void *temp;
-
- temp = agp_bridge.current_size;
-
- switch (agp_bridge.size_type) {
- case U8_APER_SIZE:
- current_size = ((aper_size_info_8 *) temp)->size;
- break;
- case U16_APER_SIZE:
- current_size = ((aper_size_info_16 *) temp)->size;
- break;
- case U32_APER_SIZE:
- current_size = ((aper_size_info_32 *) temp)->size;
- break;
- case FIXED_APER_SIZE:
- current_size = ((aper_size_info_fixed *) temp)->size;
- break;
- default:
- current_size = 0;
- break;
- }
-
- return current_size;
-}
-
-/* Routine to copy over information structure */
-
-void agp_copy_info(agp_kern_info * info)
-{
- memset(info, 0, sizeof(agp_kern_info));
- info->version.major = agp_bridge.version->major;
- info->version.minor = agp_bridge.version->minor;
- info->device = agp_bridge.dev;
- info->chipset = agp_bridge.type;
- info->mode = agp_bridge.mode;
- info->aper_base = agp_bridge.gart_bus_addr;
- info->aper_size = agp_return_size();
- info->max_memory = agp_bridge.max_memory_agp;
- info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
-}
-
-/* End - Routine to copy over information structure */
-
-/*
- * Routines for handling swapping of agp_memory into the GATT -
- * These routines take agp_memory and insert them into the GATT.
- * They call device specific routines to actually write to the GATT.
- */
-
-int agp_bind_memory(agp_memory * curr, off_t pg_start)
-{
- int ret_val;
-
- if ((curr == NULL) || (curr->is_bound == TRUE)) {
- return -EINVAL;
- }
- if (curr->is_flushed == FALSE) {
- CACHE_FLUSH();
- curr->is_flushed = TRUE;
- }
- ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
-
- if (ret_val != 0) {
- return ret_val;
- }
- curr->is_bound = TRUE;
- curr->pg_start = pg_start;
- return 0;
-}
-
-int agp_unbind_memory(agp_memory * curr)
-{
- int ret_val;
-
- if (curr == NULL) {
- return -EINVAL;
- }
- if (curr->is_bound != TRUE) {
- return -EINVAL;
- }
- ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
-
- if (ret_val != 0) {
- return ret_val;
- }
- curr->is_bound = FALSE;
- curr->pg_start = 0;
- return 0;
-}
-
-/* End - Routines for handling swapping of agp_memory into the GATT */
-
-/*
- * Driver routines - start
- * Currently this module supports the
- * i810, 440lx, 440bx, 440gx, via vp3, via mvp3,
- * amd irongate, ALi M1541 and generic support for the
- * SiS chipsets.
- */
-
-/* Generic Agp routines - Start */
-
-static void agp_generic_agp_enable(u32 mode)
-{
- struct pci_dev *device = NULL;
- u32 command, scratch, cap_id;
- u8 cap_ptr;
-
- pci_read_config_dword(agp_bridge.dev,
- agp_bridge.capndx + 4,
- &command);
-
- /*
- * PASS1: go throu all devices that claim to be
- * AGP devices and collect their data.
- */
-
- while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) {
- pci_read_config_dword(device, 0x04, &scratch);
-
- if (!(scratch & 0x00100000))
- continue;
-
- pci_read_config_byte(device, 0x34, &cap_ptr);
-
- if (cap_ptr != 0x00) {
- do {
- pci_read_config_dword(device, cap_ptr, &cap_id);
-
- if ((cap_id & 0xff) != 0x02)
- cap_ptr = (cap_id >> 8) & 0xff;
- }
- while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
- }
- if (cap_ptr != 0x00) {
- /*
- * Ok, here we have a AGP device. Disable impossible settings,
- * and adjust the readqueue to the minimum.
- */
-
- pci_read_config_dword(device, cap_ptr + 4, &scratch);
-
- /* adjust RQ depth */
- command =
- ((command & ~0xff000000) |
- min((mode & 0xff000000), min((command & 0xff000000), (scratch & 0xff000000))));
-
- /* disable SBA if it's not supported */
- if (!((command & 0x00000200) && (scratch & 0x00000200) && (mode & 0x00000200)))
- command &= ~0x00000200;
-
- /* disable FW if it's not supported */
- if (!((command & 0x00000010) && (scratch & 0x00000010) && (mode & 0x00000010)))
- command &= ~0x00000010;
-
- if (!((command & 4) && (scratch & 4) && (mode & 4)))
- command &= ~0x00000004;
-
- if (!((command & 2) && (scratch & 2) && (mode & 2)))
- command &= ~0x00000002;
-
- if (!((command & 1) && (scratch & 1) && (mode & 1)))
- command &= ~0x00000001;
- }
- }
- /*
- * PASS2: Figure out the 4X/2X/1X setting and enable the
- * target (our motherboard chipset).
- */
-
- if (command & 4) {
- command &= ~3; /* 4X */
- }
- if (command & 2) {
- command &= ~5; /* 2X */
- }
- if (command & 1) {
- command &= ~6; /* 1X */
- }
- command |= 0x00000100;
-
- pci_write_config_dword(agp_bridge.dev,
- agp_bridge.capndx + 8,
- command);
-
- /*
- * PASS3: Go throu all AGP devices and update the
- * command registers.
- */
-
- while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) {
- pci_read_config_dword(device, 0x04, &scratch);
-
- if (!(scratch & 0x00100000))
- continue;
-
- pci_read_config_byte(device, 0x34, &cap_ptr);
-
- if (cap_ptr != 0x00) {
- do {
- pci_read_config_dword(device, cap_ptr, &cap_id);
-
- if ((cap_id & 0xff) != 0x02)
- cap_ptr = (cap_id >> 8) & 0xff;
- }
- while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
- }
- if (cap_ptr != 0x00)
- pci_write_config_dword(device, cap_ptr + 8, command);
- }
-}
-
-static int agp_generic_create_gatt_table(void)
-{
- char *table;
- char *table_end;
- int size;
- int page_order;
- int num_entries;
- int i;
- void *temp;
-
- table = NULL;
- i = agp_bridge.aperture_size_idx;
- temp = agp_bridge.current_size;
- size = page_order = num_entries = 0;
-
- if (agp_bridge.size_type != FIXED_APER_SIZE) {
- do {
- switch (agp_bridge.size_type) {
- case U8_APER_SIZE:
- size = ((aper_size_info_8 *) temp)->size;
- page_order = ((aper_size_info_8 *) temp)->page_order;
- num_entries = ((aper_size_info_8 *) temp)->num_entries;
- break;
- case U16_APER_SIZE:
- size = ((aper_size_info_16 *) temp)->size;
- page_order = ((aper_size_info_16 *) temp)->page_order;
- num_entries = ((aper_size_info_16 *) temp)->num_entries;
- break;
- case U32_APER_SIZE:
- size = ((aper_size_info_32 *) temp)->size;
- page_order = ((aper_size_info_32 *) temp)->page_order;
- num_entries = ((aper_size_info_32 *) temp)->num_entries;
- break;
- /* This case will never really happen */
- case FIXED_APER_SIZE:
- default:
- size = page_order = num_entries = 0;
- break;
- }
-
- table = (char *) __get_free_pages(GFP_KERNEL, page_order);
-
- if (table == NULL) {
- i++;
-
- switch (agp_bridge.size_type) {
- case U8_APER_SIZE:
- agp_bridge.current_size = (((aper_size_info_8 *) agp_bridge.aperture_sizes) + i);
- break;
- case U16_APER_SIZE:
- agp_bridge.current_size = (((aper_size_info_16 *) agp_bridge.aperture_sizes) + i);
- break;
- case U32_APER_SIZE:
- agp_bridge.current_size = (((aper_size_info_32 *) agp_bridge.aperture_sizes) + i);
- break;
- /* This case will never really happen */
- case FIXED_APER_SIZE:
- default:
- size = page_order = num_entries = 0;
- break;
- }
- } else {
- agp_bridge.aperture_size_idx = i;
- }
- } while ((table == NULL) && (i < agp_bridge.num_aperture_sizes));
- } else {
- size = ((aper_size_info_fixed *) temp)->size;
- page_order = ((aper_size_info_fixed *) temp)->page_order;
- num_entries = ((aper_size_info_fixed *) temp)->num_entries;
- table = (char *) __get_free_pages(GFP_KERNEL, page_order);
- }
-
- if (table == NULL) {
- return -ENOMEM;
- }
- table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
-
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- set_bit(PG_reserved, &mem_map[i].flags);
- }
-
- agp_bridge.gatt_table_real = (unsigned long *) table;
- CACHE_FLUSH();
- agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
- (PAGE_SIZE * (1 << page_order)));
- CACHE_FLUSH();
-
- if (agp_bridge.gatt_table == NULL) {
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- clear_bit(PG_reserved, &mem_map[i].flags);
- }
-
- free_pages((unsigned long) table, page_order);
-
- return -ENOMEM;
- }
- agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
-
- for (i = 0; i < num_entries; i++) {
- agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
- }
-
- return 0;
-}
-
-static int agp_generic_free_gatt_table(void)
-{
- int i;
- int page_order;
- char *table, *table_end;
- void *temp;
-
- temp = agp_bridge.current_size;
-
- switch (agp_bridge.size_type) {
- case U8_APER_SIZE:
- page_order = ((aper_size_info_8 *) temp)->page_order;
- break;
- case U16_APER_SIZE:
- page_order = ((aper_size_info_16 *) temp)->page_order;
- break;
- case U32_APER_SIZE:
- page_order = ((aper_size_info_32 *) temp)->page_order;
- break;
- case FIXED_APER_SIZE:
- page_order = ((aper_size_info_fixed *) temp)->page_order;
- break;
- default:
- page_order = 0;
- break;
- }
-
- /* Do not worry about freeing memory, because if this is
- * called, then all agp memory is deallocated and removed
- * from the table.
- */
-
- iounmap(agp_bridge.gatt_table);
- table = (char *) agp_bridge.gatt_table_real;
- table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
-
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- clear_bit(PG_reserved, &mem_map[i].flags);
- }
-
- free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
- return 0;
-}
-
-static int agp_generic_insert_memory(agp_memory * mem,
- off_t pg_start, int type)
-{
- int i, j, num_entries;
- void *temp;
-
- temp = agp_bridge.current_size;
-
- switch (agp_bridge.size_type) {
- case U8_APER_SIZE:
- num_entries = ((aper_size_info_8 *) temp)->num_entries;
- break;
- case U16_APER_SIZE:
- num_entries = ((aper_size_info_16 *) temp)->num_entries;
- break;
- case U32_APER_SIZE:
- num_entries = ((aper_size_info_32 *) temp)->num_entries;
- break;
- case FIXED_APER_SIZE:
- num_entries = ((aper_size_info_fixed *) temp)->num_entries;
- break;
- default:
- num_entries = 0;
- break;
- }
-
- if (type != 0 || mem->type != 0) {
- /* The generic routines know nothing of memory types */
- return -EINVAL;
- }
- if ((pg_start + mem->page_count) > num_entries) {
- return -EINVAL;
- }
- j = pg_start;
-
- while (j < (pg_start + mem->page_count)) {
- if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
- return -EBUSY;
- }
- j++;
- }
-
- if (mem->is_flushed == FALSE) {
- CACHE_FLUSH();
- mem->is_flushed = TRUE;
- }
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- agp_bridge.gatt_table[j] = mem->memory[i];
- }
-
- agp_bridge.tlb_flush(mem);
- return 0;
-}
-
-static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (type != 0 || mem->type != 0) {
- /* The generic routines know nothing of memory types */
- return -EINVAL;
- }
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
- }
-
- agp_bridge.tlb_flush(mem);
- return 0;
-}
-
-static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
-{
- return NULL;
-}
-
-static void agp_generic_free_by_type(agp_memory * curr)
-{
- if (curr->memory != NULL) {
- vfree(curr->memory);
- }
- agp_free_key(curr->key);
- kfree(curr);
-}
-
-void agp_enable(u32 mode)
-{
- agp_bridge.agp_enable(mode);
-}
-
-/* End - Generic Agp routines */
-
-#ifdef AGP_BUILD_INTEL_I810
-
-static aper_size_info_fixed intel_i810_sizes[] =
-{
- {64, 16384, 4},
- /* The 32M mode still requires a 64k gatt */
- {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-
-static gatt_mask intel_i810_masks[] =
-{
- {I810_PTE_VALID, 0},
- {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY}
-};
-
-static struct _intel_i810_private {
- struct pci_dev *i810_dev; /* device one */
- volatile unsigned char *registers;
- int num_dcache_entries;
-} intel_i810_private;
-
-static int intel_i810_fetch_size(void)
-{
- u32 smram_miscc;
- aper_size_info_fixed *values;
-
- pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
- values = (aper_size_info_fixed *) agp_bridge.aperture_sizes;
-
- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- printk("agpgart: i810 is disabled\n");
- return 0;
- }
- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
- agp_bridge.previous_size =
- agp_bridge.current_size = (void *) (values + 1);
- agp_bridge.aperture_size_idx = 1;
- return values[1].size;
- } else {
- agp_bridge.previous_size =
- agp_bridge.current_size = (void *) (values);
- agp_bridge.aperture_size_idx = 0;
- return values[0].size;
- }
-
- return 0;
-}
-
-static int intel_i810_configure(void)
-{
- aper_size_info_fixed *current_size;
- u32 temp;
- int i;
-
- current_size = (aper_size_info_fixed *) agp_bridge.current_size;
-
- pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_i810_private.registers =
- (volatile unsigned char *) ioremap(temp, 128 * 4096);
-
- if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
- /* This will need to be dynamically assigned */
- printk("agpgart: detected 4MB dedicated video ram.\n");
- intel_i810_private.num_dcache_entries = 1024;
- }
- pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
- agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
- agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
- CACHE_FLUSH();
-
- if (agp_bridge.needs_scratch_page == TRUE) {
- for (i = 0; i < current_size->num_entries; i++) {
- OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4),
- agp_bridge.scratch_page);
- }
- }
- return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
- OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
- iounmap((void *) intel_i810_private.registers);
-}
-
-static void intel_i810_tlbflush(agp_memory * mem)
-{
- return;
-}
-
-static void intel_i810_agp_enable(u32 mode)
-{
- return;
-}
-
-static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
- int type)
-{
- int i, j, num_entries;
- void *temp;
-
- temp = agp_bridge.current_size;
- num_entries = ((aper_size_info_fixed *) temp)->num_entries;
-
- if ((pg_start + mem->page_count) > num_entries) {
- return -EINVAL;
- }
- for (j = pg_start; j < (pg_start + mem->page_count); j++) {
- if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
- return -EBUSY;
- }
- }
-
- if (type != 0 || mem->type != 0) {
- if ((type == AGP_DCACHE_MEMORY) &&
- (mem->type == AGP_DCACHE_MEMORY)) {
- /* special insert */
-
- for (i = pg_start; i < (pg_start + mem->page_count); i++) {
- OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4),
- (i * 4096) | I810_PTE_LOCAL | I810_PTE_VALID);
- }
-
- agp_bridge.tlb_flush(mem);
- return 0;
- }
- return -EINVAL;
- }
- if (mem->is_flushed == FALSE) {
- CACHE_FLUSH();
- mem->is_flushed = TRUE;
- }
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- OUTREG32(intel_i810_private.registers,
- I810_PTE_BASE + (j * 4), mem->memory[i]);
- }
-
- agp_bridge.tlb_flush(mem);
- return 0;
-}
-
-static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
- int type)
-{
- int i;
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4),
- agp_bridge.scratch_page);
- }
-
- agp_bridge.tlb_flush(mem);
- return 0;
-}
-
-static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
- agp_memory *new;
-
- if (type == AGP_DCACHE_MEMORY) {
- if (pg_count != intel_i810_private.num_dcache_entries) {
- return NULL;
- }
- new = agp_create_memory(1);
-
- if (new == NULL) {
- return NULL;
- }
- new->type = AGP_DCACHE_MEMORY;
- new->page_count = pg_count;
- new->num_scratch_pages = 0;
- vfree(new->memory);
- return new;
- }
- return NULL;
-}
-
-static void intel_i810_free_by_type(agp_memory * curr)
-{
- agp_free_key(curr->key);
- kfree(curr);
-}
-
-static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
-{
- /* Type checking must be done elsewhere */
- return addr | agp_bridge.masks[type].mask;
-}
-
-static void intel_i810_setup(struct pci_dev *i810_dev)
-{
- intel_i810_private.i810_dev = i810_dev;
-
- agp_bridge.masks = intel_i810_masks;
- agp_bridge.num_of_masks = 2;
- agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
- agp_bridge.size_type = FIXED_APER_SIZE;
- agp_bridge.num_aperture_sizes = 2;
- agp_bridge.dev_private_data = (void *) &intel_i810_private;
- agp_bridge.needs_scratch_page = TRUE;
- agp_bridge.configure = intel_i810_configure;
- agp_bridge.fetch_size = intel_i810_fetch_size;
- agp_bridge.cleanup = intel_i810_cleanup;
- agp_bridge.tlb_flush = intel_i810_tlbflush;
- agp_bridge.mask_memory = intel_i810_mask_memory;
- agp_bridge.agp_enable = intel_i810_agp_enable;
-#ifdef __SMP__
- agp_bridge.cache_flush = smp_flush_cache;
-#else
- agp_bridge.cache_flush = flush_cache;
-#endif
- agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
- agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
- agp_bridge.insert_memory = intel_i810_insert_entries;
- agp_bridge.remove_memory = intel_i810_remove_entries;
- agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
- agp_bridge.free_by_type = intel_i810_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_INTEL_GENERIC
-
-static int intel_fetch_size(void)
-{
- int i;
- u16 temp;
- aper_size_info_16 *values;
-
- pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
- (void *) values = agp_bridge.aperture_sizes;
-
- for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
- if (temp == values[i].size_value) {
- agp_bridge.previous_size =
- agp_bridge.current_size = (void *) (values + i);
- agp_bridge.aperture_size_idx = i;
- return values[i].size;
- }
- }
-
- return 0;
-}
-
-static void intel_tlbflush(agp_memory * mem)
-{
- pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
- pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
-}
-
-static void intel_cleanup(void)
-{
- u16 temp;
- aper_size_info_16 *previous_size;
-
- previous_size = (aper_size_info_16 *) agp_bridge.previous_size;
- pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
- pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
- pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, previous_size->size_value);
-}
-
-static int intel_configure(void)
-{
- u32 temp;
- u16 temp2;
- aper_size_info_16 *current_size;
-
- current_size = (aper_size_info_16 *) agp_bridge.current_size;
-
- /* aperture size */
- pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, current_size->size_value);
-
- /* address to map to */
- pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
- agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- /* attbase - aperture base */
- pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr);
-
- /* agpctrl */
- pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
-
- /* paccfg/nbxcfg */
- pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
- pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9));
- /* clear any possible error conditions */
- pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
- return 0;
-}
-
-static unsigned long intel_mask_memory(unsigned long addr, int type)
-{
- /* Memory type is ignored */
-
- return addr | agp_bridge.masks[0].mask;
-}
-
-
-/* Setup function */
-static gatt_mask intel_generic_masks[] =
-{
- {0x00000017, 0}
-};
-
-static aper_size_info_16 intel_generic_sizes[7] =
-{
- {256, 65536, 6, 0},
- {128, 32768, 5, 32},
- {64, 16384, 4, 48},
- {32, 8192, 3, 56},
- {16, 4096, 2, 60},
- {8, 2048, 1, 62},
- {4, 1024, 0, 63}
-};
-
-static void intel_generic_setup(void)
-{
- agp_bridge.masks = intel_generic_masks;
- agp_bridge.num_of_masks = 1;
- agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
- agp_bridge.size_type = U16_APER_SIZE;
- agp_bridge.num_aperture_sizes = 7;
- agp_bridge.dev_private_data = NULL;
- agp_bridge.needs_scratch_page = FALSE;
- agp_bridge.configure = intel_configure;
- agp_bridge.fetch_size = intel_fetch_size;
- agp_bridge.cleanup = intel_cleanup;
- agp_bridge.tlb_flush = intel_tlbflush;
- agp_bridge.mask_memory = intel_mask_memory;
- agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
- agp_bridge.cache_flush = smp_flush_cache;
-#else
- agp_bridge.cache_flush = flush_cache;
-#endif
- agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
- agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
- agp_bridge.insert_memory = agp_generic_insert_memory;
- agp_bridge.remove_memory = agp_generic_remove_memory;
- agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
- agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_VIA_GENERIC
-
-static int via_fetch_size(void)
-{
- int i;
- u8 temp;
- aper_size_info_8 *values;
-
- (void *) values = agp_bridge.aperture_sizes;
- pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
- for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
- if (temp == values[i].size_value) {
- agp_bridge.previous_size =
- agp_bridge.current_size = (void *) (values + i);
- agp_bridge.aperture_size_idx = i;
- return values[i].size;
- }
- }
-
- return 0;
-}
-
-static int via_configure(void)
-{
- u32 temp;
- aper_size_info_8 *current_size;
-
- current_size = (aper_size_info_8 *) agp_bridge.current_size;
- /* aperture size */
- pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, current_size->size_value);
- /* address to map too */
- pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
- agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- /* GART control register */
- pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
-
- /* attbase - aperture GATT base */
- pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
- (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
- return 0;
-}
-
-static void via_cleanup(void)
-{
- aper_size_info_8 *previous_size;
-
- previous_size = (aper_size_info_8 *) agp_bridge.previous_size;
- pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
- pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, previous_size->size_value);
-}
-
-static void via_tlbflush(agp_memory * mem)
-{
- pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
- pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
-}
-
-static unsigned long via_mask_memory(unsigned long addr, int type)
-{
- /* Memory type is ignored */
-
- return addr | agp_bridge.masks[0].mask;
-}
-
-static aper_size_info_8 via_generic_sizes[7] =
-{
- {256, 65536, 6, 0},
- {128, 32768, 5, 128},
- {64, 16384, 4, 192},
- {32, 8192, 3, 224},
- {16, 4096, 2, 240},
- {8, 2048, 1, 248},
- {4, 1024, 0, 252}
-};
-
-static gatt_mask via_generic_masks[] =
-{
- {0x00000000, 0}
-};
-
-static void via_generic_setup(void)
-{
- agp_bridge.masks = via_generic_masks;
- agp_bridge.num_of_masks = 1;
- agp_bridge.aperture_sizes = (void *) via_generic_sizes;
- agp_bridge.size_type = U8_APER_SIZE;
- agp_bridge.num_aperture_sizes = 7;
- agp_bridge.dev_private_data = NULL;
- agp_bridge.needs_scratch_page = FALSE;
- agp_bridge.configure = via_configure;
- agp_bridge.fetch_size = via_fetch_size;
- agp_bridge.cleanup = via_cleanup;
- agp_bridge.tlb_flush = via_tlbflush;
- agp_bridge.mask_memory = via_mask_memory;
- agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
- agp_bridge.cache_flush = smp_flush_cache;
-#else
- agp_bridge.cache_flush = flush_cache;
-#endif
- agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
- agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
- agp_bridge.insert_memory = agp_generic_insert_memory;
- agp_bridge.remove_memory = agp_generic_remove_memory;
- agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
- agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_SIS_GENERIC
-
-static int sis_fetch_size(void)
-{
- u8 temp_size;
- int i;
- aper_size_info_8 *values;
-
- pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
- (void *) values = agp_bridge.aperture_sizes;
- for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
- if ((temp_size == values[i].size_value) ||
- ((temp_size & ~(0x03)) == (values[i].size_value & ~(0x03)))) {
- agp_bridge.previous_size =
- agp_bridge.current_size = (void *) (values + i);
-
- agp_bridge.aperture_size_idx = i;
- return values[i].size;
- }
- }
-
- return 0;
-}
-
-
-static void sis_tlbflush(agp_memory * mem)
-{
- pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
-}
-
-static int sis_configure(void)
-{
- u32 temp;
- aper_size_info_8 *current_size;
-
- current_size = (aper_size_info_8 *) agp_bridge.current_size;
- pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
- pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
- agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, agp_bridge.gatt_bus_addr);
- pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, current_size->size_value);
- return 0;
-}
-
-static void sis_cleanup(void)
-{
- aper_size_info_8 *previous_size;
-
- previous_size = (aper_size_info_8 *) agp_bridge.previous_size;
- pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, (previous_size->size_value & ~(0x03)));
-}
-
-static unsigned long sis_mask_memory(unsigned long addr, int type)
-{
- /* Memory type is ignored */
-
- return addr | agp_bridge.masks[0].mask;
-}
-
-static aper_size_info_8 sis_generic_sizes[7] =
-{
- {256, 65536, 6, 99},
- {128, 32768, 5, 83},
- {64, 16384, 4, 67},
- {32, 8192, 3, 51},
- {16, 4096, 2, 35},
- {8, 2048, 1, 19},
- {4, 1024, 0, 3}
-};
-
-static gatt_mask sis_generic_masks[] =
-{
- {0x00000000, 0}
-};
-
-static void sis_generic_setup(void)
-{
- agp_bridge.masks = sis_generic_masks;
- agp_bridge.num_of_masks = 1;
- agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
- agp_bridge.size_type = U8_APER_SIZE;
- agp_bridge.num_aperture_sizes = 7;
- agp_bridge.dev_private_data = NULL;
- agp_bridge.needs_scratch_page = FALSE;
- agp_bridge.configure = sis_configure;
- agp_bridge.fetch_size = sis_fetch_size;
- agp_bridge.cleanup = sis_cleanup;
- agp_bridge.tlb_flush = sis_tlbflush;
- agp_bridge.mask_memory = sis_mask_memory;
- agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
- agp_bridge.cache_flush = smp_flush_cache;
-#else
- agp_bridge.cache_flush = flush_cache;
-#endif
- agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
- agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
- agp_bridge.insert_memory = agp_generic_insert_memory;
- agp_bridge.remove_memory = agp_generic_remove_memory;
- agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
- agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_AMD_IRONGATE
-
-static struct _amd_irongate_private {
- volatile unsigned char *registers;
-} amd_irongate_private;
-
-static int amd_irongate_fetch_size(void)
-{
- int i;
- u32 temp;
- aper_size_info_32 *values;
-
- pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
- temp = (temp & 0x0000000e);
- (void *) values = agp_bridge.aperture_sizes;
- for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
- if (temp == values[i].size_value) {
- agp_bridge.previous_size =
- agp_bridge.current_size = (void *) (values + i);
-
- agp_bridge.aperture_size_idx = i;
- return values[i].size;
- }
- }
-
- return 0;
-}
-
-static int amd_irongate_configure(void)
-{
- aper_size_info_32 *current_size;
- u32 temp;
- u16 enable_reg;
-
- current_size = (aper_size_info_32 *) agp_bridge.current_size;
-
- /* Get the memory mapped registers */
- pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
- temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- amd_irongate_private.registers = (volatile unsigned char *) ioremap(temp, 4096);
-
- /* Write out the address of the gatt table */
- OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, agp_bridge.gatt_bus_addr);
-
- /* Write the Sync register */
- pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);
-
- /* Write the enable register */
- enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
- enable_reg = (enable_reg | 0x0004);
- OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
-
- /* Write out the size register */
- pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
- temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001);
- pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
-
- /* Flush the tlb */
- OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
-
- /* Get the address for the gart region */
- pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
- temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- agp_bridge.gart_bus_addr = temp;
- return 0;
-}
-
-static void amd_irongate_cleanup(void)
-{
- aper_size_info_32 *previous_size;
- u32 temp;
- u16 enable_reg;
-
- previous_size = (aper_size_info_32 *) agp_bridge.previous_size;
-
- enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
- enable_reg = (enable_reg & ~(0x0004));
- OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
-
- /* Write back the previous size and disable gart translation */
- pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
- temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
- pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
- iounmap((void *) amd_irongate_private.registers);
-}
-
-/*
- * This routine could be implemented by taking the addresses
- * written to the GATT, and flushing them individually. However
- * currently it just flushes the whole table. Which is probably
- * more efficent, since agp_memory blocks can be a large number of
- * entries.
- */
-
-static void amd_irongate_tlbflush(agp_memory * temp)
-{
- OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
-}
-
-static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
-{
- /* Only type 0 is supported by the irongate */
-
- return addr | agp_bridge.masks[0].mask;
-}
-
-static aper_size_info_32 amd_irongate_sizes[7] =
-{
- {2048, 524288, 9, 0x0000000c},
- {1024, 262144, 8, 0x0000000a},
- {512, 131072, 7, 0x00000008},
- {256, 65536, 6, 0x00000006},
- {128, 32768, 5, 0x00000004},
- {64, 16384, 4, 0x00000002},
- {32, 8192, 3, 0x00000000}
-};
-
-static gatt_mask amd_irongate_masks[] =
-{
- {0x00000001, 0}
-};
-
-static void amd_irongate_setup(void)
-{
- agp_bridge.masks = amd_irongate_masks;
- agp_bridge.num_of_masks = 1;
- agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
- agp_bridge.size_type = U32_APER_SIZE;
- agp_bridge.num_aperture_sizes = 7;
- agp_bridge.dev_private_data = (void *) &amd_irongate_private;
- agp_bridge.needs_scratch_page = FALSE;
- agp_bridge.configure = amd_irongate_configure;
- agp_bridge.fetch_size = amd_irongate_fetch_size;
- agp_bridge.cleanup = amd_irongate_cleanup;
- agp_bridge.tlb_flush = amd_irongate_tlbflush;
- agp_bridge.mask_memory = amd_irongate_mask_memory;
- agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
- agp_bridge.cache_flush = smp_flush_cache;
-#else
- agp_bridge.cache_flush = flush_cache;
-#endif
- agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
- agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
- agp_bridge.insert_memory = agp_generic_insert_memory;
- agp_bridge.remove_memory = agp_generic_remove_memory;
- agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
- agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-#ifdef AGP_BUILD_ALI_M1541
-
-static int ali_fetch_size(void)
-{
- int i;
- u32 temp;
- aper_size_info_32 *values;
-
- pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
- temp &= ~(0xfffffff0);
- (void *) values = agp_bridge.aperture_sizes;
-
- for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
- if (temp == values[i].size_value) {
- agp_bridge.previous_size =
- agp_bridge.current_size = (void *) (values + i);
- agp_bridge.aperture_size_idx = i;
- return values[i].size;
- }
- }
-
- return 0;
-}
-
-static void ali_tlbflush(agp_memory * mem)
-{
- u32 temp;
-
- pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000090));
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000010));
-}
-
-static void ali_cleanup(void)
-{
- aper_size_info_32 *previous_size;
- u32 temp;
-
- previous_size = (aper_size_info_32 *) agp_bridge.previous_size;
-
- pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000090));
- pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, previous_size->size_value);
-}
-
-static int ali_configure(void)
-{
- u32 temp;
- aper_size_info_32 *current_size;
-
- current_size = (aper_size_info_32 *) agp_bridge.current_size;
-
- /* aperture size and gatt addr */
- pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
- agp_bridge.gatt_bus_addr | current_size->size_value);
-
- /* tlb control */
- pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000010));
-
- /* address to map to */
- pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
- agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- return 0;
-}
-
-static unsigned long ali_mask_memory(unsigned long addr, int type)
-{
- /* Memory type is ignored */
-
- return addr | agp_bridge.masks[0].mask;
-}
-
-
-/* Setup function */
-static gatt_mask ali_generic_masks[] =
-{
- {0x00000000, 0}
-};
-
-static aper_size_info_32 ali_generic_sizes[7] =
-{
- {256, 65536, 6, 10},
- {128, 32768, 5, 9},
- {64, 16384, 4, 8},
- {32, 8192, 3, 7},
- {16, 4096, 2, 6},
- {8, 2048, 1, 4},
- {4, 1024, 0, 3}
-};
-
-static void ali_generic_setup(void)
-{
- agp_bridge.masks = ali_generic_masks;
- agp_bridge.num_of_masks = 1;
- agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
- agp_bridge.size_type = U32_APER_SIZE;
- agp_bridge.num_aperture_sizes = 7;
- agp_bridge.dev_private_data = NULL;
- agp_bridge.needs_scratch_page = FALSE;
- agp_bridge.configure = ali_configure;
- agp_bridge.fetch_size = ali_fetch_size;
- agp_bridge.cleanup = ali_cleanup;
- agp_bridge.tlb_flush = ali_tlbflush;
- agp_bridge.mask_memory = ali_mask_memory;
- agp_bridge.agp_enable = agp_generic_agp_enable;
-#ifdef __SMP__
- agp_bridge.cache_flush = smp_flush_cache;
-#else
- agp_bridge.cache_flush = flush_cache;
-#endif
- agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
- agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
- agp_bridge.insert_memory = agp_generic_insert_memory;
- agp_bridge.remove_memory = agp_generic_remove_memory;
- agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
- agp_bridge.free_by_type = agp_generic_free_by_type;
-}
-
-#endif
-
-
-
-/* Supported Device Scanning routine */
-
-static void agp_find_supported_device(void)
-{
- struct pci_dev *dev = NULL;
- u8 cap_ptr = 0x00;
- u32 cap_id, scratch;
-
- if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) {
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- agp_bridge.dev = dev;
-
- /* Need to test for I810 here */
-#ifdef AGP_BUILD_INTEL_I810
- if (dev->vendor == PCI_VENDOR_ID_INTEL) {
- struct pci_dev *i810_dev;
-
- switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_810_0:
- i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_810_1,
- NULL);
- if (i810_dev == NULL) {
- printk("agpgart: Detected an Intel i810, but could not find the secondary device.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- printk("agpgart: Detected an Intel i810 Chipset.\n");
- agp_bridge.type = INTEL_I810;
- agp_bridge.intel_i810_setup(i810_dev);
- return;
-
- case PCI_DEVICE_ID_INTEL_810_DC100_0:
- i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_810_DC100_1,
- NULL);
- if (i810_dev == NULL) {
- printk("agpgart: Detected an Intel i810 DC100, but could not find the secondary device.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- printk("agpgart: Detected an Intel i810 DC100 Chipset.\n");
- agp_bridge.type = INTEL_I810;
- agp_bridge.intel_i810_setup(i810_dev);
- return;
-
- case PCI_DEVICE_ID_INTEL_810_E_0:
- i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_810_E_1,
- NULL);
- if (i810_dev == NULL) {
- printk("agpgart: Detected an Intel i810 E, but could not find the secondary device.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- printk("agpgart: Detected an Intel i810 E Chipset.\n");
- agp_bridge.type = INTEL_I810;
- agp_bridge.intel_i810_setup(i810_dev);
- return;
- default:
- break;
- }
- }
-#endif
- /* find capndx */
- pci_read_config_dword(dev, 0x04, &scratch);
-
- if (!(scratch & 0x00100000)) {
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- pci_read_config_byte(dev, 0x34, &cap_ptr);
-
- if (cap_ptr != 0x00) {
- do {
- pci_read_config_dword(dev, cap_ptr, &cap_id);
-
- if ((cap_id & 0xff) != 0x02)
- cap_ptr = (cap_id >> 8) & 0xff;
- }
- while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
- }
- if (cap_ptr == 0x00) {
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- agp_bridge.capndx = cap_ptr;
-
- /* Fill in the mode register */
- pci_read_config_dword(agp_bridge.dev,
- agp_bridge.capndx + 4,
- &agp_bridge.mode);
-
- switch (dev->vendor) {
-#ifdef AGP_BUILD_INTEL_GENERIC
- case PCI_VENDOR_ID_INTEL:
- switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_82443LX_0:
- agp_bridge.type = INTEL_LX;
- printk("agpgart: Detected an Intel 440LX Chipset.\n");
- agp_bridge.intel_generic_setup();
- return;
-
- case PCI_DEVICE_ID_INTEL_82443BX_0:
- agp_bridge.type = INTEL_BX;
- printk("agpgart: Detected an Intel 440BX Chipset.\n");
- agp_bridge.intel_generic_setup();
- return;
-
- case PCI_DEVICE_ID_INTEL_82443GX_0:
- agp_bridge.type = INTEL_GX;
- printk("agpgart: Detected an Intel 440GX Chipset.\n");
- agp_bridge.intel_generic_setup();
- return;
-
- default:
- if (agp_try_unsupported != 0) {
- printk("agpgart: Trying generic intel routines for device id: %x\n", dev->device);
- agp_bridge.type = INTEL_GENERIC;
- agp_bridge.intel_generic_setup();
- return;
- } else {
- printk("agpgart: Unsupported intel chipset, you might want to try agp_try_unsupported=1.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- }
- break;
-#endif
-
-#ifdef AGP_BUILD_VIA_GENERIC
- case PCI_VENDOR_ID_VIA:
- switch (dev->device) {
- case PCI_DEVICE_ID_VIA_82C597_0:
- agp_bridge.type = VIA_VP3;
- printk("agpgart: Detected a VIA VP3 Chipset.\n");
- agp_bridge.via_generic_setup();
- return;
-
- case PCI_DEVICE_ID_VIA_82C598_0:
- agp_bridge.type = VIA_MVP3;
- printk("agpgart: Detected a VIA MVP3 Chipset.\n");
- agp_bridge.via_generic_setup();
- return;
-
- case PCI_DEVICE_ID_VIA_82C691_0:
- agp_bridge.type = VIA_APOLLO_PRO;
- printk("agpgart: Detected a VIA Apollo Pro Chipset.\n");
- agp_bridge.via_generic_setup();
- return;
-
- default:
- if (agp_try_unsupported != 0) {
- printk("agpgart: Trying generic VIA routines for device id: %x\n", dev->device);
- agp_bridge.type = VIA_GENERIC;
- agp_bridge.via_generic_setup();
- return;
- } else {
- printk("agpgart: Unsupported VIA chipset, you might want to try agp_try_unsupported=1.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- }
- break;
-#endif
-
-#ifdef AGP_BUILD_SIS_GENERIC
- case PCI_VENDOR_ID_SI:
- switch (dev->device) {
- /* ToDo need to find out the specific devices supported */
- default:
- if (agp_try_unsupported != 0) {
- printk("agpgart: Trying generic SiS routines for device id: %x\n", dev->device);
- agp_bridge.type = SIS_GENERIC;
- agp_bridge.sis_generic_setup();
- return;
- } else {
- printk("agpgart: Unsupported SiS chipset, you might want to try agp_try_unsupported=1.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- }
- break;
-#endif
-
-#ifdef AGP_BUILD_AMD_IRONGATE
- case PCI_VENDOR_ID_AMD:
- switch (dev->device) {
- case PCI_DEVICE_ID_AMD_IRONGATE_0:
- agp_bridge.type = AMD_IRONGATE;
- printk("agpgart: Detected an AMD Irongate Chipset.\n");
- agp_bridge.amd_irongate_setup();
- return;
-
- default:
- if (agp_try_unsupported != 0) {
- printk("agpgart: Trying Amd irongate routines for device id: %x\n", dev->device);
- agp_bridge.type = AMD_GENERIC;
- agp_bridge.amd_irongate_setup();
- return;
- } else {
- printk("agpgart: Unsupported Amd chipset, you might want to try agp_try_unsupported=1.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- }
- break;
-#endif
-
-#ifdef AGP_BUILD_ALI_M1541
- case PCI_VENDOR_ID_AL:
- switch (dev->device) {
- case PCI_DEVICE_ID_AL_M1541_0:
- agp_bridge.type = ALI_M1541;
- printk("agpgart: Detected an ALi M1541 Chipset\n");
- agp_bridge.ali_generic_setup();
- return;
- default:
- if (agp_try_unsupported != 0) {
- printk("agpgart: Trying ALi generic routines for device id: %x\n", dev->device);
- agp_bridge.type = ALI_GENERIC;
- agp_bridge.ali_generic_setup();
- return;
- } else {
- printk("agpgart: Unsupported ALi chipset, you might want to type agp_try_unsupported=1.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
- }
- break;
-#endif
- default:
- agp_bridge.type = NOT_SUPPORTED;
- return;
- }
-}
-
-struct agp_max_table {
- int mem;
- int agp;
-};
-
-static struct agp_max_table agp_maxes_table[9] =
-{
- {0, 0},
- {32, 4},
- {64, 28},
- {128, 96},
- {256, 204},
- {512, 440},
- {1024, 942},
- {2048, 1920},
- {4096, 3932}
-};
-
-static int agp_find_max(void)
-{
- int memory;
- float t;
- int index;
- int result;
-
- memory = virt_to_phys(high_memory) / 0x100000;
- index = 0;
-
- while ((memory > agp_maxes_table[index].mem) &&
- (index < 8)) {
- index++;
- }
-
- t = (memory - agp_maxes_table[index - 1].mem) /
- (agp_maxes_table[index].mem - agp_maxes_table[index - 1].mem);
-
- result = agp_maxes_table[index - 1].agp +
- (t * (agp_maxes_table[index].agp - agp_maxes_table[index - 1].agp));
-
- printk("agpgart: Maximum main memory to use for agp memory: %dM\n", result);
- result = (result * 0x100000) / 4096;
- return result;
-}
-
-#define AGPGART_VERSION_MAJOR 0
-#define AGPGART_VERSION_MINOR 99
-
-static agp_version agp_current_version =
-{
- AGPGART_VERSION_MAJOR,
- AGPGART_VERSION_MINOR
-};
-
-static int agp_backend_initialize(void)
-{
- int size_value;
-
- memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
- agp_bridge.type = NOT_SUPPORTED;
-#ifdef AGP_BUILD_INTEL_GENERIC
- agp_bridge.intel_generic_setup = intel_generic_setup;
-#endif
-#ifdef AGP_BUILD_INTEL_I810
- agp_bridge.intel_i810_setup = intel_i810_setup;
-#endif
-#ifdef AGP_BUILD_VIA_GENERIC
- agp_bridge.via_generic_setup = via_generic_setup;
-#endif
-#ifdef AGP_BUILD_SIS_GENERIC
- agp_bridge.sis_generic_setup = sis_generic_setup;
-#endif
-#ifdef AGP_BUILD_AMD_IRONGATE
- agp_bridge.amd_irongate_setup = amd_irongate_setup;
-#endif
-#ifdef AGP_BUILD_ALI_M1541
- agp_bridge.ali_generic_setup = ali_generic_setup;
-#endif
- agp_bridge.max_memory_agp = agp_find_max();
- agp_bridge.version = &agp_current_version;
- agp_find_supported_device();
-
- if (agp_bridge.needs_scratch_page == TRUE) {
- agp_bridge.scratch_page = (unsigned long) agp_alloc_page();
-
- if ((void *) (agp_bridge.scratch_page) == NULL) {
- printk("agpgart: unable to get memory for scratch page.\n");
- return -ENOMEM;
- }
- agp_bridge.scratch_page = virt_to_phys((void *) agp_bridge.scratch_page);
- agp_bridge.scratch_page = agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
- }
- if (agp_bridge.type == NOT_SUPPORTED) {
- printk("agpgart: no supported devices found.\n");
- return -EINVAL;
- }
- size_value = agp_bridge.fetch_size();
-
- if (size_value == 0) {
- printk("agpgart: unable to detrimine aperture size.\n");
- return -EINVAL;
- }
- if (agp_bridge.create_gatt_table()) {
- printk("agpgart: unable to get memory for graphics translation table.\n");
- return -ENOMEM;
- }
- agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
-
- if (agp_bridge.key_list == NULL) {
- printk("agpgart: error allocating memory for key lists.\n");
- agp_bridge.free_gatt_table();
- return -ENOMEM;
- }
- memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
-
- if (agp_bridge.configure()) {
- printk("agpgart: error configuring host chipset.\n");
- agp_bridge.free_gatt_table();
- vfree(agp_bridge.key_list);
- return -EINVAL;
- }
- printk("agpgart: Physical address of the agp aperture: 0x%lx\n", agp_bridge.gart_bus_addr);
- printk("agpgart: Agp aperture is %dM in size.\n", size_value);
- return 0;
-}
-
-static void agp_backend_cleanup(void)
-{
- agp_bridge.cleanup();
- agp_bridge.free_gatt_table();
- vfree(agp_bridge.key_list);
-
- if (agp_bridge.needs_scratch_page == TRUE) {
- agp_bridge.scratch_page &= ~(0x00000fff);
- agp_destroy_page((void *) phys_to_virt(agp_bridge.scratch_page));
- }
-}
-
-extern int agp_frontend_initialize(void);
-extern void agp_frontend_cleanup(void);
-
-#ifdef MODULE
-int init_module(void)
-{
- int ret_val;
-
- printk("Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
- AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
- ret_val = agp_backend_initialize();
-
- if (ret_val != 0) {
- return ret_val;
- }
- ret_val = agp_frontend_initialize();
-
- if (ret_val != 0) {
- agp_backend_cleanup();
- return ret_val;
- }
- return 0;
-}
-
-void cleanup_module(void)
-{
- agp_frontend_cleanup();
- agp_backend_cleanup();
-}
-
-#endif
+++ /dev/null
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _AGP_BACKEND_PRIV_H
-#define _AGP_BACKEND_PRIV_H 1
-
-enum aper_size_type {
- U8_APER_SIZE,
- U16_APER_SIZE,
- U32_APER_SIZE,
- FIXED_APER_SIZE
-};
-
-typedef struct _gatt_mask {
- unsigned long mask;
- u32 type;
- /* totally device specific, for integrated chipsets that
- * might have different types of memory masks. For other
- * devices this will probably be ignored */
-} gatt_mask;
-
-typedef struct _aper_size_info_8 {
- int size;
- int num_entries;
- int page_order;
- u8 size_value;
-} aper_size_info_8;
-
-typedef struct _aper_size_info_16 {
- int size;
- int num_entries;
- int page_order;
- u16 size_value;
-} aper_size_info_16;
-
-typedef struct _aper_size_info_32 {
- int size;
- int num_entries;
- int page_order;
- u32 size_value;
-} aper_size_info_32;
-
-typedef struct _aper_size_info_fixed {
- int size;
- int num_entries;
- int page_order;
-} aper_size_info_fixed;
-
-struct agp_bridge_data {
- agp_version *version;
- void *aperture_sizes;
- void *previous_size;
- void *current_size;
- void *dev_private_data;
- struct pci_dev *dev;
- gatt_mask *masks;
- unsigned long *gatt_table;
- unsigned long *gatt_table_real;
- unsigned long scratch_page;
- unsigned long gart_bus_addr;
- unsigned long gatt_bus_addr;
- u32 mode;
- enum chipset_type type;
- enum aper_size_type size_type;
- u32 *key_list;
- atomic_t current_memory_agp;
- atomic_t agp_in_use;
- int max_memory_agp; /* in number of pages */
- int needs_scratch_page;
- int aperture_size_idx;
- int num_aperture_sizes;
- int num_of_masks;
- int capndx;
-
- /* Links to driver specific functions */
-
- int (*fetch_size) (void); /* returns the index into the size table */
- int (*configure) (void);
- void (*agp_enable) (u32);
- void (*cleanup) (void);
- void (*tlb_flush) (agp_memory *);
- unsigned long (*mask_memory) (unsigned long, int);
- void (*cache_flush) (void);
- int (*create_gatt_table) (void);
- int (*free_gatt_table) (void);
- int (*insert_memory) (agp_memory *, off_t, int);
- int (*remove_memory) (agp_memory *, off_t, int);
- agp_memory *(*alloc_by_type) (size_t, int);
- void (*free_by_type) (agp_memory *);
-
- /* Links to vendor/device specific setup functions */
-#ifdef AGP_BUILD_INTEL_GENERIC
- void (*intel_generic_setup) (void);
-#endif
-#ifdef AGP_BUILD_INTEL_I810
- void (*intel_i810_setup) (struct pci_dev *);
-#endif
-#ifdef AGP_BUILD_VIA_GENERIC
- void (*via_generic_setup) (void);
-#endif
-#ifdef AGP_BUILD_SIS_GENERIC
- void (*sis_generic_setup) (void);
-#endif
-#ifdef AGP_BUILD_AMD_IRONGATE
- void (*amd_irongate_setup) (void);
-#endif
-#ifdef AGP_BUILD_ALI_M1541
- void (*ali_generic_setup) (void);
-#endif
-};
-
-#define OUTREG32(mmap, addr, val) *(volatile u32 *)(mmap + (addr)) = (val)
-#define OUTREG16(mmap, addr, val) *(volatile u16 *)(mmap + (addr)) = (val)
-#define OUTREG8 (mmap, addr, val) *(volatile u8 *) (mmap + (addr)) = (val)
-
-#define INREG32(mmap, addr) *(volatile u32 *)(mmap + (addr))
-#define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr))
-#define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr))
-
-#ifndef min
-#define min(a,b) (((a)<(b))?(a):(b))
-#endif
-
-#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page)
-
-#ifndef PCI_DEVICE_ID_VIA_82C691_0
-#define PCI_DEVICE_ID_VIA_82C691_0 0x0691
-#endif
-#ifndef PCI_DEVICE_ID_VIA_82C691_1
-#define PCI_DEVICE_ID_VIA_82C691_1 0x8691
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_0
-#define PCI_DEVICE_ID_INTEL_810_0 0x7120
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_DC100_0
-#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_E_0
-#define PCI_DEVICE_ID_INTEL_810_E_0 0x7124
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_82443GX_0
-#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_1
-#define PCI_DEVICE_ID_INTEL_810_1 0x7121
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_DC100_1
-#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_810_E_1
-#define PCI_DEVICE_ID_INTEL_810_E_1 0x7125
-#endif
-#ifndef PCI_DEVICE_ID_INTEL_82443GX_1
-#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1
-#endif
-#ifndef PCI_DEVICE_ID_AMD_IRONGATE_0
-#define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006
-#endif
-#ifndef PCI_VENDOR_ID_AL
-#define PCI_VENDOR_ID_AL 0x10b9
-#endif
-#ifndef PCI_DEVICE_ID_AL_M1541_0
-#define PCI_DEVICE_ID_AL_M1541_0 0x1541
-#endif
-
-/* intel register */
-#define INTEL_APBASE 0x10
-#define INTEL_APSIZE 0xb4
-#define INTEL_ATTBASE 0xb8
-#define INTEL_AGPCTRL 0xb0
-#define INTEL_NBXCFG 0x50
-#define INTEL_ERRSTS 0x91
-
-/* intel i810 registers */
-#define I810_GMADDR 0x10
-#define I810_MMADDR 0x14
-#define I810_PTE_BASE 0x10000
-#define I810_PTE_MAIN_UNCACHED 0x00000000
-#define I810_PTE_LOCAL 0x00000002
-#define I810_PTE_VALID 0x00000001
-#define I810_SMRAM_MISCC 0x70
-#define I810_GFX_MEM_WIN_SIZE 0x00010000
-#define I810_GFX_MEM_WIN_32M 0x00010000
-#define I810_GMS 0x000000c0
-#define I810_GMS_DISABLE 0x00000000
-#define I810_PGETBL_CTL 0x2020
-#define I810_PGETBL_ENABLED 0x00000001
-#define I810_DRAM_CTL 0x3000
-#define I810_DRAM_ROW_0 0x00000001
-#define I810_DRAM_ROW_0_SDRAM 0x00000001
-
-/* VIA register */
-#define VIA_APBASE 0x10
-#define VIA_GARTCTRL 0x80
-#define VIA_APSIZE 0x84
-#define VIA_ATTBASE 0x88
-
-/* SiS registers */
-#define SIS_APBASE 0x10
-#define SIS_ATTBASE 0x90
-#define SIS_APSIZE 0x94
-#define SIS_TLBCNTRL 0x97
-#define SIS_TLBFLUSH 0x98
-
-/* AMD registers */
-#define AMD_APBASE 0x10
-#define AMD_MMBASE 0x14
-#define AMD_APSIZE 0xac
-#define AMD_MODECNTL 0xb0
-#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
-#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
-#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
-#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
-
-/* ALi registers */
-#define ALI_APBASE 0x10
-#define ALI_AGPCTRL 0xb8
-#define ALI_ATTBASE 0xbc
-#define ALI_TLBCTRL 0xc0
-
-#endif /* _AGP_BACKEND_PRIV_H */
--- /dev/null
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#define EXPORT_SYMTAB
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
+MODULE_PARM(agp_try_unsupported, "1i");
+EXPORT_SYMBOL(agp_free_memory);
+EXPORT_SYMBOL(agp_allocate_memory);
+EXPORT_SYMBOL(agp_copy_info);
+EXPORT_SYMBOL(agp_bind_memory);
+EXPORT_SYMBOL(agp_unbind_memory);
+EXPORT_SYMBOL(agp_enable);
+EXPORT_SYMBOL(agp_backend_acquire);
+EXPORT_SYMBOL(agp_backend_release);
+
+static void flush_cache(void);
+
+static struct agp_bridge_data agp_bridge;
+static int agp_try_unsupported __initdata = 0;
+#ifdef __SMP__
+static atomic_t cpus_waiting;
+
+static void ipi_handler(void *null)
+{
+ flush_cache();
+ atomic_dec(&cpus_waiting);
+ while (atomic_read(&cpus_waiting) > 0)
+ barrier();
+}
+
+static void smp_flush_cache(void)
+{
+ atomic_set(&cpus_waiting, smp_num_cpus - 1);
+ if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
+ panic("agpgart: timed out waiting for the other CPUs!\n");
+ flush_cache();
+ while (atomic_read(&cpus_waiting) > 0)
+ barrier();
+}
+#define global_cache_flush smp_flush_cache
+#else /* __SMP__ */
+#define global_cache_flush flush_cache
+#endif /* __SMP__ */
+
+static void flush_cache(void)
+{
+ asm volatile ("wbinvd":::"memory");
+}
+
+
+int agp_backend_acquire(void)
+{
+ atomic_inc(&agp_bridge.agp_in_use);
+
+ if (atomic_read(&agp_bridge.agp_in_use) != 1) {
+ atomic_dec(&agp_bridge.agp_in_use);
+ return -EBUSY;
+ }
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+void agp_backend_release(void)
+{
+ atomic_dec(&agp_bridge.agp_in_use);
+ MOD_DEC_USE_COUNT;
+}
+
+/*
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation
+ * and by default they reserve the allocated
+ * memory. They also handle incrementing the
+ * current_memory_agp value, which is checked
+ * against a maximum value.
+ */
+
+/* Allocate one page for the GATT/backing store.  The extra reference
+ * and PG_locked bit pin the page so the VM will not touch it while it
+ * is mapped by the graphics aperture.  Returns 0 on failure. */
+static unsigned long agp_alloc_page(void)
+{
+	void *pt;
+
+	pt = (void *) __get_free_page(GFP_KERNEL);
+	if (pt == NULL) {
+		return 0;
+	}
+	atomic_inc(&mem_map[MAP_NR(pt)].count);
+	set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+	atomic_inc(&agp_bridge.current_memory_agp);
+	return (unsigned long) pt;
+}
+
+/* Undo agp_alloc_page(): drop the extra reference, unlock, wake any
+ * waiters on the page, and free it.  NULL/0 is silently ignored. */
+static void agp_destroy_page(unsigned long page)
+{
+	void *pt = (void *) page;
+
+	if (pt == NULL) {
+		return;
+	}
+	atomic_dec(&mem_map[MAP_NR(pt)].count);
+	clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+	wake_up(&mem_map[MAP_NR(pt)].wait);
+	free_page((unsigned long) pt);
+	atomic_dec(&agp_bridge.current_memory_agp);
+}
+
+/* End Basic Page Allocation Routines */
+
+/*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the
+ * brunt of the work.
+ */
+
+
+/*
+ * Key management: every agp_memory block owns a small integer key
+ * allocated from the agp_bridge.key_list bitmap.
+ */
+
+/* Return a key to the bitmap; negative or out-of-range keys are ignored. */
+static void agp_free_key(int key)
+{
+	if (key >= 0 && key < MAXKEY)
+		clear_bit(key, agp_bridge.key_list);
+}
+
+/* Allocate the lowest free key, or -1 when the bitmap is exhausted. */
+static int agp_get_key(void)
+{
+	int bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
+
+	if (bit >= MAXKEY)
+		return -1;
+	set_bit(bit, agp_bridge.key_list);
+	return bit;
+}
+
+/* Allocate and initialize an agp_memory descriptor with room for
+ * scratch_pages worth of GATT entries in its vmalloc'd entry array.
+ * Returns NULL on any allocation failure (all partial state undone). */
+static agp_memory *agp_create_memory(int scratch_pages)
+{
+	agp_memory *new;
+
+	new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
+
+	if (new == NULL) {
+		return NULL;
+	}
+	memset(new, 0, sizeof(agp_memory));
+	new->key = agp_get_key();
+
+	if (new->key < 0) {
+		kfree(new);
+		return NULL;
+	}
+	new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+	if (new->memory == NULL) {
+		agp_free_key(new->key);
+		kfree(new);
+		return NULL;
+	}
+	new->num_scratch_pages = scratch_pages;
+	return new;
+}
+
+/* Tear down an agp_memory block: unbind it if still bound, hand
+ * chipset-specific types to free_by_type, otherwise release every
+ * backing page, the key, the entry array and the descriptor itself. */
+void agp_free_memory(agp_memory * curr)
+{
+	int i;
+
+	if (curr == NULL) {
+		return;
+	}
+	if (curr->is_bound == TRUE) {
+		agp_unbind_memory(curr);
+	}
+	if (curr->type != 0) {
+		agp_bridge.free_by_type(curr);
+		MOD_DEC_USE_COUNT;
+		return;
+	}
+	if (curr->page_count != 0) {
+		for (i = 0; i < curr->page_count; i++) {
+			/* strip the chipset mask bits to recover the
+			 * physical address stored by mask_memory() */
+			curr->memory[i] &= ~(0x00000fff);
+			agp_destroy_page((unsigned long)
+					 phys_to_virt(curr->memory[i]));
+		}
+	}
+	agp_free_key(curr->key);
+	vfree(curr->memory);
+	kfree(curr);
+	MOD_DEC_USE_COUNT;
+}
+
+/* GATT entries that fit in one page of the descriptor's entry array. */
+#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+
+/* Allocate page_count pages of AGP memory of the given type.  Type 0
+ * is handled generically here; non-zero types are delegated to the
+ * chipset's alloc_by_type hook.  Each page's masked physical address
+ * is stored in new->memory[], ready for insertion into the GATT.
+ * Returns NULL if the global AGP memory limit would be exceeded or
+ * any allocation fails. */
+agp_memory *agp_allocate_memory(size_t page_count, u32 type)
+{
+	int scratch_pages;
+	agp_memory *new;
+	int i;
+
+	if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
+	    agp_bridge.max_memory_agp) {
+		return NULL;
+	}
+	if (type != 0) {
+		new = agp_bridge.alloc_by_type(page_count, type);
+		return new;
+	}
+	/* round up so the entry array can hold page_count entries */
+	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+	new = agp_create_memory(scratch_pages);
+
+	if (new == NULL) {
+		return NULL;
+	}
+	for (i = 0; i < page_count; i++) {
+		new->memory[i] = agp_alloc_page();
+
+		if (new->memory[i] == 0) {
+			/* Free this structure */
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->memory[i] =
+		    agp_bridge.mask_memory(
+				   virt_to_phys((void *) new->memory[i]),
+						  type);
+		new->page_count++;
+	}
+
+	MOD_INC_USE_COUNT;
+	return new;
+}
+
+/* End - Generic routines for handling agp_memory structures */
+
+/* Return the current aperture size (in MB) by decoding the
+ * size_type-specific structure hanging off agp_bridge.current_size. */
+static int agp_return_size(void)
+{
+	int current_size;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+
+	switch (agp_bridge.size_type) {
+	case U8_APER_SIZE:
+		current_size = A_SIZE_8(temp)->size;
+		break;
+	case U16_APER_SIZE:
+		current_size = A_SIZE_16(temp)->size;
+		break;
+	case U32_APER_SIZE:
+		current_size = A_SIZE_32(temp)->size;
+		break;
+	case FIXED_APER_SIZE:
+		current_size = A_SIZE_FIX(temp)->size;
+		break;
+	default:
+		current_size = 0;
+		break;
+	}
+
+	return current_size;
+}
+
+/* Routine to copy over information structure */
+
+/* Fill *info with a snapshot of the bridge state for in-kernel users.
+ * info must point to caller-owned storage; it is fully overwritten. */
+void agp_copy_info(agp_kern_info * info)
+{
+	memset(info, 0, sizeof(agp_kern_info));
+	info->version.major = agp_bridge.version->major;
+	info->version.minor = agp_bridge.version->minor;
+	info->device = agp_bridge.dev;
+	info->chipset = agp_bridge.type;
+	info->mode = agp_bridge.mode;
+	info->aper_base = agp_bridge.gart_bus_addr;
+	info->aper_size = agp_return_size();
+	info->max_memory = agp_bridge.max_memory_agp;
+	info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
+}
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+/* Insert an unbound agp_memory block into the GATT starting at page
+ * offset pg_start.  Caches are flushed once per block before its first
+ * insertion (is_flushed latches this).  Returns 0 or a negative errno
+ * from the chipset insert hook; -EINVAL for NULL/already-bound input. */
+int agp_bind_memory(agp_memory * curr, off_t pg_start)
+{
+	int ret_val;
+
+	if ((curr == NULL) || (curr->is_bound == TRUE)) {
+		return -EINVAL;
+	}
+	if (curr->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		curr->is_flushed = TRUE;
+	}
+	ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
+
+	if (ret_val != 0) {
+		return ret_val;
+	}
+	curr->is_bound = TRUE;
+	curr->pg_start = pg_start;
+	return 0;
+}
+
+/* Remove a currently bound agp_memory block from the GATT.
+ * Returns 0 on success, -EINVAL for NULL or unbound input, or the
+ * error from the chipset remove hook. */
+int agp_unbind_memory(agp_memory * curr)
+{
+	int rc;
+
+	if ((curr == NULL) || (curr->is_bound != TRUE))
+		return -EINVAL;
+
+	rc = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
+	if (rc != 0)
+		return rc;
+
+	curr->is_bound = FALSE;
+	curr->pg_start = 0;
+	return 0;
+}
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+/*
+ * Driver routines - start
+ * Currently this module supports the
+ * i810, 440lx, 440bx, 440gx, via vp3, via mvp3,
+ * amd irongate, ALi M1541 and generic support for the
+ * SiS chipsets.
+ */
+
+/* Generic Agp routines - Start */
+
+/* Negotiate and enable AGP transfers between the bridge (target) and
+ * every AGP-capable VGA device (master) on the bus.  mode is the
+ * caller's requested AGPCMD value; the final command written is the
+ * intersection of bridge, device and caller capabilities. */
+static void agp_generic_agp_enable(u32 mode)
+{
+	struct pci_dev *device = NULL;
+	u32 command, scratch, cap_id;
+	u8 cap_ptr;
+
+	/* start from the bridge's AGP status register (capndx + 4) */
+	pci_read_config_dword(agp_bridge.dev,
+			      agp_bridge.capndx + 4,
+			      &command);
+
+	/*
+	 * PASS1: go through all devices that claim to be
+	 *        AGP devices and collect their data.
+	 */
+
+	while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+					device)) != NULL) {
+		/* skip devices without a capability list
+		 * (status "capabilities" bit = bit 20 of dword 0x04) */
+		pci_read_config_dword(device, 0x04, &scratch);
+
+		if (!(scratch & 0x00100000))
+			continue;
+
+		/* walk the capability list (pointer at 0x34) looking
+		 * for capability ID 0x02 (AGP) */
+		pci_read_config_byte(device, 0x34, &cap_ptr);
+
+		if (cap_ptr != 0x00) {
+			do {
+				pci_read_config_dword(device,
+						      cap_ptr, &cap_id);
+
+				if ((cap_id & 0xff) != 0x02)
+					cap_ptr = (cap_id >> 8) & 0xff;
+			}
+			while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+		}
+		if (cap_ptr != 0x00) {
+			/*
+			 * Ok, here we have an AGP device. Disable impossible
+			 * settings, and adjust the readqueue to the minimum.
+			 */
+
+			pci_read_config_dword(device, cap_ptr + 4, &scratch);
+
+			/* adjust RQ depth (bits 31:24) to the smallest
+			 * of bridge, device and requested mode */
+			command =
+			    ((command & ~0xff000000) |
+			     min((mode & 0xff000000),
+				 min((command & 0xff000000),
+				     (scratch & 0xff000000))));
+
+			/* disable SBA if it's not supported */
+			if (!((command & 0x00000200) &&
+			      (scratch & 0x00000200) &&
+			      (mode & 0x00000200)))
+				command &= ~0x00000200;
+
+			/* disable FW if it's not supported */
+			if (!((command & 0x00000010) &&
+			      (scratch & 0x00000010) &&
+			      (mode & 0x00000010)))
+				command &= ~0x00000010;
+
+			/* drop the 4X rate bit unless all three agree */
+			if (!((command & 4) &&
+			      (scratch & 4) &&
+			      (mode & 4)))
+				command &= ~0x00000004;
+
+			/* drop the 2X rate bit unless all three agree */
+			if (!((command & 2) &&
+			      (scratch & 2) &&
+			      (mode & 2)))
+				command &= ~0x00000002;
+
+			/* drop the 1X rate bit unless all three agree */
+			if (!((command & 1) &&
+			      (scratch & 1) &&
+			      (mode & 1)))
+				command &= ~0x00000001;
+		}
+	}
+	/*
+	 * PASS2: Figure out the 4X/2X/1X setting and enable the
+	 *        target (our motherboard chipset).
+	 */
+
+	/* keep only the highest common rate bit */
+	if (command & 4) {
+		command &= ~3;	/* 4X */
+	}
+	if (command & 2) {
+		command &= ~5;	/* 2X */
+	}
+	if (command & 1) {
+		command &= ~6;	/* 1X */
+	}
+	/* set AGP_ENABLE and write the bridge's command register */
+	command |= 0x00000100;
+
+	pci_write_config_dword(agp_bridge.dev,
+			       agp_bridge.capndx + 8,
+			       command);
+
+	/*
+	 * PASS3: Go through all AGP devices and update the
+	 *        command registers with the negotiated value.
+	 */
+
+	while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+					device)) != NULL) {
+		pci_read_config_dword(device, 0x04, &scratch);
+
+		if (!(scratch & 0x00100000))
+			continue;
+
+		pci_read_config_byte(device, 0x34, &cap_ptr);
+
+		if (cap_ptr != 0x00) {
+			do {
+				pci_read_config_dword(device,
+						      cap_ptr, &cap_id);
+
+				if ((cap_id & 0xff) != 0x02)
+					cap_ptr = (cap_id >> 8) & 0xff;
+			}
+			while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+		}
+		if (cap_ptr != 0x00)
+			pci_write_config_dword(device, cap_ptr + 8, command);
+	}
+}
+
+/* Allocate and map the GATT.  For variable-size apertures the code
+ * walks down the aperture size table until a physically contiguous
+ * allocation succeeds; fixed-size apertures allocate once.  Pages are
+ * marked PG_reserved, the table is remapped uncached, and every entry
+ * is pointed at the scratch page.  Returns 0 or -ENOMEM. */
+static int agp_generic_create_gatt_table(void)
+{
+	char *table;
+	char *table_end;
+	int size;
+	int page_order;
+	int num_entries;
+	int i;
+	void *temp;
+
+	table = NULL;
+	i = agp_bridge.aperture_size_idx;
+	temp = agp_bridge.current_size;
+	size = page_order = num_entries = 0;
+
+	if (agp_bridge.size_type != FIXED_APER_SIZE) {
+		/* retry with progressively smaller apertures until the
+		 * contiguous page allocation succeeds */
+		do {
+			switch (agp_bridge.size_type) {
+			case U8_APER_SIZE:
+				size = A_SIZE_8(temp)->size;
+				page_order =
+				    A_SIZE_8(temp)->page_order;
+				num_entries =
+				    A_SIZE_8(temp)->num_entries;
+				break;
+			case U16_APER_SIZE:
+				size = A_SIZE_16(temp)->size;
+				page_order = A_SIZE_16(temp)->page_order;
+				num_entries = A_SIZE_16(temp)->num_entries;
+				break;
+			case U32_APER_SIZE:
+				size = A_SIZE_32(temp)->size;
+				page_order = A_SIZE_32(temp)->page_order;
+				num_entries = A_SIZE_32(temp)->num_entries;
+				break;
+				/* This case will never really happen. */
+			case FIXED_APER_SIZE:
+			default:
+				size = page_order = num_entries = 0;
+				break;
+			}
+
+			table = (char *) __get_free_pages(GFP_KERNEL,
+							  page_order);
+
+			if (table == NULL) {
+				/* step to the next smaller aperture */
+				i++;
+				switch (agp_bridge.size_type) {
+				case U8_APER_SIZE:
+					agp_bridge.current_size = A_IDX8();
+					break;
+				case U16_APER_SIZE:
+					agp_bridge.current_size = A_IDX16();
+					break;
+				case U32_APER_SIZE:
+					agp_bridge.current_size = A_IDX32();
+					break;
+					/* This case will never really
+					 * happen.
+					 */
+				case FIXED_APER_SIZE:
+				default:
+					agp_bridge.current_size =
+					    agp_bridge.current_size;
+					break;
+				}
+			} else {
+				agp_bridge.aperture_size_idx = i;
+			}
+		} while ((table == NULL) &&
+			 (i < agp_bridge.num_aperture_sizes));
+	} else {
+		size = ((aper_size_info_fixed *) temp)->size;
+		page_order = ((aper_size_info_fixed *) temp)->page_order;
+		num_entries = ((aper_size_info_fixed *) temp)->num_entries;
+		table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+	}
+
+	if (table == NULL) {
+		return -ENOMEM;
+	}
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	/* reserve the GATT pages so the VM leaves them alone */
+	for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
+		set_bit(PG_reserved, &mem_map[i].flags);
+	}
+
+	agp_bridge.gatt_table_real = (unsigned long *) table;
+	CACHE_FLUSH();
+	/* map the table uncached so GATT writes reach memory where the
+	 * chipset's table walker can see them */
+	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
+					(PAGE_SIZE * (1 << page_order)));
+	CACHE_FLUSH();
+
+	if (agp_bridge.gatt_table == NULL) {
+		for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
+			clear_bit(PG_reserved, &mem_map[i].flags);
+		}
+
+		free_pages((unsigned long) table, page_order);
+
+		return -ENOMEM;
+	}
+	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
+
+	/* point every entry at the scratch page */
+	for (i = 0; i < num_entries; i++) {
+		agp_bridge.gatt_table[i] =
+		    (unsigned long) agp_bridge.scratch_page;
+	}
+
+	return 0;
+}
+
+/* Unmap and free the GATT allocated by agp_generic_create_gatt_table():
+ * drop the uncached mapping, clear PG_reserved on the pages, and
+ * release them.  Always returns 0. */
+static int agp_generic_free_gatt_table(void)
+{
+	int i;
+	int page_order;
+	char *table, *table_end;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+
+	switch (agp_bridge.size_type) {
+	case U8_APER_SIZE:
+		page_order = A_SIZE_8(temp)->page_order;
+		break;
+	case U16_APER_SIZE:
+		page_order = A_SIZE_16(temp)->page_order;
+		break;
+	case U32_APER_SIZE:
+		page_order = A_SIZE_32(temp)->page_order;
+		break;
+	case FIXED_APER_SIZE:
+		page_order = A_SIZE_FIX(temp)->page_order;
+		break;
+	default:
+		page_order = 0;
+		break;
+	}
+
+	/* Do not worry about freeing memory, because if this is
+	 * called, then all agp memory is deallocated and removed
+	 * from the table.
+	 */
+
+	iounmap(agp_bridge.gatt_table);
+	table = (char *) agp_bridge.gatt_table_real;
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
+		clear_bit(PG_reserved, &mem_map[i].flags);
+	}
+
+	free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
+	return 0;
+}
+
+/* Write mem's masked page addresses into the GATT at pg_start.
+ * Rejects non-zero types (-EINVAL), out-of-range requests (-EINVAL)
+ * and ranges containing non-empty entries (-EBUSY); flushes caches
+ * once per block and the chipset TLB afterwards. */
+static int agp_generic_insert_memory(agp_memory * mem,
+				     off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+
+	switch (agp_bridge.size_type) {
+	case U8_APER_SIZE:
+		num_entries = A_SIZE_8(temp)->num_entries;
+		break;
+	case U16_APER_SIZE:
+		num_entries = A_SIZE_16(temp)->num_entries;
+		break;
+	case U32_APER_SIZE:
+		num_entries = A_SIZE_32(temp)->num_entries;
+		break;
+	case FIXED_APER_SIZE:
+		num_entries = A_SIZE_FIX(temp)->num_entries;
+		break;
+	default:
+		num_entries = 0;
+		break;
+	}
+
+	if (type != 0 || mem->type != 0) {
+		/* The generic routines know nothing of memory types */
+		return -EINVAL;
+	}
+	if ((pg_start + mem->page_count) > num_entries) {
+		return -EINVAL;
+	}
+	j = pg_start;
+
+	/* verify the whole target range is free before touching it */
+	while (j < (pg_start + mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		agp_bridge.gatt_table[j] = mem->memory[i];
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+/* Reset mem's GATT range back to the scratch page and flush the
+ * chipset TLB.  Only type 0 is handled here. */
+static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
+				     int type)
+{
+	int i;
+
+	if (type != 0 || mem->type != 0) {
+		/* The generic routines know nothing of memory types */
+		return -EINVAL;
+	}
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		agp_bridge.gatt_table[i] =
+		    (unsigned long) agp_bridge.scratch_page;
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+/* Generic chipsets support no special memory types: always fail. */
+static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+	return NULL;
+}
+
+/* Release a typed agp_memory descriptor: entry array, key, struct.
+ * (Backing pages, if any, are the type implementation's concern.) */
+static void agp_generic_free_by_type(agp_memory * curr)
+{
+	if (curr->memory != NULL) {
+		vfree(curr->memory);
+	}
+	agp_free_key(curr->key);
+	kfree(curr);
+}
+
+/* Public wrapper: dispatch to the active chipset's enable routine. */
+void agp_enable(u32 mode)
+{
+	agp_bridge.agp_enable(mode);
+}
+
+/* End - Generic Agp routines */
+
+#ifdef CONFIG_AGP_I810
+/* i810 aperture sizes: {MB, num_entries, page_order}. */
+static aper_size_info_fixed intel_i810_sizes[] =
+{
+	{64, 16384, 4},
+	/* The 32M mode still requires a 64k gatt */
+	{32, 8192, 4}
+};
+
+/* Non-zero memory type for the i810's on-chip dcache pages. */
+#define AGP_DCACHE_MEMORY 1
+
+/* PTE masks: index 0 = normal memory, index 1 = dcache (local) memory. */
+static gatt_mask intel_i810_masks[] =
+{
+	{I810_PTE_VALID, 0},
+	{(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY}
+};
+
+/* Per-bridge i810 state: the graphics device, its MMIO registers and
+ * how many dcache entries were detected. */
+static struct _intel_i810_private {
+	struct pci_dev *i810_dev;	/* device one */
+	volatile u8 *registers;
+	int num_dcache_entries;
+} intel_i810_private;
+
+/* Determine the i810 aperture size (64MB or 32MB) from the SMRAM/
+ * graphics control word and select the matching intel_i810_sizes[]
+ * entry.  Returns the size in MB, or 0 when on-board graphics is
+ * disabled. */
+static int intel_i810_fetch_size(void)
+{
+	u32 smram_miscc;
+	aper_size_info_fixed *values;
+
+	pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
+	values = A_SIZE_FIX(agp_bridge.aperture_sizes);
+
+	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+		printk("agpgart: i810 is disabled\n");
+		return 0;
+	}
+	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+		agp_bridge.previous_size =
+		    agp_bridge.current_size = (void *) (values + 1);
+		agp_bridge.aperture_size_idx = 1;
+		return values[1].size;
+	} else {
+		agp_bridge.previous_size =
+		    agp_bridge.current_size = (void *) (values);
+		agp_bridge.aperture_size_idx = 0;
+		return values[0].size;
+	}
+	/* NOTE(review): a trailing "return 0;" was removed here — both
+	 * branches of the if/else above return, so it was unreachable. */
+}
+
+/* Program the i810: map its MMIO registers, detect dedicated video
+ * RAM (dcache), record the aperture bus address, enable the GATT and
+ * optionally point every PTE at the scratch page.  Always returns 0. */
+static int intel_i810_configure(void)
+{
+	aper_size_info_fixed *current_size;
+	u32 temp;
+	int i;
+
+	current_size = A_SIZE_FIX(agp_bridge.current_size);
+
+	pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
+	temp &= 0xfff80000;
+
+	/* NOTE(review): the ioremap() result is not checked for NULL
+	 * before use below — TODO confirm whether failure is possible
+	 * here in practice. */
+	intel_i810_private.registers =
+	    (volatile u8 *) ioremap(temp, 128 * 4096);
+
+	if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
+	     & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+		/* This will need to be dynamically assigned */
+		printk(KERN_INFO
+		       "agpgart: detected 4MB dedicated video ram.\n");
+		intel_i810_private.num_dcache_entries = 1024;
+	}
+	pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
+		 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
+	CACHE_FLUSH();
+
+	if (agp_bridge.needs_scratch_page == TRUE) {
+		for (i = 0; i < current_size->num_entries; i++) {
+			OUTREG32(intel_i810_private.registers,
+				 I810_PTE_BASE + (i * 4),
+				 agp_bridge.scratch_page);
+		}
+	}
+	return 0;
+}
+
+/* Disable the GATT and drop the MMIO mapping. */
+static void intel_i810_cleanup(void)
+{
+	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
+	iounmap((void *) intel_i810_private.registers);
+}
+
+/* The i810 needs no explicit TLB flush after PTE updates. */
+static void intel_i810_tlbflush(agp_memory * mem)
+{
+	return;
+}
+
+/* No AGPCMD-style negotiation exists on the i810: nothing to do. */
+static void intel_i810_agp_enable(u32 mode)
+{
+	return;
+}
+
+/* Write mem's pages into the i810 PTE window via MMIO.  Type 0 memory
+ * writes the masked addresses; AGP_DCACHE_MEMORY maps the on-chip
+ * dcache pages (i*4096 with LOCAL|VALID bits) instead.  Returns 0,
+ * -EINVAL on bad type/range, -EBUSY on occupied entries. */
+static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
+				     int type)
+{
+	int i, j, num_entries;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+
+	if ((pg_start + mem->page_count) > num_entries) {
+		return -EINVAL;
+	}
+	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+			return -EBUSY;
+		}
+	}
+
+	if (type != 0 || mem->type != 0) {
+		if ((type == AGP_DCACHE_MEMORY) &&
+		    (mem->type == AGP_DCACHE_MEMORY)) {
+			/* special insert */
+
+			for (i = pg_start;
+			     i < (pg_start + mem->page_count); i++) {
+				OUTREG32(intel_i810_private.registers,
+					 I810_PTE_BASE + (i * 4),
+					 (i * 4096) | I810_PTE_LOCAL |
+					 I810_PTE_VALID);
+			}
+
+			agp_bridge.tlb_flush(mem);
+			return 0;
+		}
+		return -EINVAL;
+	}
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		OUTREG32(intel_i810_private.registers,
+			 I810_PTE_BASE + (j * 4), mem->memory[i]);
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+/* Point mem's PTE range back at the scratch page. */
+static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
+				     int type)
+{
+	int i;
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		OUTREG32(intel_i810_private.registers,
+			 I810_PTE_BASE + (i * 4),
+			 agp_bridge.scratch_page);
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+/* Allocate a typed agp_memory block.  Only AGP_DCACHE_MEMORY is
+ * supported, and only for exactly num_dcache_entries pages; dcache
+ * needs no backing pages, so the entry array is freed again.
+ * NOTE(review): new->memory is vfree'd but left non-NULL (dangling);
+ * intel_i810_free_by_type never touches it, so this appears safe —
+ * TODO confirm no other path dereferences it for dcache blocks. */
+static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+	agp_memory *new;
+
+	if (type == AGP_DCACHE_MEMORY) {
+		if (pg_count != intel_i810_private.num_dcache_entries) {
+			return NULL;
+		}
+		new = agp_create_memory(1);
+
+		if (new == NULL) {
+			return NULL;
+		}
+		new->type = AGP_DCACHE_MEMORY;
+		new->page_count = pg_count;
+		new->num_scratch_pages = 0;
+		vfree(new->memory);
+		return new;
+	}
+	return NULL;
+}
+
+/* Free a dcache descriptor: key and struct only (no backing pages,
+ * and the entry array was already freed at allocation time). */
+static void intel_i810_free_by_type(agp_memory * curr)
+{
+	agp_free_key(curr->key);
+	kfree(curr);
+}
+
+/* OR the PTE flag bits for the given memory type into addr. */
+static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
+{
+	/* Type checking must be done elsewhere */
+	return addr | agp_bridge.masks[type].mask;
+}
+
+/* Install the i810 hooks and tables into the global agp_bridge. */
+static void intel_i810_setup(struct pci_dev *i810_dev)
+{
+	intel_i810_private.i810_dev = i810_dev;
+
+	agp_bridge.masks = intel_i810_masks;
+	agp_bridge.num_of_masks = 2;
+	agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
+	agp_bridge.size_type = FIXED_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 2;
+	agp_bridge.dev_private_data = (void *) &intel_i810_private;
+	agp_bridge.needs_scratch_page = TRUE;
+	agp_bridge.configure = intel_i810_configure;
+	agp_bridge.fetch_size = intel_i810_fetch_size;
+	agp_bridge.cleanup = intel_i810_cleanup;
+	agp_bridge.tlb_flush = intel_i810_tlbflush;
+	agp_bridge.mask_memory = intel_i810_mask_memory;
+	agp_bridge.agp_enable = intel_i810_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = intel_i810_insert_entries;
+	agp_bridge.remove_memory = intel_i810_remove_entries;
+	agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
+	agp_bridge.free_by_type = intel_i810_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_INTEL
+
+/* Match the APSIZE register value against intel_generic_sizes[] and
+ * record the current aperture.  Returns the size in MB, or 0 if no
+ * table entry matches. */
+static int intel_fetch_size(void)
+{
+	int i;
+	u16 temp;
+	aper_size_info_16 *values;
+
+	pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
+	values = A_SIZE_16(agp_bridge.aperture_sizes);
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+/* Flush the chipset's GART TLB by toggling the AGPCTRL register. */
+static void intel_tlbflush(agp_memory * mem)
+{
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+}
+
+/* Disable GART translation (NBXCFG bit 9) and restore the previous
+ * aperture size. */
+static void intel_cleanup(void)
+{
+	u16 temp;
+	aper_size_info_16 *previous_size;
+
+	previous_size = A_SIZE_16(agp_bridge.previous_size);
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+/* Program an Intel 440-class bridge: aperture size and base, GATT
+ * base (ATTBASE), AGP control, NBXCFG enable bit, and clear any
+ * latched error status.  Always returns 0. */
+static int intel_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	aper_size_info_16 *current_size;
+
+	current_size = A_SIZE_16(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value);
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+
+	/* paccfg/nbxcfg: clear bit 10, set bit 9 (GART enable) */
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
+			      (temp2 & ~(1 << 10)) | (1 << 9));
+	/* clear any possible error conditions */
+	pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
+	return 0;
+}
+
+/* OR in the PTE valid/flag bits (single mask; type is ignored). */
+static unsigned long intel_mask_memory(unsigned long addr, int type)
+{
+	/* Memory type is ignored */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+
+/* Setup function */
+static gatt_mask intel_generic_masks[] =
+{
+	{0x00000017, 0}
+};
+
+/* Aperture table: {MB, num_entries, page_order, APSIZE value}. */
+static aper_size_info_16 intel_generic_sizes[7] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 48},
+	{32, 8192, 3, 56},
+	{16, 4096, 2, 60},
+	{8, 2048, 1, 62},
+	{4, 1024, 0, 63}
+};
+
+/* Install the generic Intel hooks and tables into agp_bridge. */
+static void intel_generic_setup(void)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
+	agp_bridge.size_type = U16_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_configure;
+	agp_bridge.fetch_size = intel_fetch_size;
+	agp_bridge.cleanup = intel_cleanup;
+	agp_bridge.tlb_flush = intel_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_VIA
+
+/* Match the VIA APSIZE byte against via_generic_sizes[] and record
+ * the current aperture.  Returns the size in MB, or 0 on no match. */
+static int via_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	aper_size_info_8 *values;
+
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+	pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+/* Program a VIA bridge: aperture size/base, GART control, and the
+ * GATT base with its low enable bits.  Always returns 0. */
+static int via_configure(void)
+{
+	u32 temp;
+	aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
+			      current_size->size_value);
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* GART control register */
+	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
+
+	/* attbase - aperture GATT base */
+	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
+			    (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
+	return 0;
+}
+
+/* Clear the GATT base and restore the previous aperture size. */
+static void via_cleanup(void)
+{
+	aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
+	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
+			      previous_size->size_value);
+}
+
+/* Flush the GART TLB by pulsing bit 7 of the GART control register. */
+static void via_tlbflush(agp_memory * mem)
+{
+	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
+	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
+}
+
+/* OR in the PTE flag bits (single mask; type is ignored). */
+static unsigned long via_mask_memory(unsigned long addr, int type)
+{
+	/* Memory type is ignored */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+/* Aperture table: {MB, num_entries, page_order, APSIZE value}. */
+static aper_size_info_8 via_generic_sizes[7] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 128},
+	{64, 16384, 4, 192},
+	{32, 8192, 3, 224},
+	{16, 4096, 2, 240},
+	{8, 2048, 1, 248},
+	{4, 1024, 0, 252}
+};
+
+static gatt_mask via_generic_masks[] =
+{
+	{0x00000000, 0}
+};
+
+/* Install the VIA hooks and tables into agp_bridge. */
+static void via_generic_setup(void)
+{
+	agp_bridge.masks = via_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) via_generic_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = via_configure;
+	agp_bridge.fetch_size = via_fetch_size;
+	agp_bridge.cleanup = via_cleanup;
+	agp_bridge.tlb_flush = via_tlbflush;
+	agp_bridge.mask_memory = via_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_SIS
+
+/* Match the SiS APSIZE byte against sis_generic_sizes[], also
+ * accepting a match with the low two bits masked off (the chipset
+ * may report them differently).  Returns the size in MB or 0. */
+static int sis_fetch_size(void)
+{
+	u8 temp_size;
+	int i;
+	aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if ((temp_size == values[i].size_value) ||
+		    ((temp_size & ~(0x03)) ==
+		     (values[i].size_value & ~(0x03)))) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+
+/* Flush the GART TLB via the dedicated TLB flush register. */
+static void sis_tlbflush(agp_memory * mem)
+{
+	pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
+}
+
+/* Program a SiS bridge: TLB control, aperture base, GATT base and
+ * aperture size.  Always returns 0. */
+static int sis_configure(void)
+{
+	u32 temp;
+	aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+	pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
+	pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
+			       agp_bridge.gatt_bus_addr);
+	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
+			      current_size->size_value);
+	return 0;
+}
+
+/* Restore the previous aperture size with the enable bits cleared. */
+static void sis_cleanup(void)
+{
+	aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
+			      (previous_size->size_value & ~(0x03)));
+}
+
+/* OR in the PTE flag bits (single mask; type is ignored). */
+static unsigned long sis_mask_memory(unsigned long addr, int type)
+{
+	/* Memory type is ignored */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+/* Aperture table: {MB, num_entries, page_order, APSIZE value}. */
+static aper_size_info_8 sis_generic_sizes[7] =
+{
+	{256, 65536, 6, 99},
+	{128, 32768, 5, 83},
+	{64, 16384, 4, 67},
+	{32, 8192, 3, 51},
+	{16, 4096, 2, 35},
+	{8, 2048, 1, 19},
+	{4, 1024, 0, 3}
+};
+
+static gatt_mask sis_generic_masks[] =
+{
+	{0x00000000, 0}
+};
+
+/* Install the SiS hooks and tables into agp_bridge. */
+static void sis_generic_setup(void)
+{
+	agp_bridge.masks = sis_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = sis_configure;
+	agp_bridge.fetch_size = sis_fetch_size;
+	agp_bridge.cleanup = sis_cleanup;
+	agp_bridge.tlb_flush = sis_tlbflush;
+	agp_bridge.mask_memory = sis_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_AMD
+
+/* AMD Irongate state: MMIO register window. */
+static struct _amd_irongate_private {
+	volatile u8 *registers;
+} amd_irongate_private;
+
+/* Match APSIZE bits 3:1 against amd_irongate_sizes[] and record the
+ * current aperture.  Returns the size in MB, or 0 on no match. */
+static int amd_irongate_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	aper_size_info_32 *values;
+
+	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+	temp = (temp & 0x0000000e);
+	values = A_SIZE_32(agp_bridge.aperture_sizes);
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+/* Program the Irongate: map its MMIO registers, set the GATT base,
+ * sync mode, GART enable bit, aperture size (with enable bit 0),
+ * flush the TLB and record the aperture bus address.  Returns 0. */
+static int amd_irongate_configure(void)
+{
+	aper_size_info_32 *current_size;
+	u32 temp;
+	u16 enable_reg;
+
+	current_size = A_SIZE_32(agp_bridge.current_size);
+
+	/* Get the memory mapped registers */
+	pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
+	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);
+
+	/* Write out the address of the gatt table */
+	OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
+		 agp_bridge.gatt_bus_addr);
+
+	/* Write the Sync register */
+	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);
+
+	/* Write the enable register */
+	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
+	enable_reg = (enable_reg | 0x0004);
+	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
+
+	/* Write out the size register */
+	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+	temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+		| 0x00000001);
+	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
+
+	/* Flush the tlb */
+	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
+
+	/* Get the address for the gart region */
+	pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
+	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	agp_bridge.gart_bus_addr = temp;
+	return 0;
+}
+
+/* Undo amd_irongate_configure(): clear the GART enable bit, restore
+ * the previous aperture size (clearing enable bit 0 too), and drop
+ * the MMIO mapping. */
+static void amd_irongate_cleanup(void)
+{
+	aper_size_info_32 *previous_size;
+	u32 temp;
+	u16 enable_reg;
+
+	previous_size = A_SIZE_32(agp_bridge.previous_size);
+
+	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
+	enable_reg = (enable_reg & ~(0x0004));
+	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
+
+	/* Write back the previous size and disable gart translation */
+	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
+	iounmap((void *) amd_irongate_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT, and flushing them individually. However
+ * currently it just flushes the whole table. Which is probably
+ * more efficient, since agp_memory blocks can be a large number of
+ * entries.
+ */
+
+/* Flush the Irongate's GART TLB; the agp_memory argument is unused. */
+static void amd_irongate_tlbflush(agp_memory * temp)
+{
+ OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
+}
+
+/*
+ * Merge the chipset's GATT "valid" mask into a physical address.
+ * The type argument is ignored: masks[0] is always used because the
+ * Irongate supports only memory type 0.
+ */
+static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
+{
+ /* Only type 0 is supported by the irongate */
+
+ return addr | agp_bridge.masks[0].mask;
+}
+
+/* Supported aperture sizes, largest first:
+ * {size in MB, number of GATT entries, page order, APSIZE size_value}. */
+static aper_size_info_32 amd_irongate_sizes[7] =
+{
+ {2048, 524288, 9, 0x0000000c},
+ {1024, 262144, 8, 0x0000000a},
+ {512, 131072, 7, 0x00000008},
+ {256, 65536, 6, 0x00000006},
+ {128, 32768, 5, 0x00000004},
+ {64, 16384, 4, 0x00000002},
+ {32, 8192, 3, 0x00000000}
+};
+
+/* Single GATT entry mask (valid bit set); only memory type 0 exists. */
+static gatt_mask amd_irongate_masks[] =
+{
+ {0x00000001, 0}
+};
+
+/*
+ * Populate agp_bridge with the Irongate-specific tables and method
+ * pointers.  Generic agpgart routines are used for GATT management,
+ * insert/remove and allocation; only configure/fetch/cleanup/flush
+ * and memory masking are chipset-specific.
+ */
+static void amd_irongate_setup(void)
+{
+ agp_bridge.masks = amd_irongate_masks;
+ agp_bridge.num_of_masks = 1;
+ agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
+ agp_bridge.size_type = U32_APER_SIZE;
+ agp_bridge.num_aperture_sizes = 7;
+ agp_bridge.dev_private_data = (void *) &amd_irongate_private;
+ agp_bridge.needs_scratch_page = FALSE;
+ agp_bridge.configure = amd_irongate_configure;
+ agp_bridge.fetch_size = amd_irongate_fetch_size;
+ agp_bridge.cleanup = amd_irongate_cleanup;
+ agp_bridge.tlb_flush = amd_irongate_tlbflush;
+ agp_bridge.mask_memory = amd_irongate_mask_memory;
+ agp_bridge.agp_enable = agp_generic_agp_enable;
+ agp_bridge.cache_flush = global_cache_flush;
+ agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+ agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+ agp_bridge.insert_memory = agp_generic_insert_memory;
+ agp_bridge.remove_memory = agp_generic_remove_memory;
+ agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+ agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+#ifdef CONFIG_AGP_ALI
+
+/*
+ * Read the aperture size code from the ALi ATTBASE register and look
+ * it up in the ali_generic_sizes table.  On a match, record the
+ * current/previous size pointers and the table index, and return the
+ * aperture size in MB; return 0 if the register held no known value.
+ */
+static int ali_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ aper_size_info_32 *values;
+
+ pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
+ /* The size code lives in the low nibble of ATTBASE. */
+ temp &= ~(0xfffffff0);
+ values = A_SIZE_32(agp_bridge.aperture_sizes);
+
+ for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge.previous_size =
+ agp_bridge.current_size = (void *) (values + i);
+ agp_bridge.aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Flush the ALi GART TLB by pulsing the flush bit in TLBCTRL:
+ * write 0x90 (flush asserted) then 0x10 (back to normal enable),
+ * preserving the upper bits of the register.  The agp_memory
+ * argument is unused; the whole table is flushed.
+ */
+static void ali_tlbflush(agp_memory * mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+ pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+ ((temp & 0xffffff00) | 0x00000090));
+ pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+ ((temp & 0xffffff00) | 0x00000010));
+}
+
+/*
+ * Undo ali_configure(): flush the TLB one last time (0x90 into
+ * TLBCTRL) and write the previously saved size code back into
+ * ATTBASE, which also drops the GATT base address.
+ */
+static void ali_cleanup(void)
+{
+ aper_size_info_32 *previous_size;
+ u32 temp;
+
+ previous_size = A_SIZE_32(agp_bridge.previous_size);
+
+ pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+ pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+ ((temp & 0xffffff00) | 0x00000090));
+ pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
+ previous_size->size_value);
+}
+
+/*
+ * Program the ALi chipset for AGP use: write the GATT bus address
+ * combined with the aperture size code into ATTBASE, enable GART
+ * translation via TLBCTRL, and record the aperture's bus address in
+ * agp_bridge.gart_bus_addr.  Always returns 0.
+ */
+static int ali_configure(void)
+{
+ u32 temp;
+ aper_size_info_32 *current_size;
+
+ current_size = A_SIZE_32(agp_bridge.current_size);
+
+ /* aperture size and gatt addr */
+ pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
+ agp_bridge.gatt_bus_addr | current_size->size_value);
+
+ /* tlb control */
+ pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+ pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+ ((temp & 0xffffff00) | 0x00000010));
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
+ agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ return 0;
+}
+
+/*
+ * Merge the chipset GATT entry mask into a physical address.  The
+ * ALi mask is 0, so this is effectively the identity; the type
+ * argument is ignored.
+ */
+static unsigned long ali_mask_memory(unsigned long addr, int type)
+{
+ /* Memory type is ignored */
+
+ return addr | agp_bridge.masks[0].mask;
+}
+
+
+/* Setup function */
+/* Single GATT mask for ALi: no extra bits are OR'd into entries. */
+static gatt_mask ali_generic_masks[] =
+{
+ {0x00000000, 0}
+};
+
+/* Supported aperture sizes, largest first:
+ * {size in MB, number of GATT entries, page order, ATTBASE size code}. */
+static aper_size_info_32 ali_generic_sizes[7] =
+{
+ {256, 65536, 6, 10},
+ {128, 32768, 5, 9},
+ {64, 16384, 4, 8},
+ {32, 8192, 3, 7},
+ {16, 4096, 2, 6},
+ {8, 2048, 1, 4},
+ {4, 1024, 0, 3}
+};
+
+/*
+ * Populate agp_bridge with the ALi-specific tables and method
+ * pointers; generic agpgart routines handle GATT management and
+ * memory insert/remove/allocation.
+ */
+static void ali_generic_setup(void)
+{
+ agp_bridge.masks = ali_generic_masks;
+ agp_bridge.num_of_masks = 1;
+ agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
+ agp_bridge.size_type = U32_APER_SIZE;
+ agp_bridge.num_aperture_sizes = 7;
+ agp_bridge.dev_private_data = NULL;
+ agp_bridge.needs_scratch_page = FALSE;
+ agp_bridge.configure = ali_configure;
+ agp_bridge.fetch_size = ali_fetch_size;
+ agp_bridge.cleanup = ali_cleanup;
+ agp_bridge.tlb_flush = ali_tlbflush;
+ agp_bridge.mask_memory = ali_mask_memory;
+ agp_bridge.agp_enable = agp_generic_agp_enable;
+ agp_bridge.cache_flush = global_cache_flush;
+ agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+ agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+ agp_bridge.insert_memory = agp_generic_insert_memory;
+ agp_bridge.remove_memory = agp_generic_remove_memory;
+ agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+ agp_bridge.free_by_type = agp_generic_free_by_type;
+}
+
+#endif
+
+
+
+/* Supported Device Scanning routine */
+
+/*
+ * Probe for a supported AGP bridge.  Finds the first host bridge,
+ * special-cases the Intel i810 family (which has no AGP capability
+ * in the usual sense and needs its secondary device), then walks the
+ * PCI capability list for an AGP capability (id 0x02) and dispatches
+ * to the per-vendor setup routine.  On any failure agp_bridge.type
+ * is left as NOT_SUPPORTED.  Unknown devices of a known vendor are
+ * attempted only when the agp_try_unsupported module option is set.
+ */
+static void agp_find_supported_device(void)
+{
+ struct pci_dev *dev = NULL;
+ u8 cap_ptr = 0x00;
+ u32 cap_id, scratch;
+
+ if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) {
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ agp_bridge.dev = dev;
+
+ /* Need to test for I810 here */
+#ifdef CONFIG_AGP_I810
+ if (dev->vendor == PCI_VENDOR_ID_INTEL) {
+ struct pci_dev *i810_dev;
+
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_810_0:
+ i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_810_1,
+ NULL);
+ if (i810_dev == NULL) {
+ printk("agpgart: Detected an Intel i810,"
+ " but could not find the secondary"
+ " device.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ printk(KERN_INFO "agpgart: Detected an Intel "
+ "i810 Chipset.\n");
+ agp_bridge.type = INTEL_I810;
+ agp_bridge.intel_i810_setup(i810_dev);
+ return;
+
+ case PCI_DEVICE_ID_INTEL_810_DC100_0:
+ i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_810_DC100_1,
+ NULL);
+ if (i810_dev == NULL) {
+ printk("agpgart: Detected an Intel i810 "
+ "DC100, but could not find the "
+ "secondary device.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ printk(KERN_INFO "agpgart: Detected an Intel i810 "
+ "DC100 Chipset.\n");
+ agp_bridge.type = INTEL_I810;
+ agp_bridge.intel_i810_setup(i810_dev);
+ return;
+
+ case PCI_DEVICE_ID_INTEL_810_E_0:
+ i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_810_E_1,
+ NULL);
+ if (i810_dev == NULL) {
+ printk("agpgart: Detected an Intel i810 E"
+ ", but could not find the secondary "
+ "device.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ printk(KERN_INFO "agpgart: Detected an Intel i810 E "
+ "Chipset.\n");
+ agp_bridge.type = INTEL_I810;
+ agp_bridge.intel_i810_setup(i810_dev);
+ return;
+ default:
+ break;
+ }
+ }
+#endif
+ /* find capndx */
+ /* PCI status (offset 0x04 dword, upper half): bit 20 is the
+ * capability-list flag.  Without it there can be no AGP cap. */
+ pci_read_config_dword(dev, 0x04, &scratch);
+
+ if (!(scratch & 0x00100000)) {
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ pci_read_config_byte(dev, 0x34, &cap_ptr);
+
+ /* Walk the capability list until the AGP id (0x02) or the end. */
+ if (cap_ptr != 0x00) {
+ do {
+ pci_read_config_dword(dev, cap_ptr, &cap_id);
+
+ if ((cap_id & 0xff) != 0x02)
+ cap_ptr = (cap_id >> 8) & 0xff;
+ }
+ while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+ }
+ if (cap_ptr == 0x00) {
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ agp_bridge.capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(agp_bridge.dev,
+ agp_bridge.capndx + 4,
+ &agp_bridge.mode);
+
+ switch (dev->vendor) {
+#ifdef CONFIG_AGP_INTEL
+ case PCI_VENDOR_ID_INTEL:
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_82443LX_0:
+ agp_bridge.type = INTEL_LX;
+ printk(KERN_INFO "agpgart: Detected an Intel 440LX"
+ " Chipset.\n");
+ agp_bridge.intel_generic_setup();
+ return;
+
+ case PCI_DEVICE_ID_INTEL_82443BX_0:
+ agp_bridge.type = INTEL_BX;
+ printk(KERN_INFO "agpgart: Detected an Intel 440BX "
+ "Chipset.\n");
+ agp_bridge.intel_generic_setup();
+ return;
+
+ case PCI_DEVICE_ID_INTEL_82443GX_0:
+ agp_bridge.type = INTEL_GX;
+ printk(KERN_INFO "agpgart: Detected an Intel 440GX "
+ "Chipset.\n");
+ agp_bridge.intel_generic_setup();
+ return;
+
+ default:
+ if (agp_try_unsupported != 0) {
+ printk("agpgart: Trying generic intel "
+ "routines for device id: %x\n",
+ dev->device);
+ agp_bridge.type = INTEL_GENERIC;
+ agp_bridge.intel_generic_setup();
+ return;
+ } else {
+ printk("agpgart: Unsupported intel chipset,"
+ " you might want to try "
+ "agp_try_unsupported=1.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ }
+ break;
+#endif
+
+#ifdef CONFIG_AGP_VIA
+ case PCI_VENDOR_ID_VIA:
+ switch (dev->device) {
+ case PCI_DEVICE_ID_VIA_82C597_0:
+ agp_bridge.type = VIA_VP3;
+ printk(KERN_INFO "agpgart: Detected a VIA VP3 "
+ "Chipset.\n");
+ agp_bridge.via_generic_setup();
+ return;
+
+ case PCI_DEVICE_ID_VIA_82C598_0:
+ agp_bridge.type = VIA_MVP3;
+ printk(KERN_INFO "agpgart: Detected a VIA MVP3 "
+ "Chipset.\n");
+ agp_bridge.via_generic_setup();
+ return;
+
+ case PCI_DEVICE_ID_VIA_82C691_0:
+ agp_bridge.type = VIA_APOLLO_PRO;
+ printk(KERN_INFO "agpgart: Detected a VIA Apollo "
+ "Pro Chipset.\n");
+ agp_bridge.via_generic_setup();
+ return;
+
+ default:
+ if (agp_try_unsupported != 0) {
+ printk("agpgart: Trying generic VIA routines"
+ " for device id: %x\n", dev->device);
+ agp_bridge.type = VIA_GENERIC;
+ agp_bridge.via_generic_setup();
+ return;
+ } else {
+ printk("agpgart: Unsupported VIA chipset,"
+ " you might want to try "
+ "agp_try_unsupported=1.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ }
+ break;
+#endif
+
+#ifdef CONFIG_AGP_SIS
+ case PCI_VENDOR_ID_SI:
+ switch (dev->device) {
+ /* ToDo need to find out the
+ * specific devices supported.
+ */
+ default:
+ if (agp_try_unsupported != 0) {
+ printk("agpgart: Trying generic SiS routines"
+ " for device id: %x\n", dev->device);
+ agp_bridge.type = SIS_GENERIC;
+ agp_bridge.sis_generic_setup();
+ return;
+ } else {
+ printk("agpgart: Unsupported SiS chipset, "
+ "you might want to try "
+ "agp_try_unsupported=1.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ }
+ break;
+#endif
+
+#ifdef CONFIG_AGP_AMD
+ case PCI_VENDOR_ID_AMD:
+ switch (dev->device) {
+ case PCI_DEVICE_ID_AMD_IRONGATE_0:
+ agp_bridge.type = AMD_IRONGATE;
+ printk(KERN_INFO "agpgart: Detected an AMD Irongate"
+ " Chipset.\n");
+ agp_bridge.amd_irongate_setup();
+ return;
+
+ default:
+ if (agp_try_unsupported != 0) {
+ printk("agpgart: Trying Amd irongate"
+ " routines for device id: %x\n",
+ dev->device);
+ agp_bridge.type = AMD_GENERIC;
+ agp_bridge.amd_irongate_setup();
+ return;
+ } else {
+ printk("agpgart: Unsupported Amd chipset,"
+ " you might want to try "
+ "agp_try_unsupported=1.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ }
+ break;
+#endif
+
+#ifdef CONFIG_AGP_ALI
+ case PCI_VENDOR_ID_AL:
+ switch (dev->device) {
+ case PCI_DEVICE_ID_AL_M1541_0:
+ agp_bridge.type = ALI_M1541;
+ printk(KERN_INFO "agpgart: Detected an ALi M1541"
+ " Chipset\n");
+ agp_bridge.ali_generic_setup();
+ return;
+ default:
+ if (agp_try_unsupported != 0) {
+ printk("agpgart: Trying ALi generic routines"
+ " for device id: %x\n", dev->device);
+ agp_bridge.type = ALI_GENERIC;
+ agp_bridge.ali_generic_setup();
+ return;
+ } else {
+ /* NOTE(review): "want to type" below is
+ * presumably a typo for "want to try" --
+ * runtime string left untouched here. */
+ printk("agpgart: Unsupported ALi chipset,"
+ " you might want to type "
+ "agp_try_unsupported=1.\n");
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+ }
+ break;
+#endif
+ default:
+ agp_bridge.type = NOT_SUPPORTED;
+ return;
+ }
+}
+
+/* One row of the memory-to-AGP interpolation table below. */
+struct agp_max_table {
+ int mem; /* total system memory, in MB */
+ int agp; /* recommended max AGP memory, in MB */
+};
+
+/* Piecewise-linear table used by agp_find_max() to pick how much of
+ * main memory may be donated to AGP for a given system size. */
+static struct agp_max_table maxes_table[9] =
+{
+ {0, 0},
+ {32, 4},
+ {64, 28},
+ {128, 96},
+ {256, 204},
+ {512, 440},
+ {1024, 942},
+ {2048, 1920},
+ {4096, 3932}
+};
+
+/*
+ * Compute the maximum amount of main memory (in 4K pages) that may
+ * be used for AGP, by linearly interpolating in maxes_table for the
+ * detected memory size.  The chosen limit (in MB) is also printed.
+ */
+static int agp_find_max(void)
+{
+ int memory;
+ float t;
+ int index;
+ int result;
+
+ /* System memory in MB, derived from the top of low memory. */
+ memory = virt_to_phys(high_memory) / 0x100000;
+ index = 0;
+
+ while ((memory > maxes_table[index].mem) &&
+ (index < 8)) {
+ index++;
+ }
+
+ /* NOTE(review): both operands of this division are ints, so the
+ * fraction truncates to 0 or 1 before the float assignment --
+ * interpolation is coarser than apparently intended.  TODO confirm
+ * whether a float cast was meant here. */
+ t = (memory - maxes_table[index - 1].mem) /
+ (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+ result = maxes_table[index - 1].agp +
+ (t * (maxes_table[index].agp - maxes_table[index - 1].agp));
+
+ printk(KERN_INFO "agpgart: Maximum main memory to use "
+ "for agp memory: %dM\n", result);
+ /* Convert MB to a count of 4K pages. */
+ result = (result * 0x100000) / 4096;
+ return result;
+}
+
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 99
+
+static agp_version agp_current_version =
+{
+ AGPGART_VERSION_MAJOR,
+ AGPGART_VERSION_MINOR
+};
+
+/*
+ * Initialize the agpgart backend: zero the bridge descriptor, wire
+ * in the configured per-chipset setup routines, probe for a
+ * supported device, allocate the scratch page when the chipset needs
+ * one, build the GATT and the key list, and configure the chipset.
+ * Returns 0 on success, -ENOMEM/-EINVAL on failure (all partially
+ * allocated resources are released on the error paths).
+ */
+static int agp_backend_initialize(void)
+{
+ int size_value;
+
+ memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
+ agp_bridge.type = NOT_SUPPORTED;
+#ifdef CONFIG_AGP_INTEL
+ agp_bridge.intel_generic_setup = intel_generic_setup;
+#endif
+#ifdef CONFIG_AGP_I810
+ agp_bridge.intel_i810_setup = intel_i810_setup;
+#endif
+#ifdef CONFIG_AGP_VIA
+ agp_bridge.via_generic_setup = via_generic_setup;
+#endif
+#ifdef CONFIG_AGP_SIS
+ agp_bridge.sis_generic_setup = sis_generic_setup;
+#endif
+#ifdef CONFIG_AGP_AMD
+ agp_bridge.amd_irongate_setup = amd_irongate_setup;
+#endif
+#ifdef CONFIG_AGP_ALI
+ agp_bridge.ali_generic_setup = ali_generic_setup;
+#endif
+ agp_bridge.max_memory_agp = agp_find_max();
+ agp_bridge.version = &agp_current_version;
+ agp_find_supported_device();
+
+ /* The scratch page backs unbound GATT entries on chipsets that
+ * require every entry to point at valid memory. */
+ if (agp_bridge.needs_scratch_page == TRUE) {
+ agp_bridge.scratch_page = agp_alloc_page();
+
+ if (agp_bridge.scratch_page == 0) {
+ printk("agpgart: unable to get memory for "
+ "scratch page.\n");
+ return -ENOMEM;
+ }
+ agp_bridge.scratch_page =
+ virt_to_phys((void *) agp_bridge.scratch_page);
+ agp_bridge.scratch_page =
+ agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
+ }
+ if (agp_bridge.type == NOT_SUPPORTED) {
+ printk("agpgart: no supported devices found.\n");
+ return -EINVAL;
+ }
+ size_value = agp_bridge.fetch_size();
+
+ if (size_value == 0) {
+ /* NOTE(review): "detrimine" is a typo for "determine" in
+ * this runtime string; left untouched here. */
+ printk("agpgart: unable to detrimine aperture size.\n");
+ return -EINVAL;
+ }
+ if (agp_bridge.create_gatt_table()) {
+ printk("agpgart: unable to get memory for graphics "
+ "translation table.\n");
+ return -ENOMEM;
+ }
+ agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
+
+ if (agp_bridge.key_list == NULL) {
+ printk("agpgart: error allocating memory for key lists.\n");
+ agp_bridge.free_gatt_table();
+ return -ENOMEM;
+ }
+ memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
+
+ if (agp_bridge.configure()) {
+ printk("agpgart: error configuring host chipset.\n");
+ agp_bridge.free_gatt_table();
+ vfree(agp_bridge.key_list);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "agpgart: Physical address of the agp aperture:"
+ " 0x%lx\n", agp_bridge.gart_bus_addr);
+ printk(KERN_INFO "agpgart: Agp aperture is %dM in size.\n",
+ size_value);
+ return 0;
+}
+
+/*
+ * Tear down everything agp_backend_initialize() set up: chipset
+ * cleanup, GATT, key list, and (if one was allocated) the scratch
+ * page.  The mask bits applied by mask_memory() are stripped from
+ * the scratch page address before it is converted back and freed.
+ */
+static void agp_backend_cleanup(void)
+{
+ agp_bridge.cleanup();
+ agp_bridge.free_gatt_table();
+ vfree(agp_bridge.key_list);
+
+ if (agp_bridge.needs_scratch_page == TRUE) {
+ agp_bridge.scratch_page &= ~(0x00000fff);
+ agp_destroy_page((unsigned long)
+ phys_to_virt(agp_bridge.scratch_page));
+ }
+}
+
+extern int agp_frontend_initialize(void);
+extern void agp_frontend_cleanup(void);
+
+/*
+ * Module entry point: bring up the backend first, then the frontend
+ * (character-device interface).  If the frontend fails, the backend
+ * is torn down again so no resources leak.
+ */
+static int __init agp_init(void)
+{
+ int ret_val;
+
+ printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
+ AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+ ret_val = agp_backend_initialize();
+
+ if (ret_val != 0) {
+ return ret_val;
+ }
+ ret_val = agp_frontend_initialize();
+
+ if (ret_val != 0) {
+ agp_backend_cleanup();
+ return ret_val;
+ }
+ return 0;
+}
+
+/* Module exit: tear down in reverse order of agp_init(). */
+static void __exit agp_cleanup(void)
+{
+ agp_frontend_cleanup();
+ agp_backend_cleanup();
+}
+
+module_init(agp_init);
+module_exit(agp_cleanup);
/*
* AGPGART module frontend version 0.99
* Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
*/
#define __NO_VERSION__
-#include <linux/config.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/agpgart.h>
#include <asm/system.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/mman.h>
agp_segment *user_seg;
int i;
- seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), GFP_KERNEL);
+ seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count),
+ GFP_KERNEL);
if (seg == NULL) {
kfree(region->seg_list);
return -ENOMEM;
priv = agp_find_private(temp->pid);
if (priv != NULL) {
- clear_bit(AGP_FF_IS_VALID, &(priv->access_flags));
- clear_bit(AGP_FF_IS_CLIENT, &(priv->access_flags));
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
}
client = client->next;
kfree(temp);
priv = agp_find_private(clients->pid);
if (priv != NULL) {
- set_bit(AGP_FF_IS_VALID, &(priv->access_flags));
- set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags));
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
}
clients = clients->next;
}
{
agp_client *clients;
- clear_bit(AGP_FF_IS_VALID, &(controller_priv->access_flags));
+ clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
clients = controller->clients;
while (clients != NULL) {
priv = agp_find_private(clients->pid);
if (priv != NULL) {
- clear_bit(AGP_FF_IS_VALID, &(priv->access_flags));
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
}
clients = clients->next;
}
AGP_UNLOCK();
return -EPERM;
}
- if (!(test_bit(AGP_FF_IS_VALID, &(priv->access_flags)))) {
+ if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) {
AGP_UNLOCK();
return -EPERM;
}
current_size = current_size * 0x100000;
offset = vma->vm_pgoff << PAGE_SHIFT;
- if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) {
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
if ((size + offset) > current_size) {
AGP_UNLOCK();
return -EINVAL;
AGP_UNLOCK();
return -EPERM;
}
- if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) {
+ if (!agp_find_seg_in_client(client, offset,
+ size, vma->vm_page_prot)) {
AGP_UNLOCK();
return -EINVAL;
}
- if (remap_page_range(vma->vm_start, (kerninfo.aper_base + offset),
+ if (remap_page_range(vma->vm_start,
+ (kerninfo.aper_base + offset),
size, vma->vm_page_prot)) {
AGP_UNLOCK();
return -EAGAIN;
AGP_UNLOCK();
return 0;
}
- if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) {
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
if (size != current_size) {
AGP_UNLOCK();
return -EINVAL;
AGP_LOCK();
- if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) {
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
agp_controller *controller;
controller = agp_find_controller_by_pid(priv->my_pid);
if (controller != NULL) {
if (controller == agp_fe.current_controller) {
- agp_controller_release_current(controller, priv);
+ agp_controller_release_current(controller,
+ priv);
}
agp_remove_controller(controller);
}
}
- if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) {
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
agp_remove_client(priv->my_pid);
}
agp_remove_file_private(priv);
return -ENOMEM;
}
memset(priv, 0, sizeof(agp_file_private));
- set_bit(AGP_FF_ALLOW_CLIENT, &(priv->access_flags));
+ set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
priv->my_pid = current->pid;
if ((current->uid == 0) || (current->suid == 0)) {
/* Root priv, can be controller */
- set_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags));
+ set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
}
client = agp_find_client_by_pid(current->pid);
if (client != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags));
- set_bit(AGP_FF_IS_VALID, &(priv->access_flags));
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
}
file->private_data = (void *) priv;
agp_insert_file_private(priv);
userinfo.version.major = kerninfo.version.major;
userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16);
+ userinfo.bridge_id = kerninfo.device->vendor |
+ (kerninfo.device->device << 16);
userinfo.agp_mode = kerninfo.mode;
userinfo.aper_base = kerninfo.aper_base;
userinfo.aper_size = kerninfo.aper_size;
static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg)
{
agp_controller *controller;
- if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)))) {
+ if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) {
return -EPERM;
}
if (agp_fe.current_controller != NULL) {
agp_controller_make_current(controller);
}
- set_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags));
- set_bit(AGP_FF_IS_VALID, &(priv->access_flags));
+ set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
return 0;
}
client_priv = agp_find_private(reserve.pid);
if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags));
- set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags));
+ set_bit(AGP_FF_IS_CLIENT,
+ &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID,
+ &client_priv->access_flags);
}
if (client == NULL) {
/* client is already removed */
} else {
agp_segment *segment;
- segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), GFP_KERNEL);
+ segment = kmalloc((sizeof(agp_segment) * reserve.seg_count),
+ GFP_KERNEL);
if (segment == NULL) {
return -ENOMEM;
}
- if (copy_from_user(segment, (void *) reserve.seg_list, GFP_KERNEL)) {
+ if (copy_from_user(segment, (void *) reserve.seg_list,
+ GFP_KERNEL)) {
kfree(segment);
return -EFAULT;
}
client_priv = agp_find_private(reserve.pid);
if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags));
- set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags));
+ set_bit(AGP_FF_IS_CLIENT,
+ &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID,
+ &client_priv->access_flags);
}
return agp_create_segment(client, &reserve);
} else {
return -EBUSY;
}
if (cmd != AGPIOC_ACQUIRE) {
- if (!(test_bit(AGP_FF_IS_CONTROLLER, &(curr_priv->access_flags)))) {
+ if (!(test_bit(AGP_FF_IS_CONTROLLER,
+ &curr_priv->access_flags))) {
return -EPERM;
}
- /* Use the original pid of the controller, in case it's threaded */
+ /* Use the original pid of the controller,
+ * in case it's threaded */
if (agp_fe.current_controller->pid != curr_priv->my_pid) {
return -EBUSY;
#endif
/* Generic cmpxchg added in 2.3.x */
-#if CPU != 386
#ifndef __HAVE_ARCH_CMPXCHG
/* Include this here so that driver can be
used with older kernels. */
#define cmpxchg(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
(unsigned long)(n),sizeof(*(ptr))))
-#endif
-#else
- /* Compiling for a 386 proper... */
-#error DRI not supported on Intel 80386
#endif
/* Macros to make printk easier */
/* Misc. support (init.c) */
extern int drm_flags;
extern void drm_parse_options(char *s);
+extern int drm_cpu_valid(void);
/* Device support (fops.c) */
drm_file_t *priv;
if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */
+ if (!drm_cpu_valid()) return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
}
}
+/* drm_cpu_valid returns non-zero if the DRI will run on this CPU, and 0
+ * otherwise. */
+
+int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+ /* DRM's lock code needs cmpxchg, which the 80386 lacks. */
+ if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */
+#endif
+ return 1;
+}
/*
* linux/drivers/char/synclink.c
*
- * ==FILEDATE 19990901==
+ * ==FILEDATE 19991207==
*
* Device driver for Microgate SyncLink ISA and PCI
* high speed multiprotocol serial adapters.
#endif
static char *driver_name = "SyncLink serial driver";
-static char *driver_version = "1.14";
+static char *driver_version = "1.15";
static struct tty_driver serial_driver, callout_driver;
static int serial_refcount;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset(info);
- spin_unlock_irqrestore(&info->irq_spinlock,flags);
/* Verify the reset state of some registers. */
}
}
- spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset(info);
- spin_unlock_irqrestore(&info->irq_spinlock,flags);
/*
* Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
EndTime=100;
while( EndTime-- && !info->irq_occurred ) {
set_current_state(TASK_INTERRUPTIBLE);
}
}
+ spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset( info );
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
/* restore current port options */
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int);
static void i2ob_end_request(struct request *);
-static void i2ob_request(void);
+static void i2ob_request(request_queue_t * q);
/*
* Dump messages.
printk(KERN_INFO "\n");
}
-
/*
* Get a message
*/
{
struct i2o_controller *c = dev->controller;
int tid = dev->tid;
- u32 *msg;
- u32 *mptr;
+ unsigned long msg;
+ unsigned long mptr;
u64 offset;
struct request *req = ireq->req;
struct buffer_head *bh = req->bh;
/*
* Build the message based on the request.
*/
- __raw_writel(i2ob_context|(unit<<8), &msg[2]);
- __raw_writel(ireq->num, &msg[3]);
- __raw_writel(req->nr_sectors << 9, &msg[5]);
+ __raw_writel(i2ob_context|(unit<<8), msg+8);
+ __raw_writel(ireq->num, msg+12);
+ __raw_writel(req->nr_sectors << 9, msg+20);
/* This can be optimised later - just want to be sure its right for
starters */
offset = ((u64)(req->sector+base)) << 9;
- __raw_writel( offset & 0xFFFFFFFF, &msg[6]);
- __raw_writel(offset>>32, &msg[7]);
+ __raw_writel( offset & 0xFFFFFFFF, msg+24);
+ __raw_writel(offset>>32, msg+28);
mptr=msg+8;
if(req->cmd == READ)
{
- __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, &msg[1]);
+ __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4);
/* We don't yet do cache/readahead and other magic */
- __raw_writel(1<<16, &msg[4]);
+ __raw_writel(1<<16, msg+16);
while(bh!=NULL)
{
/*
* sucky to read.
*/
if(bh->b_reqnext)
- __raw_writel(0x10000000|(bh->b_size), mptr++);
+ __raw_writel(0x10000000|(bh->b_size), mptr);
else
- __raw_writel(0xD0000000|(bh->b_size), mptr++);
+ __raw_writel(0xD0000000|(bh->b_size), mptr);
- __raw_writel(virt_to_bus(bh->b_data), mptr++);
+ __raw_writel(virt_to_bus(bh->b_data), mptr+4);
+ mptr+=8;
count -= bh->b_size;
bh = bh->b_reqnext;
}
}
else if(req->cmd == WRITE)
{
- __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, &msg[1]);
- __raw_writel(1<<16, &msg[4]);
+ __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4);
+ __raw_writel(1<<16, msg+16);
while(bh!=NULL)
{
if(bh->b_reqnext)
- __raw_writel(0x14000000|(bh->b_size), mptr++);
+ __raw_writel(0x14000000|(bh->b_size), mptr);
else
- __raw_writel(0xD4000000|(bh->b_size), mptr++);
+ __raw_writel(0xD4000000|(bh->b_size), mptr);
count -= bh->b_size;
- __raw_writel(virt_to_bus(bh->b_data), mptr++);
+ __raw_writel(virt_to_bus(bh->b_data), mptr+4);
+ mptr+=8;
bh = bh->b_reqnext;
}
}
- __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8, &msg[0]);
+ __raw_writel(I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8, msg);
if(req->current_nr_sectors > 8)
printk("Gathered sectors %ld.\n",
if(count != 0)
{
- printk("Request count botched by %d.\n", count);
- msg[5] -= count;
+ printk(KERN_ERR "Request count botched by %d.\n", count);
}
i2o_post_message(c,m);
*/
atomic_dec(&queue_depth);
- i2ob_request();
+ i2ob_request(NULL);
spin_unlock_irqrestore(&io_request_lock, flags);
}
/*
* Restart any requests.
*/
- i2ob_request();
+ i2ob_request(NULL);
/*
* Free the lock.
* we use it.
*/
-static void i2ob_request(void)
+static void i2ob_request(request_queue_t * q)
{
struct request *req;
struct i2ob_request *ireq;
}
}
-
/*
* SCSI-CAM for ioctl geometry mapping
* Duplicated with SCSI - this should be moved into somewhere common
blk_size[MAJOR_NR] = i2ob_sizes;
max_sectors[MAJOR_NR] = i2ob_max_sectors;
- blk_dev[MAJOR_NR].request_fn = i2ob_request;
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), i2ob_request);
+ blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
+
for (i = 0; i < MAX_I2OB << 4; i++) {
i2ob_dev[i].refcnt = 0;
i2ob_dev[i].flags = 0;
* new PCI BIOS interface.
* Alan Cox <alan@redhat.com>: Fixed the out of memory
* handling.
+ *
+ * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
*
********************************************************************/
priv = (TLanPrivateInfo *) dev->priv;
- dev->name = priv->devName;
- strcpy( priv->devName, " " );
-
dev = init_etherdev( dev, sizeof(TLanPrivateInfo) );
dev->base_addr = io_base;
}
priv->sa_int = dev->mem_start & 0x02;
priv->debug = dev->mem_end;
-
+ spin_lock_init(&priv->lock);
printk("TLAN %d.%d: %s irq=%2d io=%04x, %s, Rev. %d\n",
TLanVersionMajor,
*
** This file is best viewed/edited with tabstop=4, colums>=132
*
+ *
+ * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com>
+ * New Maintainer
+ *
********************************************************************/
{
short i, j;
for (i=0, j=0; i<24; i++, j+=stride)
- printk("%1x", ((int)readb(pcid + j)) & 0x0f);
+ printk("%1x", ((int)isa_readb(pcid + j)) & 0x0f);
printk("\n");
}
*/
if (ibmtr_probe1(dev, base_addr))
- {
-#ifndef MODULE
-#ifndef PCMCIA
- tr_freedev(dev);
-#endif
-#endif
return -ENODEV;
- } else
+ else
return 0;
}
else if (base_addr != 0) /* Don't probe at all. */
int ioaddr = ibmtr_portlist[i];
if (check_region(ioaddr, IBMTR_IO_EXTENT))
continue;
- if (ibmtr_probe1(dev, ioaddr)) {
-#ifndef MODULE
-#ifndef PCMCIA
- tr_freedev(dev);
-#endif
-#endif
- } else
+ if (!ibmtr_probe1(dev, ioaddr))
return 0;
}
* Suboptimize knowing first byte different
*/
- ctemp = readb(cd_chanid) & 0x0f;
+ ctemp = isa_readb(cd_chanid) & 0x0f;
if (ctemp != *tchanid) { /* NOT ISA card, try MCA */
tchanid=mcchannelid;
cardpresent=TR_MCA;
*/
for (i=2,j=1; i<=46; i=i+2,j++)
{
- if ((readb(cd_chanid+i) & 0x0f) != tchanid[j]) {
+ if ((isa_readb(cd_chanid+i) & 0x0f) != tchanid[j]) {
cardpresent=NOTOK; /* match failed, not TR card */
break;
}
* as it has different IRQ settings
*/
- if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio)==0x0e))
+ if (cardpresent == TR_ISA && (isa_readb(AIPFID + t_mmio)==0x0e))
cardpresent=TR_ISAPNP;
if (cardpresent == NOTOK) { /* "channel_id" did not match, report */
if (intr==3)
irq=11;
timeout = jiffies + TR_SPIN_INTERVAL;
- while(!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN))
+ while(!isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN))
if (time_after(jiffies, timeout)) {
DPRINTK("Hardware timeout during initialization.\n");
kfree_s(ti, sizeof(struct tok_info));
return -ENODEV;
}
- ti->sram=((__u32)readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12);
+ ti->sram=((__u32)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)<<12);
ti->global_int_enable=PIOaddr+ADAPTINTREL;
ti->adapter_int_enable=PIOaddr+ADAPTINTREL;
break;
for (i=0; i<0x18; i=i+2)
{
/* technical reference states to do this */
- temp = readb(ti->mmio + AIP + i) & 0x0f;
+ temp = isa_readb(ti->mmio + AIP + i) & 0x0f;
#if !TR_NEWFORMAT
printk("%1X",ti->hw_address[j]=temp);
#else
#endif
/* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,...*/
- ti->adapter_type = readb(ti->mmio + AIPADAPTYPE);
+ ti->adapter_type = isa_readb(ti->mmio + AIPADAPTYPE);
/* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */
- ti->data_rate = readb(ti->mmio + AIPDATARATE);
+ ti->data_rate = isa_readb(ti->mmio + AIPDATARATE);
/* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */
- ti->token_release = readb(ti->mmio + AIPEARLYTOKEN);
+ ti->token_release = isa_readb(ti->mmio + AIPEARLYTOKEN);
/* How much shared RAM is on adapter ? */
#ifdef PCMCIA
#endif
/* We need to set or do a bunch of work here based on previous results.. */
/* Support paging? What sizes?: F=no, E=16k, D=32k, C=16 & 32k */
- ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE);
+ ti->shared_ram_paging = isa_readb(ti->mmio + AIPSHRAMPAGE);
/* Available DHB 4Mb size: F=2048, E=4096, D=4464 */
- switch (readb(ti->mmio + AIP4MBDHB)) {
+ switch (isa_readb(ti->mmio + AIP4MBDHB)) {
case 0xe :
ti->dhb_size4mb = 4096;
break;
}
/* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */
- switch (readb(ti->mmio + AIP16MBDHB)) {
+ switch (isa_readb(ti->mmio + AIP16MBDHB)) {
case 0xe :
ti->dhb_size16mb = 4096;
break;
/*
* determine how much of total RAM is mapped into PC space
*/
- ti->mapped_ram_size=1<<((((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4);
+ ti->mapped_ram_size=1<<((((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4);
ti->page_mask=0;
if (ti->shared_ram_paging == 0xf) { /* No paging in adapter */
ti->mapped_ram_size = ti->avail_shared_ram;
static __u32 ram_bndry_mask[]={0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000};
__u32 new_base, rrr_32, chk_base, rbm;
- rrr_32 = ((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003;
+ rrr_32 = ((isa_readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x00000003;
rbm = ram_bndry_mask[rrr_32];
new_base = (ibmtr_mem_base + (~rbm)) & rbm; /* up to boundary */
chk_base = new_base + (ti->mapped_ram_size<<9);
'B' - 64KB less 512 bytes at top
(WARNING ... must zero top bytes in INIT */
- avail_sram_code=0xf-readb(adapt_info->mmio + AIPAVAILSHRAM);
+ avail_sram_code=0xf-isa_readb(adapt_info->mmio + AIPAVAILSHRAM);
if (avail_sram_code)
return size_code[avail_sram_code];
else /* for code 'F', must compute size from RRR(3,2) bits */
- return 1<<((readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4);
+ return 1<<((isa_readb(adapt_info->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)>>2)+4);
}
static int __init trdev_init(struct net_device *dev)
}
SET_PAGE(ti->srb);
for (i=0; i<sizeof(struct srb_set_funct_addr); i++)
- writeb(0, ti->srb+i);
+ isa_writeb(0, ti->srb+i);
- writeb(DIR_SET_FUNC_ADDR,
+ isa_writeb(DIR_SET_FUNC_ADDR,
ti->srb + offsetof(struct srb_set_funct_addr, command));
DPRINTK("Setting functional address: ");
for (i=0; i<4; i++)
{
- writeb(address[i],
+ isa_writeb(address[i],
ti->srb + offsetof(struct srb_set_funct_addr, funct_address)+i);
printk("%02X ", address[i]);
}
- writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
printk("\n");
}
struct tok_info *ti=(struct tok_info *)dev->priv;
/* init the spinlock */
- ti->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&ti->lock);
if (ti->open_status==CLOSED) tok_init_card(dev);
struct tok_info *ti=(struct tok_info *) dev->priv;
- writeb(DIR_CLOSE_ADAPTER,
+ isa_writeb(DIR_CLOSE_ADAPTER,
ti->srb + offsetof(struct srb_close_adapter, command));
- writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
ti->open_status=CLOSED;
sleep_on(&ti->wait_for_tok_int);
- if (readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)))
+ if (isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)))
DPRINTK("close adapter failed: %02X\n",
- (int)readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)));
+ (int)isa_readb(ti->srb + offsetof(struct srb_close_adapter, ret_code)));
dev->start = 0;
#ifdef PCMCIA
/* Disable interrupts till processing is finished */
dev->interrupt=1;
- writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
+ isa_writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
/* Reset interrupt for ISA boards */
if (ti->adapter_int_enable)
the extra levels of logic and call depth for the
original solution. */
- status=readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
+ status=isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
#ifdef PCMCIA
/* Check if the PCMCIA card was pulled. */
if (status == 0xFF)
}
/* Check ISRP EVEN too. */
- if ( readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF)
+ if ( isa_readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) == 0xFF)
{
DPRINTK("PCMCIA card removed.\n");
spin_unlock(&(ti->lock));
int i;
__u32 check_reason;
- check_reason=ti->mmio + ntohs(readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN));
+ check_reason=ti->mmio + ntohs(isa_readw(ti->sram + ACA_OFFSET + ACA_RW +WWCR_EVEN));
DPRINTK("Adapter check interrupt\n");
DPRINTK("8 reason bytes follow: ");
for(i=0; i<8; i++, check_reason++)
- printk("%02X ", (int)readb(check_reason));
+ printk("%02X ", (int)isa_readb(check_reason));
printk("\n");
- writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
- writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ isa_writeb((~ADAP_CHK_INT), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+ isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
dev->interrupt=0;
- } else if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
+ } else if (isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
& (TCR_INT | ERR_INT | ACCESS_INT)) {
DPRINTK("adapter error: ISRP_EVEN : %02x\n",
- (int)readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN));
- writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
+ (int)isa_readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN));
+ isa_writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
- writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
dev->interrupt=0;
} else if (status
if (status & SRB_RESP_INT) { /* SRB response */
- switch(readb(ti->srb)) { /* SRB command check */
+ switch(isa_readb(ti->srb)) { /* SRB command check */
case XMIT_DIR_FRAME: {
unsigned char xmit_ret_code;
- xmit_ret_code=readb(ti->srb + offsetof(struct srb_xmit, ret_code));
+ xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code));
if (xmit_ret_code != 0xff) {
DPRINTK("error on xmit_dir_frame request: %02X\n",
xmit_ret_code);
case XMIT_UI_FRAME: {
unsigned char xmit_ret_code;
- xmit_ret_code=readb(ti->srb + offsetof(struct srb_xmit, ret_code));
+ xmit_ret_code=isa_readb(ti->srb + offsetof(struct srb_xmit, ret_code));
if (xmit_ret_code != 0xff) {
DPRINTK("error on xmit_ui_frame request: %02X\n",
xmit_ret_code);
unsigned char open_ret_code;
__u16 open_error_code;
- ti->srb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr)));
- ti->ssb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr)));
- ti->arb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr)));
- ti->asb=ti->sram+ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr)));
+ ti->srb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, srb_addr)));
+ ti->ssb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, ssb_addr)));
+ ti->arb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, arb_addr)));
+ ti->asb=ti->sram+ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, asb_addr)));
ti->current_skb=NULL;
- open_ret_code = readb(ti->init_srb +offsetof(struct srb_open_response, ret_code));
- open_error_code = ntohs(readw(ti->init_srb +offsetof(struct srb_open_response, error_code)));
+ open_ret_code = isa_readb(ti->init_srb +offsetof(struct srb_open_response, ret_code));
+ open_error_code = ntohs(isa_readw(ti->init_srb +offsetof(struct srb_open_response, error_code)));
if (open_ret_code==7) {
#else
DPRINTK("Adapter initialized and opened.\n");
#endif
- writeb(~(SRB_RESP_INT),
+ isa_writeb(~(SRB_RESP_INT),
ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
- writeb(~(CMD_IN_SRB),
+ isa_writeb(~(CMD_IN_SRB),
ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
open_sap(EXTENDED_SAP,dev);
break;
case DLC_OPEN_SAP:
- if (readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) {
+ if (isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code))) {
DPRINTK("open_sap failed: ret_code = %02X,retrying\n",
- (int)readb(ti->srb+offsetof(struct dlc_open_sap, ret_code)));
+ (int)isa_readb(ti->srb+offsetof(struct dlc_open_sap, ret_code)));
ibmtr_reset_timer(&(ti->tr_timer), dev);
} else {
ti->exsap_station_id=
- readw(ti->srb+offsetof(struct dlc_open_sap, station_id));
+ isa_readw(ti->srb+offsetof(struct dlc_open_sap, station_id));
ti->open_status=SUCCESS; /* TR adapter is now available */
wake_up(&ti->wait_for_reset);
}
case DIR_SET_GRP_ADDR:
case DIR_SET_FUNC_ADDR:
case DLC_CLOSE_SAP:
- if (readb(ti->srb+offsetof(struct srb_interrupt, ret_code)))
+ if (isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code)))
DPRINTK("error on %02X: %02X\n",
- (int)readb(ti->srb+offsetof(struct srb_interrupt, command)),
- (int)readb(ti->srb+offsetof(struct srb_interrupt, ret_code)));
+ (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, command)),
+ (int)isa_readb(ti->srb+offsetof(struct srb_interrupt, ret_code)));
break;
case DIR_READ_LOG:
- if (readb(ti->srb+offsetof(struct srb_read_log, ret_code)))
+ if (isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code)))
DPRINTK("error on dir_read_log: %02X\n",
- (int)readb(ti->srb+offsetof(struct srb_read_log, ret_code)));
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log, ret_code)));
else
if (IBMTR_DEBUG_MESSAGES) {
DPRINTK(
"A/C errors %02X, Abort delimiters %02X, Lost frames %02X\n"
"Receive congestion count %02X, Frame copied errors %02X\n"
"Frequency errors %02X, Token errors %02X\n",
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
line_errors)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
internal_errors)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
burst_errors)),
- (int)readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log, A_C_errors)),
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
abort_delimiters)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
lost_frames)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
recv_congest_count)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
frame_copied_errors)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
frequency_errors)),
- (int)readb(ti->srb+offsetof(struct srb_read_log,
+ (int)isa_readb(ti->srb+offsetof(struct srb_read_log,
token_errors)));
}
dev->tbusy=0;
default:
DPRINTK("Unknown command %02X encountered\n",
- (int)readb(ti->srb));
+ (int)isa_readb(ti->srb));
} /* SRB command check */
- writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
- writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+ isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
+ isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
skip_reset:
} /* SRB response */
if (status & ASB_FREE_INT) { /* ASB response */
- switch(readb(ti->asb)) { /* ASB command check */
+ switch(isa_readb(ti->asb)) { /* ASB command check */
case REC_DATA:
case XMIT_UI_FRAME:
default:
DPRINTK("unknown command in asb %02X\n",
- (int)readb(ti->asb));
+ (int)isa_readb(ti->asb));
} /* ASB command check */
- if (readb(ti->asb+2)!=0xff) /* checks ret_code */
+ if (isa_readb(ti->asb+2)!=0xff) /* checks ret_code */
DPRINTK("ASB error %02X in cmd %02X\n",
- (int)readb(ti->asb+2),(int)readb(ti->asb));
- writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+ (int)isa_readb(ti->asb+2),(int)isa_readb(ti->asb));
+ isa_writeb(~ASB_FREE_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
} /* ASB response */
if (status & ARB_CMD_INT) { /* ARB response */
- switch (readb(ti->arb)) { /* ARB command check */
+ switch (isa_readb(ti->arb)) { /* ARB command check */
case DLC_STATUS:
DPRINTK("DLC_STATUS new status: %02X on station %02X\n",
- ntohs(readw(ti->arb + offsetof(struct arb_dlc_status, status))),
- ntohs(readw(ti->arb
+ ntohs(isa_readw(ti->arb + offsetof(struct arb_dlc_status, status))),
+ ntohs(isa_readw(ti->arb
+offsetof(struct arb_dlc_status, station_id))));
break;
case RING_STAT_CHANGE: {
unsigned short ring_status;
- ring_status=ntohs(readw(ti->arb
+ ring_status=ntohs(isa_readw(ti->arb
+offsetof(struct arb_ring_stat_change, ring_status)));
if (ring_status & (SIGNAL_LOSS | LOBE_FAULT)) {
default:
DPRINTK("Unknown command %02X in arb\n",
- (int)readb(ti->arb));
+ (int)isa_readb(ti->arb));
break;
} /* ARB command check */
- writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
- writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(~ARB_CMD_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+ isa_writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
} /* ARB response */
if (status & SSB_RESP_INT) { /* SSB response */
unsigned char retcode;
- switch (readb(ti->ssb)) { /* SSB command check */
+ switch (isa_readb(ti->ssb)) { /* SSB command check */
case XMIT_DIR_FRAME:
case XMIT_UI_FRAME:
- retcode = readb(ti->ssb+2);
+ retcode = isa_readb(ti->ssb+2);
if (retcode && (retcode != 0x22)) /* checks ret_code */
DPRINTK("xmit ret_code: %02X xmit error code: %02X\n",
- (int)retcode, (int)readb(ti->ssb+6));
+ (int)retcode, (int)isa_readb(ti->ssb+6));
else ti->tr_stats.tx_packets++;
break;
case XMIT_XID_CMD:
- DPRINTK("xmit xid ret_code: %02X\n", (int)readb(ti->ssb+2));
+ DPRINTK("xmit xid ret_code: %02X\n", (int)isa_readb(ti->ssb+2));
default:
- DPRINTK("Unknown command %02X in ssb\n", (int)readb(ti->ssb));
+ DPRINTK("Unknown command %02X in ssb\n", (int)isa_readb(ti->ssb));
} /* SSB command check */
- writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
- writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(~SSB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+ isa_writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
} /* SSB response */
} /* SRB, ARB, ASB or SSB response */
dev->interrupt=0;
- writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
break;
case FIRST_INT:
/* we assign the shared-ram address for ISA devices */
if(!ti->sram) {
- writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
+ isa_writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
ti->sram=((__u32)ti->sram_base << 12);
}
ti->init_srb=ti->sram
- +ntohs((unsigned short)readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN));
- SET_PAGE(ntohs((unsigned short)readw(ti->mmio+ACA_OFFSET + WRBR_EVEN)));
+ +ntohs((unsigned short)isa_readw(ti->mmio+ ACA_OFFSET + WRBR_EVEN));
+ SET_PAGE(ntohs((unsigned short)isa_readw(ti->mmio+ACA_OFFSET + WRBR_EVEN)));
dev->mem_start = ti->sram;
dev->mem_end = ti->sram + (ti->mapped_ram_size<<9) - 1;
{
int i;
DPRINTK("init_srb(%p):", ti->init_srb);
- for (i=0;i<17;i++) printk("%02X ", (int)readb(ti->init_srb+i));
+ for (i=0;i<17;i++) printk("%02X ", (int)isa_readb(ti->init_srb+i));
printk("\n");
}
#endif
- hw_encoded_addr = readw(ti->init_srb
+ hw_encoded_addr = isa_readw(ti->init_srb
+ offsetof(struct srb_init_response, encoded_address));
#if !TR_NEWFORMAT
#endif
encoded_addr=(ti->sram + ntohs(hw_encoded_addr));
- ti->ring_speed = readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4;
+ ti->ring_speed = isa_readb(ti->init_srb+offsetof(struct srb_init_response, init_status)) & 0x01 ? 16 : 4;
#if !TR_NEWFORMAT
DPRINTK("encoded addr (%04X,%04X,%08X): ", hw_encoded_addr,
ntohs(hw_encoded_addr), encoded_addr);
ti->ring_speed, ti->sram);
#endif
- ti->auto_ringspeedsave=readb(ti->init_srb
+ ti->auto_ringspeedsave=isa_readb(ti->init_srb
+offsetof(struct srb_init_response, init_status_2)) & 0x4 ? TRUE : FALSE;
#if !TR_NEWFORMAT
for(i=0;i<TR_ALEN;i++) {
- dev->dev_addr[i]=readb(encoded_addr + i);
+ dev->dev_addr[i]=isa_readb(encoded_addr + i);
printk("%02X%s", dev->dev_addr[i], (i==TR_ALEN-1) ? "" : ":" );
}
printk("\n");
#ifdef ENABLE_PAGING
if(ti->page_mask)
- writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+ isa_writeb(SRPR_ENABLE_PAGING, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
- writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
+ isa_writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
#if !TR_NEWFORMAT
DPRINTK("resetting card\n");
#endif
ti->open_status=IN_PROGRESS;
- writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
return 0;
}
SET_PAGE(ti->srb);
for (i=0; i<sizeof(struct dlc_open_sap); i++)
- writeb(0, ti->srb+i);
+ isa_writeb(0, ti->srb+i);
- writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command));
- writew(htons(MAX_I_FIELD),
+ isa_writeb(DLC_OPEN_SAP, ti->srb + offsetof(struct dlc_open_sap, command));
+ isa_writew(htons(MAX_I_FIELD),
ti->srb + offsetof(struct dlc_open_sap, max_i_field));
- writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY,
+ isa_writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY,
ti->srb + offsetof(struct dlc_open_sap, sap_options));
- writeb(SAP_OPEN_STATION_CNT,
+ isa_writeb(SAP_OPEN_STATION_CNT,
ti->srb + offsetof(struct dlc_open_sap, station_count));
- writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value));
+ isa_writeb(type, ti->srb + offsetof(struct dlc_open_sap, sap_value));
- writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
}
DPRINTK("now opening the board...\n");
#endif
- writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
- writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
+ isa_writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+ isa_writeb(~CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_RESET + ISRA_ODD);
for (i=0; i<sizeof(struct dir_open_adapter); i++)
- writeb(0, ti->init_srb+i);
+ isa_writeb(0, ti->init_srb+i);
- writeb(DIR_OPEN_ADAPTER,
+ isa_writeb(DIR_OPEN_ADAPTER,
ti->init_srb + offsetof(struct dir_open_adapter, command));
- writew(htons(OPEN_PASS_BCON_MAC),
+ isa_writew(htons(OPEN_PASS_BCON_MAC),
ti->init_srb + offsetof(struct dir_open_adapter, open_options));
if (ti->ring_speed == 16) {
- writew(htons(ti->dhb_size16mb),
+ isa_writew(htons(ti->dhb_size16mb),
ti->init_srb + offsetof(struct dir_open_adapter, dhb_length));
- writew(htons(ti->rbuf_cnt16),
+ isa_writew(htons(ti->rbuf_cnt16),
ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf));
- writew(htons(ti->rbuf_len16),
+ isa_writew(htons(ti->rbuf_len16),
ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len));
} else {
- writew(htons(ti->dhb_size4mb),
+ isa_writew(htons(ti->dhb_size4mb),
ti->init_srb + offsetof(struct dir_open_adapter, dhb_length));
- writew(htons(ti->rbuf_cnt4),
+ isa_writew(htons(ti->rbuf_cnt4),
ti->init_srb + offsetof(struct dir_open_adapter, num_rcv_buf));
- writew(htons(ti->rbuf_len4),
+ isa_writew(htons(ti->rbuf_len4),
ti->init_srb + offsetof(struct dir_open_adapter, rcv_buf_len));
}
- writeb(NUM_DHB, /* always 2 */
+ isa_writeb(NUM_DHB, /* always 2 */
ti->init_srb + offsetof(struct dir_open_adapter, num_dhb));
- writeb(DLC_MAX_SAP,
+ isa_writeb(DLC_MAX_SAP,
ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sap));
- writeb(DLC_MAX_STA,
+ isa_writeb(DLC_MAX_STA,
ti->init_srb + offsetof(struct dir_open_adapter, dlc_max_sta));
ti->srb=ti->init_srb; /* We use this one in the interrupt handler */
- writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
- writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
}
int i;
struct trllc *llc;
- if (readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF)
+ if (isa_readb(ti->asb + offsetof(struct asb_xmit_resp, ret_code))!=0xFF)
DPRINTK("ASB not free !!!\n");
/* in providing the transmit interrupts,
to stuff with data. Here we compute the
effective address where we will place data.*/
dhb=ti->sram
- +ntohs(readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address)));
+ +ntohs(isa_readw(ti->arb + offsetof(struct arb_xmit_req, dhb_address)));
/* Figure out the size of the 802.5 header */
if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */
llc = (struct trllc *)(ti->current_skb->data + hdr_len);
- xmit_command = readb(ti->srb + offsetof(struct srb_xmit, command));
+ xmit_command = isa_readb(ti->srb + offsetof(struct srb_xmit, command));
- writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command));
- writew(readb(ti->srb + offsetof(struct srb_xmit, station_id)),
+ isa_writeb(xmit_command, ti->asb + offsetof(struct asb_xmit_resp, command));
+ isa_writew(isa_readb(ti->srb + offsetof(struct srb_xmit, station_id)),
ti->asb + offsetof(struct asb_xmit_resp, station_id));
- writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value));
- writeb(readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)),
+ isa_writeb(llc->ssap, ti->asb + offsetof(struct asb_xmit_resp, rsap_value));
+ isa_writeb(isa_readb(ti->srb + offsetof(struct srb_xmit, cmd_corr)),
ti->asb + offsetof(struct asb_xmit_resp, cmd_corr));
- writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code));
+ isa_writeb(0, ti->asb + offsetof(struct asb_xmit_resp, ret_code));
if ((xmit_command==XMIT_XID_CMD) || (xmit_command==XMIT_TEST_CMD)) {
- writew(htons(0x11),
+ isa_writew(htons(0x11),
ti->asb + offsetof(struct asb_xmit_resp, frame_length));
- writeb(0x0e, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
- writeb(AC, dhb);
- writeb(LLC_FRAME, dhb+1);
+ isa_writeb(0x0e, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
+ isa_writeb(AC, dhb);
+ isa_writeb(LLC_FRAME, dhb+1);
- for (i=0; i<TR_ALEN; i++) writeb((int)0x0FF, dhb+i+2);
- for (i=0; i<TR_ALEN; i++) writeb(0, dhb+i+TR_ALEN+2);
+ for (i=0; i<TR_ALEN; i++) isa_writeb((int)0x0FF, dhb+i+2);
+ for (i=0; i<TR_ALEN; i++) isa_writeb(0, dhb+i+TR_ALEN+2);
- writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
return;
}
* the token ring packet is copied from sk_buff to the adapter
* buffer identified in the command data received with the interrupt.
*/
- writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
- writew(htons(ti->current_skb->len),
+ isa_writeb(hdr_len, ti->asb + offsetof(struct asb_xmit_resp, hdr_length));
+ isa_writew(htons(ti->current_skb->len),
ti->asb + offsetof(struct asb_xmit_resp, frame_length));
- memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len);
+ isa_memcpy_toio(dhb, ti->current_skb->data, ti->current_skb->len);
- writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
ti->tr_stats.tx_bytes+=ti->current_skb->len;
dev->tbusy=0;
dev_kfree_skb(ti->current_skb);
struct iphdr *iph;
rbuffer=(ti->sram
- +ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2;
+ +ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))))+2;
- if(readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF)
+ if(isa_readb(ti->asb + offsetof(struct asb_rec, ret_code))!=0xFF)
DPRINTK("ASB not free !!!\n");
- writeb(REC_DATA,
+ isa_writeb(REC_DATA,
ti->asb + offsetof(struct asb_rec, command));
- writew(readw(ti->arb + offsetof(struct arb_rec_req, station_id)),
+ isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, station_id)),
ti->asb + offsetof(struct asb_rec, station_id));
- writew(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)),
+ isa_writew(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr)),
ti->asb + offsetof(struct asb_rec, rec_buf_addr));
- lan_hdr_len=readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len));
+ lan_hdr_len=isa_readb(ti->arb + offsetof(struct arb_rec_req, lan_hdr_len));
hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr);
llc=(rbuffer + offsetof(struct rec_buf, data) + lan_hdr_len);
DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n",
(unsigned int)offsetof(struct rec_buf,data), (unsigned int)lan_hdr_len);
DPRINTK("llc: %08X rec_buf_addr: %04X ti->sram: %p\n", llc,
- ntohs(readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))),
+ ntohs(isa_readw(ti->arb + offsetof(struct arb_rec_req, rec_buf_addr))),
ti->sram);
DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, "
"ethertype: %04X\n",
- (int)readb(llc + offsetof(struct trllc, dsap)),
- (int)readb(llc + offsetof(struct trllc, ssap)),
- (int)readb(llc + offsetof(struct trllc, llc)),
- (int)readb(llc + offsetof(struct trllc, protid)),
- (int)readb(llc + offsetof(struct trllc, protid)+1),
- (int)readb(llc + offsetof(struct trllc, protid)+2),
- (int)readw(llc + offsetof(struct trllc, ethertype)));
+ (int)isa_readb(llc + offsetof(struct trllc, dsap)),
+ (int)isa_readb(llc + offsetof(struct trllc, ssap)),
+ (int)isa_readb(llc + offsetof(struct trllc, llc)),
+ (int)isa_readb(llc + offsetof(struct trllc, protid)),
+ (int)isa_readb(llc + offsetof(struct trllc, protid)+1),
+ (int)isa_readb(llc + offsetof(struct trllc, protid)+2),
+ (int)isa_readw(llc + offsetof(struct trllc, ethertype)));
#endif
- if (readb(llc + offsetof(struct trllc, llc))!=UI_CMD) {
- writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
+ if (isa_readb(llc + offsetof(struct trllc, llc))!=UI_CMD) {
+ isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
ti->tr_stats.rx_dropped++;
- writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
return;
}
- length = ntohs(readw(ti->arb+offsetof(struct arb_rec_req, frame_len)));
- if ((readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) &&
- (readb(llc + offsetof(struct trllc, ssap))==EXTENDED_SAP) &&
+ length = ntohs(isa_readw(ti->arb+offsetof(struct arb_rec_req, frame_len)));
+ if ((isa_readb(llc + offsetof(struct trllc, dsap))==EXTENDED_SAP) &&
+ (isa_readb(llc + offsetof(struct trllc, ssap))==EXTENDED_SAP) &&
(length>=hdr_len)) {
IPv4_p = 1;
}
DPRINTK("Probably non-IP frame received.\n");
DPRINTK("ssap: %02X dsap: %02X saddr: %02X:%02X:%02X:%02X:%02X:%02X "
"daddr: %02X:%02X:%02X:%02X:%02X:%02X\n",
- (int)readb(llc + offsetof(struct trllc, ssap)),
- (int)readb(llc + offsetof(struct trllc, dsap)),
- (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)),
- (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+1),
- (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+2),
- (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+3),
- (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+4),
- (int)readb(trhhdr + offsetof(struct trh_hdr, saddr)+5),
- (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)),
- (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+1),
- (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+2),
- (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+3),
- (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+4),
- (int)readb(trhhdr + offsetof(struct trh_hdr, daddr)+5));
+ (int)isa_readb(llc + offsetof(struct trllc, ssap)),
+ (int)isa_readb(llc + offsetof(struct trllc, dsap)),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+1),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+2),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+3),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+4),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, saddr)+5),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+1),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+2),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+3),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+4),
+ (int)isa_readb(trhhdr + offsetof(struct trh_hdr, daddr)+5));
}
#endif
if (!(skb=dev_alloc_skb(skb_size))) {
DPRINTK("out of memory. frame dropped.\n");
ti->tr_stats.rx_dropped++;
- writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
- writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
+ isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
return;
}
skb_reserve(skb, sizeof(struct trh_hdr)-lan_hdr_len+sizeof(struct trllc));
skb->dev=dev;
data=skb->data;
- rbuffer_len=ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len)));
+ rbuffer_len=ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len)));
rbufdata = rbuffer + offsetof(struct rec_buf,data);
if (IPv4_p) {
/* Copy the headers without checksumming */
- memcpy_fromio(data, rbufdata, hdr_len);
+ isa_memcpy_fromio(data, rbufdata, hdr_len);
/* Watch for padded packets and bogons */
iph=(struct iphdr*)(data + lan_hdr_len + sizeof(struct trllc));
length < rbuffer_len ? length : rbuffer_len,
chksum);
else
- memcpy_fromio(data, rbufdata, rbuffer_len);
- rbuffer = ntohs(readw(rbuffer));
+ isa_memcpy_fromio(data, rbufdata, rbuffer_len);
+ rbuffer = ntohs(isa_readw(rbuffer));
if (!rbuffer)
break;
length -= rbuffer_len;
data += rbuffer_len;
rbuffer += ti->sram;
- rbuffer_len = ntohs(readw(rbuffer + offsetof(struct rec_buf, buf_len)));
+ rbuffer_len = ntohs(isa_readw(rbuffer + offsetof(struct rec_buf, buf_len)));
rbufdata = rbuffer + offsetof(struct rec_buf, data);
}
- writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
+ isa_writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
- writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
ti->tr_stats.rx_bytes += skb->len;
ti->tr_stats.rx_packets++;
/* Save skb; we'll need it when the adapter asks for the data */
ti->current_skb=skb;
- writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command));
- writew(ti->exsap_station_id, ti->srb
+ isa_writeb(XMIT_UI_FRAME, ti->srb + offsetof(struct srb_xmit, command));
+ isa_writew(ti->exsap_station_id, ti->srb
+offsetof(struct srb_xmit, station_id));
- writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD));
+ isa_writeb(CMD_IN_SRB, (ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD));
spin_unlock_irqrestore(&(ti->lock), flags);
dev->trans_start=jiffies;
ti=(struct tok_info *) dev->priv;
ti->readlog_pending = 0;
- writeb(DIR_READ_LOG, ti->srb);
- writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
- writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ isa_writeb(DIR_READ_LOG, ti->srb);
+ isa_writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ isa_writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
dev->tbusy=1; /* really srb busy... */
}
#define ACA_RW 0x00
#ifdef ENABLE_PAGING
-#define SET_PAGE(x) (writeb(((x>>8)&ti.page_mask), \
+#define SET_PAGE(x) (isa_writeb(((x>>8)&ti.page_mask), \
ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN))
#else
#define SET_PAGE(x)
* David Mosberger-Tang, Martin Mares
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
-bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ bool 'Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES
+fi
+bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
+
bool ' Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
bool ' SCSI logging facility' CONFIG_SCSI_LOGGING
OX_OBJS := scsi_syms.o
endif
L_OBJS += scsi_n_syms.o hosts.o scsi_ioctl.o constants.o scsicam.o
- L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o
- L_OBJS += scsi_proc.o
+ L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o
+ L_OBJS += scsi_merge.o scsi_proc.o
else
ifeq ($(CONFIG_SCSI),m)
MIX_OBJS += scsi_syms.o
$(CC) $(CFLAGS) -c megaraid.c
scsi_mod.o: $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o constants.o \
- scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o scsi_queue.o
+ scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o \
+ scsi_queue.o scsi_lib.o scsi_merge.o
$(LD) $(LD_RFLAG) -r -o $@ $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o \
- constants.o scsicam.o scsi_proc.o \
- scsi_error.o scsi_obsolete.o scsi_queue.o \
+ constants.o scsicam.o scsi_proc.o scsi_merge.o \
+ scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o
sr_mod.o: sr.o sr_ioctl.o sr_vendor.o
$(LD) $(LD_RFLAG) -r -o $@ sr.o sr_ioctl.o sr_vendor.o
-/* $Id: advansys.c,v 1.67 1999/11/18 20:13:15 bobf Exp bobf $ */
-#define ASC_VERSION "3.2K" /* AdvanSys Driver Version */
+/* $Id: advansys.c,v 1.68 1999/11/19 01:57:47 bobf Exp bobf $ */
+#define ASC_VERSION "3.2L" /* AdvanSys Driver Version */
/*
* advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
4. Increase Wide board scatter-gather list maximum length to
255 when the driver is compiled into the kernel.
+ 3.2L (11/18/99):
+ 1. Fix bug in adv_get_sglist() that caused an assertion failure
+ at line 7475. The reqp->sgblkp pointer must be initialized
+ to NULL in adv_get_sglist().
+
J. Known Problems/Fix List (XXX)
1. Need to add memory mapping workaround. Test the memory mapping.
slp = (struct scatterlist *) scp->request_buffer;
sg_elem_cnt = scp->use_sg;
prev_sg_block = NULL;
+ reqp->sgblkp = NULL;
- ASC_ASSERT(reqp->sgblkp == NULL);
do
{
/*
* check for timeout, and if we are doing something like this
* we are pretty desperate anyways.
*/
+ spin_unlock_irq(&io_request_lock);
scsi_sleep(4*HZ);
+ spin_lock_irq(&io_request_lock);
WAIT(STATUS(SCpnt->host->io_port),
STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
* check for timeout, and if we are doing something like this
* we are pretty desperate anyways.
*/
+ spin_unlock_irq(&io_request_lock);
scsi_sleep(4*HZ);
+ spin_lock_irq(&io_request_lock);
WAIT(STATUS(SCpnt->host->io_port),
STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
* static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/atp870u.c,v 1.0 1997/05/07 15:22:00 root Exp root $";
*/
-static unsigned char admaxu = 1, host_idu[2], chip_veru[2], scam_on[2], global_map[2];
-static unsigned short int active_idu[2], wide_idu[2], sync_idu, ultra_map[2];
-static int workingu[2] = {0, 0};
+static unsigned char admaxu = 1;
+static unsigned short int sync_idu;
-static Scsi_Cmnd *querequ[2][qcnt], *curr_req[2][16];
-
-static unsigned char devspu[2][16] = {
- {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20},
- {0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}
-};
-
-static unsigned char dirctu[2][16], last_cmd[2], in_snd[2], in_int[2];
-static unsigned char ata_cdbu[2][16];
-static unsigned int ioportu[2] = {0, 0};
static unsigned int irqnumu[2] = {0, 0};
-static unsigned short int pciportu[2];
-static unsigned long prdaddru[2][16], tran_lenu[2][16], last_lenu[2][16];
-static unsigned char prd_tableu[2][16][1024];
-static unsigned char *prd_posu[2][16];
-static unsigned char quhdu[2], quendu[2];
-static unsigned char devtypeu[2][16] =
+struct atp_unit
{
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ unsigned long ioport;
+ unsigned long irq;
+ unsigned long pciport;
+ unsigned char last_cmd;
+ unsigned char in_snd;
+ unsigned char in_int;
+ unsigned char quhdu;
+ unsigned char quendu;
+ unsigned char scam_on;
+ unsigned char global_map;
+ unsigned char chip_veru;
+ unsigned char host_idu;
+ int working;
+ unsigned short wide_idu;
+ unsigned short active_idu;
+ unsigned short ultra_map;
+ unsigned char ata_cdbu[16];
+ Scsi_Cmnd *querequ[qcnt];
+ struct atp_id
+ {
+ unsigned char dirctu;
+ unsigned char devspu;
+ unsigned char devtypeu;
+ unsigned long prdaddru;
+ unsigned long tran_lenu;
+ unsigned long last_lenu;
+ unsigned char *prd_posu;
+ unsigned char *prd_tableu;
+ Scsi_Cmnd *curr_req;
+ } id[16];
};
static struct Scsi_Host *atp_host[2] = {NULL, NULL};
+static struct atp_unit atp_unit[2];
static void atp870u_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
unsigned short int tmpcip, id;
- unsigned char i, j, h, tarid, lun;
+ unsigned char i, j, h, target_id, lun;
unsigned char *prd;
Scsi_Cmnd *workrequ;
unsigned int workportu, tmport;
unsigned long adrcntu, k;
int errstus;
+ struct atp_unit *dev = dev_id;
for (h = 0; h < 2; h++) {
if (irq == irqnumu[h]) {
}
return;
irq_numok:
- in_int[h] = 1;
- workportu = ioportu[h];
+ dev->in_int = 1;
+ workportu = dev->ioport;
tmport = workportu;
-
- if (workingu[h] != 0)
+
+ if (dev->working != 0)
{
tmport += 0x1f;
j = inb(tmport);
- tmpcip = pciportu[h];
+ tmpcip = dev->pciport;
if ((inb(tmpcip) & 0x08) != 0)
{
tmpcip += 0x2;
- while ((inb(tmpcip) & 0x08) != 0);
+ for (k=0; k < 1000; k++)
+ {
+ if ((inb(tmpcip) & 0x08) == 0)
+ {
+ goto stop_dma;
+ }
+ if ((inb(tmpcip) & 0x01) == 0)
+ {
+ goto stop_dma;
+ }
+ }
}
- tmpcip = pciportu[h];
+stop_dma:
+ tmpcip = dev->pciport;
outb(0x00, tmpcip);
tmport -= 0x08;
i = inb(tmport);
if ((j & 0x40) == 0)
{
- if ((last_cmd[h] & 0x40) == 0)
+ if ((dev->last_cmd & 0x40) == 0)
{
- last_cmd[h] = 0xff;
+ dev->last_cmd = 0xff;
}
}
- else last_cmd[h] |= 0x40;
+ else dev->last_cmd |= 0x40;
tmport -= 0x02;
- tarid = inb(tmport);
+ target_id = inb(tmport);
tmport += 0x02;
- if ((tarid & 0x40) != 0) {
- tarid = (tarid & 0x07) | 0x08;
+ /*
+ * Remap wide devices onto id numbers
+ */
+
+ if ((target_id & 0x40) != 0) {
+ target_id = (target_id & 0x07) | 0x08;
} else {
- tarid &= 0x07;
+ target_id &= 0x07;
}
+
if (i == 0x85)
{
- if (wide_idu[h] != 0)
+ /*
+ * Flip wide
+ */
+ if (dev->wide_idu != 0)
{
tmport = workportu + 0x1b;
j = inb(tmport) & 0x0e;
j |= 0x01;
outb(j, tmport);
}
- if (((quhdu[h] != quendu[h]) || (last_cmd[h] != 0xff)) &&
- (in_snd[h] == 0))
+ /*
+ * Issue more commands
+ */
+ if (((dev->quhdu != dev->quendu) || (dev->last_cmd != 0xff)) &&
+ (dev->in_snd == 0))
{
send_s870(h);
}
- in_int[h] = 0;
+ /*
+ * Done
+ */
+ dev->in_int = 0;
return;
}
if (i == 0x21)
((unsigned char *) &adrcntu)[2] = inb(tmport++);
((unsigned char *) &adrcntu)[1] = inb(tmport++);
((unsigned char *) &adrcntu)[0] = inb(tmport);
- k = last_lenu[h][tarid];
+ k = dev->id[target_id].last_lenu;
k -= adrcntu;
- tran_lenu[h][tarid] = k;
- last_lenu[h][tarid] = adrcntu;
+ dev->id[target_id].tran_lenu = k;
+ dev->id[target_id].last_lenu = adrcntu;
tmport -= 0x04;
outb(0x41, tmport);
tmport += 0x08;
outb(0x08, tmport);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
if ((i == 0x80) || (i == 0x8f))
lun = 0;
tmport -= 0x07;
j = inb(tmport);
- if (j == 0x44) {
+ if (j == 0x44 || i==0x80) {
tmport += 0x0d;
lun = inb(tmport) & 0x07;
} else {
((unsigned char *) &adrcntu)[2] = inb(tmport++);
((unsigned char *) &adrcntu)[1] = inb(tmport++);
((unsigned char *) &adrcntu)[0] = inb(tmport);
- k = last_lenu[h][tarid];
+ k = dev->id[target_id].last_lenu;
k -= adrcntu;
- tran_lenu[h][tarid] = k;
- last_lenu[h][tarid] = adrcntu;
+ dev->id[target_id].tran_lenu = k;
+ dev->id[target_id].last_lenu = adrcntu;
tmport += 0x04;
outb(0x08, tmport);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
else
{
outb(0x46, tmport);
- dirctu[h][tarid] = 0x00;
+ dev->id[target_id].dirctu = 0x00;
tmport += 0x02;
outb(0x00, tmport++);
outb(0x00, tmport++);
outb(0x00, tmport++);
tmport += 0x03;
outb(0x08, tmport);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
}
tmport = workportu + 0x10;
outb(0x45, tmport);
tmport += 0x06;
- tarid = inb(tmport);
- if ((tarid & 0x10) != 0)
+ target_id = inb(tmport);
+ /*
+ * Remap wide identifiers
+ */
+ if ((target_id & 0x10) != 0)
{
- tarid = (tarid & 0x07) | 0x08;
+ target_id = (target_id & 0x07) | 0x08;
} else {
- tarid &= 0x07;
+ target_id &= 0x07;
}
- workrequ = curr_req[h][tarid];
+ workrequ = dev->id[target_id].curr_req;
tmport = workportu + 0x0f;
outb(lun, tmport);
tmport += 0x02;
- outb(devspu[h][tarid], tmport++);
- adrcntu = tran_lenu[h][tarid];
- k = last_lenu[h][tarid];
+ outb(dev->id[target_id].devspu, tmport++);
+ adrcntu = dev->id[target_id].tran_lenu;
+ k = dev->id[target_id].last_lenu;
outb(((unsigned char *) &k)[2], tmport++);
outb(((unsigned char *) &k)[1], tmport++);
outb(((unsigned char *) &k)[0], tmport++);
- j = tarid;
- if (tarid > 7) {
+ /* Remap wide */
+ j = target_id;
+ if (target_id > 7) {
j = (j & 0x07) | 0x40;
}
- j |= dirctu[h][tarid];
+ /* Add direction */
+ j |= dev->id[target_id].dirctu;
outb(j, tmport++);
outb(0x80, tmport);
tmport = workportu + 0x1b;
j = inb(tmport) & 0x0e;
id = 1;
- id = id << tarid;
- if ((id & wide_idu[h]) != 0) {
+ id = id << target_id;
+ /*
+ * Is this a wide device
+ */
+ if ((id & dev->wide_idu) != 0) {
j |= 0x01;
}
outb(j, tmport);
- if (last_lenu[h][tarid] == 0) {
+
+ if (dev->id[target_id].last_lenu == 0) {
tmport = workportu + 0x18;
outb(0x08, tmport);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
- prd = prd_posu[h][tarid];
+ prd = dev->id[target_id].prd_posu;
while (adrcntu != 0)
{
id = ((unsigned short int *) (prd))[2];
(k - adrcntu);
((unsigned long *) (prd))[0] += adrcntu;
adrcntu = 0;
- prd_posu[h][tarid] = prd;
+ dev->id[target_id].prd_posu = prd;
} else {
adrcntu -= k;
- prdaddru[h][tarid] += 0x08;
+ dev->id[target_id].prdaddru += 0x08;
prd += 0x08;
if (adrcntu == 0) {
- prd_posu[h][tarid] = prd;
+ dev->id[target_id].prd_posu = prd;
}
}
}
- tmpcip = pciportu[h] + 0x04;
- outl(prdaddru[h][tarid], tmpcip);
+ tmpcip = dev->pciport + 0x04;
+ outl(dev->id[target_id].prdaddru, tmpcip);
tmpcip -= 0x02;
outb(0x06, tmpcip);
outb(0x00, tmpcip);
tmpcip -= 0x02;
tmport = workportu + 0x18;
- if (dirctu[h][tarid] != 0) {
+ /*
+ * Check transfer direction
+ */
+ if (dev->id[target_id].dirctu != 0) {
outb(0x08, tmport);
outb(0x01, tmpcip);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
outb(0x08, tmport);
outb(0x09, tmpcip);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
- workrequ = curr_req[h][tarid];
+
+ /*
+ * Current scsi request on this target
+ */
+
+ workrequ = dev->id[target_id].curr_req;
+
if (i == 0x42) {
errstus = 0x02;
workrequ->result = errstus;
errstus = inb(tmport);
workrequ->result = errstus;
go_42:
+ /*
+ * Complete the command
+ */
spin_lock_irqsave(&io_request_lock, flags);
(*workrequ->scsi_done) (workrequ);
spin_unlock_irqrestore(&io_request_lock, flags);
- curr_req[h][tarid] = 0;
- workingu[h]--;
- if (wide_idu[h] != 0) {
+ /*
+ * Clear it off the queue
+ */
+ dev->id[target_id].curr_req = 0;
+ dev->working--;
+ /*
+ * Take it back wide
+ */
+ if (dev->wide_idu != 0) {
tmport = workportu + 0x1b;
j = inb(tmport) & 0x0e;
j |= 0x01;
outb(j, tmport);
}
- if (((last_cmd[h] != 0xff) || (quhdu[h] != quendu[h])) &&
- (in_snd[h] == 0))
+ /*
+ * If there is stuff to send and nothing going then send it
+ */
+ if (((dev->last_cmd != 0xff) || (dev->quhdu != dev->quendu)) &&
+ (dev->in_snd == 0))
{
send_s870(h);
}
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
if (i == 0x4f) {
i &= 0x0f;
if (i == 0x09) {
tmpcip = tmpcip + 4;
- outl(prdaddru[h][tarid], tmpcip);
+ outl(dev->id[target_id].prdaddru, tmpcip);
tmpcip = tmpcip - 2;
outb(0x06, tmpcip);
outb(0x00, tmpcip);
tmpcip = tmpcip - 2;
tmport = workportu + 0x10;
outb(0x41, tmport);
- dirctu[h][tarid] = 0x00;
+ dev->id[target_id].dirctu = 0x00;
tmport += 0x08;
outb(0x08, tmport);
outb(0x09, tmpcip);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
if (i == 0x08) {
tmpcip = tmpcip + 4;
- outl(prdaddru[h][tarid], tmpcip);
+ outl(dev->id[target_id].prdaddru, tmpcip);
tmpcip = tmpcip - 2;
outb(0x06, tmpcip);
outb(0x00, tmpcip);
outb(0x41, tmport);
tmport += 0x05;
outb((unsigned char) (inb(tmport) | 0x20), tmport);
- dirctu[h][tarid] = 0x20;
+ dev->id[target_id].dirctu = 0x20;
tmport += 0x03;
outb(0x08, tmport);
outb(0x01, tmpcip);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
}
tmport -= 0x07;
} else {
outb(0x46, tmport);
}
- dirctu[h][tarid] = 0x00;
+ dev->id[target_id].dirctu = 0x00;
tmport += 0x02;
outb(0x00, tmport++);
outb(0x00, tmport++);
outb(0x00, tmport++);
tmport += 0x03;
outb(0x08, tmport);
- in_int[h] = 0;
+ dev->in_int = 0;
return;
} else {
tmport = workportu + 0x17;
inb(tmport);
- workingu[h] = 0;
- in_int[h] = 0;
+ dev->working = 0;
+ dev->in_int = 0;
return;
}
}
unsigned long flags;
unsigned short int m;
unsigned int tmport;
+ struct atp_unit *dev;
for (h = 0; h <= admaxu; h++) {
if (req_p->host == atp_host[h]) {
done(req_p);
return 0;
}
+ dev = &atp_unit[h];
m = 1;
m = m << req_p->target;
- if ((m & active_idu[h]) == 0) {
+
+ /*
+ * Fake a timeout for missing targets
+ */
+
+ if ((m & dev->active_idu) == 0) {
req_p->result = 0x00040000;
done(req_p);
return 0;
if (done) {
req_p->scsi_done = done;
} else {
- printk("atp870u_queuecommand: done can't be NULL\n");
+ printk(KERN_WARNING "atp870u_queuecommand: done can't be NULL\n");
req_p->result = 0;
done(req_p);
return 0;
}
- quendu[h]++;
- if (quendu[h] >= qcnt) {
- quendu[h] = 0;
+ /*
+ * Count new command
+ */
+ dev->quendu++;
+ if (dev->quendu >= qcnt) {
+ dev->quendu = 0;
}
+ /*
+ * Check queue state
+ */
wait_que_empty:
- if (quhdu[h] == quendu[h]) {
+ if (dev->quhdu == dev->quendu) {
goto wait_que_empty;
}
save_flags(flags);
cli();
- querequ[h][quendu[h]] = req_p;
- if (quendu[h] == 0) {
+ dev->querequ[dev->quendu] = req_p;
+ if (dev->quendu == 0) {
i = qcnt - 1;
} else {
- i = quendu[h] - 1;
+ i = dev->quendu - 1;
}
- tmport = ioportu[h] + 0x1c;
+ tmport = dev->ioport + 0x1c;
restore_flags(flags);
- if ((inb(tmport) == 0) && (in_int[h] == 0) && (in_snd[h] == 0)) {
+ if ((inb(tmport) == 0) && (dev->in_int == 0) && (dev->in_snd == 0)) {
send_s870(h);
}
return 0;
Scsi_Cmnd *workrequ;
unsigned long flags;
unsigned int i;
- unsigned char j, tarid;
+ unsigned char j, target_id;
unsigned char *prd;
unsigned short int tmpcip, w;
unsigned long l, bttl;
unsigned int workportu;
struct scatterlist *sgpnt;
+ struct atp_unit *dev = &atp_unit[h];
save_flags(flags);
cli();
- if (in_snd[h] != 0) {
+ if (dev->in_snd != 0) {
restore_flags(flags);
return;
}
- in_snd[h] = 1;
- if ((last_cmd[h] != 0xff) && ((last_cmd[h] & 0x40) != 0)) {
- last_cmd[h] &= 0x0f;
- workrequ = curr_req[h][last_cmd[h]];
+ dev->in_snd = 1;
+ if ((dev->last_cmd != 0xff) && ((dev->last_cmd & 0x40) != 0)) {
+ dev->last_cmd &= 0x0f;
+ workrequ = dev->id[dev->last_cmd].curr_req;
goto cmd_subp;
}
- workingu[h]++;
- j = quhdu[h];
- quhdu[h]++;
- if (quhdu[h] >= qcnt) {
- quhdu[h] = 0;
+ dev->working++;
+ j = dev->quhdu;
+ dev->quhdu++;
+ if (dev->quhdu >= qcnt) {
+ dev->quhdu = 0;
}
- workrequ = querequ[h][quhdu[h]];
- if (curr_req[h][workrequ->target] == 0) {
- curr_req[h][workrequ->target] = workrequ;
- last_cmd[h] = workrequ->target;
+ workrequ = dev->querequ[dev->quhdu];
+ if (dev->id[workrequ->target].curr_req == 0) {
+ dev->id[workrequ->target].curr_req = workrequ;
+ dev->last_cmd = workrequ->target;
goto cmd_subp;
}
- quhdu[h] = j;
- workingu[h]--;
- in_snd[h] = 0;
+ dev->quhdu = j;
+ dev->working--;
+ dev->in_snd = 0;
restore_flags(flags);
return;
cmd_subp:
- workportu = ioportu[h];
+ workportu = dev->ioport;
tmport = workportu + 0x1f;
if ((inb(tmport) & 0xb0) != 0) {
goto abortsnd;
goto oktosend;
}
abortsnd:
- last_cmd[h] |= 0x40;
- in_snd[h] = 0;
+ dev->last_cmd |= 0x40;
+ dev->in_snd = 0;
restore_flags(flags);
return;
oktosend:
- memcpy(&ata_cdbu[h][0], &workrequ->cmnd[0], workrequ->cmd_len);
- if (ata_cdbu[h][0] == 0x25) {
+ memcpy(&dev->ata_cdbu[0], &workrequ->cmnd[0], workrequ->cmd_len);
+ if (dev->ata_cdbu[0] == READ_CAPACITY) {
if (workrequ->request_bufflen > 8) {
workrequ->request_bufflen = 0x08;
}
}
- if (ata_cdbu[h][0] == 0x12) {
+ /*
+ * Why limit this ????
+ */
+ if (dev->ata_cdbu[0] == INQUIRY) {
if (workrequ->request_bufflen > 0x24) {
workrequ->request_bufflen = 0x24;
- ata_cdbu[h][4] = 0x24;
+ dev->ata_cdbu[4] = 0x24;
}
}
+
tmport = workportu + 0x1b;
j = inb(tmport) & 0x0e;
- tarid = workrequ->target;
+ target_id = workrequ->target;
+
+ /*
+ * Wide ?
+ */
w = 1;
- w = w << tarid;
- if ((w & wide_idu[h]) != 0) {
+ w = w << target_id;
+ if ((w & dev->wide_idu) != 0) {
j |= 0x01;
- }
+ }
outb(j, tmport);
+
+ /*
+ * Write the command
+ */
+
tmport = workportu;
outb(workrequ->cmd_len, tmport++);
outb(0x2c, tmport++);
outb(0xcf, tmport++);
for (i = 0; i < workrequ->cmd_len; i++) {
- outb(ata_cdbu[h][i], tmport++);
+ outb(dev->ata_cdbu[i], tmport++);
}
tmport = workportu + 0x0f;
- outb(0x00, tmport);
+ outb(workrequ->lun, tmport);
tmport += 0x02;
- outb(devspu[h][tarid], tmport++);
+ /*
+ * Write the target
+ */
+ outb(dev->id[target_id].devspu, tmport++);
+
+ /*
+ * Figure out the transfer size
+ */
if (workrequ->use_sg)
{
l = 0;
} else {
l = workrequ->request_bufflen;
}
+ /*
+ * Write transfer size
+ */
outb((unsigned char) (((unsigned char *) (&l))[2]), tmport++);
outb((unsigned char) (((unsigned char *) (&l))[1]), tmport++);
outb((unsigned char) (((unsigned char *) (&l))[0]), tmport++);
- j = tarid;
- last_lenu[h][j] = l;
- tran_lenu[h][j] = 0;
+ j = target_id;
+ dev->id[j].last_lenu = l;
+ dev->id[j].tran_lenu = 0;
+ /*
+ * Flip the wide bits
+ */
if ((j & 0x08) != 0) {
j = (j & 0x07) | 0x40;
}
- if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) ||
- (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15)) {
+ /*
+ * Check transfer direction
+ */
+ if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) ||
+ (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT)) {
outb((unsigned char) (j | 0x20), tmport++);
} else {
outb(j, tmport++);
}
+ outb((unsigned char)(inb(tmport) | 0x80),tmport);
outb(0x80, tmport);
tmport = workportu + 0x1c;
- dirctu[h][tarid] = 0;
+ dev->id[target_id].dirctu = 0;
if (l == 0) {
if (inb(tmport) == 0) {
tmport = workportu + 0x18;
outb(0x08, tmport);
} else {
- last_cmd[h] |= 0x40;
+ dev->last_cmd |= 0x40;
}
- in_snd[h] = 0;
+ dev->in_snd = 0;
restore_flags(flags);
return;
}
- tmpcip = pciportu[h];
- prd = &prd_tableu[h][tarid][0];
- prd_posu[h][tarid] = prd;
+ tmpcip = dev->pciport;
+ prd = dev->id[target_id].prd_tableu;
+ dev->id[target_id].prd_posu = prd;
+
+ /*
+ * Now write the request list. Either as scatter/gather or as
+ * a linear chain.
+ */
+
if (workrequ->use_sg)
{
sgpnt = (struct scatterlist *) workrequ->request_buffer;
}
(unsigned short int) (((unsigned short int *) (prd))[i - 1]) = 0x8000;
} else {
+ /*
+ * For a linear request write a chain of blocks
+ */
bttl = virt_to_bus(workrequ->request_buffer);
l = workrequ->request_bufflen;
i = 0;
(unsigned long) (((unsigned long *) (prd))[i >> 1]) = bttl;
}
tmpcip = tmpcip + 4;
- prdaddru[h][tarid] = virt_to_bus(&prd_tableu[h][tarid][0]);
- outl(prdaddru[h][tarid], tmpcip);
+ dev->id[target_id].prdaddru = virt_to_bus(dev->id[target_id].prd_tableu);
+ outl(dev->id[target_id].prdaddru, tmpcip);
tmpcip = tmpcip - 2;
outb(0x06, tmpcip);
outb(0x00, tmpcip);
tmpcip = tmpcip - 2;
- if ((ata_cdbu[h][0] == 0x0a) || (ata_cdbu[h][0] == 0x2a) ||
- (ata_cdbu[h][0] == 0xaa) || (ata_cdbu[h][0] == 0x15))
+ if ((dev->ata_cdbu[0] == WRITE_6) || (dev->ata_cdbu[0] == WRITE_10) ||
+ (dev->ata_cdbu[0] == WRITE_12) || (dev->ata_cdbu[0] == MODE_SELECT))
{
- dirctu[h][tarid] = 0x20;
+ dev->id[target_id].dirctu = 0x20;
if (inb(tmport) == 0) {
tmport = workportu + 0x18;
outb(0x08, tmport);
outb(0x01, tmpcip);
} else {
- last_cmd[h] |= 0x40;
+ dev->last_cmd |= 0x40;
}
- in_snd[h] = 0;
+ dev->in_snd = 0;
restore_flags(flags);
return;
}
outb(0x08, tmport);
outb(0x09, tmpcip);
} else {
- last_cmd[h] |= 0x40;
+ dev->last_cmd |= 0x40;
}
- in_snd[h] = 0;
+ dev->in_snd = 0;
restore_flags(flags);
return;
return SCpnt->result;
}
-unsigned char fun_scam(unsigned char host, unsigned short int *val)
+unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val)
{
unsigned int tmport;
unsigned short int i, k;
unsigned char j;
- tmport = ioportu[host] + 0x1c;
+ tmport = dev->ioport + 0x1c;
outw(*val, tmport);
FUN_D7:
for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */
unsigned long n;
unsigned short int m, assignid_map, val;
unsigned char mbuf[33], quintet[2];
- static unsigned char g2q_tab[8] =
- {0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27};
+ struct atp_unit *dev = &atp_unit[host];
+ static unsigned char g2q_tab[8] = {
+ 0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27
+ };
for (i = 0; i < 0x10; i++) {
mydlyu(0xffff);
}
- tmport = ioportu[host] + 1;
+ tmport = dev->ioport + 1;
outb(0x08, tmport++);
outb(0x7f, tmport);
- tmport = ioportu[host] + 0x11;
+ tmport = dev->ioport + 0x11;
outb(0x20, tmport);
- if ((scam_on[host] & 0x40) == 0) {
+ if ((dev->scam_on & 0x40) == 0) {
return;
}
m = 1;
- m <<= host_idu[host];
+ m <<= dev->host_idu;
j = 16;
- if (chip_veru[host] < 4) {
+ if (dev->chip_veru < 4) {
m |= 0xff00;
j = 8;
}
assignid_map = m;
- tmport = ioportu[host] + 0x02;
+ tmport = dev->ioport + 0x02;
outb(0x02, tmport++); /* 2*2=4ms,3EH 2/32*3E=3.9ms */
outb(0, tmport++);
outb(0, tmport++);
if ((m & assignid_map) != 0) {
continue;
}
- tmport = ioportu[host] + 0x0f;
+ tmport = dev->ioport + 0x0f;
outb(0, tmport++);
tmport += 0x02;
outb(0, tmport++);
k = i;
}
outb(k, tmport++);
- tmport = ioportu[host] + 0x1b;
- if (chip_veru[host] == 4) {
+ tmport = dev->ioport + 0x1b;
+ if (dev->chip_veru == 4) {
outb((unsigned char) ((inb(tmport) & 0x0e) | 0x01), tmport);
} else {
outb((unsigned char) (inb(tmport) & 0x0e), tmport);
}
wait_rdyok:
- tmport = ioportu[host] + 0x18;
+ tmport = dev->ioport + 0x18;
outb(0x09, tmport);
tmport += 0x07;
if ((k == 0x85) || (k == 0x42)) {
continue;
}
- tmport = ioportu[host] + 0x10;
+ tmport = dev->ioport + 0x10;
outb(0x41, tmport);
goto wait_rdyok;
}
assignid_map |= m;
}
- tmport = ioportu[host] + 0x02;
+ tmport = dev->ioport + 0x02;
outb(0x7f, tmport);
- tmport = ioportu[host] + 0x1b;
+ tmport = dev->ioport + 0x1b;
outb(0x02, tmport);
outb(0, 0x80);
val = 0x0080; /* bsy */
- tmport = ioportu[host] + 0x1c;
+ tmport = dev->ioport + 0x1c;
outw(val, tmport);
val |= 0x0040; /* sel */
outw(val, tmport);
if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
outw(0, tmport--);
outb(0, tmport);
- tmport = ioportu[host] + 0x15;
+ tmport = dev->ioport + 0x15;
outb(0, tmport);
tmport += 0x03;
outb(0x09, tmport);
}
val &= 0x00ff; /* synchronization */
val |= 0x3f00;
- fun_scam(host, &val);
+ fun_scam(dev, &val);
outb(3, 0x80);
val &= 0x00ff; /* isolation */
val |= 0x2000;
- fun_scam(host, &val);
+ fun_scam(dev, &val);
outb(4, 0x80);
i = 8;
j = 0;
outb(5, 0x80);
val &= 0x00ff; /* get ID_STRING */
val |= 0x2000;
- k = fun_scam(host, &val);
+ k = fun_scam(dev, &val);
if ((k & 0x03) == 0) {
goto TCM_5;
}
val &= 0x00ff; /* AssignID 1stQuintet,AH=001xxxxx */
m = quintet[0] << 8;
val |= m;
- fun_scam(host, &val);
+ fun_scam(dev, &val);
val &= 0x00ff; /* AssignID 2ndQuintet,AH=001xxxxx */
m = quintet[1] << 8;
val |= m;
- fun_scam(host, &val);
+ fun_scam(dev, &val);
goto TCM_SYNC;
static unsigned char synu[6] = {0x80, 1, 3, 1, 0x0c, 0x0e};
static unsigned char synw[6] = {0x80, 1, 3, 1, 0x0c, 0x07};
static unsigned char wide[6] = {0x80, 1, 2, 3, 1, 0};
+ struct atp_unit *dev = &atp_unit[host];
sync_idu = 0;
tmport = wkport + 0x3a;
outb((unsigned char) (inb(tmport) | 0x10), tmport);
for (i = 0; i < 16; i++) {
- if ((chip_veru[host] != 4) && (i > 7)) {
+ if ((dev->chip_veru != 4) && (i > 7)) {
break;
}
m = 1;
m = m << i;
- if ((m & active_idu[host]) != 0) {
+ if ((m & dev->active_idu) != 0) {
continue;
}
- if (i == host_idu[host]) {
- printk(" ID: %2d Host Adapter\n", host_idu[host]);
+ if (i == dev->host_idu) {
+ printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_idu);
continue;
}
- if (chip_veru[host] == 4) {
+ if (dev->chip_veru == 4) {
tmport = wkport + 0x1b;
j = (inb(tmport) & 0x0e) | 0x01;
outb(j, tmport);
tmport += 0x06;
outb(0, tmport);
tmport += 0x02;
- outb(devspu[host][i], tmport++);
+ outb(dev->id[i].devspu, tmport++);
outb(0, tmport++);
outb(satn[6], tmport++);
outb(satn[7], tmport++);
continue;
}
while (inb(tmport) != 0x8e);
- active_idu[host] |= m;
+ dev->active_idu |= m;
tmport = wkport + 0x10;
outb(0x30, tmport);
tmport += 0x07;
outb(0, tmport);
tmport += 0x02;
- outb(devspu[host][i], tmport++);
+ outb(dev->id[i].devspu, tmport++);
outb(0, tmport++);
outb(inqd[6], tmport++);
outb(inqd[7], tmport++);
continue;
}
while (inb(tmport) != 0x8e);
- if (chip_veru[host] == 4) {
+ if (dev->chip_veru == 4) {
tmport = wkport + 0x1b;
j = inb(tmport) & 0x0e;
outb(j, tmport);
}
inq_ok:
mbuf[36] = 0;
- printk(" ID: %2d %s\n", i, &mbuf[8]);
- devtypeu[host][i] = mbuf[0];
+ printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]);
+ dev->id[i].devtypeu = mbuf[0];
rmb = mbuf[1];
- if (chip_veru[host] != 4) {
+ if (dev->chip_veru != 4) {
goto not_wide;
}
if ((mbuf[7] & 0x60) == 0) {
goto not_wide;
}
- if ((global_map[host] & 0x20) == 0) {
+ if ((dev->global_map & 0x20) == 0) {
goto not_wide;
}
tmport = wkport + 0x1b;
tmport += 0x06;
outb(0, tmport);
tmport += 0x02;
- outb(devspu[host][i], tmport++);
+ outb(dev->id[i].devspu, tmport++);
outb(0, tmport++);
outb(satn[6], tmport++);
outb(satn[7], tmport++);
}
m = 1;
m = m << i;
- wide_idu[host] |= m;
+ dev->wide_idu |= m;
not_wide:
- if ((devtypeu[host][i] == 0x00) || (devtypeu[host][i] == 0x07)) {
+ if ((dev->id[i].devtypeu == 0x00) || (dev->id[i].devtypeu == 0x07)) {
goto set_sync;
}
continue;
set_sync:
tmport = wkport + 0x1b;
j = inb(tmport) & 0x0e;
- if ((m & wide_idu[host]) != 0) {
+ if ((m & dev->wide_idu) != 0) {
j |= 0x01;
}
outb(j, tmport);
tmport += 0x06;
outb(0, tmport);
tmport += 0x02;
- outb(devspu[host][i], tmport++);
+ outb(dev->id[i].devspu, tmport++);
outb(0, tmport++);
outb(satn[6], tmport++);
outb(satn[7], tmport++);
if (rmb != 0) {
outb(synn[j++], tmport);
} else {
- if ((m & wide_idu[host]) != 0) {
+ if ((m & dev->wide_idu) != 0) {
outb(synw[j++], tmport);
} else {
- if ((m & ultra_map[host]) != 0) {
+ if ((m & dev->ultra_map) != 0) {
outb(synu[j++], tmport);
} else {
outb(synn[j++], tmport);
if (mbuf[4] > 0x0c) {
mbuf[4] = 0x0c;
}
- devspu[host][i] = mbuf[4];
+ dev->id[i].devspu = mbuf[4];
if ((mbuf[3] < 0x0d) && (rmb == 0)) {
j = 0xa0;
goto set_syn_ok;
}
j = 0x60;
set_syn_ok:
- devspu[host][i] = (devspu[host][i] & 0x0f) | j;
+ dev->id[i].devspu = (dev->id[i].devspu & 0x0f) | j;
}
tmport = wkport + 0x3a;
outb((unsigned char) (inb(tmport) & 0xef), tmport);
unsigned long flags;
unsigned int base_io, error, tmport;
unsigned short index = 0;
- unsigned char pci_bus[3], pci_device_fn[3], chip_ver[3], host_id;
+ struct pci_dev *pdev[3];
+ unsigned char chip_ver[3], host_id;
struct Scsi_Host *shpnt = NULL;
+ int tmpcnt = 0;
int count = 0;
- static unsigned short devid[7] =
- {0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0};
- static struct pci_dev *pdev = NULL, *acard_pdev[3];
+ int result;
+
+ static unsigned short devid[7] = {
+ 0x8002, 0x8010, 0x8020, 0x8030, 0x8040, 0x8050, 0
+ };
- printk("aec671x_detect: \n");
+ printk(KERN_INFO "aec671x_detect: \n");
if (!pci_present()) {
- printk(" NO BIOS32 SUPPORT.\n");
+ printk(KERN_INFO" NO PCI SUPPORT.\n");
return count;
}
tpnt->proc_name = "atp870u";
for (h = 0; h < 2; h++) {
- active_idu[h] = 0;
- wide_idu[h] = 0;
- host_idu[h] = 0x07;
- quhdu[h] = 0;
- quendu[h] = 0;
- pci_bus[h] = 0;
- pci_device_fn[h] = 0xff;
- chip_ver[h] = 0;
- last_cmd[h] = 0xff;
- in_snd[h] = 0;
- in_int[h] = 0;
+ struct atp_unit *dev = &atp_unit[h];
+ for(k=0;k<16;k++)
+ {
+ dev->id[k].prd_tableu = kmalloc(1024, GFP_KERNEL);
+ dev->id[k].devspu=0x20;
+ dev->id[k].devtypeu = 0;
+ dev->id[k].curr_req = NULL;
+ }
+ dev->active_idu = 0;
+ dev->wide_idu = 0;
+ dev->host_idu = 0x07;
+ dev->quhdu = 0;
+ dev->quendu = 0;
+ pdev[h]=NULL;
+ pdev[2]=NULL;
+ dev->chip_veru = 0;
+ dev->last_cmd = 0xff;
+ dev->in_snd = 0;
+ dev->in_int = 0;
for (k = 0; k < qcnt; k++) {
- querequ[h][k] = 0;
+ dev->querequ[k] = 0;
}
for (k = 0; k < 16; k++) {
- curr_req[h][k] = 0;
+ dev->id[k].curr_req = 0;
}
}
h = 0;
while (devid[h] != 0) {
- pdev = pci_find_device(0x1191, devid[h], pdev);
- if (pdev == NULL) {
+ pdev[2] = pci_find_device(0x1191, devid[h], pdev[2]);
+ if (pdev[2] == NULL) {
h++;
index = 0;
continue;
}
chip_ver[2] = 0;
- /* To avoid messing with the things below... */
- acard_pdev[2] = pdev;
- pci_device_fn[2] = pdev->devfn;
- pci_bus[2] = pdev->bus->number;
-
if (devid[h] == 0x8002) {
- error = pci_read_config_byte(pdev, 0x08, &chip_ver[2]);
+ error = pci_read_config_byte(pdev[2], 0x08, &chip_ver[2]);
if (chip_ver[2] < 2) {
goto nxt_devfn;
}
}
- if (devid[h] == 0x8010) {
+ if (devid[h] == 0x8010 || devid[h] == 0x8050) {
chip_ver[2] = 0x04;
}
- if (pci_device_fn[2] < pci_device_fn[0]) {
- acard_pdev[1] = acard_pdev[0];
- pci_bus[1] = pci_bus[0];
- pci_device_fn[1] = pci_device_fn[0];
- chip_ver[1] = chip_ver[0];
- acard_pdev[0] = acard_pdev[2];
- pci_bus[0] = pci_bus[2];
- pci_device_fn[0] = pci_device_fn[2];
- chip_ver[0] = chip_ver[2];
- } else if (pci_device_fn[2] < pci_device_fn[1]) {
- acard_pdev[1] = acard_pdev[2];
- pci_bus[1] = pci_bus[2];
- pci_device_fn[1] = pci_device_fn[2];
- chip_ver[1] = chip_ver[2];
- }
+ pdev[tmpcnt] = pdev[2];
+ chip_ver[tmpcnt] = chip_ver[2];
+ tmpcnt++;
nxt_devfn:
index++;
if (index > 3) {
index = 0;
h++;
}
+ if(tmpcnt>1)
+ break;
}
for (h = 0; h < 2; h++) {
- if (pci_device_fn[h] == 0xff) {
+ struct atp_unit *dev=&atp_unit[h];
+ if (pdev[h]==NULL) {
return count;
}
- pdev = acard_pdev[h];
- pdev->devfn = pci_device_fn[h];
- pdev->bus->number = pci_bus[h];
/* Found an atp870u/w. */
- error = pci_read_config_dword(pdev, 0x10, &base_io);
- error += pci_read_config_byte(pdev, 0x3c, &irq);
- error += pci_read_config_byte(pdev, 0x49, &host_id);
+ base_io = pdev[h]->resource[0].start;
+ irq = pdev[h]->irq;
+ error = pci_read_config_byte(pdev[h],0x49,&host_id);
base_io &= 0xfffffff8;
- printk(" ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d IO:%x, IRQ:%d.\n"
+
+ if (check_region(base_io,0x40) != 0)
+ {
+ return 0;
+ }
+ printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-3 Host Adapter: %d IO:%x, IRQ:%d.\n"
,h, base_io, irq);
- ioportu[h] = base_io;
- pciportu[h] = base_io + 0x20;
+ dev->ioport = base_io;
+ dev->pciport = base_io + 0x20;
irqnumu[h] = irq;
host_id &= 0x07;
- host_idu[h] = host_id;
- chip_veru[h] = chip_ver[h];
+ dev->host_idu = host_id;
+ dev->chip_veru = chip_ver[h];
tmport = base_io + 0x22;
- scam_on[h] = inb(tmport);
+ dev->scam_on = inb(tmport);
tmport += 0x0b;
- global_map[h] = inb(tmport++);
- ultra_map[h] = inw(tmport);
- if (ultra_map[h] == 0) {
- scam_on[h] = 0x00;
- global_map[h] = 0x20;
- ultra_map[h] = 0xffff;
+ dev->global_map = inb(tmport++);
+ dev->ultra_map = inw(tmport);
+ if (dev->ultra_map == 0) {
+ dev->scam_on = 0x00;
+ dev->global_map = 0x20;
+ dev->ultra_map = 0xffff;
}
shpnt = scsi_register(tpnt, 4);
save_flags(flags);
cli();
- if (request_irq(irq, atp870u_intr_handle, 0, "atp870u", NULL)) {
- printk("Unable to allocate IRQ for Acard controller.\n");
+ if (request_irq(irq, atp870u_intr_handle, SA_SHIRQ, "atp870u", dev)) {
+ printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n");
goto unregister;
}
tmport = base_io + 0x3a;
is870(h, base_io);
tmport = base_io + 0x3a;
outb((inb(tmport) & 0xef), tmport);
+ tmport++;
+ outb((inb(tmport) | 0x20),tmport);
atp_host[h] = shpnt;
- if (chip_ver[h] == 4) {
+ if (dev->chip_veru == 4) {
shpnt->max_id = 16;
}
shpnt->this_id = host_id;
{
unsigned char h, j;
unsigned int tmport;
-/* printk(" atp870u_abort: \n"); */
+ struct atp_unit *dev;
for (h = 0; h <= admaxu; h++) {
if (SCpnt->host == atp_host[h]) {
goto find_adp;
}
panic("Abort host not found !");
find_adp:
- printk(" workingu=%x last_cmd=%x ", workingu[h], last_cmd[h]);
- printk(" quhdu=%x quendu=%x ", quhdu[h], quendu[h]);
- tmport = ioportu[h];
+ dev=&atp_unit[h];
+ printk(KERN_DEBUG "working=%x last_cmd=%x ", dev->working, dev->last_cmd);
+ printk(" quhdu=%x quendu=%x ", dev->quhdu, dev->quendu);
+ tmport = dev->ioport;
for (j = 0; j < 0x17; j++) {
printk(" r%2x=%2x", j, inb(tmport++));
}
tmport += 0x05;
printk(" r1c=%2x", inb(tmport));
tmport += 0x03;
- printk(" r1f=%2x in_snd=%2x ", inb(tmport), in_snd[h]);
+ printk(" r1f=%2x in_snd=%2x ", inb(tmport), dev->in_snd);
tmport++;
printk(" r20=%2x", inb(tmport));
tmport += 0x02;
- printk(" r22=%2x \n", inb(tmport));
+ printk(" r22=%2x", inb(tmport));
+ tmport += 0x18;
+ printk(" r3a=%2x \n",inb(tmport));
return (SCSI_ABORT_SNOOZE);
}
/*
* See if a bus reset was suggested.
*/
-/* printk("atp870u_reset: \n"); */
for (h = 0; h <= admaxu; h++) {
if (SCpnt->host == atp_host[h]) {
goto find_host;
find_host:
/* SCpnt->result = 0x00080000;
SCpnt->scsi_done(SCpnt);
- workingu[h]=0;
- quhdu[h]=0;
- quendu[h]=0;
+ dev->working=0;
+ dev->quhdu=0;
+ dev->quendu=0;
return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET); */
return (SCSI_RESET_SNOOZE);
}
{
static char buffer[128];
- strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V1.0 ");
+ strcpy(buffer, "ACARD AEC-6710/6712 PCI Ultra/W SCSI-3 Adapter Driver V2.0+ac ");
return buffer;
}
int atp870u_set_info(char *buffer, int length, struct Scsi_Host *HBAptr)
{
- return (-ENOSYS); /* Currently this is a no-op */
+ return -ENOSYS; /* Currently this is a no-op */
}
#define BLS buffer + len + size
if (offset == 0) {
memset(buff, 0, sizeof(buff));
}
- size += sprintf(BLS, "ACARD AEC-671X Driver Version: 1.0\n");
+ size += sprintf(BLS, "ACARD AEC-671X Driver Version: 2.0+ac\n");
len += size;
pos = begin + len;
size = 0;
pos = begin + len;
size = 0;
- stop_output:
+stop_output:
*start = buffer + (offset - begin); /* Start of wanted data */
len -= (offset - begin); /* Start slop */
if (len > length) {
return 0;
}
+
+/*
+ * atp870u_release - detach one ATP870U host instance (module unload path).
+ *
+ * Scans atp_host[] for the slot matching @pshost; on a match it frees the
+ * shared IRQ (dev_id is &atp_unit[h], matching the request_irq() call made
+ * at detect time), releases the claimed I/O port region, unregisters the
+ * host from the SCSI mid-layer, and finally frees the per-target PRD
+ * tables kept in atp_unit[h] (pshost must not be touched after
+ * scsi_unregister(), which frees it).
+ *
+ * Returns 0 on success; panics if @pshost is not one of ours.
+ */
+int atp870u_release (struct Scsi_Host *pshost)
+{
+ int h;
+ for (h = 0; h <= admaxu; h++)
+ {
+ if (pshost == atp_host[h]) {
+ int k;
+ free_irq (pshost->irq, &atp_unit[h]);
+ release_region (pshost->io_port, pshost->n_io_port);
+ scsi_unregister(pshost);
+ for(k=0;k<16;k++)
+ kfree(atp_unit[h].id[k].prd_tableu);
+ return 0;
+ }
+ }
+ panic("atp870u: bad scsi host passed.\n");
+
+}
+
#ifdef MODULE
Scsi_Host_Template driver_template = ATP870U;
int atp870u_abort(Scsi_Cmnd *);
int atp870u_reset(Scsi_Cmnd *, unsigned int);
int atp870u_biosparam(Disk *, kdev_t, int *);
+int atp870u_release(struct Scsi_Host *);
void send_s870(unsigned char);
-#define qcnt 32
-#define ATP870U_SCATTER 127
+#define qcnt 32
+#define ATP870U_SCATTER 128
#define ATP870U_CMDLUN 1
#ifndef NULL
extern int atp870u_proc_info(char *, char **, off_t, int, int, int);
-#define ATP870U { \
- proc_name: "atp870u", \
- proc_info: atp870u_proc_info, \
- name: NULL, \
- detect: atp870u_detect, \
- release: NULL, \
- info: atp870u_info, \
- command: atp870u_command, \
- queuecommand: atp870u_queuecommand, \
- eh_strategy_handler: NULL, \
- eh_abort_handler: NULL, \
- eh_device_reset_handler: NULL, \
- eh_bus_reset_handler: NULL, \
- eh_host_reset_handler: NULL, \
- abort: atp870u_abort, \
- reset: atp870u_reset, \
- slave_attach: NULL, \
- bios_param: atp870u_biosparam, \
- can_queue: qcnt, \
- this_id: 1, \
- sg_tablesize: ATP870U_SCATTER, \
- cmd_per_lun: ATP870U_CMDLUN, \
- present: 0, \
- unchecked_isa_dma: 0, \
- use_clustering: ENABLE_CLUSTERING, \
- use_new_eh_code: 0 \
+/*
+ * Scsi_Host_Template initializer for the ATP870U driver. Notable fields:
+ * release is now wired to atp870u_release (hot-unplug/module unload),
+ * this_id defaults to 7 (conventional host adapter SCSI ID), and the
+ * driver still uses the old error-handling path (use_new_eh_code: 0)
+ * with the legacy abort/reset entry points.
+ */
+#define ATP870U { \
+ next: NULL, \
+ module: NULL, \
+ proc_info: atp870u_proc_info, \
+ name: NULL, \
+ detect: atp870u_detect, \
+ release: atp870u_release, \
+ info: atp870u_info, \
+ command: atp870u_command, \
+ queuecommand: atp870u_queuecommand, \
+ eh_strategy_handler: NULL, \
+ eh_abort_handler: NULL, \
+ eh_device_reset_handler: NULL, \
+ eh_bus_reset_handler: NULL, \
+ eh_host_reset_handler: NULL, \
+ abort: atp870u_abort, \
+ reset: atp870u_reset, \
+ slave_attach: NULL, \
+ bios_param: atp870u_biosparam, \
+ can_queue: qcnt, /* max simultaneous cmds */\
+ this_id: 7, /* scsi id of host adapter */\
+ sg_tablesize: ATP870U_SCATTER, /* max scatter-gather cmds */\
+ cmd_per_lun: ATP870U_CMDLUN, /* cmds per lun (linked cmds) */\
+ present: 0, /* number of 7xxx's present */\
+ unchecked_isa_dma: 0, /* no memory DMA restrictions */\
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 0 \
+}
+
#endif
sh[j]->unchecked_isa_dma = FALSE;
else {
unsigned long flags;
- sh[j]->wish_block = TRUE;
+//FIXME// sh[j]->wish_block = TRUE;
sh[j]->unchecked_isa_dma = TRUE;
flags=claim_dma_lock();
else
hd->primary = TRUE;
- sh->wish_block = FALSE;
+//FIXME// sh->wish_block = FALSE;
if (hd->bustype != IS_ISA) {
sh->unchecked_isa_dma = FALSE;
* DTC3181E extensions (c) 1997, Ronald van Cuijlenborg
* ronald.van.cuijlenborg@tip.nl or nutty@dds.nl
*
+ * Added ISAPNP support for DTC436 adapters,
+ * Thomas Sailer, sailer@ife.ee.ethz.ch
+ *
* ALPHA RELEASE 1.
*
* For more information, please consult
#include "sd.h"
#include <linux/stat.h>
#include <linux/init.h>
-#include<linux/ioport.h>
+#include <linux/ioport.h>
+#include <linux/isapnp.h>
#define NCR_NOT_SET 0
static int ncr_irq=NCR_NOT_SET;
else if (dtc_3181e != NCR_NOT_SET)
overrides[0].board=BOARD_DTC3181E;
+ /*
+ * ISAPNP autoprobe for DTC436E boards (skipped when an explicit
+ * override was supplied). For each PnP device with vendor id 'DTC'
+ * and function 0x436e: prepare/activate it if not already active,
+ * require an I/O resource, and record its irq/dma/io assignments as
+ * an overrides[] entry, stopping at NO_OVERRIDES entries.
+ * NOTE(review): resource[0].start is assumed to be the card's base
+ * I/O port — confirm against the isapnp resource layout.
+ */
+ if (!current_override && isapnp_present()) {
+ struct pci_dev *dev = NULL;
+ count = 0;
+ while ((dev = isapnp_find_dev(NULL, ISAPNP_VENDOR('D','T','C'), ISAPNP_FUNCTION(0x436e), dev))) {
+ if (count >= NO_OVERRIDES)
+ break;
+ if (!dev->active && dev->prepare(dev) < 0) {
+ printk(KERN_ERR "dtc436e probe: prepare failed\n");
+ continue;
+ }
+ if (!(dev->resource[0].flags & IORESOURCE_IO))
+ continue;
+ if (!dev->active && dev->activate(dev) < 0) {
+ printk(KERN_ERR "dtc436e probe: activate failed\n");
+ continue;
+ }
+ if (dev->irq_resource[0].flags & IORESOURCE_IRQ)
+ overrides[count].irq=dev->irq_resource[0].start;
+ else
+ overrides[count].irq=IRQ_NONE;
+ if (dev->dma_resource[0].flags & IORESOURCE_DMA)
+ overrides[count].dma=dev->dma_resource[0].start;
+ else
+ overrides[count].dma=DMA_NONE;
+ overrides[count].NCR5380_map_name=(NCR5380_map_type)dev->resource[0].start;
+ overrides[count].board=BOARD_DTC3181E;
+ count++;
+ }
+ }
+
tpnt->proc_name = "g_NCR5380";
for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
scp->request.rq_status = RQ_SCSI_BUSY;
scp->request.sem = &sem;
scp->SCp.this_residual = IOCTL_PRI;
- GDTH_LOCK_SCSI_DOCMD();
scsi_do_cmd(scp, cmnd, gdtcmd, sizeof(gdth_cmd_str),
gdth_scsi_done, timeout*HZ, 1);
- GDTH_UNLOCK_SCSI_DOCMD();
down(&sem);
}
atomic_set(&retval->host_active,0);
retval->host_busy = 0;
retval->host_failed = 0;
- retval->block = NULL;
- retval->wish_block = 0;
if(j > 0xffff) panic("Too many extra bytes requested\n");
retval->extra_bytes = j;
retval->loaded_as_module = scsi_loadable_module_flag;
retval->ehandler = NULL; /* Initial value until the thing starts up. */
retval->eh_notify = NULL; /* Who we notify when we exit. */
- /*
- * Initialize the fields used for mid-level queueing.
- */
- retval->pending_commands = NULL;
- retval->host_busy = FALSE;
+
+ retval->host_blocked = FALSE;
#ifdef DEBUG
printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j);
kernel_thread((int (*)(void *))scsi_error_handler,
(void *) shpnt, 0);
+
/*
* Now wait for the kernel error thread to initialize itself
* as it might be needed when we scan the bus.
printk ("scsi : %d host%s.\n", next_scsi_host,
(next_scsi_host == 1) ? "" : "s");
- scsi_make_blocked_list();
/* Now attach the high level drivers */
#ifdef CONFIG_BLK_DEV_SD
/*
* hosts.h Copyright (C) 1992 Drew Eckhardt
- * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
*
* mid to low-level SCSI driver interface header
* Initial versions: Drew Eckhardt
*
* <drew@colorado.edu>
*
- * Modified by Eric Youngdale eric@aib.com to
+ * Modified by Eric Youngdale eric@andante.org to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*
*/
struct Scsi_Host * next;
Scsi_Device * host_queue;
- /*
- * List of commands that have been rejected because either the host
- * or the device was busy. These need to be retried relatively quickly,
- * but we need to hold onto it for a short period until the host/device
- * is available.
- */
- Scsi_Cmnd * pending_commands;
+
struct task_struct * ehandler; /* Error recovery thread. */
struct semaphore * eh_wait; /* The error recovery thread waits on
unsigned int max_lun;
unsigned int max_channel;
- /*
- * Pointer to a circularly linked list - this indicates the hosts
- * that should be locked out of performing I/O while we have an active
- * command on this host.
- */
- struct Scsi_Host * block;
- unsigned wish_block:1;
/* These parameters should be set by the detect routine */
unsigned long base;
* Host uses correct SCSI ordering not PC ordering. The bit is
* set for the minority of drivers whose authors actually read the spec ;)
*/
-
unsigned reverse_ordering:1;
-
+
+ /*
+ * Indicates that one or more devices on this host were starved, and
+ * when the device becomes less busy that we need to feed them.
+ */
+ unsigned some_device_starved:1;
+
void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *);
/*
extern void build_proc_dir_entries(Scsi_Host_Template *);
-
/*
* scsi_init initializes the scsi hosts.
*/
void (*finish)(void); /* Perform initialization after attachment */
int (*attach)(Scsi_Device *); /* Attach devices to arrays */
void (*detach)(Scsi_Device *);
+ int (*init_command)(Scsi_Cmnd *); /* Used by new queueing code. */
};
extern struct Scsi_Device_Template sd_template;
static int device_inquiry(int host_index, int ldn)
{
int retries;
- Scsi_Cmnd cmd;
+ Scsi_Cmnd *cmd;
struct im_scb *scb;
struct im_tsb *tsb;
unsigned char *buf;
buf = (unsigned char *)(&(ld(host_index)[ldn].buf));
ld(host_index)[ldn].tsb.dev_status = 0; /* prepare stusblock */
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL|GFP_DMA);
+ if(cmd==NULL)
+ {
+ printk(KERN_ERR "ibmmca: out of memory for inquiry.\n");
+ return 0;
+ }
if (bypass_controller)
{ /* fill the commonly known field for device-inquiry SCSI cmnd */
- cmd.cmd_len = 6;
- memset (&(cmd.cmnd), 0x0, sizeof(char) * cmd.cmd_len);
- cmd.cmnd[0] = INQUIRY; /* device inquiry */
- cmd.cmnd[4] = 0xff; /* return buffer size = 255 */
+ cmd->cmd_len = 6;
+ memset (&(cmd->cmnd), 0x0, sizeof(char) * cmd->cmd_len);
+ cmd->cmnd[0] = INQUIRY; /* device inquiry */
+ cmd->cmnd[4] = 0xff; /* return buffer size = 255 */
}
for (retries = 0; retries < 3; retries++)
{
{ /* bypass the hardware integrated command set */
scb->command = IM_OTHER_SCSI_CMD_CMD;
scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT;
- scb->u1.scsi_cmd_length = cmd.cmd_len;
- memcpy (scb->u2.scsi_command, &(cmd.cmnd), cmd.cmd_len);
+ scb->u1.scsi_cmd_length = cmd->cmd_len;
+ memcpy (scb->u2.scsi_command, &(cmd->cmnd), cmd->cmd_len);
last_scsi_command(host_index)[ldn] = INQUIRY;
last_scsi_type(host_index)[ldn] = IM_SCB;
}
return 1;
}
}
+ kfree(cmd);
/*if all three retries failed, return "no device at this ldn" */
if (retries >= 3)
}
else if (special == INTEGRATED_SCSI)
{ /* if the integrated subsystem has been found automatically: */
- len += sprintf (buf + len, "Adapter cathegory: integrated\n");
+ len += sprintf (buf + len, "Adapter category: integrated\n");
len += sprintf (buf + len, "Chip revision level: %d\n",
((pos2 & 0xf0) >> 4));
len += sprintf (buf + len, "Chip status: %s\n",
else if ((special>=0)&&
(special<(sizeof(subsys_list)/sizeof(struct subsys_list_struct))))
{ /* if the subsystem is a slot adapter */
- len += sprintf (buf + len, "Adapter cathegory: slot-card\n");
+ len += sprintf (buf + len, "Adapter category: slot-card\n");
len += sprintf (buf + len, "Chip revision level: %d\n",
((pos2 & 0xf0) >> 4));
len += sprintf (buf + len, "Chip status: %s\n",
}
else
{
- len += sprintf (buf + len, "Adapter cathegory: unknown\n");
+ len += sprintf (buf + len, "Adapter category: unknown\n");
}
/* common subsystem information to write to the slotn file */
len += sprintf (buf + len, "Subsystem PUN: %d\n", shpnt->this_id);
len += sprintf (buf + len, "I/O base address range: 0x%x-0x%x",
(unsigned int)(shpnt->io_port),
(unsigned int)(shpnt->io_port+7));
- /* Now make sure, the bufferlength is devideable by 4 to avoid
+ /* Now make sure, the bufferlength is divisible by 4 to avoid
* paging problems of the buffer. */
while ( len % sizeof( int ) != ( sizeof ( int ) - 1 ) )
{
((struct ibmmca_hostdata *)shpnt->hostdata)->_pos3 = 0;
((struct ibmmca_hostdata *)shpnt->hostdata)->_special =
FORCED_DETECTION;
- mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter");
+ mca_set_adapter_name(MCA_INTEGSCSI, "forcibly detected SCSI Adapter");
mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo,
shpnt);
mca_mark_as_used(MCA_INTEGSCSI);
pHCB->pSRB_head = NULL; /* Initial SRB save queue */
pHCB->pSRB_tail = NULL; /* Initial SRB save queue */
pHCB->pSRB_lock = SPIN_LOCK_UNLOCKED; /* SRB save queue lock */
+ pHCB->BitAllocFlagLock = SPIN_LOCK_UNLOCKED;
/* Get total memory needed for SCB */
sz = orc_num_scb * sizeof(ORC_SCB);
if ((pHCB->HCS_virScbArray = (PVOID) kmalloc(sz, GFP_ATOMIC | GFP_DMA)) == NULL) {
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
sh->use_clustering = sh->hostt->use_clustering;
- sh->wish_block = FALSE;
+//FIXME// sh->wish_block = FALSE;
/* Store info in HA structure */
ha->io_addr = io_addr;
static Scsi_Cmnd *qCompleted = NULL;
#if SERDEBUG
-volatile static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
#endif
-volatile static spinlock_t mega_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t mega_lock = SPIN_LOCK_UNLOCKED;
#if SERDEBUG
static char strbuf[MAX_SERBUF + 1];
unsigned long scsi_pid = 0;
Scsi_Cmnd *last_cmnd = NULL;
/* Command groups 3 and 4 are reserved and should never be used. */
-const unsigned char scsi_command_size[8] = {
+const unsigned char scsi_command_size[8] =
+{
6, 10, 10, 12,
12, 12, 10, 10
};
{"REGAL", "CDC-4X", "*", BLIST_MAX5LUN | BLIST_SINGLELUN},
{"NAKAMICH", "MJ-4.8S", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"NAKAMICH", "MJ-5.16S", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
- {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
- {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
- {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"EMULEX", "MD21/S2 ESDI", "*", BLIST_SINGLELUN},
{"CANON", "IPUBJD", "*", BLIST_SPARSELUN},
{"nCipher", "Fastness Crypto", "*", BLIST_FORCELUN},
return 0;
}
-/*
- * Function: scsi_make_blocked_list
- *
- * Purpose: Build linked list of hosts that require blocking.
- *
- * Arguments: None.
- *
- * Returns: Nothing
- *
- * Notes: Blocking is sort of a hack that is used to prevent more than one
- * host adapter from being active at one time. This is used in cases
- * where the ISA bus becomes unreliable if you have more than one
- * host adapter really pumping data through.
- *
- * We spent a lot of time examining the problem, and I *believe* that
- * the problem is bus related as opposed to being a driver bug.
- *
- * The blocked list is used as part of the synchronization object
- * that we use to ensure that only one host is active at one time.
- * I (ERY) would like to make this go away someday, but this would
- * require that we have a recursive mutex object.
- */
-
-void scsi_make_blocked_list(void)
-{
- int block_count = 0, index;
- struct Scsi_Host *sh[128], *shpnt;
-
- /*
- * Create a circular linked list from the scsi hosts which have
- * the "wish_block" field in the Scsi_Host structure set.
- * The blocked list should include all the scsi hosts using ISA DMA.
- * In some systems, using two dma channels simultaneously causes
- * unpredictable results.
- * Among the scsi hosts in the blocked list, only one host at a time
- * is allowed to have active commands queued. The transition from
- * one active host to the next one is allowed only when host_busy == 0
- * for the active host (which implies host_busy == 0 for all the hosts
- * in the list). Moreover for block devices the transition to a new
- * active host is allowed only when a request is completed, since a
- * block device request can be divided into multiple scsi commands
- * (when there are few sg lists or clustering is disabled).
- *
- * (DB, 4 Feb 1995)
- */
-
-
- host_active = NULL;
-
- for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
-
-#if 0
- /*
- * Is this is a candidate for the blocked list?
- * Useful to put into the blocked list all the hosts whose driver
- * does not know about the host->block feature.
- */
- if (shpnt->unchecked_isa_dma)
- shpnt->wish_block = 1;
-#endif
-
- if (shpnt->wish_block)
- sh[block_count++] = shpnt;
- }
-
- if (block_count == 1)
- sh[0]->block = NULL;
-
- else if (block_count > 1) {
-
- for (index = 0; index < block_count - 1; index++) {
- sh[index]->block = sh[index + 1];
- printk("scsi%d : added to blocked host list.\n",
- sh[index]->host_no);
- }
-
- sh[block_count - 1]->block = sh[0];
- printk("scsi%d : added to blocked host list.\n",
- sh[index]->host_no);
- }
-}
static void scan_scsis_done(Scsi_Cmnd * SCpnt)
{
up(SCpnt->request.sem);
}
+#ifdef MODULE
MODULE_PARM(scsi_logging_level, "i");
MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
-#ifndef MODULE
+#else
static int __init scsi_logging_setup(char *str)
{
static int max_scsi_luns = 1;
#endif
+#ifdef MODULE
+
MODULE_PARM(max_scsi_luns, "i");
MODULE_PARM_DESC(max_scsi_luns, "last scsi LUN (should be between 1 and 8)");
-#ifndef MODULE
+#else
static int __init scsi_luns_setup(char *str)
{
void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
int timeout, int retries)
{
- unsigned long flags;
DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
SCpnt->request.rq_status = RQ_SCSI_BUSY;
- spin_lock_irqsave(&io_request_lock, flags);
scsi_do_cmd (SCpnt, (void *) cmnd,
buffer, bufflen, done, timeout, retries);
- spin_unlock_irqrestore(&io_request_lock, flags);
down (&sem);
SCpnt->request.sem = NULL;
}
SDpnt = (Scsi_Device *) scsi_init_malloc(sizeof(Scsi_Device),
GFP_ATOMIC);
if (SDpnt) {
+ /*
+ * Register the queue for the device. All I/O requests will come
+ * in through here. We also need to register a pointer to
+ * ourselves, since the queue handler won't know what device
+ * the queue actually represents. We could look it up, but it
+ * is pointless work.
+ */
+ blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
+ blk_queue_headactive(&SDpnt->request_queue, 0);
+ SDpnt->request_queue.queuedata = (void *) SDpnt;
/* Make sure we have something that is valid for DMA purposes */
scsi_result = ((!shpnt->unchecked_isa_dma)
? &scsi_result0[0] : scsi_init_malloc(512, GFP_DMA));
SDpnt->host = shpnt;
SDpnt->online = TRUE;
+ initialize_merge_fn(SDpnt);
+
init_waitqueue_head(&SDpnt->device_wait);
/*
if (sdtpnt->init && sdtpnt->dev_noticed)
(*sdtpnt->init) ();
- oldSDpnt->scsi_request_fn = NULL;
for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
if (sdtpnt->attach) {
(*sdtpnt->attach) (oldSDpnt);
SDpnt->borken = 1;
SDpnt->was_reset = 0;
SDpnt->expecting_cc_ua = 0;
+ SDpnt->starved = 0;
scsi_cmd[0] = TEST_UNIT_READY;
scsi_cmd[1] = lun << 5;
printk("scsi: scan_scsis_single: Cannot malloc\n");
return 0;
}
+ /*
+ * Register the queue for the device. All I/O requests will come
+ * in through here. We also need to register a pointer to
+ * ourselves, since the queue handler won't know what device
+ * the queue actually represents. We could look it up, but it
+ * is pointless work.
+ */
+ blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
+ blk_queue_headactive(&SDpnt->request_queue, 0);
+ SDpnt->request_queue.queuedata = (void *) SDpnt;
+ SDpnt->host = shpnt;
+ initialize_merge_fn(SDpnt);
+
/*
* And hook up our command block to the new device we will be testing
* for.
* of the calling code to ensure that this is the case.
*/
-Scsi_Cmnd *scsi_request_queueable(struct request * req, Scsi_Device * device)
-{
- Scsi_Cmnd *SCpnt = NULL;
- int tablesize;
- Scsi_Cmnd *found = NULL;
- struct buffer_head *bh, *bhp;
-
- if (!device)
- panic("No device passed to scsi_request_queueable().\n");
-
- if (req && req->rq_status == RQ_INACTIVE)
- panic("Inactive in scsi_request_queueable");
-
- /*
- * Look for a free command block. If we have been instructed not to queue
- * multiple commands to multi-lun devices, then check to see what else is
- * going for this device first.
- */
-
- if (!device->single_lun) {
- SCpnt = device->device_queue;
- while (SCpnt) {
- if (SCpnt->request.rq_status == RQ_INACTIVE)
- break;
- SCpnt = SCpnt->next;
- }
- } else {
- SCpnt = device->device_queue;
- while (SCpnt) {
- if (SCpnt->channel == device->channel
- && SCpnt->target == device->id) {
- if (SCpnt->lun == device->lun) {
- if (found == NULL
- && SCpnt->request.rq_status == RQ_INACTIVE) {
- found = SCpnt;
- }
- }
- if (SCpnt->request.rq_status != RQ_INACTIVE) {
- /*
- * I think that we should really limit things to one
- * outstanding command per device - this is what tends
- * to trip up buggy firmware.
- */
- return NULL;
- }
- }
- SCpnt = SCpnt->next;
- }
- SCpnt = found;
- }
-
- if (!SCpnt)
- return NULL;
-
- if (SCSI_BLOCK(device, device->host))
- return NULL;
-
- if (req) {
- memcpy(&SCpnt->request, req, sizeof(struct request));
- tablesize = device->host->sg_tablesize;
- bhp = bh = req->bh;
- if (!tablesize)
- bh = NULL;
- /* Take a quick look through the table to see how big it is.
- * We already have our copy of req, so we can mess with that
- * if we want to.
- */
- while (req->nr_sectors && bh) {
- bhp = bhp->b_reqnext;
- if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp))
- tablesize--;
- req->nr_sectors -= bh->b_size >> 9;
- req->sector += bh->b_size >> 9;
- if (!tablesize)
- break;
- bh = bhp;
- }
- if (req->nr_sectors && bh && bh->b_reqnext) { /* Any leftovers? */
- SCpnt->request.bhtail = bh;
- req->bh = bh->b_reqnext; /* Divide request */
- bh->b_reqnext = NULL;
- bh = req->bh;
-
- /* Now reset things so that req looks OK */
- SCpnt->request.nr_sectors -= req->nr_sectors;
- req->current_nr_sectors = bh->b_size >> 9;
- req->buffer = bh->b_data;
- SCpnt->request.sem = NULL; /* Wait until whole thing done */
- } else {
- req->rq_status = RQ_INACTIVE;
- wake_up(&wait_for_request);
- }
- } else {
- SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Busy, but no request */
- SCpnt->request.sem = NULL; /* And no one is waiting for the device
- * either */
- }
-
- atomic_inc(&SCpnt->host->host_active);
- SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", SCpnt->target,
- atomic_read(&SCpnt->host->host_active)));
- SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
- SCpnt->old_use_sg = 0;
- SCpnt->transfersize = 0;
- SCpnt->resid = 0;
- SCpnt->underflow = 0;
- SCpnt->cmd_len = 0;
-
- /*
- * Since not everyone seems to set the device info correctly
- * before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
- */
-
- SCpnt->channel = device->channel;
- SCpnt->lun = device->lun;
- SCpnt->target = device->id;
- SCpnt->state = SCSI_STATE_INITIALIZING;
- SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
-
- return SCpnt;
-}
/* This function returns a structure pointer that will be valid for
* the device. The wait parameter tells us whether we should wait for
* of the packets for each device
*/
-Scsi_Cmnd *scsi_allocate_device(struct request ** reqp, Scsi_Device * device,
- int wait)
-{
- kdev_t dev;
- struct request *req = NULL;
- int tablesize;
- struct buffer_head *bh, *bhp;
- struct Scsi_Host *host;
- Scsi_Cmnd *SCpnt = NULL;
- Scsi_Cmnd *SCwait = NULL;
- Scsi_Cmnd *found = NULL;
-
- if (!device)
- panic("No device passed to scsi_allocate_device().\n");
-
- if (reqp)
- req = *reqp;
+/*
+ * This lock protects the freelist for all devices on the system.
+ * We could make this finer grained by having a single lock per
+ * device if it is ever found that there is excessive contention
+ * on this lock.
+ */
+static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
- /*
- * See if this request has already been queued by an
- * interrupt routine
- */
-
- if (req) {
- if (req->rq_status == RQ_INACTIVE)
- return NULL;
- dev = req->rq_dev;
- } else
- dev = 0; /* unused */
+/*
+ * Used for access to internal allocator used for DMA safe buffers.
+ */
+static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
- host = device->host;
+/*
+ * Used to protect insertion into and removal from the queue of
+ * commands to be processed by the bottom half handler.
+ */
+static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
- if (in_interrupt() && SCSI_BLOCK(device, host))
- return NULL;
+/*
+ * Function: scsi_allocate_device
+ *
+ * Purpose: Allocate a command descriptor.
+ *
+ * Arguments: device - device for which we want a command descriptor
+ * wait - 1 if we should wait in the event that none
+ * are available.
+ *
+ * Lock status: No locks assumed to be held. This function is SMP-safe.
+ *
+ * Returns: Pointer to command descriptor.
+ *
+ * Notes: Prior to the new queue code, this function was not SMP-safe.
+ */
+Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait)
+{
+ struct Scsi_Host *host;
+ Scsi_Cmnd *SCpnt = NULL;
+ Scsi_Device *SDpnt;
+ unsigned long flags;
+
+ if (!device)
+ panic("No device passed to scsi_allocate_device().\n");
+
+ host = device->host;
+
+ spin_lock_irqsave(&device_request_lock, flags);
+
while (1 == 1) {
- if (!device->single_lun) {
- SCpnt = device->device_queue;
- while (SCpnt) {
- SCwait = SCpnt;
- if (SCpnt->request.rq_status == RQ_INACTIVE)
- break;
- SCpnt = SCpnt->next;
- }
- } else {
- SCpnt = device->device_queue;
- while (SCpnt) {
- if (SCpnt->channel == device->channel
- && SCpnt->target == device->id) {
- if (SCpnt->lun == device->lun) {
- SCwait = SCpnt;
- if (found == NULL
- && SCpnt->request.rq_status == RQ_INACTIVE) {
- found = SCpnt;
+ SCpnt = NULL;
+ if (!device->device_blocked) {
+ if (device->single_lun) {
+ /*
+ * FIXME(eric) - this is not at all optimal. Given that
+ * single lun devices are rare and usually slow
+ * (i.e. CD changers), this is good enough for now, but
+ * we may want to come back and optimize this later.
+ *
+ * Scan through all of the devices attached to this
+ * host, and see if any are active or not. If so,
+ * we need to defer this command.
+ *
+ * We really need a busy counter per device. This would
+ * allow us to more easily figure out whether we should
+ * do anything here or not.
+ */
+ for (SDpnt = host->host_queue;
+ SDpnt;
+ SDpnt = SDpnt->next) {
+ /*
+ * Only look for other devices on the same bus
+ * with the same target ID.
+ */
+ if (SDpnt->channel != device->channel
+ || SDpnt->id != device->id
+ || SDpnt == device) {
+ continue;
+ }
+ for (SCpnt = SDpnt->device_queue;
+ SCpnt;
+ SCpnt = SCpnt->next) {
+ if (SCpnt->request.rq_status != RQ_INACTIVE) {
+ break;
}
}
- if (SCpnt->request.rq_status != RQ_INACTIVE) {
- /*
- * I think that we should really limit things to one
- * outstanding command per device - this is what tends
- * to trip up buggy firmware.
- */
- found = NULL;
+ if (SCpnt) {
break;
}
}
- SCpnt = SCpnt->next;
+ if (SDpnt) {
+ /*
+ * Some other device in this cluster is busy.
+ * If asked to wait, we need to wait, otherwise
+ * return NULL.
+ */
+ SCpnt = NULL;
+ break;
+ }
+ }
+ /*
+ * Now we can check for a free command block for this device.
+ */
+ for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
+ if (SCpnt->request.rq_status == RQ_INACTIVE)
+ break;
}
- SCpnt = found;
}
-
- /* See if this request has already been queued by an interrupt routine
+ /*
+ * If we couldn't find a free command block, and we have been
+ * asked to wait, then do so.
*/
- if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
- return NULL;
+ if (SCpnt) {
+ break;
}
- if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) { /* Might have changed */
- if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE) {
- DECLARE_WAITQUEUE(wait,current);
- add_wait_queue(&device->device_wait,&wait);
- current->state=TASK_UNINTERRUPTIBLE;
- spin_unlock(&io_request_lock);
- schedule();
- current->state=TASK_RUNNING;
- remove_wait_queue(&device->device_wait,&wait);
- spin_lock_irq(&io_request_lock);
- } else {
- if (!wait)
- return NULL;
- if (!SCwait) {
- printk("Attempt to allocate device channel %d,"
- " target %d, lun %d\n", device->channel,
- device->id, device->lun);
- panic("No device found in scsi_allocate_device\n");
- }
- }
+ /*
+ * If we have been asked to wait for a free block, then
+ * wait here.
+ */
+ spin_unlock_irqrestore(&device_request_lock, flags);
+ if (wait) {
+ /*
+ * This should block until a device command block
+ * becomes available.
+ */
+ sleep_on(&device->device_wait);
+ spin_lock_irqsave(&device_request_lock, flags);
} else {
- if (req) {
- memcpy(&SCpnt->request, req, sizeof(struct request));
- tablesize = device->host->sg_tablesize;
- bhp = bh = req->bh;
- if (!tablesize)
- bh = NULL;
- /* Take a quick look through the table to see how big it is.
- * We already have our copy of req, so we can mess with that
- * if we want to.
- */
- while (req->nr_sectors && bh) {
- bhp = bhp->b_reqnext;
- if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp))
- tablesize--;
- req->nr_sectors -= bh->b_size >> 9;
- req->sector += bh->b_size >> 9;
- if (!tablesize)
- break;
- bh = bhp;
- }
- if (req->nr_sectors && bh && bh->b_reqnext) { /* Any leftovers? */
- SCpnt->request.bhtail = bh;
- req->bh = bh->b_reqnext; /* Divide request */
- bh->b_reqnext = NULL;
- bh = req->bh;
- /* Now reset things so that req looks OK */
- SCpnt->request.nr_sectors -= req->nr_sectors;
- req->current_nr_sectors = bh->b_size >> 9;
- req->buffer = bh->b_data;
- SCpnt->request.sem = NULL; /* Wait until whole thing done */
- } else {
- req->rq_status = RQ_INACTIVE;
- *reqp = req->next;
- wake_up(&wait_for_request);
- }
- } else {
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- SCpnt->request.sem = NULL; /* And no one is waiting for this
- * to complete */
- }
- atomic_inc(&SCpnt->host->host_active);
- SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
- SCpnt->target,
- atomic_read(&SCpnt->host->host_active)));
- break;
+ return NULL;
}
}
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = NULL; /* And no one is waiting for this
+ * to complete */
+ atomic_inc(&SCpnt->host->host_active);
+
SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
SCpnt->old_use_sg = 0;
SCpnt->transfersize = 0; /* No default transfer size */
SCpnt->cmd_len = 0;
- SCpnt->resid = 0;
- SCpnt->underflow = 0; /* Do not flag underflow conditions */
- /* Since not everyone seems to set the device info correctly
- * before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
- * FIXME(eric) This doesn't make any sense.
- */
- SCpnt->channel = device->channel;
- SCpnt->lun = device->lun;
- SCpnt->target = device->id;
+ SCpnt->underflow = 0; /* Do not flag underflow conditions */
SCpnt->state = SCSI_STATE_INITIALIZING;
SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
+ spin_unlock_irqrestore(&device_request_lock, flags);
+
+ SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
+ SCpnt->target,
+ atomic_read(&SCpnt->host->host_active)));
+
return SCpnt;
}
*/
void scsi_release_command(Scsi_Cmnd * SCpnt)
{
+ unsigned long flags;
+ spin_lock_irqsave(&device_request_lock, flags);
+
SCpnt->request.rq_status = RQ_INACTIVE;
SCpnt->state = SCSI_STATE_UNUSED;
SCpnt->owner = SCSI_OWNER_NOBODY;
atomic_read(&SCpnt->host->eh_wait->count)));
up(SCpnt->host->eh_wait);
}
+ spin_unlock_irqrestore(&device_request_lock, flags);
}
/*
* This is inline because we have stack problemes if we recurse to deeply.
*/
-inline int internal_cmnd(Scsi_Cmnd * SCpnt)
+int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
{
#ifdef DEBUG_DELAY
unsigned long clock;
#endif
struct Scsi_Host *host;
int rtn = 0;
+ unsigned long flags;
unsigned long timeout;
+ ASSERT_LOCK(&io_request_lock, 0);
+
#if DEBUG
unsigned long *ret = 0;
#ifdef __mips__
* interrupt handler (assuming there is one irq-level per
* host).
*/
- spin_unlock_irq(&io_request_lock);
while (--ticks_remaining >= 0)
mdelay(1 + 999 / HZ);
host->resetting = 0;
- spin_lock_irq(&io_request_lock);
}
if (host->hostt->use_new_eh_code) {
scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
* We will use a queued command if possible, otherwise we will emulate the
* queuing and calling of completion function ourselves.
*/
- SCSI_LOG_MLQUEUE(3, printk("internal_cmnd (host = %d, channel = %d, target = %d, "
+ SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
"command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
* passes a meaningful return value.
*/
if (host->hostt->use_new_eh_code) {
+ spin_lock_irqsave(&io_request_lock, flags);
rtn = host->hostt->queuecommand(SCpnt, scsi_done);
+ spin_unlock_irqrestore(&io_request_lock, flags);
if (rtn != 0) {
+ scsi_delete_timer(SCpnt);
scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
}
} else {
+ spin_lock_irqsave(&io_request_lock, flags);
host->hostt->queuecommand(SCpnt, scsi_old_done);
+ spin_unlock_irqrestore(&io_request_lock, flags);
}
} else {
int temp;
SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command));
+ spin_lock_irqsave(&io_request_lock, flags);
temp = host->hostt->command(SCpnt);
SCpnt->result = temp;
#ifdef DEBUG_DELAY
+ spin_unlock_irqrestore(&io_request_lock, flags);
clock = jiffies + 4 * HZ;
- spin_unlock_irq(&io_request_lock);
while (time_before(jiffies, clock))
barrier();
- spin_lock_irq(&io_request_lock);
printk("done(host = %d, result = %04x) : routine at %p\n",
host->host_no, temp, host->hostt->command);
+ spin_lock_irqsave(&io_request_lock, flags);
#endif
if (host->hostt->use_new_eh_code) {
scsi_done(SCpnt);
} else {
scsi_old_done(SCpnt);
}
+ spin_unlock_irqrestore(&io_request_lock, flags);
}
- SCSI_LOG_MLQUEUE(3, printk("leaving internal_cmnd()\n"));
+ SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
return rtn;
}
* drivers go for the same host at the same time.
*/
+/*
+ * Function: scsi_do_cmd
+ *
+ * Purpose: Queue a SCSI command
+ *
+ * Arguments: SCpnt - command descriptor.
+ * cmnd - actual SCSI command to be performed.
+ * buffer - data buffer.
+ * bufflen - size of data buffer.
+ * done - completion function to be run.
+ * timeout - how long to let it run before timeout.
+ * retries - number of retries we allow.
+ *
+ * Lock status: With the new queueing code, this is SMP-safe, and no locks
+ *		need be held upon entry.  With the old queueing code, the
+ *		lock was assumed to be held upon entry.
+ *
+ * Returns: Pointer to command descriptor.
+ *
+ * Notes: Prior to the new queue code, this function was not SMP-safe.
+ * Also, this function is now only used for queueing requests
+ * for things like ioctls and character device requests - this
+ * is because we essentially just inject a request into the
+ * queue for the device. Normal block device handling manipulates
+ * the queue directly.
+ */
void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
int timeout, int retries)
struct Scsi_Host *host = SCpnt->host;
Scsi_Device *device = SCpnt->device;
+ ASSERT_LOCK(&io_request_lock, 0);
+
SCpnt->owner = SCSI_OWNER_MIDLEVEL;
SCSI_LOG_MLQUEUE(4,
* ourselves.
*/
- SCpnt->pid = scsi_pid++;
-
- while (SCSI_BLOCK((Scsi_Device *) NULL, host)) {
- spin_unlock(&io_request_lock); /* FIXME!!! */
- SCSI_SLEEP(&host->host_wait, SCSI_BLOCK((Scsi_Device *) NULL, host));
- spin_lock_irq(&io_request_lock); /* FIXME!!! */
- }
-
- if (host->block)
- host_active = host;
host->host_busy++;
device->device_busy++;
SCpnt->internal_timeout = NORMAL_TIMEOUT;
SCpnt->abort_reason = 0;
SCpnt->result = 0;
- internal_cmnd(SCpnt);
+
+ /*
+ * At this point, we merely set up the command, stick it in the normal
+ * request queue, and return. Eventually that request will come to the
+ * top of the list, and will be dispatched.
+ */
+ scsi_insert_special_cmd(SCpnt, 0);
SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
}
-/* This function is the mid-level interrupt routine, which decides how
+/*
+ * This function is the mid-level interrupt routine, which decides how
* to handle error conditions. Each invocation of this function must
* do one and *only* one of the following:
*
* 1) Insert command in BH queue.
* 2) Activate error handler for host.
*
- * FIXME(eric) - I am concerned about stack overflow (still). An interrupt could
- * come while we are processing the bottom queue, which would cause another command
- * to be stuffed onto the bottom queue, and it would in turn be processed as that
- * interrupt handler is returning. Given a sufficiently steady rate of returning
- * commands, this could cause the stack to overflow. I am not sure what is the most
- * appropriate solution here - we should probably keep a depth count, and not process
- * any commands while we still have a bottom handler active higher in the stack.
+ * FIXME(eric) - I am concerned about stack overflow (still). An
+ * interrupt could come while we are processing the bottom queue,
+ * which would cause another command to be stuffed onto the bottom
+ * queue, and it would in turn be processed as that interrupt handler
+ * is returning. Given a sufficiently steady rate of returning
+ * commands, this could cause the stack to overflow. I am not sure
+ * what is the most appropriate solution here - we should probably
+ * keep a depth count, and not process any commands while we still
+ * have a bottom handler active higher in the stack.
*
- * There is currently code in the bottom half handler to monitor recursion in the bottom
- * handler and report if it ever happens. If this becomes a problem, it won't be hard to
- * engineer something to deal with it so that only the outer layer ever does any real
- * processing.
+ * There is currently code in the bottom half handler to monitor
+ * recursion in the bottom handler and report if it ever happens. If
+ * this becomes a problem, it won't be hard to engineer something to
+ * deal with it so that only the outer layer ever does any real
+ * processing.
*/
void scsi_done(Scsi_Cmnd * SCpnt)
{
+ unsigned long flags;
+ int tstatus;
/*
* We don't have to worry about this one timing out any more.
*/
- scsi_delete_timer(SCpnt);
+ tstatus = scsi_delete_timer(SCpnt);
+ /*
+ * If we are unable to remove the timer, it means that the command
+ * has already timed out. In this case, we have no choice but to
+ * let the timeout function run, as we have no idea where in fact
+ * that function could really be. It might be on another processor,
+ * etc, etc.
+ */
+ if (!tstatus) {
+ return;
+ }
/* Set the serial numbers back to zero */
SCpnt->serial_number = 0;
SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
return;
}
+ spin_lock_irqsave(&scsi_bhqueue_lock, flags);
+
SCpnt->serial_number_at_timeout = 0;
SCpnt->state = SCSI_STATE_BHQUEUE;
SCpnt->owner = SCSI_OWNER_BH_HANDLER;
* We already have the io_request_lock here, since we are called from the
* interrupt handler or the error handler. (DB)
*
+ * This may be true at the moment, but I would like to wean all of the low
+ * level drivers away from using io_request_lock. Technically they should
+ * all use their own locking. I am adding a small spinlock to protect
+ * this datastructure to make it safe for that day. (ERY)
*/
if (!scsi_bh_queue_head) {
scsi_bh_queue_head = SCpnt;
scsi_bh_queue_tail = SCpnt;
}
+ spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
/*
* Mark the bottom half handler to be run.
*/
* race condition when scsi_done is called after a command has already
* timed out but before the time out is processed by the error handler.
* (DB)
+ *
+ * I believe I have corrected this. We simply monitor the return status of
+ * del_timer() - if this comes back as 0, it means that the timer has fired
+ * and that a timeout is in progress. I have modified scsi_done() such
+ * that in this instance the command is never inserted in the bottom
+ * half queue. Thus the only time we hold the lock here is when
+ * we wish to atomically remove the contents of the queue.
*/
void scsi_bottom_half_handler(void)
{
Scsi_Cmnd *SCnext;
unsigned long flags;
- spin_lock_irqsave(&io_request_lock, flags);
while (1 == 1) {
+ spin_lock_irqsave(&scsi_bhqueue_lock, flags);
SCpnt = scsi_bh_queue_head;
scsi_bh_queue_head = NULL;
+ spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
if (SCpnt == NULL) {
- spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
SCnext = SCpnt->bh_next;
} /* while(1==1) */
- spin_unlock_irqrestore(&io_request_lock, flags);
-
}
/*
SCpnt->request_bufflen = SCpnt->bufflen;
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
- SCpnt->result = 0;
- memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
- return internal_cmnd(SCpnt);
+ return scsi_dispatch_cmd(SCpnt);
}
/*
struct Scsi_Host *host;
Scsi_Device *device;
+ ASSERT_LOCK(&io_request_lock, 0);
+
host = SCpnt->host;
device = SCpnt->device;
host->host_busy--; /* Indicate that we are free */
device->device_busy--; /* Decrement device usage counter. */
- if (host->block && host->host_busy == 0) {
- host_active = NULL;
-
- /* For block devices "wake_up" is done in end_scsi_request */
- if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) {
- struct Scsi_Host *next;
-
- for (next = host->block; next != host; next = next->block)
- wake_up(&next->host_wait);
- }
- }
- /*
- * Now try and drain the mid-level queue if any commands have been
- * inserted. Check to see whether the queue even has anything in
- * it first, as otherwise this is useless overhead.
- */
- if (SCpnt->host->pending_commands != NULL) {
- scsi_mlqueue_finish(SCpnt->host, SCpnt->device);
- }
- wake_up(&host->host_wait);
/*
* If we have valid sense information, then some kind of recovery
static void scsi_unregister_host(Scsi_Host_Template *);
#endif
+/*
+ * Function: scsi_malloc
+ *
+ * Purpose: Allocate memory from the DMA-safe pool.
+ *
+ * Arguments: len - amount of memory we need.
+ *
+ * Lock status: No locks assumed to be held. This function is SMP-safe.
+ *
+ * Returns: Pointer to memory block.
+ *
+ * Notes: Prior to the new queue code, this function was not SMP-safe.
+ * This function can only allocate in units of sectors
+ * (i.e. 512 bytes).
+ *
+ *		We cannot use the normal system allocator because we need
+ * to be able to guarantee that we can process a complete disk
+ * I/O request without touching the system allocator. Think
+ * about it - if the system were heavily swapping, and tried to
+ * write out a block of memory to disk, and the SCSI code needed
+ * to allocate more memory in order to be able to write the
+ * data to disk, you would wedge the system.
+ */
void *scsi_malloc(unsigned int len)
{
unsigned int nbits, mask;
+ unsigned long flags;
+
int i, j;
if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
return NULL;
nbits = len >> 9;
mask = (1 << nbits) - 1;
+ spin_lock_irqsave(&allocator_request_lock, flags);
+
for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
#endif
+ spin_unlock_irqrestore(&allocator_request_lock, flags);
return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
}
}
+ spin_unlock_irqrestore(&allocator_request_lock, flags);
return NULL; /* Nope. No more */
}
+/*
+ * Function: scsi_free
+ *
+ * Purpose: Free memory into the DMA-safe pool.
+ *
+ * Arguments: ptr - data block we are freeing.
+ * len - size of block we are freeing.
+ *
+ * Lock status: No locks assumed to be held. This function is SMP-safe.
+ *
+ * Returns: Nothing
+ *
+ * Notes: This function *must* only be used to free memory
+ * allocated from scsi_malloc().
+ *
+ * Prior to the new queue code, this function was not SMP-safe.
+ *		This function can only free memory in units of sectors
+ *		(i.e. 512 bytes).
+ */
int scsi_free(void *obj, unsigned int len)
{
unsigned int page, sector, nbits, mask;
+ unsigned long flags;
#ifdef DEBUG
unsigned long ret = 0;
SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
#endif
+ spin_lock_irqsave(&allocator_request_lock, flags);
+
for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
if ((unsigned long) obj >= page_addr &&
}
scsi_dma_free_sectors += nbits;
dma_malloc_freelist[page] &= ~(mask << sector);
+ spin_unlock_irqrestore(&allocator_request_lock, flags);
return 0;
}
}
void scsi_build_commandblocks(Scsi_Device * SDpnt)
{
+ unsigned long flags;
struct Scsi_Host *host = SDpnt->host;
int j;
Scsi_Cmnd *SCpnt;
+ spin_lock_irqsave(&device_request_lock, flags);
+
if (SDpnt->queue_depth == 0)
SDpnt->queue_depth = host->cmd_per_lun;
SDpnt->device_queue = NULL;
SDpnt->queue_depth, j);
SDpnt->queue_depth = j;
SDpnt->has_cmdblocks = (0 != j);
- } else
+ } else {
SDpnt->has_cmdblocks = 1;
+ }
+ spin_unlock_irqrestore(&device_request_lock, flags);
}
static ssize_t proc_scsi_gen_write(struct file * file, const char * buf,
if (HBA_ptr->host_queue == scd) {
HBA_ptr->host_queue = scd->next;
}
+ blk_cleanup_queue(&scd->request_queue);
scsi_init_free((char *) scd, sizeof(Scsi_Device));
} else {
goto out;
#endif
/*
- * Go through the device list and recompute the most appropriate size
- * for the dma pool. Then grab more memory (as required).
+ * Function: resize_dma_pool
+ *
+ * Purpose: Ensure that the DMA pool is sufficiently large to be
+ * able to guarantee that we can always process I/O requests
+ * without calling the system allocator.
+ *
+ * Arguments: None.
+ *
+ * Lock status: No locks assumed to be held. This function is SMP-safe.
+ *
+ * Returns: Nothing
+ *
+ * Notes: Prior to the new queue code, this function was not SMP-safe.
+ * Go through the device list and recompute the most appropriate
+ * size for the dma pool. Then grab more memory (as required).
*/
static void resize_dma_pool(void)
{
int i, k;
unsigned long size;
+ unsigned long flags;
struct Scsi_Host *shpnt;
struct Scsi_Host *host = NULL;
Scsi_Device *SDpnt;
unsigned char **new_dma_malloc_pages = NULL;
int out_of_space = 0;
+ spin_lock_irqsave(&allocator_request_lock, flags);
+
if (!scsi_hostlist) {
/*
* Free up the DMA pool.
dma_malloc_freelist = NULL;
dma_sectors = 0;
scsi_dma_free_sectors = 0;
+ spin_unlock_irqrestore(&allocator_request_lock, flags);
return;
}
/* Next, check to see if we need to extend the DMA buffer pool */
if (new_dma_sectors < dma_sectors)
new_dma_sectors = dma_sectors;
#endif
- if (new_dma_sectors <= dma_sectors)
+ if (new_dma_sectors <= dma_sectors) {
+ spin_unlock_irqrestore(&allocator_request_lock, flags);
return; /* best to quit while we are in front */
+ }
for (k = 0; k < 20; ++k) { /* just in case */
out_of_space = 0;
break; /* found space ... */
} /* end of for loop */
if (out_of_space) {
+ spin_unlock_irqrestore(&allocator_request_lock, flags);
scsi_need_isa_buffer = new_need_isa_buffer; /* some useful info */
printk(" WARNING, not enough memory, pool not expanded\n");
return;
dma_sectors = new_dma_sectors;
scsi_need_isa_buffer = new_need_isa_buffer;
+ spin_unlock_irqrestore(&allocator_request_lock, flags);
+
#ifdef DEBUG_INIT
printk("resize_dma_pool: dma free sectors = %d\n", scsi_dma_free_sectors);
printk("resize_dma_pool: dma sectors = %d\n", dma_sectors);
printk("scsi : %d host%s.\n", next_scsi_host,
(next_scsi_host == 1) ? "" : "s");
- scsi_make_blocked_list();
-
/* The next step is to call scan_scsis here. This generates the
* Scsi_Devices entries
*/
}
SDpnt->has_cmdblocks = 0;
+ blk_cleanup_queue(&SDpnt->request_queue);
/* Next free up the Scsi_Device structures for this host */
shpnt->host_queue = SDpnt->next;
scsi_init_free((char *) SDpnt, sizeof(Scsi_Device));
(scsi_memory_upper_value - scsi_init_memory_start) / 1024);
#endif
- scsi_make_blocked_list();
/* There were some hosts that were loaded at boot time, so we cannot
do any more than this */
printk("Dump of scsi host parameters:\n");
i = 0;
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
- printk(" %d %d %d : %d %p\n",
+ printk(" %d %d %d : %d\n",
shpnt->host_failed,
shpnt->host_busy,
atomic_read(&shpnt->host_active),
- shpnt->host_blocked,
- shpnt->pending_commands);
+ shpnt->host_blocked);
}
/* Now dump the request lists for each block device */
printk("Dump of pending block device requests\n");
for (i = 0; i < MAX_BLKDEV; i++) {
- if (blk_dev[i].current_request) {
+ if (blk_dev[i].request_queue.current_request) {
struct request *req;
printk("%d: ", i);
- req = blk_dev[i].current_request;
+ req = blk_dev[i].request_queue.current_request;
while (req) {
printk("(%s %d %ld %ld %ld) ",
kdevname(req->rq_dev),
}
}
}
- /* printk("wait_for_request = %p\n", &wait_for_request); */
+ printk("wait_for_request = %p\n", &wait_for_request);
#endif /* CONFIG_SCSI_LOGGING */ /* } */
}
#endif /* CONFIG_PROC_FS */
/*
* scsi.h Copyright (C) 1992 Drew Eckhardt
- * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
* generic SCSI package header file by
* Initial versions: Drew Eckhardt
* Subsequent revisions: Eric Youngdale
*
* <drew@colorado.edu>
*
- * Modified by Eric Youngdale eric@aib.com to
+ * Modified by Eric Youngdale eric@andante.org to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*/
#define SCSI_TIMEOUT (2*HZ)
#endif
+/*
+ * Used for debugging the new queueing code. We want to make sure
+ * that the lock state is consistent with design. Only do this in
+ * the user space simulator.
+ */
+#define ASSERT_LOCK(_LOCK, _COUNT)
+
+#if defined(__SMP__) && defined(CONFIG_USER_DEBUG)
+#undef ASSERT_LOCK
+#define ASSERT_LOCK(_LOCK,_COUNT) \
+ { if( (_LOCK)->lock != _COUNT ) \
+ panic("Lock count inconsistent %s %d\n", __FILE__, __LINE__); \
+ }
+#endif
+
/*
* Use these to separate status msg and our bytes
*
extern int scsi_decide_disposition(Scsi_Cmnd * SCpnt);
extern int scsi_block_when_processing_errors(Scsi_Device *);
extern void scsi_sleep(int);
+extern int scsi_partsize(struct buffer_head *bh, unsigned long capacity,
+ unsigned int *cyls, unsigned int *hds,
+ unsigned int *secs);
+
+/*
+ * Prototypes for functions in scsi_lib.c
+ */
+extern void initialize_merge_fn(Scsi_Device * SDpnt);
+extern void scsi_request_fn(request_queue_t * q);
+
+extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int);
+extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
/*
* scsi_abort aborts the current command that is executing on host host.
*/
extern void scsi_do_cmd(Scsi_Cmnd *, const void *cmnd,
- void *buffer, unsigned bufflen,
- void (*done)(struct scsi_cmnd *),
- int timeout, int retries);
-
-extern void scsi_wait_cmd (Scsi_Cmnd *, const void *cmnd ,
void *buffer, unsigned bufflen,
void (*done) (struct scsi_cmnd *),
int timeout, int retries);
+extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd,
+ void *buffer, unsigned bufflen,
+ void (*done) (struct scsi_cmnd *),
+ int timeout, int retries);
-extern Scsi_Cmnd *scsi_allocate_device(struct request **, Scsi_Device *, int);
+extern void scsi_request_fn(request_queue_t * q);
+
+extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int);
extern Scsi_Cmnd *scsi_request_queueable(struct request *, Scsi_Device *);
wait_queue_head_t device_wait; /* Used to wait if
device is busy */
struct Scsi_Host *host;
+ request_queue_t request_queue;
volatile unsigned short device_busy; /* commands actually active on low-level */
- void (*scsi_request_fn) (void); /* Used to jumpstart things after an
- * ioctl */
+ int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize
+ new request */
Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */
/* public: */
unsigned int manufacturer; /* Manufacturer of device, for using
* vendor-specific cmd's */
+ unsigned sector_size; /* size in bytes */
+
int attached; /* # of high level drivers attached to
* this */
int access_count; /* Count of open channels/mounts */
unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
* because we did a bus reset. */
unsigned device_blocked:1; /* Device returned QUEUE_FULL. */
+ unsigned ten:1; /* support ten byte read / write */
+ unsigned remap:1; /* support remapping */
+ unsigned starved:1; /* unable to process commands because
+ host busy */
};
reconnects. Probably == sector
size */
- int resid; /* Number of bytes requested to be
+ int resid; /* Number of bytes requested to be
transferred less actual number
transferred (0 if not supported) */
struct request request; /* A copy of the command we are
working on */
- unsigned char sense_buffer[64]; /* obtained by REQUEST SENSE when
- CHECK CONDITION is received on
- original command (auto-sense) */
+ unsigned char sense_buffer[64]; /* obtained by REQUEST SENSE when
+ CHECK CONDITION is received on
+ original command (auto-sense) */
unsigned flags;
unsigned long pid; /* Process ID, starts at 0 */
};
+/*
+ * Flag bits for the internal_timeout array
+ */
+#define NORMAL_TIMEOUT 0
+#define IN_ABORT 1
+#define IN_RESET 2
+#define IN_RESET2 4
+#define IN_RESET3 8
/*
* Definitions and prototypes used for scsi mid-level queue.
extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason);
extern int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device);
+extern Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate,
+ int sectors);
+
+extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
+ int block_sectors);
+
#if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR)
#include "hosts.h"
-static Scsi_Cmnd *end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
-{
- struct request *req;
- struct buffer_head *bh;
-
- req = &SCpnt->request;
- req->errors = 0;
- if (!uptodate) {
- printk(DEVICE_NAME " I/O error: dev %s, sector %lu\n",
- kdevname(req->rq_dev), req->sector);
- }
- do {
- if ((bh = req->bh) != NULL) {
- req->bh = bh->b_reqnext;
- req->nr_sectors -= bh->b_size >> 9;
- req->sector += bh->b_size >> 9;
- bh->b_reqnext = NULL;
- bh->b_end_io(bh, uptodate);
- sectors -= bh->b_size >> 9;
- if ((bh = req->bh) != NULL) {
- req->current_nr_sectors = bh->b_size >> 9;
- if (req->nr_sectors < req->current_nr_sectors) {
- req->nr_sectors = req->current_nr_sectors;
- printk("end_scsi_request: buffer-list destroyed\n");
- }
- }
- }
- } while (sectors && bh);
- if (req->bh) {
- req->buffer = bh->b_data;
- return SCpnt;
- }
- DEVICE_OFF(req->rq_dev);
- if (req->sem != NULL) {
- up(req->sem);
- }
- add_blkdev_randomness(MAJOR(req->rq_dev));
-
- if (SCpnt->host->block) {
- struct Scsi_Host *next;
-
- for (next = SCpnt->host->block; next != SCpnt->host;
- next = next->block)
- wake_up(&next->host_wait);
- }
- wake_up(&wait_for_request);
- wake_up(&SCpnt->device->device_wait);
- scsi_release_command(SCpnt);
- return NULL;
-}
-
/* This is just like INIT_REQUEST, but we need to be aware of the fact
* that an interrupt may start another request, so we run this with interrupts
/* A few options that we want selected */
-#define NR_HOSTS_PRESENT 20
-#define NR_FAKE_DISKS 6
-#define N_HEAD 32
-#define N_SECTOR 64
-#define DISK_READONLY(TGT) (1)
+#define NR_HOSTS_PRESENT 1
+#define NR_FAKE_DISKS 3
+#define N_HEAD 255
+#define N_SECTOR 63
+#define N_CYLINDER 524
+#define DISK_READONLY(TGT) (0)
#define DISK_REMOVEABLE(TGT) (1)
+/* Target 2 emulates a tape drive; everything else is a disk. No trailing
+ * semicolon: the macro must be usable as an expression (callers supply ";"). */
+#define DEVICE_TYPE(TGT) (TGT == 2 ? TYPE_TAPE : TYPE_DISK)
/* Do not attempt to use a timer to simulate a real disk with latency */
/* Only use this in the actual kernel, not in the simulator. */
-/* #define IMMEDIATE */
+#define IMMEDIATE
/* Skip some consistency checking. Good for benchmarking */
#define SPEEDY
#define START_PARTITION 4
/* Time to wait before completing a command */
-#define DISK_SPEED (HZ/10) /* 100ms */
-#define CAPACITY (0x80000)
+#define DISK_SPEED (HZ/10) /* 100ms */
+#define CAPACITY (N_HEAD * N_SECTOR * N_CYLINDER)
+#define SIZE(TGT) (TGT == 2 ? 2248 : 512)
static int starts[] =
-{N_HEAD, N_HEAD * N_SECTOR, 50000, CAPACITY, 0};
+{N_SECTOR,
+ N_HEAD * N_SECTOR, /* Single cylinder */
+ N_HEAD * N_SECTOR * 4,
+ CAPACITY, 0};
static int npart = 0;
#include "scsi_debug.h"
typedef void (*done_fct_t) (Scsi_Cmnd *);
-static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] = {NULL,};
+static volatile done_fct_t do_done[SCSI_DEBUG_MAILBOXES] =
+{NULL,};
static void scsi_debug_intr_handle(unsigned long);
static struct timer_list timeout[SCSI_DEBUG_MAILBOXES];
-Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES] = {NULL,};
-static char SCrst[SCSI_DEBUG_MAILBOXES] = {0,};
+Scsi_Cmnd *SCint[SCSI_DEBUG_MAILBOXES] =
+{NULL,};
+static char SCrst[SCSI_DEBUG_MAILBOXES] =
+{0,};
/*
* Semaphore used to simulate bus lockups.
*/
static int scsi_debug_lockup = 0;
-static char sense_buffer[128] = {0,};
+static char sense_buffer[128] =
+{0,};
static void scsi_dump(Scsi_Cmnd * SCpnt, int flag)
{
sgcount = 0;
sgpnt = NULL;
+ /*
+ * The io_request_lock *must* be held at this point.
+ */
+ if( io_request_lock.lock == 0 )
+ {
+ printk("Warning - io_request_lock is not held in queuecommand\n");
+ }
+
/*
* If we are being notified of the mid-level reposessing a command due to timeout,
* just return.
SCpnt->result = 0;
done(SCpnt);
return 0;
+ case START_STOP:
+ SCSI_LOG_LLQUEUE(3, printk("START_STOP\n"));
+ scsi_debug_errsts = 0;
+ break;
case ALLOW_MEDIUM_REMOVAL:
if (cmd[4]) {
SCSI_LOG_LLQUEUE(2, printk("Medium removal inhibited..."));
case INQUIRY:
SCSI_LOG_LLQUEUE(3, printk("Inquiry...(%p %d)\n", buff, bufflen));
memset(buff, 0, bufflen);
- buff[0] = TYPE_DISK;
+ buff[0] = DEVICE_TYPE(target);
buff[1] = DISK_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
buff[2] = 1;
buff[4] = 33 - 5;
buff[1] = (CAPACITY >> 16) & 0xff;
buff[2] = (CAPACITY >> 8) & 0xff;
buff[3] = CAPACITY & 0xff;
- buff[6] = 2; /* 512 byte sectors */
+ buff[4] = 0;
+ buff[5] = 0;
+		buff[6] = (SIZE(target) >> 8) & 0xff;	/* sector size, MSB */
+		buff[7] = SIZE(target) & 0xff;		/* sector size, LSB */
scsi_debug_errsts = 0;
break;
case READ_10:
p = (struct partition *) (buff + 0x1be);
i = 0;
while (starts[i + 1]) {
+ int start_cyl, end_cyl;
+
+ start_cyl = starts[i] / N_HEAD / N_SECTOR;
+ end_cyl = (starts[i + 1] - 1) / N_HEAD / N_SECTOR;
+ p->boot_ind = 0;
+
+ p->head = (i == 0 ? 1 : 0);
+ p->sector = 1 | ((start_cyl >> 8) << 6);
+ p->cyl = (start_cyl & 0xff);
+
+ p->end_head = N_HEAD - 1;
+ p->end_sector = N_SECTOR | ((end_cyl >> 8) << 6);
+ p->end_cyl = (end_cyl & 0xff);
+
p->start_sect = starts[i];
p->nr_sects = starts[i + 1] - starts[i];
p->sys_ind = 0x81; /* Linux partition */
- p->head = (i == 0 ? 1 : 0);
- p->sector = 1;
- p->cyl = starts[i] / N_HEAD / N_SECTOR;
- p->end_head = N_HEAD - 1;
- p->end_sector = N_SECTOR;
- p->end_cyl = starts[i + 1] / N_HEAD / N_SECTOR;
p++;
i++;
};
#ifdef IMMEDIATE
if (!scsi_debug_lockup) {
SCpnt->result = scsi_debug_errsts;
+ SCint[i] = SCpnt;
+ do_done[i] = done;
scsi_debug_intr_handle(i); /* No timer - do this one right away */
}
restore_flags(flags);
return 0;
}
-volatile static int internal_done_flag = 0;
-volatile static int internal_done_errcode = 0;
-static void internal_done(Scsi_Cmnd * SCpnt)
-{
- internal_done_errcode = SCpnt->result;
- ++internal_done_flag;
-}
-
-int scsi_debug_command(Scsi_Cmnd * SCpnt)
-{
- DEB(printk("scsi_debug_command: ..calling scsi_debug_queuecommand\n"));
- scsi_debug_queuecommand(SCpnt, internal_done);
-
- while (!internal_done_flag);
- internal_done_flag = 0;
- return internal_done_errcode;
-}
-
/* A "high" level interrupt handler. This should be called once per jiffy
* to simulate a regular scsi disk. We use a timer to do this. */
int size = disk->capacity;
info[0] = N_HEAD;
info[1] = N_SECTOR;
- info[2] = (size + 2047) >> 11;
+ info[2] = N_CYLINDER;
if (info[2] >= 1024)
info[2] = 1024;
return 0;
return (len);
}
+#ifdef CONFIG_USER_DEBUG
+/*
+ * This is a hack for the user space emulator. It allows us to
+ * "insert" arbitrary numbers of additional drivers.
+ */
+void *scsi_debug_get_handle(void)
+{
+	static Scsi_Host_Template driver_copy = SCSI_DEBUG;
+	void *rtn;
+	/* GFP_ATOMIC may fail; don't memcpy through a NULL pointer.
+	 * Callers get NULL back and must treat it as "no more drivers". */
+	rtn = kmalloc(sizeof(driver_copy), GFP_ATOMIC);
+	if (rtn != NULL)
+		memcpy(rtn, (void *) &driver_copy, sizeof(driver_copy));
+	return rtn;
+}
+#endif
+
#ifdef MODULE
/* Eventually this will go into an include file, but this will be later */
Scsi_Host_Template driver_template = SCSI_DEBUG;
#define SCSI_DEBUG {proc_info: scsi_debug_proc_info, \
name: "SCSI DEBUG", \
detect: scsi_debug_detect, \
- command: scsi_debug_command, \
queuecommand: scsi_debug_queuecommand, \
abort: scsi_debug_abort, \
reset: scsi_debug_reset, \
bios_param: scsi_debug_biosparam, \
can_queue: SCSI_DEBUG_CANQUEUE, \
this_id: 7, \
- sg_tablesize: SG_ALL, \
+ sg_tablesize: 16, \
cmd_per_lun: 3, \
- unchecked_isa_dma: 1, \
+ unchecked_isa_dma: 0, \
use_clustering: ENABLE_CLUSTERING, \
use_new_eh_code: 1, \
}
#include "hosts.h"
#include "constants.h"
-#ifdef MODULE
+/*
+ * We must always allow SHUTDOWN_SIGS. Even if we are not a module,
+ * the host drivers that we are using may be loaded as modules, and
+ * when we unload these, we need to ensure that the error handler thread
+ * can be shut down.
+ */
#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
-#else
-#define SHUTDOWN_SIGS (0UL)
-#endif
#ifdef DEBUG
#define SENSE_TIMEOUT SCSI_TIMEOUT
*
* Arguments: SCset - command that we are canceling timer for.
*
- * Returns: Amount of time remaining before command would have timed out.
+ * Returns: 1 if we were able to detach the timer. 0 if we
+ * blew it, and the timer function has already started
+ * to run.
*
* Notes: This should be turned into an inline function.
*/
{
int rtn;
- rtn = jiffies - SCset->eh_timeout.expires;
- del_timer(&SCset->eh_timeout);
+ rtn = del_timer(&SCset->eh_timeout);
SCSI_LOG_ERROR_RECOVERY(5, printk("Clearing timer for command %p\n", SCset));
{REQUEST_SENSE, 0, 0, 0, 255, 0};
unsigned char scsi_result0[256], *scsi_result = NULL;
+ ASSERT_LOCK(&io_request_lock, 1);
memcpy((void *) SCpnt->cmnd, (void *) generic_sense,
sizeof(generic_sense));
add_timer(&timer);
- spin_unlock_irq(&io_request_lock);
down(&sem);
- spin_lock_irq(&io_request_lock);
-
del_timer(&timer);
}
{
struct Scsi_Host *host;
+ ASSERT_LOCK(&io_request_lock, 1);
+
host = SCpnt->host;
retry:
* If we had a successful bus reset, mark the command blocks to expect
* a condition code of unit attention.
*/
+ spin_unlock_irq(&io_request_lock);
scsi_sleep(BUS_RESET_SETTLE_TIME);
+ spin_lock_irq(&io_request_lock);
if (SCpnt->eh_state == SUCCESS) {
Scsi_Device *SDloop;
for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
* If we had a successful host reset, mark the command blocks to expect
* a condition code of unit attention.
*/
+ spin_unlock_irq(&io_request_lock);
scsi_sleep(HOST_RESET_SETTLE_TIME);
+ spin_lock_irq(&io_request_lock);
if (SCpnt->eh_state == SUCCESS) {
Scsi_Device *SDloop;
for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
*
* Arguments: host - host that we are restarting
*
+ * Lock status: Assumed that locks are not held upon entry.
+ *
* Returns: Nothing
*
* Notes: When we entered the error handler, we blocked all further
STATIC void scsi_restart_operations(struct Scsi_Host *host)
{
Scsi_Device *SDpnt;
+ unsigned long flags;
+
+ ASSERT_LOCK(&io_request_lock, 0);
/*
* Next free up anything directly waiting upon the host. This will be
wake_up(&host->host_wait);
/*
- * Finally, block devices need an extra kick in the pants. This is because
- * the request queueing mechanism may have queued lots of pending requests
- * and there won't be a process waiting in a place where we can simply wake
- * it up. Thus we simply go through and call the request function to goose
- * the various top level drivers and get things moving again.
+ * Finally we need to re-initiate requests that may be pending. We will
+ * have had everything blocked while error handling is taking place, and
+ * now that error recovery is done, we will need to ensure that these
+ * requests are started.
*/
+ spin_lock_irqsave(&io_request_lock, flags);
for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
- SCSI_LOG_ERROR_RECOVERY(5, printk("Calling request function to restart things...\n"));
-
- if (SDpnt->scsi_request_fn != NULL)
- (*SDpnt->scsi_request_fn) ();
+ request_queue_t *q;
+ if ((host->can_queue > 0 && (host->host_busy >= host->can_queue))
+ || (host->host_blocked)
+ || (SDpnt->device_blocked)) {
+ break;
+ }
+ q = &SDpnt->request_queue;
+ q->request_fn(q);
}
+ spin_unlock_irqrestore(&io_request_lock, flags);
}
/*
Scsi_Cmnd *SCdone;
int timed_out;
+ ASSERT_LOCK(&io_request_lock, 1);
+
SCdone = NULL;
/*
* Due to the spinlock, we will never get out of this
* loop without a proper wait (DB)
*/
+ spin_unlock_irq(&io_request_lock);
scsi_sleep(1 * HZ);
+ spin_lock_irq(&io_request_lock);
goto next_device;
}
* Due to the spinlock, we will never get out of this
* loop without a proper wait. (DB)
*/
+ spin_unlock_irq(&io_request_lock);
scsi_sleep(1 * HZ);
+ spin_lock_irq(&io_request_lock);
goto next_device2;
}
lock_kernel();
/*
- * Flush resources
+ * Flush resources
*/
-
+
daemonize();
-
+
/*
* Set the name of this process.
*/
host->eh_active = 0;
+ /* The spinlock is really needed up to this point. (DB) */
+ spin_unlock_irqrestore(&io_request_lock, flags);
+
/*
* Note - if the above fails completely, the action is to take
* individual devices offline and flush the queue of any
*/
scsi_restart_operations(host);
- /* The spinlock is really needed up to this point. (DB) */
- spin_unlock_irqrestore(&io_request_lock, flags);
}
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler exiting\n"));
#include <scsi/scsi_ioctl.h>
#define NORMAL_RETRIES 5
-#define NORMAL_TIMEOUT (10 * HZ)
+#define IOCTL_NORMAL_TIMEOUT (10 * HZ)
#define FORMAT_UNIT_TIMEOUT (2 * 60 * 60 * HZ)
#define START_STOP_TIMEOUT (60 * HZ)
#define MOVE_MEDIUM_TIMEOUT (5 * 60 * HZ)
/*
* The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
- * The NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
+ * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
*
* dev is the SCSI device struct ptr, *(int *) arg is the length of the
* input data, if any, not including the command string & counts,
static int ioctl_internal_command(Scsi_Device * dev, char *cmd,
int timeout, int retries)
{
- unsigned long flags;
int result;
Scsi_Cmnd *SCpnt;
Scsi_Device *SDpnt;
- spin_lock_irqsave(&io_request_lock, flags);
SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", cmd[0]));
- SCpnt = scsi_allocate_device(NULL, dev, 1);
+ SCpnt = scsi_allocate_device(dev, 1);
{
DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries);
- spin_unlock_irqrestore(&io_request_lock, flags);
down(&sem);
- spin_lock_irqsave(&io_request_lock, flags);
SCpnt->request.sem = NULL;
}
scsi_release_command(SCpnt);
SCpnt = NULL;
- if (!SDpnt->was_reset && SDpnt->scsi_request_fn)
- (*SDpnt->scsi_request_fn) ();
wake_up(&SDpnt->device_wait);
- spin_unlock_irqrestore(&io_request_lock, flags);
return result;
}
* The structure that we are passed should look like:
*
* struct sdata {
- * unsigned int inlen; [i] Length of data to be written to device
+ * unsigned int inlen; [i] Length of data to be written to device
* unsigned int outlen; [i] Length of data to be read from device
* unsigned char cmd[x]; [i] SCSI command (6 <= x <= 12).
- * [o] Data read from device starts here.
- * [o] On error, sense buffer starts here.
+ * [o] Data read from device starts here.
+ * [o] On error, sense buffer starts here.
* unsigned char wdata[y]; [i] Data written to device starts here.
* };
* Notes:
- * - The SCSI command length is determined by examining the 1st byte
- * of the given command. There is no way to override this.
- * - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha).
- * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
- * accomodate the sense buffer when an error occurs.
- * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
- * old code will not be surprised.
- * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
- * a negative return and the Unix error code in 'errno'.
- * If the SCSI command succeeds then 0 is returned.
- * Positive numbers returned are the compacted SCSI error codes (4
- * bytes in one int) where the lowest byte is the SCSI status.
- * See the drivers/scsi/scsi.h file for more information on this.
+ * - The SCSI command length is determined by examining the 1st byte
+ * of the given command. There is no way to override this.
+ * - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha).
+ * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ * accommodate the sense buffer when an error occurs.
+ * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ * old code will not be surprised.
+ * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ * a negative return and the Unix error code in 'errno'.
+ * If the SCSI command succeeds then 0 is returned.
+ * Positive numbers returned are the compacted SCSI error codes (4
+ * bytes in one int) where the lowest byte is the SCSI status.
+ * See the drivers/scsi/scsi.h file for more information on this.
*
*/
-#define OMAX_SB_LEN 16 /* Old sense buffer length */
+#define OMAX_SB_LEN 16 /* Old sense buffer length */
int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
{
- unsigned long flags;
char *buf;
unsigned char cmd[12];
char *cmd_in;
buf_needed = (buf_needed + 511) & ~511;
if (buf_needed > MAX_BUF)
buf_needed = MAX_BUF;
- spin_lock_irqsave(&io_request_lock, flags);
buf = (char *) scsi_malloc(buf_needed);
- spin_unlock_irqrestore(&io_request_lock, flags);
if (!buf)
return -ENOMEM;
memset(buf, 0, buf_needed);
retries = NORMAL_RETRIES;
break;
default:
- timeout = NORMAL_TIMEOUT;
+ timeout = IOCTL_NORMAL_TIMEOUT;
retries = NORMAL_RETRIES;
break;
}
#ifndef DEBUG_NO_CMD
- spin_lock_irqsave(&io_request_lock, flags);
- SCpnt = scsi_allocate_device(NULL, dev, 1);
+ SCpnt = scsi_allocate_device(dev, 1);
{
DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done,
timeout, retries);
- spin_unlock_irqrestore(&io_request_lock, flags);
down(&sem);
SCpnt->request.sem = NULL;
}
}
result = SCpnt->result;
- spin_lock_irqsave(&io_request_lock, flags);
wake_up(&SCpnt->device->device_wait);
SDpnt = SCpnt->device;
if (buf)
scsi_free(buf, buf_needed);
- if (SDpnt->scsi_request_fn)
- (*SDpnt->scsi_request_fn) ();
- spin_unlock_irqrestore(&io_request_lock, flags);
return result;
#else
{
scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
scsi_cmd[4] = SCSI_REMOVAL_PREVENT;
return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
- NORMAL_TIMEOUT, NORMAL_RETRIES);
+ IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
break;
case SCSI_IOCTL_DOORUNLOCK:
if (!dev->removable || !dev->lockable)
scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
scsi_cmd[4] = SCSI_REMOVAL_ALLOW;
return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
- NORMAL_TIMEOUT, NORMAL_RETRIES);
+ IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
case SCSI_IOCTL_TEST_UNIT_READY:
scsi_cmd[0] = TEST_UNIT_READY;
scsi_cmd[1] = dev->lun << 5;
scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
scsi_cmd[4] = 0;
return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
- NORMAL_TIMEOUT, NORMAL_RETRIES);
+ IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
break;
case SCSI_IOCTL_START_UNIT:
scsi_cmd[0] = START_STOP;
--- /dev/null
+/*
+ * scsi_lib.c Copyright (C) 1999 Eric Youngdale
+ *
+ * SCSI queueing library.
+ * Initial versions: Eric Youngdale (eric@andante.org).
+ * Based upon conversations with large numbers
+ * of people at Linux Expo.
+ */
+
+/*
+ * The fundamental purpose of this file is to contain a library of utility
+ * routines that can be used by low-level drivers. Ultimately the idea
+ * is that there should be a sufficiently rich number of functions that it
+ * would be possible for a driver author to fashion a queueing function for
+ * a low-level driver if they wished. Note however that this file also
+ * contains the "default" versions of these functions, as we don't want to
+ * go through and retrofit queueing functions into all 30 some-odd drivers.
+ */
+
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+
+
+#define __KERNEL_SYSCALLS__
+
+#include <linux/unistd.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include <scsi/scsi_ioctl.h>
+
+/*
+ * This entire source file deals with the new queueing code.
+ */
+
+/*
+ * Function: scsi_insert_special_cmd()
+ *
+ * Purpose: Insert pre-formed command into request queue.
+ *
+ * Arguments: SCpnt - command that is ready to be queued.
+ * at_head - boolean. True if we should insert at head
+ * of queue, false if we should insert at tail.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: This function is called from character device and from
+ * ioctl types of functions where the caller knows exactly
+ * what SCSI command needs to be issued. The idea is that
+ * we merely inject the command into the queue (at the head
+ * for now), and then call the queue request function to actually
+ * process it.
+ */
+int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
+{
+ unsigned long flags;
+ request_queue_t *q;
+
+ ASSERT_LOCK(&io_request_lock, 0);
+
+ /*
+ * The SCpnt already contains a request structure - we will doctor the
+ * thing up with the appropriate values and use that in the actual
+ * request queue.
+ */
+ q = &SCpnt->device->request_queue;
+ SCpnt->request.cmd = SPECIAL;
+ SCpnt->request.special = (void *) SCpnt;
+
+ /*
+ * For the moment, we insert at the head of the queue. This may turn
+ * out to be a bad idea, but we will see about that when we get there.
+ */
+ spin_lock_irqsave(&io_request_lock, flags);
+
+ if (at_head) {
+ SCpnt->request.next = q->current_request;
+ q->current_request = &SCpnt->request;
+ } else {
+ /*
+ * FIXME(eric) - we always insert at the tail of the list. Otherwise
+ * ioctl commands would always take precedence over normal I/O.
+ */
+ SCpnt->request.next = NULL;
+ if (q->current_request == NULL) {
+ q->current_request = &SCpnt->request;
+ } else {
+ struct request *req;
+
+ for (req = q->current_request; req; req = req->next) {
+ if (req->next == NULL) {
+ req->next = &SCpnt->request;
+ /*
+ * We must stop once we have linked in the new
+ * request. Without this break the loop would
+ * advance onto the request we just appended and
+ * link it to itself, corrupting the list and
+ * never terminating.
+ */
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ * Now hit the requeue function for the queue. If the host is already
+ * busy, so be it - we have nothing special to do. If the host can queue
+ * it, then send it off.
+ */
+ q->request_fn(q);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return 0;
+}
+
+/*
+ * Function: scsi_init_cmd_errh()
+ *
+ * Purpose: Initialize SCpnt fields related to error handling.
+ *
+ * Arguments: SCpnt - command that is ready to be queued.
+ *
+ * Returns: Nothing
+ *
+ * Notes: This function has the job of initializing a number of
+ * fields related to error handling. Typically this will
+ * be called once for each command, as required.
+ */
+int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
+{
+ ASSERT_LOCK(&io_request_lock, 0);
+
+ /*
+ * Reset all of the per-command error-handling state. The previous
+ * version assigned reset_chain and abort_reason twice; once each is
+ * sufficient and the net effect is identical.
+ */
+ SCpnt->owner = SCSI_OWNER_MIDLEVEL;
+ SCpnt->reset_chain = NULL;
+ SCpnt->serial_number = 0;
+ SCpnt->serial_number_at_timeout = 0;
+ SCpnt->flags = 0;
+ SCpnt->retries = 0;
+ SCpnt->abort_reason = 0;
+ SCpnt->internal_timeout = NORMAL_TIMEOUT;
+
+ memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
+
+ /* Derive the CDB length from the command group if not already set. */
+ if (SCpnt->cmd_len == 0)
+ SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+
+ /*
+ * We need saved copies of a number of fields - this is because
+ * error handling may need to overwrite these with different values
+ * to run different commands, and once error handling is complete,
+ * we will need to restore these values prior to running the actual
+ * command.
+ */
+ SCpnt->old_use_sg = SCpnt->use_sg;
+ SCpnt->old_cmd_len = SCpnt->cmd_len;
+ memcpy((void *) SCpnt->data_cmnd,
+ (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));
+ SCpnt->buffer = SCpnt->request_buffer;
+ SCpnt->bufflen = SCpnt->request_bufflen;
+
+ return 1;
+}
+
+/*
+ * Function: scsi_queue_next_request()
+ *
+ * Purpose: Handle post-processing of completed commands.
+ *
+ * Arguments: SCpnt - command that may need to be requeued.
+ *
+ * Returns: Nothing
+ *
+ * Notes: After command completion, there may be blocks left
+ * over which weren't finished by the previous command
+ * this can be for a number of reasons - the main one is
+ * that a medium error occurred, and the sectors after
+ * the bad block need to be re-read.
+ *
+ * If SCpnt is NULL, it means that the previous command
+ * was completely finished, and we should simply start
+ * a new command, if possible.
+ */
+void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
+{
+ int all_clear;
+ unsigned long flags;
+ Scsi_Device *SDpnt;
+ struct Scsi_Host *SHpnt;
+
+ ASSERT_LOCK(&io_request_lock, 0);
+
+ spin_lock_irqsave(&io_request_lock, flags);
+ if (SCpnt != NULL) {
+
+ /*
+ * For some reason, we are not done with this request.
+ * This happens for I/O errors in the middle of the request,
+ * in which case we need to request the blocks that come after
+ * the bad sector.
+ */
+ /* Re-queue at the head so the remainder runs before new I/O. */
+ SCpnt->request.next = q->current_request;
+ q->current_request = &SCpnt->request;
+ SCpnt->request.special = (void *) SCpnt;
+ }
+ /*
+ * Just hit the requeue function for the queue.
+ * FIXME - if this queue is empty, check to see if we might need to
+ * start requests for other devices attached to the same host.
+ */
+ q->request_fn(q);
+
+ /*
+ * Now see whether there are other devices on the bus which
+ * might be starved. If so, hit the request function. If we
+ * don't find any, then it is safe to reset the flag. If we
+ * find any device that it is starved, it isn't safe to reset the
+ * flag as the queue function releases the lock and thus some
+ * other device might have become starved along the way.
+ */
+ SDpnt = (Scsi_Device *) q->queuedata;
+ SHpnt = SDpnt->host;
+ all_clear = 1;
+ if (SHpnt->some_device_starved) {
+ /* NOTE: this loop reuses SDpnt, clobbering the value read above. */
+ for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
+ /* This q intentionally shadows the parameter: per-device queue. */
+ request_queue_t *q;
+ if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
+ || (SHpnt->host_blocked)) {
+ /* Host saturated or blocked - no point kicking more queues. */
+ break;
+ }
+ if (SDpnt->device_blocked || !SDpnt->starved) {
+ continue;
+ }
+ q = &SDpnt->request_queue;
+ q->request_fn(q);
+ /* We kicked a starved device; flag cannot safely be cleared. */
+ all_clear = 0;
+ }
+ /* SDpnt == NULL means the loop scanned every device without a break. */
+ if (SDpnt == NULL && all_clear) {
+ SHpnt->some_device_starved = 0;
+ }
+ }
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+/*
+ * Function: scsi_end_request()
+ *
+ * Purpose: Post-processing of completed commands called from interrupt
+ * handler.
+ *
+ * Arguments: SCpnt - command that is complete.
+ * uptodate - 1 if I/O indicates success, 0 for I/O error.
+ * sectors - number of sectors we want to mark.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: This is called for block device requests in order to
+ * mark some number of sectors as complete.
+ */
+Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
+{
+ struct request *req;
+ struct buffer_head *bh;
+
+ ASSERT_LOCK(&io_request_lock, 0);
+
+ req = &SCpnt->request;
+ req->errors = 0;
+ if (!uptodate) {
+ printk(" I/O error: dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+ }
+ /*
+ * Complete buffer heads off the front of the request until we have
+ * accounted for 'sectors' sectors or the chain is exhausted.
+ */
+ do {
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ bh->b_reqnext = NULL;
+ sectors -= bh->b_size >> 9;
+ bh->b_end_io(bh, uptodate);
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ /* Accounting went negative - repair and complain. */
+ req->nr_sectors = req->current_nr_sectors;
+ printk("scsi_end_request: buffer-list destroyed\n");
+ }
+ }
+ }
+ } while (sectors && bh);
+
+ /*
+ * If there are blocks left over at the end, set up the command
+ * to queue the remainder of them.
+ */
+ if (req->bh) {
+ /* bh == req->bh here: point the request at the next segment's data. */
+ req->buffer = bh->b_data;
+ return SCpnt;
+ }
+ /*
+ * This request is done. If there is someone blocked waiting for this
+ * request, wake them up. Typically used to wake up processes trying
+ * to swap a page into memory.
+ */
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+ scsi_release_command(SCpnt);
+ /* NULL tells the caller the whole request has been finished. */
+ return NULL;
+}
+
+/*
+ * Function: scsi_io_completion()
+ *
+ * Purpose: Completion processing for block device I/O requests.
+ *
+ * Arguments: SCpnt - command that is finished.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: This function is matched in terms of capabilities to
+ * the function that created the scatter-gather list.
+ * In other words, if there are no bounce buffers
+ * (the normal case for most drivers), we don't need
+ * the logic to deal with cleaning up afterwards.
+ */
+void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
+ int block_sectors)
+{
+ int result = SCpnt->result;
+ int this_count = SCpnt->bufflen >> 9;
+ request_queue_t *q = &SCpnt->device->request_queue;
+
+ ASSERT_LOCK(&io_request_lock, 0);
+
+ /*
+ * Free up any indirection buffers we allocated for DMA purposes.
+ * For the case of a READ, we need to copy the data out of the
+ * bounce buffer and into the real buffer.
+ */
+ if (SCpnt->use_sg) {
+ struct scatterlist *sgpnt;
+ int i;
+
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+
+ for (i = 0; i < SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ if (SCpnt->request.cmd == READ) {
+ memcpy(sgpnt[i].alt_address,
+ sgpnt[i].address,
+ sgpnt[i].length);
+ }
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+ } else {
+ /* Single (non-sg) bounce buffer case. */
+ if (SCpnt->buffer != SCpnt->request.buffer) {
+ if (SCpnt->request.cmd == READ) {
+ memcpy(SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+ }
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ }
+ }
+ /*
+ * Next deal with any sectors which we were able to correctly
+ * handle.
+ */
+ if (good_sectors > 0) {
+ SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n",
+ SCpnt->request.nr_sectors,
+ good_sectors));
+ SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg));
+
+ SCpnt->request.errors = 0;
+ /*
+ * If multiple sectors are requested in one buffer, then
+ * they will have been finished off by the first command.
+ * If not, then we have a multi-buffer command.
+ */
+ SCpnt = scsi_end_request(SCpnt, 1, good_sectors);
+
+ /*
+ * If the command completed without error, then either finish off the
+ * rest of the command, or start a new one.
+ */
+ if (result == 0) {
+ scsi_queue_next_request(q, SCpnt);
+ return;
+ }
+ }
+ /*
+ * Now, if we were good little boys and girls, Santa left us a request
+ * sense buffer. We can extract information from this, so we
+ * can choose a block to remap, etc.
+ */
+ if (driver_byte(result) != 0) {
+ if (suggestion(result) == SUGGEST_REMAP) {
+#ifdef REMAP
+ /*
+ * Not yet implemented. A read will fail after being remapped,
+ * a write will call the strategy routine again.
+ */
+ if (SCpnt->device->remap) {
+ result = 0;
+ }
+#endif
+ }
+ /* 0x70: current-error sense format; check the sense key. */
+ if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70
+ && (SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+ if (SCpnt->device->removable) {
+ /* detected disc change. set a bit and quietly refuse
+ * further access.
+ */
+ SCpnt->device->changed = 1;
+ SCpnt = scsi_end_request(SCpnt, 0, this_count);
+ scsi_queue_next_request(q, SCpnt);
+ return;
+ } else {
+ /*
+ * Must have been a power glitch, or a bus reset.
+ * Could not have been a media change, so we just retry
+ * the request and see what happens.
+ */
+ scsi_queue_next_request(q, SCpnt);
+ return;
+ }
+ }
+ /* If we had an ILLEGAL REQUEST returned, then we may have
+ * performed an unsupported command. The only thing this should be
+ * would be a ten byte read where only a six byte read was supported.
+ * Also, on a system where READ CAPACITY failed, we have read
+ * past the end of the disk.
+ */
+
+ switch (SCpnt->sense_buffer[2]) {
+ case ILLEGAL_REQUEST:
+ if (SCpnt->device->ten) {
+ /* Fall back to 6-byte CDBs and retry the same command. */
+ SCpnt->device->ten = 0;
+ scsi_queue_next_request(q, SCpnt);
+ result = 0;
+ } else {
+ SCpnt = scsi_end_request(SCpnt, 0, this_count);
+ scsi_queue_next_request(q, SCpnt);
+ return;
+ }
+ break;
+ case NOT_READY:
+ /* NOTE(review): %x with a kdev_t - format/type match should be confirmed. */
+ printk(KERN_INFO "Device %x not ready.\n",
+ SCpnt->request.rq_dev);
+ SCpnt = scsi_end_request(SCpnt, 0, this_count);
+ scsi_queue_next_request(q, SCpnt);
+ return;
+ break;
+ case MEDIUM_ERROR:
+ case VOLUME_OVERFLOW:
+ printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
+ SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ print_sense("sd", SCpnt);
+ SCpnt = scsi_end_request(SCpnt, 0, block_sectors);
+ scsi_queue_next_request(q, SCpnt);
+ return;
+ default:
+ break;
+ }
+ } /* driver byte != 0 */
+ if (result) {
+ /* Unrecognized error: fail the current chunk and move on. */
+ printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
+ SCpnt->device->host->host_no,
+ SCpnt->device->channel,
+ SCpnt->device->id,
+ SCpnt->device->lun, result);
+
+ if (driver_byte(result) & DRIVER_SENSE)
+ print_sense("sd", SCpnt);
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+ scsi_queue_next_request(q, SCpnt);
+ return;
+ }
+}
+
+/*
+ * Function: scsi_get_request_dev()
+ *
+ * Purpose: Find the upper-level driver that is responsible for this
+ * request
+ *
+ * Arguments: request - I/O request we are preparing to queue.
+ *
+ * Lock status: No locks assumed to be held, but as it happens the
+ * io_request_lock is held when this is called.
+ *
+ * Returns: Nothing
+ *
+ * Notes: The requests in the request queue may have originated
+ * from any block device driver. We need to find out which
+ * one so that we can later form the appropriate command.
+ */
+struct Scsi_Device_Template *scsi_get_request_dev(struct request *req)
+{
+ struct Scsi_Device_Template *sdtpnt;
+ int major = MAJOR(req->rq_dev);
+
+ ASSERT_LOCK(&io_request_lock, 1);
+
+ /*
+ * Walk the registered upper-level (block) drivers and hand back
+ * the first one that owns the major number of this request's
+ * device. NULL means no block driver claims this major.
+ */
+ for (sdtpnt = scsi_devicelist; sdtpnt != NULL; sdtpnt = sdtpnt->next)
+ if (sdtpnt->blk && sdtpnt->major == major)
+ return sdtpnt;
+
+ return NULL;
+}
+
+/*
+ * Function: scsi_request_fn()
+ *
+ * Purpose: Generic version of request function for SCSI hosts.
+ *
+ * Arguments: q - Pointer to actual queue.
+ *
+ * Returns: Nothing
+ *
+ * Lock status: IO request lock assumed to be held when called.
+ *
+ * Notes: The theory is that this function is something which individual
+ * drivers could also supply if they wished to. The problem
+ * is that we have 30 some odd low-level drivers in the kernel
+ * tree already, and it would be most difficult to retrofit
+ * this crap into all of them. Thus this function has the job
+ * of acting as a generic queue manager for all of those existing
+ * drivers.
+ */
+void scsi_request_fn(request_queue_t * q)
+{
+ struct request *req;
+ Scsi_Cmnd *SCpnt;
+ Scsi_Device *SDpnt;
+ struct Scsi_Host *SHpnt;
+ struct Scsi_Device_Template *STpnt;
+
+ ASSERT_LOCK(&io_request_lock, 1);
+
+ SDpnt = (Scsi_Device *) q->queuedata;
+ if (!SDpnt) {
+ panic("Missing device");
+ }
+ SHpnt = SDpnt->host;
+
+ /*
+ * If the host for this device is in error recovery mode, don't
+ * do anything at all here. When the host leaves error recovery
+ * mode, it will automatically restart things and start queueing
+ * commands again. Same goes if the queue is actually plugged,
+ * if the device itself is blocked, or if the host is fully
+ * occupied.
+ */
+ if (SHpnt->in_recovery
+ || q->plugged) {
+ return;
+ }
+ /*
+ * To start with, we keep looping until the queue is empty, or until
+ * the host is no longer able to accept any more requests.
+ */
+ while (1 == 1) {
+ /*
+ * If the host cannot accept another request, then quit.
+ */
+ if (SDpnt->device_blocked) {
+ break;
+ }
+ if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
+ || (SHpnt->host_blocked)) {
+ /*
+ * If we are unable to process any commands at all for this
+ * device, then we consider it to be starved. What this means
+ * is that there are no outstanding commands for this device
+ * and hence we need a little help getting it started again
+ * once the host isn't quite so busy.
+ */
+ if (SDpnt->device_busy == 0) {
+ SDpnt->starved = 1;
+ SHpnt->some_device_starved = 1;
+ }
+ break;
+ } else {
+ SDpnt->starved = 0;
+ }
+ /*
+ * Loop through all of the requests in this queue, and find
+ * one that is queueable.
+ */
+ req = q->current_request;
+
+ /*
+ * If we couldn't find a request that could be queued, then we
+ * can also quit.
+ */
+ if (!req) {
+ break;
+ }
+ /*
+ * Find the actual device driver associated with this command.
+ * The SPECIAL requests are things like character device or
+ * ioctls, which did not originate from ll_rw_blk. STpnt is
+ * left NULL for SPECIAL requests, and is non-NULL (or we
+ * panic) for normal block requests; we use that distinction
+ * below instead of touching the request again after it has
+ * been released.
+ */
+ if (req->special != NULL) {
+ STpnt = NULL;
+ SCpnt = (Scsi_Cmnd *) req->special;
+ } else {
+ STpnt = scsi_get_request_dev(req);
+ if (!STpnt) {
+ panic("Unable to find device associated with request");
+ }
+ /*
+ * Now try and find a command block that we can use.
+ */
+ SCpnt = scsi_allocate_device(SDpnt, FALSE);
+ /*
+ * If so, we are ready to do something. Bump the count
+ * while the queue is locked and then break out of the loop.
+ * Otherwise loop around and try another request.
+ */
+ if (!SCpnt) {
+ break;
+ }
+ SHpnt->host_busy++;
+ SDpnt->device_busy++;
+ }
+
+ /*
+ * FIXME(eric)
+ * I am not sure where the best place to do this is. We need
+ * to hook in a place where we are likely to come if in user
+ * space. Technically the error handling thread should be
+ * doing this crap, but the error handler isn't used by
+ * most hosts.
+ */
+ if (SDpnt->was_reset) {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ *
+ * NOTE(review): the continue below restarts the loop
+ * after busy counts may already have been bumped for a
+ * non-special request - confirm the accounting here.
+ */
+ if (SDpnt->removable && !in_interrupt()) {
+ spin_unlock_irq(&io_request_lock);
+ scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
+ SDpnt->was_reset = 0;
+ spin_lock_irq(&io_request_lock);
+ continue;
+ }
+ SDpnt->was_reset = 0;
+ }
+ /*
+ * Finally, before we release the lock, we copy the
+ * request to the command block, and remove the
+ * request from the request list. Note that we always
+ * operate on the queue head - there is absolutely no
+ * reason to search the list, because all of the commands
+ * in this queue are for the same device.
+ */
+ q->current_request = req->next;
+
+ if (STpnt != NULL) {
+ memcpy(&SCpnt->request, req, sizeof(struct request));
+
+ /*
+ * We have copied the data out of the request block - it is now in
+ * a field in SCpnt. Release the request block. From this
+ * point on req may be reused by another CPU and must not
+ * be dereferenced again - the old code read req->special
+ * after this release, which was a use-after-release race.
+ */
+ req->next = NULL;
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+ }
+ /*
+ * Now it is finally safe to release the lock. We are not going
+ * to noodle the request list until this request has been queued
+ * and we loop back to queue another.
+ */
+ spin_unlock_irq(&io_request_lock);
+
+ if (STpnt != NULL) {
+ /*
+ * This will do a couple of things:
+ * 1) Fill in the actual SCSI command.
+ * 2) Fill in any other upper-level specific fields (timeout).
+ *
+ * If this returns 0, it means that the request failed (reading
+ * past end of disk, reading offline device, etc). This won't
+ * actually talk to the device, but some kinds of consistency
+ * checking may cause the request to be rejected immediately.
+ */
+ /*
+ * This sets up the scatter-gather table (allocating if
+ * required). Hosts that need bounce buffers will also
+ * get those allocated here.
+ */
+ if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+ continue;
+ }
+ /*
+ * Initialize the actual SCSI command for this request.
+ */
+ if (!STpnt->init_command(SCpnt)) {
+ continue;
+ }
+ }
+ /*
+ * Finally, initialize any error handling parameters, and set up
+ * the timers for timeouts.
+ */
+ scsi_init_cmd_errh(SCpnt);
+
+ /*
+ * Dispatch the command to the low-level driver.
+ */
+ scsi_dispatch_cmd(SCpnt);
+
+ /*
+ * Now we need to grab the lock again. We are about to mess with
+ * the request queue and try to find another command.
+ */
+ spin_lock_irq(&io_request_lock);
+ }
+
+ /*
+ * If this is a single-lun device, and we are currently finished
+ * with this device, then see if we need to get another device
+ * started.
+ */
+ if (SDpnt->single_lun
+ && q->current_request == NULL
+ && SDpnt->device_busy == 0) {
+ request_queue_t *q;
+
+ for (SDpnt = SHpnt->host_queue;
+ SDpnt;
+ SDpnt = SDpnt->next) {
+ if (((SHpnt->can_queue > 0)
+ && (SHpnt->host_busy >= SHpnt->can_queue))
+ || (SHpnt->host_blocked)
+ || (SDpnt->device_blocked)) {
+ break;
+ }
+ q = &SDpnt->request_queue;
+ q->request_fn(q);
+ }
+ }
+}
--- /dev/null
+/*
+ * scsi_merge.c Copyright (C) 1999 Eric Youngdale
+ *
+ * SCSI queueing library.
+ * Initial versions: Eric Youngdale (eric@andante.org).
+ * Based upon conversations with large numbers
+ * of people at Linux Expo.
+ */
+
+/*
+ * This file contains queue management functions that are used by SCSI.
+ * Typically this is used for several purposes. First, we need to ensure
+ * that commands do not grow so large that they cannot be handled all at
+ * once by a host adapter. The various flavors of merge functions included
+ * here serve this purpose.
+ *
+ * Note that it would be quite trivial to allow the low-level driver the
+ * flexibility to define its own queue handling functions. For the time
+ * being, the hooks are not present. Right now we are just using the
+ * data in the host template as an indicator of how we should be handling
+ * queues, and we select routines that are optimized for that purpose.
+ *
+ * Some hosts do not impose any restrictions on the size of a request.
+ * In such cases none of the merge functions in this file are called,
+ * and we allow ll_rw_blk to merge requests in the default manner.
+ * This isn't guaranteed to be optimal, but it should be pretty darned
+ * good. If someone comes up with ideas of better ways of managing queues
+ * to improve on the default behavior, then certainly fit it into this
+ * scheme in whatever manner makes the most sense. Please note that
+ * since each device has its own queue, we have considerable flexibility
+ * in queue management.
+ */
+
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+
+
+#define __KERNEL_SYSCALLS__
+
+#include <linux/unistd.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include <scsi/scsi_ioctl.h>
+
+#ifdef CONFIG_SCSI_DEBUG_QUEUES
+/*
+ * Enable a bunch of additional consistency checking. Turn this off
+ * if you are benchmarking.
+ */
+
+static int dump_stats(struct request *req,
+ int use_clustering,
+ int dma_host,
+ int segments)
+{
+ struct buffer_head *bh;
+
+ /*
+ * Dump the information that we have. We know we have an
+ * inconsistency.
+ */
+ printk("nr_segments is %lx\n", req->nr_segments);
+ printk("counted segments is %x\n", segments);
+ printk("Flags %d %d\n", use_clustering, dma_host);
+ /*
+ * Walk the entire buffer chain. The old loop stopped when
+ * b_reqnext was NULL and so never dumped the final buffer,
+ * which is exactly the one you may need to see before we panic.
+ */
+ for (bh = req->bh; bh != NULL; bh = bh->b_reqnext)
+ {
+ printk("Segment 0x%p, blocks %d, addr 0x%lx\n",
+ bh,
+ bh->b_size >> 9,
+ virt_to_phys(bh->b_data - 1));
+ }
+ panic("Ththththaats all folks. Too dangerous to continue.\n");
+}
+
+
+/*
+ * Simple sanity check that we will use for the first go around
+ * in order to ensure that we are doing the counting correctly.
+ * This can be removed for optimization.
+ */
+/*
+ * NOTE: the __label__/&&here trick exists only to obtain a code address
+ * to print, so the failing call site can be located from the log before
+ * dump_stats() panics.
+ */
+#define SANITY_CHECK(req, _CLUSTER, _DMA) \
+ if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA) ) \
+ { \
+ __label__ here; \
+here: \
+ printk("Incorrect segment count at 0x%p", &&here); \
+ dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA)); \
+ }
+#else
+/* Debugging disabled: SANITY_CHECK compiles away to nothing. */
+#define SANITY_CHECK(req, _CLUSTER, _DMA)
+#endif
+
+/*
+ * FIXME(eric) - the original disk code disabled clustering for MOD
+ * devices. I have no idea why we thought this was a good idea - my
+ * guess is that it was an attempt to limit the size of requests to MOD
+ * devices.
+ */
+#define CLUSTERABLE_DEVICE(SH,SD) (SH->use_clustering && \
+ SD->type != TYPE_MOD)
+
+/*
+ * This entire source file deals with the new queueing code.
+ */
+
+/*
+ * Function: __count_segments()
+ *
+ * Purpose: Prototype for queue merge function.
+ *
+ * Arguments: q - Queue for which we are merging request.
+ * req - request into which we wish to merge.
+ * use_clustering - 1 if this host wishes to use clustering
+ * dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ * expose all of the address lines, so that DMA cannot
+ * be done from an arbitrary address).
+ *
+ * Returns: Count of the number of SG segments for the request.
+ *
+ * Lock status:
+ *
+ * Notes: This is only used for diagnostic purposes.
+ */
+__inline static int __count_segments(struct request *req,
+ int use_clustering,
+ int dma_host)
+{
+ int ret = 1;
+ struct buffer_head *bh;
+
+ /*
+ * Walk the buffer chain; each boundary between adjacent buffers
+ * either merges into the current segment or starts exactly one
+ * new one. This must agree with the tally kept by
+ * __scsi_merge_fn(), or SANITY_CHECK will panic.
+ */
+ for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) {
+ if (use_clustering) {
+ /*
+ * See if we can do this without creating another
+ * scatter-gather segment. In the event that this is a
+ * DMA capable host, make sure that a segment doesn't span
+ * the DMA threshold boundary.
+ */
+ if (dma_host &&
+ virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+ /*
+ * Crossing the ISA DMA threshold starts one new
+ * segment - the same accounting as the
+ * new_segment path in __scsi_merge_fn(). The
+ * previous code fell through and incremented a
+ * second time, double-counting this boundary.
+ */
+ ret++;
+ continue;
+ }
+ if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) {
+ /*
+ * This one is OK. Let it go.
+ */
+ continue;
+ }
+ ret++;
+ } else {
+ ret++;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Function: __scsi_merge_fn()
+ *
+ * Purpose: Prototype for queue merge function.
+ *
+ * Arguments: q - Queue for which we are merging request.
+ * req - request into which we wish to merge.
+ * bh - Block which we may wish to merge into request
+ * use_clustering - 1 if this host wishes to use clustering
+ * dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ * expose all of the address lines, so that DMA cannot
+ * be done from an arbitrary address).
+ *
+ * Returns: 1 if it is OK to merge the block into the request. 0
+ * if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes: Some drivers have limited scatter-gather table sizes, and
+ * thus they cannot queue an infinitely large command. This
+ * function is called from ll_rw_blk before it attempts to merge
+ * a new block into a request to make sure that the request will
+ * not become too large.
+ *
+ * This function is not designed to be directly called. Instead
+ * it should be referenced from other functions where the
+ * use_clustering and dma_host parameters should be integer
+ * constants. The compiler should thus be able to properly
+ * optimize the code, eliminating stuff that is irrelevant.
+ * It is more maintainable to do this way with a single function
+ * than to have 4 separate functions all doing roughly the
+ * same thing.
+ */
+__inline static int __scsi_merge_fn(request_queue_t * q,
+ struct request *req,
+ struct buffer_head *bh,
+ int use_clustering,
+ int dma_host)
+{
+ unsigned int sector, count;
+ Scsi_Device *SDpnt;
+ struct Scsi_Host *SHpnt;
+
+ SDpnt = (Scsi_Device *) q->queuedata;
+ SHpnt = SDpnt->host;
+
+ /* Candidate buffer size in 512-byte sectors, and its start sector. */
+ count = bh->b_size >> 9;
+ sector = bh->b_rsector;
+
+ /*
+ * We come in here in one of two cases. The first is that we
+ * are checking to see if we can add the buffer to the end of the
+ * request, the other is to see if we should add the request to the
+ * start.
+ */
+ if (req->sector + req->nr_sectors == sector) {
+ /* Back merge: bh begins right where the request ends. */
+ if (use_clustering) {
+ /*
+ * See if we can do this without creating another
+ * scatter-gather segment. In the event that this is a
+ * DMA capable host, make sure that a segment doesn't span
+ * the DMA threshold boundary.
+ */
+ if (dma_host &&
+ virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+ goto new_segment;
+ }
+ if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
+ /*
+ * This one is OK. Let it go.
+ */
+ return 1;
+ }
+ }
+ goto new_segment;
+ } else if (req->sector - count == sector) {
+ /* Front merge: bh ends exactly where the request begins. */
+ if (use_clustering) {
+ /*
+ * See if we can do this without creating another
+ * scatter-gather segment. In the event that this is a
+ * DMA capable host, make sure that a segment doesn't span
+ * the DMA threshold boundary.
+ */
+ if (dma_host &&
+ virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+ goto new_segment;
+ }
+ if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
+ /*
+ * This one is OK. Let it go.
+ */
+ return 1;
+ }
+ }
+ goto new_segment;
+ } else {
+ /* ll_rw_blk must only offer us adjacent blocks. */
+ panic("Attempt to merge sector that doesn't belong");
+ }
+ new_segment:
+ /*
+ * The buffer cannot share a segment; accept it only if the host's
+ * scatter-gather table still has room, and keep the tally that
+ * SANITY_CHECK compares against __count_segments().
+ */
+ if (req->nr_segments < SHpnt->sg_tablesize) {
+ /*
+ * This will form the start of a new segment. Bump the
+ * counter.
+ */
+ req->nr_segments++;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * Function: scsi_merge_fn_()
+ *
+ * Purpose: queue merge function.
+ *
+ * Arguments: q - Queue for which we are merging request.
+ * req - request into which we wish to merge.
+ * bh - Block which we may wish to merge into request
+ *
+ * Returns: 1 if it is OK to merge the block into the request. 0
+ * if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes: Optimized for different cases depending upon whether
+ * ISA DMA is in use and whether clustering should be used.
+ */
+#define MERGEFCT(_FUNCTION, _CLUSTER, _DMA) \
+static int _FUNCTION(request_queue_t * q, \
+ struct request * req, \
+ struct buffer_head * bh) \
+{ \
+ int ret; \
+ SANITY_CHECK(req, _CLUSTER, _DMA); \
+ ret = __scsi_merge_fn(q, req, bh, _CLUSTER, _DMA); \
+ return ret; \
+}
+
+MERGEFCT(scsi_merge_fn_, 0, 0)
+MERGEFCT(scsi_merge_fn_d, 0, 1)
+MERGEFCT(scsi_merge_fn_c, 1, 0)
+MERGEFCT(scsi_merge_fn_dc, 1, 1)
+/*
+ * Function: __scsi_merge_requests_fn()
+ *
+ * Purpose: Prototype for queue merge function.
+ *
+ * Arguments: q - Queue for which we are merging request.
+ * req - request into which we wish to merge.
+ * next - 2nd request that we might want to combine with req
+ * use_clustering - 1 if this host wishes to use clustering
+ * dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ * expose all of the address lines, so that DMA cannot
+ * be done from an arbitrary address).
+ *
+ * Returns: 1 if it is OK to merge the two requests. 0
+ * if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes: Some drivers have limited scatter-gather table sizes, and
+ * thus they cannot queue an infinitely large command. This
+ * function is called from ll_rw_blk before it attempts to merge
+ * a new block into a request to make sure that the request will
+ * not become too large.
+ *
+ * This function is not designed to be directly called. Instead
+ * it should be referenced from other functions where the
+ * use_clustering and dma_host parameters should be integer
+ * constants. The compiler should thus be able to properly
+ * optimize the code, eliminating stuff that is irrelevant.
+ * It is more maintainable to do this way with a single function
+ * than to have 4 separate functions all doing roughly the
+ * same thing.
+ */
+__inline static int __scsi_merge_requests_fn(request_queue_t * q,
+ struct request *req,
+ struct request *next,
+ int use_clustering,
+ int dma_host)
+{
+ Scsi_Device *SDpnt;
+ struct Scsi_Host *SHpnt;
+
+ SDpnt = (Scsi_Device *) q->queuedata;
+ SHpnt = SDpnt->host;
+
+ /*
+ * If the two requests together are too large (even assuming that
+ * we can merge the boundary segments into one), then don't
+ * allow the merge.
+ */
+ if (req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) {
+ return 0;
+ }
+ /*
+ * The main question is whether the two segments at the boundaries
+ * would be considered one or two.
+ */
+ if (use_clustering) {
+ /*
+ * See if we can do this without creating another
+ * scatter-gather segment. In the event that this is a
+ * DMA capable host, make sure that a segment doesn't span
+ * the DMA threshold boundary.
+ */
+ if (dma_host &&
+ virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+ goto dont_combine;
+ }
+ if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) {
+ /*
+ * This one is OK. Let it go.
+ */
+ req->nr_segments += next->nr_segments - 1;
+ return 1;
+ }
+ }
+ dont_combine:
+ /*
+ * Reached directly when clustering is off, or by goto when the
+ * boundary buffers cannot share a segment.
+ *
+ * We know that the two requests at the boundary should not be combined.
+ * Make sure we can fit something that is the sum of the two.
+ * A slightly stricter test than we had above.
+ */
+ if (req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
+ return 0;
+ } else {
+ /*
+ * This will form the start of a new segment. Bump the
+ * counter.
+ */
+ req->nr_segments += next->nr_segments;
+ return 1;
+ }
+}
+
+/*
+ * Function: scsi_merge_requests_fn_()
+ *
+ * Purpose: queue merge function.
+ *
+ * Arguments: q - Queue for which we are merging request.
+ * req - request into which we wish to merge.
+ * bh - Block which we may wish to merge into request
+ *
+ * Returns: 1 if it is OK to merge the block into the request. 0
+ * if it is not OK.
+ *
+ * Lock status: io_request_lock is assumed to be held here.
+ *
+ * Notes: Optimized for different cases depending upon whether
+ * ISA DMA is in use and whether clustering should be used.
+ */
+#define MERGEREQFCT(_FUNCTION, _CLUSTER, _DMA) \
+static int _FUNCTION(request_queue_t * q, \
+ struct request * req, \
+ struct request * next) \
+{ \
+ int ret; \
+ SANITY_CHECK(req, _CLUSTER, _DMA); \
+ ret = __scsi_merge_requests_fn(q, req, next, _CLUSTER, _DMA); \
+ return ret; \
+}
+
+MERGEREQFCT(scsi_merge_requests_fn_, 0, 0)
+MERGEREQFCT(scsi_merge_requests_fn_d, 0, 1)
+MERGEREQFCT(scsi_merge_requests_fn_c, 1, 0)
+MERGEREQFCT(scsi_merge_requests_fn_dc, 1, 1)
+/*
+ * Function: __init_io()
+ *
+ * Purpose: Prototype for io initialize function.
+ *
+ * Arguments: SCpnt - Command descriptor we wish to initialize
+ * sg_count_valid - 1 if the sg count in the req is valid.
+ * use_clustering - 1 if this host wishes to use clustering
+ * dma_host - 1 if this host has ISA DMA issues (bus doesn't
+ * expose all of the address lines, so that DMA cannot
+ * be done from an arbitrary address).
+ *
+ * Returns: 1 on success.
+ *
+ * Lock status:
+ *
+ * Notes: Only the SCpnt argument should be a non-constant variable.
+ * This function is designed in such a way that it will be
+ * invoked from a series of small stubs, each of which would
+ * be optimized for specific circumstances.
+ *
+ * The advantage of this is that hosts that don't do DMA
+ * get versions of the function that essentially don't have
+ * any of the DMA code. Same goes for clustering - in the
+ * case of hosts with no need for clustering, there is no point
+ * in a whole bunch of overhead.
+ *
+ * Finally, in the event that a host has set sg_tablesize to
+ * SG_ALL, implying that there is no limit to the length of a
+ * scatter-gather list, the sg count in the request won't be
+ * valid (mainly because we don't need queue management
+ * functions which keep the tally up to date).
+ */
+__inline static int __init_io(Scsi_Cmnd * SCpnt,
+ int sg_count_valid,
+ int use_clustering,
+ int dma_host)
+{
+ struct buffer_head *bh;
+ struct buffer_head *bhprev;
+ char *buff;
+ int count;
+ int i;
+ struct request *req;
+ struct scatterlist *sgpnt;
+ int this_count;
+
+ /*
+ * FIXME(eric) - don't inline this - it doesn't depend on the
+ * integer flags. Come to think of it, I don't think this is even
+ * needed any more. Need to play with it and see if we hit the
+ * panic. If not, then don't bother.
+ */
+ if (!SCpnt->request.bh) {
+ /*
+ * Case of page request (i.e. raw device), or unlinked buffer
+ * Typically used for swapping, but this isn't how we do
+ * swapping any more.
+ */
+ panic("I believe this is dead code. If we hit this, I was wrong");
+#if 0
+ SCpnt->request_bufflen = SCpnt->request.nr_sectors << 9;
+ SCpnt->request_buffer = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+ /*
+ * FIXME(eric) - need to handle DMA here.
+ */
+#endif
+ return 1;
+ }
+ req = &SCpnt->request;
+ /*
+ * First we need to know how many scatter gather segments are needed.
+ */
+ if (!sg_count_valid) {
+ count = __count_segments(req, use_clustering, dma_host);
+ } else {
+ count = req->nr_segments;
+ }
+
+ /*
+ * If the dma pool is nearly empty, then queue a minimal request
+ * with a single segment. Typically this will satisfy a single
+ * buffer.
+ */
+ if (dma_host && scsi_dma_free_sectors <= 10) {
+ this_count = SCpnt->request.current_nr_sectors;
+ goto single_segment;
+ }
+ /*
+ * Don't bother with scatter-gather if there is only one segment.
+ */
+ if (count == 1) {
+ this_count = SCpnt->request.nr_sectors;
+ goto single_segment;
+ }
+ SCpnt->use_sg = count;
+
+ /*
+ * Allocate the actual scatter-gather table itself.
+ * scsi_malloc can only allocate in chunks of 512 bytes
+ */
+ SCpnt->sglist_len = (SCpnt->use_sg
+ * sizeof(struct scatterlist) + 511) & ~511;
+
+ sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);
+
+ /*
+ * Now fill the scatter-gather table.
+ */
+ if (!sgpnt) {
+ /*
+ * If we cannot allocate the scatter-gather table, then
+ * simply write the first buffer all by itself.
+ */
+ printk("Warning - running *really* short on DMA buffers\n");
+ this_count = SCpnt->request.current_nr_sectors;
+ goto single_segment;
+ }
+ /*
+ * Next, walk the list, and fill in the addresses and sizes of
+ * each segment.
+ */
+ memset(sgpnt, 0, SCpnt->sglist_len);
+ SCpnt->request_buffer = (char *) sgpnt;
+ SCpnt->request_bufflen = 0;
+ bhprev = NULL;
+
+ for (count = 0, bh = SCpnt->request.bh;
+ bh; bh = bh->b_reqnext) {
+ if (use_clustering && bhprev != NULL) {
+ if (dma_host &&
+ virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) {
+ /* Nothing - fall through */
+ } else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
+ /*
+ * This one is OK. Let it go. Fold this buffer
+ * into the current segment.
+ */
+ sgpnt[count - 1].length += bh->b_size;
+ if (!dma_host) {
+ SCpnt->request_bufflen += bh->b_size;
+ }
+ bhprev = bh;
+ continue;
+ }
+ }
+ count++;
+ sgpnt[count - 1].address = bh->b_data;
+ sgpnt[count - 1].length += bh->b_size;
+ if (!dma_host) {
+ SCpnt->request_bufflen += bh->b_size;
+ }
+ bhprev = bh;
+ }
+
+ /*
+ * Verify that the count is correct.
+ */
+ if (count != SCpnt->use_sg) {
+ panic("Incorrect sg segment count");
+ }
+ if (!dma_host) {
+ return 1;
+ }
+ /*
+ * Now allocate bounce buffers, if needed.
+ */
+ SCpnt->request_bufflen = 0;
+ for (i = 0; i < count; i++) {
+ SCpnt->request_bufflen += sgpnt[i].length;
+ /*
+ * Bug fix: test this segment's alt_address, not
+ * sgpnt[count].alt_address, which indexes one past the end
+ * of the table we just filled in.
+ */
+ if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
+ ISA_DMA_THRESHOLD && !sgpnt[i].alt_address) {
+ sgpnt[i].alt_address = sgpnt[i].address;
+ sgpnt[i].address =
+ (char *) scsi_malloc(sgpnt[i].length);
+ /*
+ * If we cannot allocate memory for this DMA bounce
+ * buffer, then queue just what we have done so far.
+ */
+ if (sgpnt[i].address == NULL) {
+ printk("Warning - running low on DMA memory\n");
+ SCpnt->request_bufflen -= sgpnt[i].length;
+ SCpnt->use_sg = i;
+ if (i == 0) {
+ panic("DMA pool exhausted");
+ }
+ break;
+ }
+ if (SCpnt->request.cmd == WRITE) {
+ memcpy(sgpnt[i].address, sgpnt[i].alt_address,
+ sgpnt[i].length);
+ }
+ }
+ }
+ return 1;
+
+ single_segment:
+ /*
+ * Come here if for any reason we choose to do this as a single
+ * segment. Possibly the entire request, or possibly a small
+ * chunk of the entire request.
+ */
+ bh = SCpnt->request.bh;
+ buff = SCpnt->request.buffer;
+
+ if (dma_host) {
+ /*
+ * Allocate a DMA bounce buffer. If the allocation fails, fall
+ * back and allocate a really small one - enough to satisfy
+ * the first buffer.
+ */
+ if (virt_to_phys(SCpnt->request.bh->b_data)
+ + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) {
+ buff = (char *) scsi_malloc(this_count << 9);
+ if (!buff) {
+ printk("Warning - running low on DMA memory\n");
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = (char *) scsi_malloc(this_count << 9);
+ if (!buff) {
+ panic("Unable to allocate DMA buffer\n");
+ }
+ }
+ if (SCpnt->request.cmd == WRITE)
+ memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9);
+ }
+ }
+ SCpnt->request_bufflen = this_count << 9;
+ SCpnt->request_buffer = buff;
+ SCpnt->use_sg = 0;
+ return 1;
+}
+
+#define INITIO(_FUNCTION, _VALID, _CLUSTER, _DMA) \
+static int _FUNCTION(Scsi_Cmnd * SCpnt) \
+{ \
+ return __init_io(SCpnt, _VALID, _CLUSTER, _DMA); \
+}
+
+/*
+ * ll_rw_blk.c now keeps track of the number of segments in
+ * a request. Thus we don't have to do it any more here.
+ * We always force "_VALID" to 1. Eventually clean this up
+ * and get rid of the extra argument.
+ */
+#if 0
+/* Old definitions */
+INITIO(scsi_init_io_, 0, 0, 0)
+INITIO(scsi_init_io_d, 0, 0, 1)
+INITIO(scsi_init_io_c, 0, 1, 0)
+INITIO(scsi_init_io_dc, 0, 1, 1)
+
+/* Newer redundant definitions. */
+INITIO(scsi_init_io_, 1, 0, 0)
+INITIO(scsi_init_io_d, 1, 0, 1)
+INITIO(scsi_init_io_c, 1, 1, 0)
+INITIO(scsi_init_io_dc, 1, 1, 1)
+#endif
+
+INITIO(scsi_init_io_v, 1, 0, 0)
+INITIO(scsi_init_io_vd, 1, 0, 1)
+INITIO(scsi_init_io_vc, 1, 1, 0)
+INITIO(scsi_init_io_vdc, 1, 1, 1)
+/*
+ * Function: initialize_merge_fn()
+ *
+ * Purpose: Initialize merge function for a host
+ *
+ * Arguments: SDpnt - Device descriptor.
+ *
+ * Returns: Nothing.
+ *
+ * Lock status:
+ *
+ * Notes:
+ */
+void initialize_merge_fn(Scsi_Device * SDpnt)
+{
+ request_queue_t *q;
+ struct Scsi_Host *SHpnt;
+ SHpnt = SDpnt->host;
+
+ q = &SDpnt->request_queue;
+
+ /*
+ * If the host has already selected a merge manager, then don't
+ * pick a new one.
+ */
+ if (q->merge_fn != NULL) {
+ return;
+ }
+ /*
+ * If this host has an unlimited tablesize, then don't bother with a
+ * merge manager. The whole point of the operation is to make sure
+ * that requests don't grow too large, and this host isn't picky.
+ */
+ if (SHpnt->sg_tablesize == SG_ALL) {
+ /*
+ * Only the init_io function needs selecting; ll_rw_blk keeps
+ * the segment tally up to date, so the "valid count"
+ * (scsi_init_io_v*) variants are used for every
+ * clustering/ISA-DMA combination.
+ */
+ if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+ SDpnt->scsi_init_io_fn = scsi_init_io_v;
+ } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+ SDpnt->scsi_init_io_fn = scsi_init_io_vd;
+ } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+ SDpnt->scsi_init_io_fn = scsi_init_io_vc;
+ } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+ SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
+ }
+ return;
+ }
+ /*
+ * Now pick out the correct function. The four variants cover the
+ * cross product of clustering (c) and unchecked ISA DMA (d).
+ */
+ if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+ q->merge_fn = scsi_merge_fn_;
+ q->merge_requests_fn = scsi_merge_requests_fn_;
+ SDpnt->scsi_init_io_fn = scsi_init_io_v;
+ } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+ q->merge_fn = scsi_merge_fn_d;
+ q->merge_requests_fn = scsi_merge_requests_fn_d;
+ SDpnt->scsi_init_io_fn = scsi_init_io_vd;
+ } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
+ q->merge_fn = scsi_merge_fn_c;
+ q->merge_requests_fn = scsi_merge_requests_fn_c;
+ SDpnt->scsi_init_io_fn = scsi_init_io_vc;
+ } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
+ q->merge_fn = scsi_merge_fn_dc;
+ q->merge_requests_fn = scsi_merge_requests_fn_dc;
+ SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
+ }
+}
* Tommy Thorn <tthorn>
* Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
*
- * Modified by Eric Youngdale eric@aib.com to
+ * Modified by Eric Youngdale eric@andante.org to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*
extern void scsi_old_done(Scsi_Cmnd * SCpnt);
int update_timeout(Scsi_Cmnd *, int);
extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
-extern void internal_cmnd(Scsi_Cmnd * SCpnt);
+
+extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
extern volatile struct Scsi_Host *host_active;
#define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \
|| (HOST->can_queue && HOST->host_busy >= HOST->can_queue))
-static unsigned char generic_sense[6] = {REQUEST_SENSE, 0, 0, 0, 255, 0};
+static unsigned char generic_sense[6] =
+{REQUEST_SENSE, 0, 0, 0, 255, 0};
/*
* This is the number of clock ticks we should wait before we time out
SCpnt->use_sg = 0;
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
SCpnt->result = 0;
- internal_cmnd(SCpnt);
+ /*
+ * Ugly, ugly. The newer interfaces all assume that the lock
+ * isn't held. Mustn't disappoint, or we deadlock the system.
+ */
+ spin_unlock_irq(&io_request_lock);
+ scsi_dispatch_cmd(SCpnt);
+ spin_lock_irq(&io_request_lock);
}
__LINE__);
}
}
- /* end WAS_SENSE */
+ /* end WAS_SENSE */
else {
#ifdef DEBUG
printk("COMMAND COMPLETE message returned, "
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
SCpnt->result = 0;
- internal_cmnd(SCpnt);
+ /*
+ * Ugly, ugly. The newer interfaces all
+ * assume that the lock isn't held. Mustn't
+ * disappoint, or we deadlock the system.
+ */
+ spin_unlock_irq(&io_request_lock);
+ scsi_dispatch_cmd(SCpnt);
+ spin_lock_irq(&io_request_lock);
}
break;
default:
#endif
host->host_busy--; /* Indicate that we are free */
- if (host->block && host->host_busy == 0) {
- host_active = NULL;
-
- /* For block devices "wake_up" is done in end_scsi_request */
- if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) {
- struct Scsi_Host *next;
-
- for (next = host->block; next != host; next = next->block)
- wake_up(&next->host_wait);
- }
- }
- wake_up(&host->host_wait);
SCpnt->result = result | ((exit & 0xff) << 24);
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ /*
+ * The upper layers assume the lock isn't held. We mustn't
+ * disappoint them. When the new error handling code is in
+ * use, the upper code is run from a bottom half handler, so
+ * it isn't an issue.
+ */
+ spin_unlock_irq(&io_request_lock);
SCpnt->done(SCpnt);
+ spin_lock_irq(&io_request_lock);
}
#undef CMD_FINISHED
#undef REDO
if (host->last_reset - jiffies > 20UL * HZ)
host->last_reset = jiffies;
} else {
- if (!host->block)
- host->host_busy++;
+ host->host_busy++;
host->last_reset = jiffies;
host->resetting = 1;
SCpnt->flags |= (WAS_RESET | IS_RESETTING);
if (time_before(host->last_reset, jiffies) ||
(time_after(host->last_reset, jiffies + 20 * HZ)))
host->last_reset = jiffies;
- if (!host->block)
- host->host_busy--;
+ host->host_busy--;
}
if (reset_flags & SCSI_RESET_SYNCHRONOUS)
SCpnt->flags &= ~SYNC_RESET;
static const char RCSid[] = "$Header: /mnt/ide/home/eric/CVSROOT/linux/drivers/scsi/scsi_queue.c,v 1.1 1997/10/21 11:16:38 eric Exp $";
-/*
- * Lock used to prevent more than one process from frobbing the list at the
- * same time. FIXME(eric) - there should be separate spinlocks for each host.
- * This will reduce contention.
- */
-
-spinlock_t scsi_mlqueue_lock = SPIN_LOCK_UNLOCKED;
-spinlock_t scsi_mlqueue_remove_lock = SPIN_LOCK_UNLOCKED;
/*
* Function: scsi_mlqueue_insert()
* Arguments: cmd - command that we are adding to queue.
* reason - why we are inserting command to queue.
*
+ * Lock status: Assumed that lock is not held upon entry.
+ *
* Returns: Nothing.
*
* Notes: We do this for one of two cases. Either the host is busy
*/
int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
{
- Scsi_Cmnd *cpnt;
- unsigned long flags;
struct Scsi_Host *host;
SCSI_LOG_MLQUEUE(1, printk("Inserting command %p into mlqueue\n", cmd));
* If a host is inactive and cannot queue any commands, I don't see
* how things could possibly work anyways.
*/
- if (cmd->device->device_busy == 0) {
+ if (cmd->device->device_blocked == 0) {
if (scsi_retry_command(cmd) == 0) {
return 0;
}
}
- cmd->device->device_busy = TRUE;
+ cmd->device->device_blocked = TRUE;
cmd->device_wait = TRUE;
}
cmd->bh_next = NULL;
/*
- * As a performance enhancement, look to see whether the list is
- * empty. If it is, then we can just atomicly insert the command
- * in the list and return without locking.
+ * Insert this command at the head of the queue for it's device.
+ * It will go before all other commands that are already in the queue.
*/
- if (host->pending_commands == NULL) {
- cpnt = xchg(&host->pending_commands, cmd);
- if (cpnt == NULL) {
- return 0;
- }
- /*
- * Rats. Something slipped in while we were exchanging.
- * Swap it back and fall through to do it the hard way.
- */
- cmd = xchg(&host->pending_commands, cpnt);
-
- }
- /*
- * Next append the command to the list of pending commands.
- */
- spin_lock_irqsave(&scsi_mlqueue_lock, flags);
- for (cpnt = host->pending_commands; cpnt && cpnt->bh_next;
- cpnt = cpnt->bh_next) {
- continue;
- }
- if (cpnt != NULL) {
- cpnt->bh_next = cmd;
- } else {
- host->pending_commands = cmd;
- }
-
- spin_unlock_irqrestore(&scsi_mlqueue_lock, flags);
- return 0;
-}
-
-/*
- * Function: scsi_mlqueue_finish()
- *
- * Purpose: Try and queue commands from the midlevel queue.
- *
- * Arguments: host - host that just finished a command.
- * device - device that just finished a command.
- *
- * Returns: Nothing.
- *
- * Notes: This could be called either from an interrupt context or a
- * normal process context.
- */
-int scsi_mlqueue_finish(struct Scsi_Host *host, Scsi_Device * device)
-{
- Scsi_Cmnd *cpnt;
- unsigned long flags;
- Scsi_Cmnd *next;
- Scsi_Cmnd *prev;
- int reason = 0;
- int rtn;
-
- SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish starting\n"));
- /*
- * First, clear the flag for the host/device. We will then start
- * pushing commands through until either something else blocks, or
- * the queue is empty.
- */
- if (host->host_blocked) {
- reason = SCSI_MLQUEUE_HOST_BUSY;
- host->host_blocked = FALSE;
- }
- if (device->device_busy) {
- reason = SCSI_MLQUEUE_DEVICE_BUSY;
- device->device_busy = FALSE;
- }
- /*
- * Walk the list of commands to see if there is anything we can
- * queue. This probably needs to be optimized for performance at
- * some point.
- */
- prev = NULL;
- spin_lock_irqsave(&scsi_mlqueue_remove_lock, flags);
- for (cpnt = host->pending_commands; cpnt; cpnt = next) {
- next = cpnt->bh_next;
- /*
- * First, see if this command is suitable for being retried now.
- */
- if (reason == SCSI_MLQUEUE_HOST_BUSY) {
- /*
- * The host was busy, but isn't any more. Thus we may be
- * able to queue the command now, but we were waiting for
- * the device, then we should keep waiting. Similarily, if
- * the device is now busy, we should also keep waiting.
- */
- if ((cpnt->host_wait == FALSE)
- || (device->device_busy == TRUE)) {
- prev = cpnt;
- continue;
- }
- }
- if (reason == SCSI_MLQUEUE_DEVICE_BUSY) {
- /*
- * The device was busy, but isn't any more. Thus we may be
- * able to queue the command now, but we were waiting for
- * the host, then we should keep waiting. Similarily, if
- * the host is now busy, we should also keep waiting.
- */
- if ((cpnt->device_wait == FALSE)
- || (host->host_blocked == TRUE)) {
- prev = cpnt;
- continue;
- }
- }
- /*
- * First, remove the command from the list.
- */
- if (prev == NULL) {
- host->pending_commands = next;
- } else {
- prev->bh_next = next;
- }
- cpnt->bh_next = NULL;
-
- rtn = scsi_retry_command(cpnt);
-
- /*
- * If we got a non-zero return value, it means that the host rejected
- * the command. The internal_cmnd function will have added the
- * command back to the end of the list, so we don't have anything
- * more to do here except return.
- */
- if (rtn) {
- spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags);
- SCSI_LOG_MLQUEUE(1, printk("Unable to remove command %p from mlqueue\n", cpnt));
- goto finish;
- }
- SCSI_LOG_MLQUEUE(1, printk("Removed command %p from mlqueue\n", cpnt));
- }
-
- spin_unlock_irqrestore(&scsi_mlqueue_remove_lock, flags);
-finish:
- SCSI_LOG_MLQUEUE(2, printk("scsi_mlqueue_finish returning\n"));
+ scsi_insert_special_cmd(cmd, 1);
return 0;
}
* modules.
*/
-extern void print_command (unsigned char *command);
-extern void print_sense(const char * devclass, Scsi_Cmnd * SCpnt);
+extern void print_command(unsigned char *command);
+extern void print_sense(const char *devclass, Scsi_Cmnd * SCpnt);
extern const char *const scsi_device_types[];
EXPORT_SYMBOL(scsi_dma_free_sectors);
EXPORT_SYMBOL(kernel_scsi_ioctl);
EXPORT_SYMBOL(scsi_need_isa_buffer);
-EXPORT_SYMBOL(scsi_request_queueable);
EXPORT_SYMBOL(scsi_release_command);
EXPORT_SYMBOL(print_Scsi_Cmnd);
EXPORT_SYMBOL(scsi_block_when_processing_errors);
EXPORT_SYMBOL(scsi_mark_host_reset);
EXPORT_SYMBOL(scsi_ioctl_send_command);
-#if defined(CONFIG_SCSI_LOGGING) /* { */
+#if defined(CONFIG_SCSI_LOGGING) /* { */
EXPORT_SYMBOL(scsi_logging_level);
#endif
EXPORT_SYMBOL(proc_print_scsidevice);
EXPORT_SYMBOL(proc_scsi);
+EXPORT_SYMBOL(scsi_io_completion);
+EXPORT_SYMBOL(scsi_end_request);
+
/*
* These are here only while I debug the rest of the scsi stuff.
*/
EXPORT_SYMBOL(scsi_devicelist);
EXPORT_SYMBOL(scsi_device_types);
-
-#endif /* CONFIG_MODULES */
+#endif /* CONFIG_MODULES */
/*
* sd.c Copyright (C) 1992 Drew Eckhardt
- * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
*
* Linux scsi disk driver
* Initial versions: Drew Eckhardt
*
* <drew@colorado.edu>
*
- * Modified by Eric Youngdale ericy@cais.com to
+ * Modified by Eric Youngdale ericy@andante.org to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*
- * Modified by Eric Youngdale eric@aib.com to support loadable
+ * Modified by Eric Youngdale eric@andante.org to support loadable
* low-level scsi drivers.
*
* Modified by Jirka Hanika geo@ff.cuni.cz to support more
static int sd_init_onedisk(int);
-static void requeue_sd_request(Scsi_Cmnd * SCpnt);
static int sd_init(void);
static void sd_finish(void);
static int sd_attach(Scsi_Device *);
static int sd_detect(Scsi_Device *);
static void sd_detach(Scsi_Device *);
+static void rw_intr(Scsi_Cmnd * SCpnt);
+
+static int sd_init_command(Scsi_Cmnd *);
static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
{
}
struct Scsi_Device_Template sd_template = {
- NULL, "disk", "sd", NULL, TYPE_DISK,
- SCSI_DISK0_MAJOR, 0, 0, 0, 1,
- sd_detect, sd_init,
- sd_finish, sd_attach, sd_detach
+ name:"disk",
+ tag:"sd",
+ scsi_type:TYPE_DISK,
+ major:SCSI_DISK0_MAJOR,
+ blk:1,
+ detect:sd_detect,
+ init:sd_init,
+ finish:sd_finish,
+ attach:sd_attach,
+ detach:sd_detach,
+ init_command:sd_init_command,
};
+static request_queue_t *sd_find_queue(kdev_t dev)
+{
+ Scsi_Disk *dpnt;
+ int target;
+ target = DEVICE_NR(dev);
+
+ dpnt = &rscsi_disks[target];
+ if (!dpnt)
+ return NULL; /* No such device */
+ return &dpnt->device->request_queue;
+}
+
+static int sd_init_command(Scsi_Cmnd * SCpnt)
+{
+ int dev, devm, block, this_count;
+ Scsi_Disk *dpnt;
+ char nbuff[6];
+
+ devm = MINOR(SCpnt->request.rq_dev);
+ dev = DEVICE_NR(SCpnt->request.rq_dev);
+
+ block = SCpnt->request.sector;
+ this_count = SCpnt->request_bufflen >> 9;
+
+ SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block));
+
+ dpnt = &rscsi_disks[dev];
+ if (devm >= (sd_template.dev_max << 4) ||
+ !dpnt ||
+ !dpnt->device->online ||
+ block + SCpnt->request.nr_sectors > sd[devm].nr_sects) {
+ SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
+ return 0;
+ }
+ block += sd[devm].start_sect;
+ if (dpnt->device->changed) {
+ /*
+ * quietly refuse to do anything to a changed disc until the changed
+ * bit has been reset
+ */
+ /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
+ }
+ SCSI_LOG_HLQUEUE(2, sd_devname(devm, nbuff));
+ SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n",
+ nbuff, dev, block));
+
+ /*
+ * If we have a 1K hardware sectorsize, prevent access to single
+ * 512 byte sectors. In theory we could handle this - in fact
+ * the scsi cdrom driver must be able to handle this because
+ * we typically use 1K blocksizes, and cdroms typically have
+ * 2K hardware sectorsizes. Of course, things are simpler
+ * with the cdrom, since it is read-only. For performance
+ * reasons, the filesystems should be able to handle this
+ * and not force the scsi disk driver to use bounce buffers
+ * for this.
+ */
+ if (dpnt->device->sector_size == 1024) {
+ if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+ printk("sd.c:Bad block number requested");
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
+ } else {
+ block = block >> 1;
+ this_count = this_count >> 1;
+ }
+ }
+ if (dpnt->device->sector_size == 2048) {
+ if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
+ printk("sd.c:Bad block number requested");
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
+ } else {
+ block = block >> 2;
+ this_count = this_count >> 2;
+ }
+ }
+ switch (SCpnt->request.cmd) {
+ case WRITE:
+ if (!dpnt->device->writeable) {
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
+ }
+ SCpnt->cmnd[0] = WRITE_6;
+ break;
+ case READ:
+ SCpnt->cmnd[0] = READ_6;
+ break;
+ default:
+ panic("Unknown sd command %d\n", SCpnt->request.cmd);
+ }
+
+ SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
+ nbuff,
+ (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
+ this_count, SCpnt->request.nr_sectors));
+
+ SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ if (((this_count > 0xff) || (block > 0x1fffff)) && SCpnt->device->ten) {
+ if (this_count > 0xffff)
+ this_count = 0xffff;
+
+ SCpnt->cmnd[0] += READ_10 - READ_6;
+ SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+ SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
+ SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
+ SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
+ SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
+ SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ } else {
+ if (this_count > 0xff)
+ this_count = 0xff;
+
+ SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
+ SCpnt->cmnd[3] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[4] = (unsigned char) this_count;
+ SCpnt->cmnd[5] = 0;
+ }
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+ * host adapter, it's safe to assume that we can at least transfer
+ * this many bytes between each connect / disconnect.
+ */
+ SCpnt->transfersize = dpnt->device->sector_size;
+ SCpnt->underflow = this_count << 9;
+
+ SCpnt->allowed = MAX_RETRIES;
+ SCpnt->timeout_per_command = (SCpnt->device->type == TYPE_DISK ?
+ SD_TIMEOUT : SD_MOD_TIMEOUT);
+
+	/*
+	 * This is the completion routine we use.  Its error handling is
+	 * matched to the capabilities of this request-setup function.
+	 */
+ SCpnt->done = rw_intr;
+
+ /*
+ * This indicates that the command is ready from our end to be
+ * queued.
+ */
+ return 1;
+}
+
static int sd_open(struct inode *inode, struct file *filp)
{
int target;
int good_sectors = (result == 0 ? this_count : 0);
int block_sectors = 1;
- sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff);
+ SCSI_LOG_HLCOMPLETE(1, sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff));
SCSI_LOG_HLCOMPLETE(1, printk("%s : rw_intr(%d, %x [%x %x])\n", nbuff,
SCpnt->host->host_no,
(SCpnt->sense_buffer[4] << 16) |
(SCpnt->sense_buffer[5] << 8) |
SCpnt->sense_buffer[6];
- int sector_size =
- rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size;
if (SCpnt->request.bh != NULL)
block_sectors = SCpnt->request.bh->b_size >> 9;
- if (sector_size == 1024) {
+ switch (SCpnt->device->sector_size) {
+ case 1024:
error_sector <<= 1;
if (block_sectors < 2)
block_sectors = 2;
- } else if (sector_size == 2048) {
+ break;
+ case 2048:
error_sector <<= 2;
if (block_sectors < 4)
block_sectors = 4;
- } else if (sector_size == 256)
+ break;
+ case 256:
error_sector >>= 1;
- error_sector -= sd[SD_PARTITION(SCpnt->request.rq_dev)].start_sect;
+ break;
+ default:
+ break;
+ }
+ error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect;
error_sector &= ~(block_sectors - 1);
good_sectors = error_sector - SCpnt->request.sector;
if (good_sectors < 0 || good_sectors >= this_count)
good_sectors = 0;
}
/*
- * First case : we assume that the command succeeded. One of two things
- * will happen here. Either we will be finished, or there will be more
- * sectors that we were unable to read last time.
+	 * This calls the generic completion function, now that we know
+	 * how many actual sectors finished, and how many sectors should
+	 * be reported as failed.
*/
-
- if (good_sectors > 0) {
-
- SCSI_LOG_HLCOMPLETE(1, printk("%s : %ld sectors remain.\n", nbuff,
- SCpnt->request.nr_sectors));
- SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg));
-
- if (SCpnt->use_sg) {
- struct scatterlist *sgpnt;
- int i;
- sgpnt = (struct scatterlist *) SCpnt->buffer;
- for (i = 0; i < SCpnt->use_sg; i++) {
-
-#if 0
- SCSI_LOG_HLCOMPLETE(3, printk(":%p %p %d\n", sgpnt[i].alt_address, sgpnt[i].address,
- sgpnt[i].length));
-#endif
-
- if (sgpnt[i].alt_address) {
- if (SCpnt->request.cmd == READ)
- memcpy(sgpnt[i].alt_address, sgpnt[i].address,
- sgpnt[i].length);
- scsi_free(sgpnt[i].address, sgpnt[i].length);
- }
- }
-
- /* Free list of scatter-gather pointers */
- scsi_free(SCpnt->buffer, SCpnt->sglist_len);
- } else {
- if (SCpnt->buffer != SCpnt->request.buffer) {
- SCSI_LOG_HLCOMPLETE(3, printk("nosg: %p %p %d\n",
- SCpnt->request.buffer, SCpnt->buffer,
- SCpnt->bufflen));
-
- if (SCpnt->request.cmd == READ)
- memcpy(SCpnt->request.buffer, SCpnt->buffer,
- SCpnt->bufflen);
- scsi_free(SCpnt->buffer, SCpnt->bufflen);
- }
- }
- /*
- * If multiple sectors are requested in one buffer, then
- * they will have been finished off by the first command.
- * If not, then we have a multi-buffer command.
- */
- if (SCpnt->request.nr_sectors > this_count) {
- SCpnt->request.errors = 0;
-
- if (!SCpnt->request.bh) {
- SCSI_LOG_HLCOMPLETE(2, printk("%s : handling page request, no buffer\n",
- nbuff));
-
- /*
- * The SCpnt->request.nr_sectors field is always done in
- * 512 byte sectors, even if this really isn't the case.
- */
- panic("sd.c: linked page request (%lx %x)",
- SCpnt->request.sector, this_count);
- }
- }
- SCpnt = end_scsi_request(SCpnt, 1, good_sectors);
- if (result == 0) {
- requeue_sd_request(SCpnt);
- return;
- }
- }
- if (good_sectors == 0) {
-
- /* Free up any indirection buffers we allocated for DMA purposes. */
- if (SCpnt->use_sg) {
- struct scatterlist *sgpnt;
- int i;
- sgpnt = (struct scatterlist *) SCpnt->buffer;
- for (i = 0; i < SCpnt->use_sg; i++) {
- SCSI_LOG_HLCOMPLETE(3, printk("err: %p %p %d\n",
- SCpnt->request.buffer, SCpnt->buffer,
- SCpnt->bufflen));
- if (sgpnt[i].alt_address) {
- scsi_free(sgpnt[i].address, sgpnt[i].length);
- }
- }
- scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
- } else {
- SCSI_LOG_HLCOMPLETE(2, printk("nosgerr: %p %p %d\n",
- SCpnt->request.buffer, SCpnt->buffer,
- SCpnt->bufflen));
- if (SCpnt->buffer != SCpnt->request.buffer)
- scsi_free(SCpnt->buffer, SCpnt->bufflen);
- }
- }
- /*
- * Now, if we were good little boys and girls, Santa left us a request
- * sense buffer. We can extract information from this, so we
- * can choose a block to remap, etc.
- */
-
- if (driver_byte(result) != 0) {
- if (suggestion(result) == SUGGEST_REMAP) {
-#ifdef REMAP
- /*
- * Not yet implemented. A read will fail after being remapped,
- * a write will call the strategy routine again.
- */
- if rscsi_disks
- [DEVICE_NR(SCpnt->request.rq_dev)].remap
- {
- result = 0;
- }
-#endif
- }
- if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
- if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
- if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
- /* detected disc change. set a bit and quietly refuse
- * further access.
- */
- rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
- SCpnt = end_scsi_request(SCpnt, 0, this_count);
- requeue_sd_request(SCpnt);
- return;
- } else {
- /*
- * Must have been a power glitch, or a bus reset.
- * Could not have been a media change, so we just retry
- * the request and see what happens.
- */
- requeue_sd_request(SCpnt);
- return;
- }
- }
- }
- /* If we had an ILLEGAL REQUEST returned, then we may have
- * performed an unsupported command. The only thing this should be
- * would be a ten byte read where only a six byte read was supported.
- * Also, on a system where READ CAPACITY failed, we have have read
- * past the end of the disk.
- */
-
- if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
- if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
- rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
- requeue_sd_request(SCpnt);
- result = 0;
- } else {
- /* ???? */
- }
- }
- if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
- printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ",
- SCpnt->host->host_no, (int) SCpnt->channel,
- (int) SCpnt->target, (int) SCpnt->lun);
- print_command(SCpnt->cmnd);
- print_sense("sd", SCpnt);
- SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
- requeue_sd_request(SCpnt);
- return;
- }
- } /* driver byte != 0 */
- if (result) {
- printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
- rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
- rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
- rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
- rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
-
- if (driver_byte(result) & DRIVER_SENSE)
- print_sense("sd", SCpnt);
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
- requeue_sd_request(SCpnt);
- return;
- }
+ scsi_io_completion(SCpnt, good_sectors, block_sectors);
}
/*
* requeue_sd_request() is the request handler function for the sd driver.
* them to SCSI commands.
*/
-static void do_sd_request(void)
-{
- Scsi_Cmnd *SCpnt = NULL;
- Scsi_Device *SDev;
- struct request *req = NULL;
- int flag = 0;
-
- while (1 == 1) {
- if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
- return;
- }
- INIT_SCSI_REQUEST;
- SDev = rscsi_disks[CURRENT_DEV].device;
-
- /*
- * If the host for this device is in error recovery mode, don't
- * do anything at all here. When the host leaves error recovery
- * mode, it will automatically restart things and start queueing
- * commands again.
- */
- if (SDev->host->in_recovery) {
- return;
- }
- /*
- * I am not sure where the best place to do this is. We need
- * to hook in a place where we are likely to come if in user
- * space.
- */
- if (SDev->was_reset) {
- /*
- * We need to relock the door, but we might
- * be in an interrupt handler. Only do this
- * from user space, since we do not want to
- * sleep from an interrupt. FIXME(eric) - do this
- * from the kernel error handling thred.
- */
- if (SDev->removable && !in_interrupt()) {
- spin_unlock_irq(&io_request_lock); /* FIXME!!!! */
- scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
- /* scsi_ioctl may allow CURRENT to change, so start over. */
- SDev->was_reset = 0;
- spin_lock_irq(&io_request_lock); /* FIXME!!!! */
- continue;
- }
- SDev->was_reset = 0;
- }
- /* We have to be careful here. scsi_allocate_device will get a free pointer,
- * but there is no guarantee that it is queueable. In normal usage,
- * we want to call this, because other types of devices may have the
- * host all tied up, and we want to make sure that we have at least
- * one request pending for this type of device. We can also come
- * through here while servicing an interrupt, because of the need to
- * start another command. If we call scsi_allocate_device more than once,
- * then the system can wedge if the command is not queueable. The
- * scsi_request_queueable function is safe because it checks to make sure
- * that the host is able to take another command before it returns
- * a pointer.
- */
-
- if (flag++ == 0)
- SCpnt = scsi_allocate_device(&CURRENT,
- rscsi_disks[CURRENT_DEV].device, 0);
- else
- SCpnt = NULL;
-
- /*
- * The following restore_flags leads to latency problems. FIXME.
- * Using a "sti()" gets rid of the latency problems but causes
- * race conditions and crashes.
- */
-
- /* This is a performance enhancement. We dig down into the request
- * list and try to find a queueable request (i.e. device not busy,
- * and host able to accept another command. If we find one, then we
- * queue it. This can make a big difference on systems with more than
- * one disk drive. We want to have the interrupts off when monkeying
- * with the request list, because otherwise the kernel might try to
- * slip in a request in between somewhere.
- *
- * FIXME(eric) - this doesn't belong at this level. The device code in
- * ll_rw_blk.c should know how to dig down into the device queue to
- * figure out what it can deal with, and what it can't. Consider
- * possibility of pulling entire queue down into scsi layer.
- */
- if (!SCpnt && sd_template.nr_dev > 1) {
- struct request *req1;
- req1 = NULL;
- req = CURRENT;
- while (req) {
- SCpnt = scsi_request_queueable(req,
- rscsi_disks[DEVICE_NR(req->rq_dev)].device);
- if (SCpnt)
- break;
- req1 = req;
- req = req->next;
- }
- if (SCpnt && req->rq_status == RQ_INACTIVE) {
- if (req == CURRENT)
- CURRENT = CURRENT->next;
- else
- req1->next = req->next;
- }
- }
- if (!SCpnt)
- return; /* Could not find anything to do */
-
- /* Queue command */
- requeue_sd_request(SCpnt);
- } /* While */
-}
-
-static void requeue_sd_request(Scsi_Cmnd * SCpnt)
-{
- int dev, devm, block, this_count;
- unsigned char cmd[10];
- char nbuff[6];
- int bounce_size, contiguous;
- int max_sg;
- struct buffer_head *bh, *bhp;
- char *buff, *bounce_buffer;
-
-repeat:
-
- if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
- do_sd_request();
- return;
- }
- devm = SD_PARTITION(SCpnt->request.rq_dev);
- dev = DEVICE_NR(SCpnt->request.rq_dev);
-
- block = SCpnt->request.sector;
- this_count = 0;
-
- SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = %d, block = %d\n", devm, block));
-
- if (devm >= (sd_template.dev_max << 4) ||
- !rscsi_disks[dev].device ||
- !rscsi_disks[dev].device->online ||
- block + SCpnt->request.nr_sectors > sd[devm].nr_sects) {
- SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
- goto repeat;
- }
- block += sd[devm].start_sect;
-
- if (rscsi_disks[dev].device->changed) {
- /*
- * quietly refuse to do anything to a changed disc until the changed
- * bit has been reset
- */
- /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- goto repeat;
- }
- sd_devname(devm >> 4, nbuff);
- SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n",
- nbuff, dev, block));
-
- /*
- * If we have a 1K hardware sectorsize, prevent access to single
- * 512 byte sectors. In theory we could handle this - in fact
- * the scsi cdrom driver must be able to handle this because
- * we typically use 1K blocksizes, and cdroms typically have
- * 2K hardware sectorsizes. Of course, things are simpler
- * with the cdrom, since it is read-only. For performance
- * reasons, the filesystems should be able to handle this
- * and not force the scsi disk driver to use bounce buffers
- * for this.
- */
- if (rscsi_disks[dev].sector_size == 1024)
- if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
- printk("sd.c:Bad block number/count requested");
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- goto repeat;
- }
- if (rscsi_disks[dev].sector_size == 2048)
- if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
- printk("sd.c:Bad block number/count requested");
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- goto repeat;
- }
- if (rscsi_disks[dev].sector_size == 4096)
- if ((block & 7) || (SCpnt->request.nr_sectors & 7)) {
- printk("sd.cBad block number/count requested");
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- goto repeat;
- }
- switch (SCpnt->request.cmd) {
- case WRITE:
- if (!rscsi_disks[dev].device->writeable) {
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- goto repeat;
- }
- cmd[0] = WRITE_6;
- break;
- case READ:
- cmd[0] = READ_6;
- break;
- default:
- panic("Unknown sd command %d\n", SCpnt->request.cmd);
- }
-
- SCpnt->this_count = 0;
-
- /* If the host adapter can deal with very large scatter-gather
- * requests, it is a waste of time to cluster
- */
- contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 : 1);
- bounce_buffer = NULL;
- bounce_size = (SCpnt->request.nr_sectors << 9);
-
- /* First see if we need a bounce buffer for this request. If we do, make
- * sure that we can allocate a buffer. Do not waste space by allocating
- * a bounce buffer if we are straddling the 16Mb line
- */
- if (contiguous && SCpnt->request.bh &&
- virt_to_phys(SCpnt->request.bh->b_data)
- + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
- && SCpnt->host->unchecked_isa_dma) {
- if (virt_to_phys(SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
- bounce_buffer = (char *) scsi_malloc(bounce_size);
- if (!bounce_buffer)
- contiguous = 0;
- }
- if (contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
- for (bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
- bhp = bhp->b_reqnext) {
- if (!CONTIGUOUS_BUFFERS(bh, bhp)) {
- if (bounce_buffer)
- scsi_free(bounce_buffer, bounce_size);
- contiguous = 0;
- break;
- }
- }
- if (!SCpnt->request.bh || contiguous) {
-
- /* case of page request (i.e. raw device), or unlinked buffer */
- this_count = SCpnt->request.nr_sectors;
- buff = SCpnt->request.buffer;
- SCpnt->use_sg = 0;
-
- } else if (SCpnt->host->sg_tablesize == 0 ||
- (scsi_need_isa_buffer && scsi_dma_free_sectors <= 10)) {
-
- /* Case of host adapter that cannot scatter-gather. We also
- * come here if we are running low on DMA buffer memory. We set
- * a threshold higher than that we would need for this request so
- * we leave room for other requests. Even though we would not need
- * it all, we need to be conservative, because if we run low enough
- * we have no choice but to panic.
- */
- if (SCpnt->host->sg_tablesize != 0 &&
- scsi_need_isa_buffer &&
- scsi_dma_free_sectors <= 10)
- printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
-
- this_count = SCpnt->request.current_nr_sectors;
- buff = SCpnt->request.buffer;
- SCpnt->use_sg = 0;
-
- } else {
-
- /* Scatter-gather capable host adapter */
- struct scatterlist *sgpnt;
- int count, this_count_max;
- int counted;
-
- bh = SCpnt->request.bh;
- this_count = 0;
- this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
- count = 0;
- bhp = NULL;
- while (bh) {
- if ((this_count + (bh->b_size >> 9)) > this_count_max)
- break;
- if (!bhp || !CONTIGUOUS_BUFFERS(bhp, bh) ||
- !CLUSTERABLE_DEVICE(SCpnt) ||
- (SCpnt->host->unchecked_isa_dma &&
- virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD)) {
- if (count < SCpnt->host->sg_tablesize)
- count++;
- else
- break;
- }
- this_count += (bh->b_size >> 9);
- bhp = bh;
- bh = bh->b_reqnext;
- }
-#if 0
- if (SCpnt->host->unchecked_isa_dma &&
- virt_to_phys(SCpnt->request.bh->b_data - 1) == ISA_DMA_THRESHOLD)
- count--;
-#endif
- SCpnt->use_sg = count; /* Number of chains */
- /* scsi_malloc can only allocate in chunks of 512 bytes */
- count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
-
- SCpnt->sglist_len = count;
- max_sg = count / sizeof(struct scatterlist);
- if (SCpnt->host->sg_tablesize < max_sg)
- max_sg = SCpnt->host->sg_tablesize;
- sgpnt = (struct scatterlist *) scsi_malloc(count);
- if (!sgpnt) {
- printk("Warning - running *really* short on DMA buffers\n");
- SCpnt->use_sg = 0; /* No memory left - bail out */
- this_count = SCpnt->request.current_nr_sectors;
- buff = SCpnt->request.buffer;
- } else {
- memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only
- * if memory is available
- */
- buff = (char *) sgpnt;
- counted = 0;
- for (count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
- count < SCpnt->use_sg && bh;
- count++, bh = bhp) {
-
- bhp = bh->b_reqnext;
-
- if (!sgpnt[count].address)
- sgpnt[count].address = bh->b_data;
- sgpnt[count].length += bh->b_size;
- counted += bh->b_size >> 9;
-
- if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 >
- ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
- !sgpnt[count].alt_address) {
- sgpnt[count].alt_address = sgpnt[count].address;
- /* We try to avoid exhausting the DMA pool, since it is
- * easier to control usage here. In other places we might
- * have a more pressing need, and we would be screwed if
- * we ran out */
- if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
- sgpnt[count].address = NULL;
- } else {
- sgpnt[count].address =
- (char *) scsi_malloc(sgpnt[count].length);
- }
- /* If we start running low on DMA buffers, we abort the
- * scatter-gather operation, and free all of the memory
- * we have allocated. We want to ensure that all scsi
- * operations are able to do at least a non-scatter/gather
- * operation */
- if (sgpnt[count].address == NULL) { /* Out of dma memory */
-#if 0
- printk("Warning: Running low on SCSI DMA buffers");
- /* Try switching back to a non s-g operation. */
- while (--count >= 0) {
- if (sgpnt[count].alt_address)
- scsi_free(sgpnt[count].address,
- sgpnt[count].length);
- }
- this_count = SCpnt->request.current_nr_sectors;
- buff = SCpnt->request.buffer;
- SCpnt->use_sg = 0;
- scsi_free(sgpnt, SCpnt->sglist_len);
-#endif
- SCpnt->use_sg = count;
- this_count = counted -= bh->b_size >> 9;
- break;
- }
- }
- /* Only cluster buffers if we know that we can supply DMA
- * buffers large enough to satisfy the request. Do not cluster
- * a new request if this would mean that we suddenly need to
- * start using DMA bounce buffers */
- if (bhp && CONTIGUOUS_BUFFERS(bh, bhp)
- && CLUSTERABLE_DEVICE(SCpnt)) {
- char *tmp;
-
- if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length +
- bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
- (SCpnt->host->unchecked_isa_dma) &&
- !sgpnt[count].alt_address)
- continue;
-
- if (!sgpnt[count].alt_address) {
- count--;
- continue;
- }
- if (scsi_dma_free_sectors > 10)
- tmp = (char *) scsi_malloc(sgpnt[count].length
- + bhp->b_size);
- else {
- tmp = NULL;
- max_sg = SCpnt->use_sg;
- }
- if (tmp) {
- scsi_free(sgpnt[count].address, sgpnt[count].length);
- sgpnt[count].address = tmp;
- count--;
- continue;
- }
- /* If we are allowed another sg chain, then increment
- * counter so we can insert it. Otherwise we will end
- up truncating */
-
- if (SCpnt->use_sg < max_sg)
- SCpnt->use_sg++;
- } /* contiguous buffers */
- } /* for loop */
-
- /* This is actually how many we are going to transfer */
- this_count = counted;
-
- if (count < SCpnt->use_sg || SCpnt->use_sg
- > SCpnt->host->sg_tablesize) {
- bh = SCpnt->request.bh;
- printk("Use sg, count %d %x %d\n",
- SCpnt->use_sg, count, scsi_dma_free_sectors);
- printk("maxsg = %x, counted = %d this_count = %d\n",
- max_sg, counted, this_count);
- while (bh) {
- printk("[%p %x] ", bh->b_data, bh->b_size);
- bh = bh->b_reqnext;
- }
- if (SCpnt->use_sg < 16)
- for (count = 0; count < SCpnt->use_sg; count++)
- printk("{%d:%p %p %d} ", count,
- sgpnt[count].address,
- sgpnt[count].alt_address,
- sgpnt[count].length);
- panic("Ooops");
- }
- if (SCpnt->request.cmd == WRITE)
- for (count = 0; count < SCpnt->use_sg; count++)
- if (sgpnt[count].alt_address)
- memcpy(sgpnt[count].address, sgpnt[count].alt_address,
- sgpnt[count].length);
- } /* Able to malloc sgpnt */
- } /* Host adapter capable of scatter-gather */
-
- /* Now handle the possibility of DMA to addresses > 16Mb */
-
- if (SCpnt->use_sg == 0) {
- if (virt_to_phys(buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
- (SCpnt->host->unchecked_isa_dma)) {
- if (bounce_buffer)
- buff = bounce_buffer;
- else
- buff = (char *) scsi_malloc(this_count << 9);
- if (buff == NULL) { /* Try backing off a bit if we are low on mem */
- this_count = SCpnt->request.current_nr_sectors;
- buff = (char *) scsi_malloc(this_count << 9);
- if (!buff)
- panic("Ran out of DMA buffers.");
- }
- if (SCpnt->request.cmd == WRITE)
- memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9);
- }
- }
- SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
- nbuff,
- (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
- this_count, SCpnt->request.nr_sectors));
-
- cmd[1] = (SCpnt->lun << 5) & 0xe0;
-
- if (rscsi_disks[dev].sector_size == 4096) {
- if (block & 7)
- panic("sd.c:Bad block number requested");
- if (this_count & 7)
- panic("sd.c:Bad block number requested");
- block = block >> 3;
- this_count = block >> 3;
- }
- if (rscsi_disks[dev].sector_size == 2048) {
- if (block & 3)
- panic("sd.c:Bad block number requested");
- if (this_count & 3)
- panic("sd.c:Bad block number requested");
- block = block >> 2;
- this_count = this_count >> 2;
- }
- if (rscsi_disks[dev].sector_size == 1024) {
- if (block & 1)
- panic("sd.c:Bad block number requested");
- if (this_count & 1)
- panic("sd.c:Bad block number requested");
- block = block >> 1;
- this_count = this_count >> 1;
- }
- if (rscsi_disks[dev].sector_size == 256) {
- block = block << 1;
- this_count = this_count << 1;
- }
- if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten) {
- if (this_count > 0xffff)
- this_count = 0xffff;
-
- cmd[0] += READ_10 - READ_6;
- cmd[2] = (unsigned char) (block >> 24) & 0xff;
- cmd[3] = (unsigned char) (block >> 16) & 0xff;
- cmd[4] = (unsigned char) (block >> 8) & 0xff;
- cmd[5] = (unsigned char) block & 0xff;
- cmd[6] = cmd[9] = 0;
- cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
- cmd[8] = (unsigned char) this_count & 0xff;
- } else {
- if (this_count > 0xff)
- this_count = 0xff;
-
- cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
- cmd[2] = (unsigned char) ((block >> 8) & 0xff);
- cmd[3] = (unsigned char) block & 0xff;
- cmd[4] = (unsigned char) this_count;
- cmd[5] = 0;
- }
-
- /*
- * We shouldn't disconnect in the middle of a sector, so with a dumb
- * host adapter, it's safe to assume that we can at least transfer
- * this many bytes between each connect / disconnect.
- */
-
- SCpnt->transfersize = rscsi_disks[dev].sector_size;
- SCpnt->underflow = this_count << 9;
- SCpnt->cmd_len = 0;
- scsi_do_cmd(SCpnt, (void *) cmd, buff,
- this_count * rscsi_disks[dev].sector_size,
- rw_intr,
- (SCpnt->device->type == TYPE_DISK ?
- SD_TIMEOUT : SD_MOD_TIMEOUT),
- MAX_RETRIES);
-}
static int check_scsidisk_media_change(kdev_t full_dev)
{
return retval;
}
-static void sd_wait_cmd (Scsi_Cmnd * SCpnt, const void *cmnd ,
- void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
- int timeout, int retries)
+static void sd_wait_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
+ void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
+ int timeout, int retries)
{
DECLARE_MUTEX_LOCKED(sem);
-
+
SCpnt->request.sem = &sem;
SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd (SCpnt, (void *) cmnd,
- buffer, bufflen, done, timeout, retries);
- spin_unlock_irq(&io_request_lock);
- down (&sem);
- spin_lock_irq(&io_request_lock);
+ scsi_do_cmd(SCpnt, (void *) cmnd,
+ buffer, bufflen, done, timeout, retries);
+ down(&sem);
SCpnt->request.sem = NULL;
}
unsigned char *buffer;
unsigned long spintime_value = 0;
int the_result, retries, spintime;
+ int sector_size;
Scsi_Cmnd *SCpnt;
/*
if (rscsi_disks[i].device->online == FALSE) {
return i;
}
- spin_lock_irq(&io_request_lock);
-
/* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
* considered a fatal error, and many devices report such an error
* just after a scsi bus reset.
*/
- SCpnt = scsi_allocate_device(NULL, rscsi_disks[i].device, 1);
+ SCpnt = scsi_allocate_device(rscsi_disks[i].device, 1);
+
buffer = (unsigned char *) scsi_malloc(512);
spintime = 0;
/* Spinup needs to be done for module loads too. */
do {
retries = 0;
-
+
while (retries < 3) {
cmd[0] = TEST_UNIT_READY;
cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
/* Look for non-removable devices that return NOT_READY.
* Issue command to spin up drive for these cases. */
if (the_result && !rscsi_disks[i].device->removable &&
- SCpnt->sense_buffer[2] == NOT_READY)
- {
+ SCpnt->sense_buffer[2] == NOT_READY) {
unsigned long time1;
- if (!spintime)
- {
+ if (!spintime) {
printk("%s: Spinning up disk...", nbuff);
cmd[0] = START_STOP;
cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
SCpnt->sense_buffer[2] = 0;
sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
- 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
+ 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
}
-
spintime = 1;
spintime_value = jiffies;
time1 = jiffies + HZ;
- spin_unlock_irq(&io_request_lock);
- while(time_before(jiffies, time1)); /* Wait 1 second for next try */
+ while (time_before(jiffies, time1)); /* Wait 1 second for next try */
printk(".");
- spin_lock_irq(&io_request_lock);
}
- } while(the_result && spintime && time_after(spintime_value+100*HZ, jiffies));
-
+ } while (the_result && spintime && time_after(spintime_value + 100 * HZ, jiffies));
if (spintime) {
if (the_result)
printk("not responding...\n");
SCpnt->sense_buffer[2] = 0;
sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
- 8, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
+ 8, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
retries--;
printk("%s : block size assumed to be 512 bytes, disk size 1GB. \n",
nbuff);
rscsi_disks[i].capacity = 0x1fffff;
- rscsi_disks[i].sector_size = 512;
+ sector_size = 512;
/* Set dirty bit for removable devices if not ready - sometimes drives
* will not report this properly. */
(buffer[2] << 8) |
buffer[3]);
- rscsi_disks[i].sector_size = (buffer[4] << 24) |
+ sector_size = (buffer[4] << 24) |
(buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
- if (rscsi_disks[i].sector_size == 0) {
- rscsi_disks[i].sector_size = 512;
+ if (sector_size == 0) {
+ sector_size = 512;
printk("%s : sector size 0 reported, assuming 512.\n", nbuff);
}
- if (rscsi_disks[i].sector_size != 512 &&
- rscsi_disks[i].sector_size != 1024 &&
- rscsi_disks[i].sector_size != 2048 &&
- rscsi_disks[i].sector_size != 4096 &&
- rscsi_disks[i].sector_size != 256) {
+ if (sector_size != 512 &&
+ sector_size != 1024 &&
+ sector_size != 2048 &&
+ sector_size != 4096 &&
+ sector_size != 256) {
printk("%s : unsupported sector size %d.\n",
- nbuff, rscsi_disks[i].sector_size);
- if (rscsi_disks[i].device->removable) {
- rscsi_disks[i].capacity = 0;
- } else {
- printk("scsi : deleting disk entry.\n");
- sd_detach(rscsi_disks[i].device);
- rscsi_disks[i].device = NULL;
-
- /* Wake up a process waiting for device */
- wake_up(&SCpnt->device->device_wait);
- scsi_release_command(SCpnt);
- SCpnt = NULL;
- scsi_free(buffer, 512);
- spin_unlock_irq(&io_request_lock);
-
- return i;
- }
+ nbuff, sector_size);
+ /*
+ * The user might want to re-format the drive with
+ * a supported sector size. Once this happens, it
+ * would be relatively trivial to set the thing up.
+ * For this reason, we leave the thing in the table.
+ */
+ rscsi_disks[i].capacity = 0;
}
- if (rscsi_disks[i].sector_size == 2048) {
+ if (sector_size == 2048) {
int m;
/*
*/
int m, mb;
int sz_quot, sz_rem;
- int hard_sector = rscsi_disks[i].sector_size;
+ int hard_sector = sector_size;
/* There are 16 minors allocated for each major device */
for (m = i << 4; m < ((i + 1) << 4); m++) {
sd_hardsizes[m] = hard_sector;
nbuff, hard_sector, rscsi_disks[i].capacity,
mb, sz_quot, sz_rem);
}
- if (rscsi_disks[i].sector_size == 4096)
+ if (sector_size == 4096)
rscsi_disks[i].capacity <<= 3;
- if (rscsi_disks[i].sector_size == 2048)
+ if (sector_size == 2048)
rscsi_disks[i].capacity <<= 2; /* Change into 512 byte sectors */
- if (rscsi_disks[i].sector_size == 1024)
+ if (sector_size == 1024)
rscsi_disks[i].capacity <<= 1; /* Change into 512 byte sectors */
- if (rscsi_disks[i].sector_size == 256)
+ if (sector_size == 256)
rscsi_disks[i].capacity >>= 1; /* Change into 512 byte sectors */
}
/* same code as READCAPA !! */
sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
- 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
+ 512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
}
} /* check for write protect */
+ SCpnt->device->ten = 1;
+ SCpnt->device->remap = 1;
+ SCpnt->device->sector_size = sector_size;
/* Wake up a process waiting for device */
wake_up(&SCpnt->device->device_wait);
scsi_release_command(SCpnt);
SCpnt = NULL;
- rscsi_disks[i].ten = 1;
- rscsi_disks[i].remap = 1;
scsi_free(buffer, 512);
- spin_unlock_irq(&io_request_lock);
return i;
}
return 0;
}
-/*
- * sd_get_queue() returns the queue which corresponds to a given device.
- */
-static struct request **sd_get_queue(kdev_t dev)
-{
- return &blk_dev[MAJOR_NR].current_request;
-}
+
static void sd_finish()
{
struct gendisk *gendisk;
int i;
for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) {
- /* FIXME: After 2.2 we should implement multiple sd queues */
- blk_dev[SD_MAJOR(i)].request_fn = DEVICE_REQUEST;
- if (i)
- blk_dev[SD_MAJOR(i)].queue = sd_get_queue;
+ blk_dev[SD_MAJOR(i)].queue = sd_find_queue;
}
for (gendisk = gendisk_head; gendisk != NULL; gendisk = gendisk->next)
if (gendisk == sd_gendisks)
if (i >= sd_template.dev_max)
panic("scsi_devices corrupt (sd)");
- SDp->scsi_request_fn = do_sd_request;
rscsi_disks[i].device = SDp;
rscsi_disks[i].has_part_table = 0;
sd_template.nr_dev++;
* to make sure that everything remains consistent.
*/
sd_blocksizes[index] = 1024;
- if (rscsi_disks[target].sector_size == 2048)
+ if (rscsi_disks[target].device->sector_size == 2048)
sd_blocksizes[index] = 2048;
else
sd_blocksizes[index] = 1024;
}
for (i = 0; i <= (sd_template.dev_max - 1) / SCSI_DISKS_PER_MAJOR; i++) {
- blk_dev[SD_MAJOR(i)].request_fn = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(SD_MAJOR(i)));
blk_size[SD_MAJOR(i)] = NULL;
hardsect_size[SD_MAJOR(i)] = NULL;
read_ahead[SD_MAJOR(i)] = 0;
*
* <drew@colorado.edu>
*
- * Modified by Eric Youngdale eric@aib.com to
+ * Modified by Eric Youngdale eric@andante.org to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*/
typedef struct scsi_disk {
unsigned capacity; /* size in blocks */
- unsigned sector_size; /* size in bytes */
Scsi_Device *device;
unsigned char ready; /* flag ready for FLOPTICAL */
unsigned char write_prot; /* flag write_protect for rmvable dev */
unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */
unsigned char sector_bit_shift; /* power of 2 sectors per FS block */
- unsigned ten:1; /* support ten byte read / write */
- unsigned remap:1; /* support remapping */
unsigned has_part_table:1; /* has partition table */
} Scsi_Disk;
static ssize_t sg_write(struct file * filp, const char * buf,
size_t count, loff_t *ppos)
{
- unsigned long flags;
int mxsize, cmd_size, k;
unsigned char cmnd[MAX_COMMAND_SIZE];
int input_size;
return k; /* probably out of space --> ENOMEM */
}
/* SCSI_LOG_TIMEOUT(7, printk("sg_write: allocating device\n")); */
- if (! (SCpnt = scsi_allocate_device(NULL, sdp->device,
- !(filp->f_flags & O_NONBLOCK)))) {
+ if (! (SCpnt = scsi_allocate_device(sdp->device,
+ !(filp->f_flags & O_NONBLOCK))))
+ {
sg_finish_rem_req(srp, NULL, 0);
return -EAGAIN; /* No available command blocks at the moment */
}
cmnd[1]= (cmnd[1] & 0x1f) | (sdp->device->lun << 5);
/* SCSI_LOG_TIMEOUT(7, printk("sg_write: do cmd\n")); */
- spin_lock_irqsave(&io_request_lock, flags);
SCpnt->use_sg = srp->data.use_sg;
SCpnt->sglist_len = srp->data.sglist_len;
SCpnt->bufflen = srp->data.bufflen;
(void *)SCpnt->buffer, mxsize,
sg_command_done, sfp->timeout, SG_DEFAULT_RETRIES);
/* 'mxsize' overwrites SCpnt->bufflen, hence need for b_malloc_len */
- spin_unlock_irqrestore(&io_request_lock, flags);
/* SCSI_LOG_TIMEOUT(6, printk("sg_write: sent scsi cmd to mid-level\n")); */
return count;
}
scsi_add_timer(scpnt, scpnt->timeout_per_command,
scsi_old_times_out);
#else
+ spin_unlock_irq(&io_request_lock);
scsi_sleep(HZ); /* just sleep 1 second and hope ... */
+ spin_lock_irq(&io_request_lock);
#endif
}
/*
* sr.c Copyright (C) 1992 David Giller
- * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
*
* adapted from:
* sd.c Copyright (C) 1992 Drew Eckhardt
* Linux scsi disk driver by
* Drew Eckhardt <drew@colorado.edu>
*
- * Modified by Eric Youngdale ericy@cais.com to
+ * Modified by Eric Youngdale ericy@andante.org to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*
- * Modified by Eric Youngdale eric@aib.com to support loadable
+ * Modified by Eric Youngdale eric@andante.org to support loadable
* low-level scsi drivers.
*
* Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to
static int sr_detect(Scsi_Device *);
static void sr_detach(Scsi_Device *);
-struct Scsi_Device_Template sr_template = {
- NULL, "cdrom", "sr", NULL, TYPE_ROM,
- SCSI_CDROM_MAJOR, 0, 0, 0, 1,
- sr_detect, sr_init,
- sr_finish, sr_attach, sr_detach
+static int sr_init_command(Scsi_Cmnd *);
+
+struct Scsi_Device_Template sr_template =
+{
+ name:"cdrom",
+ tag:"sr",
+ scsi_type:TYPE_ROM,
+ major:SCSI_CDROM_MAJOR,
+ blk:1,
+ detect:sr_detect,
+ init:sr_init,
+ finish:sr_finish,
+ attach:sr_attach,
+ detach:sr_detach,
+ init_command:sr_init_command
};
Scsi_CD *scsi_CDs = NULL;
static int *sr_sizes = NULL;
static int *sr_blocksizes = NULL;
+static int *sr_hardsizes = NULL;
static int sr_open(struct cdrom_device_info *, int);
void get_sectorsize(int);
static void sr_release(struct cdrom_device_info *cdi)
{
- if (scsi_CDs[MINOR(cdi->dev)].sector_size > 2048)
+ if (scsi_CDs[MINOR(cdi->dev)].device->sector_size > 2048)
sr_set_blocklength(MINOR(cdi->dev), 2048);
sync_dev(cdi->dev);
scsi_CDs[MINOR(cdi->dev)].device->access_count--;
sr_audio_ioctl, /* audio ioctl */
sr_dev_ioctl, /* device-specific ioctl */
CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED |
- CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED |
+ CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED |
CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS |
CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM |
CDC_GENERIC_PACKET,
*/
scsi_CDs[MINOR(cdi->dev)].needs_sector_size = 1;
- scsi_CDs[MINOR(cdi->dev)].sector_size = 2048;
+ scsi_CDs[MINOR(cdi->dev)].device->sector_size = 2048;
}
return retval;
}
static void rw_intr(Scsi_Cmnd * SCpnt)
{
int result = SCpnt->result;
- int this_count = SCpnt->this_count;
+ int this_count = SCpnt->bufflen >> 9;
int good_sectors = (result == 0 ? this_count : 0);
int block_sectors = 0;
avoid unnecessary additional work such as memcpy's that could be avoided.
*/
+
if (driver_byte(result) != 0 && /* An error occurred */
SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
(SCpnt->sense_buffer[2] == MEDIUM_ERROR ||
block_sectors = SCpnt->request.bh->b_size >> 9;
if (block_sectors < 4)
block_sectors = 4;
- if (scsi_CDs[device_nr].sector_size == 2048)
+ if (scsi_CDs[device_nr].device->sector_size == 2048)
error_sector <<= 2;
error_sector &= ~(block_sectors - 1);
good_sectors = error_sector - SCpnt->request.sector;
if (good_sectors < 0 || good_sectors >= this_count)
good_sectors = 0;
/*
- The SCSI specification allows for the value returned by READ
- CAPACITY to be up to 75 2K sectors past the last readable
- block. Therefore, if we hit a medium error within the last
- 75 2K sectors, we decrease the saved size value.
+ * The SCSI specification allows for the value returned by READ
+ * CAPACITY to be up to 75 2K sectors past the last readable
+ * block. Therefore, if we hit a medium error within the last
+ * 75 2K sectors, we decrease the saved size value.
*/
if ((error_sector >> 1) < sr_sizes[device_nr] &&
scsi_CDs[device_nr].capacity - error_sector < 4 * 75)
sr_sizes[device_nr] = error_sector >> 1;
}
- if (good_sectors > 0) { /* Some sectors were read successfully. */
- if (SCpnt->use_sg == 0) {
- if (SCpnt->buffer != SCpnt->request.buffer) {
- int offset;
- offset = (SCpnt->request.sector % 4) << 9;
- memcpy((char *) SCpnt->request.buffer,
- (char *) SCpnt->buffer + offset,
- good_sectors << 9);
- /* Even though we are not using scatter-gather, we look
- * ahead and see if there is a linked request for the
- * other half of this buffer. If there is, then satisfy
- * it. */
- if ((offset == 0) && good_sectors == 2 &&
- SCpnt->request.nr_sectors > good_sectors &&
- SCpnt->request.bh &&
- SCpnt->request.bh->b_reqnext &&
- SCpnt->request.bh->b_reqnext->b_size == 1024) {
- memcpy((char *) SCpnt->request.bh->b_reqnext->b_data,
- (char *) SCpnt->buffer + 1024,
- 1024);
- good_sectors += 2;
- };
-
- scsi_free(SCpnt->buffer, 2048);
- }
- } else {
- struct scatterlist *sgpnt;
- int i;
- sgpnt = (struct scatterlist *) SCpnt->buffer;
- for (i = 0; i < SCpnt->use_sg; i++) {
- if (sgpnt[i].alt_address) {
- if (sgpnt[i].alt_address != sgpnt[i].address) {
- memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
- };
- scsi_free(sgpnt[i].address, sgpnt[i].length);
- };
- };
- scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
- if (SCpnt->request.sector % 4)
- good_sectors -= 2;
- /* See if there is a padding record at the end that needs to be removed */
- if (good_sectors > SCpnt->request.nr_sectors)
- good_sectors -= 2;
- };
+ /*
+ * This calls the generic completion function, now that we know
+ * how many actual sectors finished, and how many sectors we need
+ * to say have failed.
+ */
+ scsi_io_completion(SCpnt, good_sectors, block_sectors);
+}
-#ifdef DEBUG
- printk("(%x %x %x) ", SCpnt->request.bh, SCpnt->request.nr_sectors,
- good_sectors);
-#endif
- if (SCpnt->request.nr_sectors > this_count) {
- SCpnt->request.errors = 0;
- if (!SCpnt->request.bh)
- panic("sr.c: linked page request (%lx %x)",
- SCpnt->request.sector, this_count);
- }
- SCpnt = end_scsi_request(SCpnt, 1, good_sectors); /* All done */
- if (result == 0) {
- requeue_sr_request(SCpnt);
- return;
- }
+
+static request_queue_t *sr_find_queue(kdev_t dev)
+{
+ if (MINOR(dev) >= sr_template.dev_max
+ || !scsi_CDs[MINOR(dev)].device)
+ return NULL; /* No such device */
+ return &scsi_CDs[MINOR(dev)].device->request_queue;
+}
+
+static int sr_init_command(Scsi_Cmnd * SCpnt)
+{
+ int dev, devm, block, this_count;
+
+ devm = MINOR(SCpnt->request.rq_dev);
+ dev = DEVICE_NR(SCpnt->request.rq_dev);
+
+ block = SCpnt->request.sector;
+ this_count = SCpnt->request_bufflen >> 9;
+
+ if (!SCpnt->request.bh) {
+ /*
+ * Umm, yeah, right. Swapping to a cdrom. Nice try.
+ */
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
}
- if (good_sectors == 0) {
- /* We only come through here if no sectors were read successfully. */
-
- /* Free up any indirection buffers we allocated for DMA purposes. */
- if (SCpnt->use_sg) {
- struct scatterlist *sgpnt;
- int i;
- sgpnt = (struct scatterlist *) SCpnt->buffer;
- for (i = 0; i < SCpnt->use_sg; i++) {
- if (sgpnt[i].alt_address) {
- scsi_free(sgpnt[i].address, sgpnt[i].length);
- }
- }
- scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %d, block = %d\n", devm, block));
+
+ if (dev >= sr_template.nr_dev ||
+ !scsi_CDs[dev].device ||
+ !scsi_CDs[dev].device->online) {
+ SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
+ return 0;
+ }
+ if (scsi_CDs[dev].device->changed) {
+ /*
+ * quietly refuse to do anything to a changed disc until the changed
+ * bit has been reset
+ */
+ /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
+ }
+ /*
+ * we do lazy blocksize switching (when reading XA sectors,
+ * see CDROMREADMODE2 ioctl)
+ */
+ if (scsi_CDs[dev].device->sector_size > 2048) {
+ if (!in_interrupt())
+ sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048);
+ else
+ printk("sr: can't switch blocksize: in interrupt\n");
+ }
+ if (SCpnt->request.cmd == WRITE) {
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
+ }
+ if (scsi_CDs[dev].device->sector_size == 1024) {
+ if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+ printk("sr.c:Bad 1K block number requested (%d %ld)",
+ block, SCpnt->request.nr_sectors);
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
} else {
- if (SCpnt->buffer != SCpnt->request.buffer)
- scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ block = block >> 1;
+ this_count = this_count >> 1;
}
-
}
- if (driver_byte(result) != 0) {
- if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
- if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
- /* detected disc change. set a bit and quietly refuse
- * further access. */
-
- scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
- SCpnt = end_scsi_request(SCpnt, 0, this_count);
- requeue_sr_request(SCpnt);
- return;
- }
- }
- if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
- printk("sr%d: CD-ROM error: ",
- DEVICE_NR(SCpnt->request.rq_dev));
- print_sense("sr", SCpnt);
- printk("command was: ");
- print_command(SCpnt->cmnd);
- if (scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
- scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
- requeue_sr_request(SCpnt);
- result = 0;
- return;
- } else {
- SCpnt = end_scsi_request(SCpnt, 0, this_count);
- requeue_sr_request(SCpnt); /* Do next request */
- return;
- }
-
- }
- if (SCpnt->sense_buffer[2] == NOT_READY) {
- printk(KERN_INFO "sr%d: CD-ROM not ready. Make sure you have a disc in the drive.\n",
- DEVICE_NR(SCpnt->request.rq_dev));
- SCpnt = end_scsi_request(SCpnt, 0, this_count);
- requeue_sr_request(SCpnt); /* Do next request */
- return;
- }
- if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
- printk("scsi%d: MEDIUM ERROR on "
- "channel %d, id %d, lun %d, CDB: ",
- SCpnt->host->host_no, (int) SCpnt->channel,
- (int) SCpnt->target, (int) SCpnt->lun);
- print_command(SCpnt->cmnd);
- print_sense("sr", SCpnt);
- SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
- requeue_sr_request(SCpnt);
- return;
+ if (scsi_CDs[dev].device->sector_size == 2048) {
+ if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
+ printk("sr.c:Bad 2K block number requested (%d %ld)",
+ block, SCpnt->request.nr_sectors);
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
+ } else {
+ block = block >> 2;
+ this_count = this_count >> 2;
}
- if (SCpnt->sense_buffer[2] == VOLUME_OVERFLOW) {
- printk("scsi%d: VOLUME OVERFLOW on "
- "channel %d, id %d, lun %d, CDB: ",
- SCpnt->host->host_no, (int) SCpnt->channel,
- (int) SCpnt->target, (int) SCpnt->lun);
- print_command(SCpnt->cmnd);
- print_sense("sr", SCpnt);
- SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
- requeue_sr_request(SCpnt);
- return;
+ }
+ switch (SCpnt->request.cmd) {
+ case WRITE:
+ if (!scsi_CDs[dev].device->writeable) {
+ SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ return 0;
}
+ SCpnt->cmnd[0] = WRITE_6;
+ break;
+ case READ:
+ SCpnt->cmnd[0] = READ_6;
+ break;
+ default:
+ panic("Unknown sd command %d\n", SCpnt->request.cmd);
}
- /* We only get this far if we have an error we have not recognized */
- if (result) {
- printk("SCSI CD error : host %d id %d lun %d return code = %03x\n",
- scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
- scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
- scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->lun,
- result);
-
- if (status_byte(result) == CHECK_CONDITION)
- print_sense("sr", SCpnt);
-
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
- requeue_sr_request(SCpnt);
+
+ SCSI_LOG_HLQUEUE(2, printk("sr%d : %s %d/%ld 512 byte blocks.\n",
+ devm,
+ (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
+ this_count, SCpnt->request.nr_sectors));
+
+ SCpnt->cmnd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ if (((this_count > 0xff) || (block > 0x1fffff)) && SCpnt->device->ten) {
+ if (this_count > 0xffff)
+ this_count = 0xffff;
+
+ SCpnt->cmnd[0] += READ_10 - READ_6;
+ SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+ SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
+ SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
+ SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
+ SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
+ SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ } else {
+ if (this_count > 0xff)
+ this_count = 0xff;
+
+ SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
+ SCpnt->cmnd[3] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[4] = (unsigned char) this_count;
+ SCpnt->cmnd[5] = 0;
}
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+ * host adapter, it's safe to assume that we can at least transfer
+ * this many bytes between each connect / disconnect.
+ */
+ SCpnt->transfersize = scsi_CDs[dev].device->sector_size;
+ SCpnt->underflow = this_count << 9;
+
+ SCpnt->allowed = MAX_RETRIES;
+ SCpnt->timeout_per_command = SR_TIMEOUT;
+
+ /*
+ * This is the completion routine we use. This is matched in terms
+ * of capability to this function.
+ */
+ SCpnt->done = rw_intr;
+
+ /*
+ * This indicates that the command is ready from our end to be
+ * queued.
+ */
+ return 1;
}
static int sr_open(struct cdrom_device_info *cdi, int purpose)
* translate them to SCSI commands.
*/
-static void do_sr_request(void)
-{
- Scsi_Cmnd *SCpnt = NULL;
- struct request *req = NULL;
- Scsi_Device *SDev;
- int flag = 0;
-
- while (1 == 1) {
- if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
- return;
- };
-
- INIT_SCSI_REQUEST;
-
- SDev = scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device;
-
- /*
- * If the host for this device is in error recovery mode, don't
- * do anything at all here. When the host leaves error recovery
- * mode, it will automatically restart things and start queueing
- * commands again.
- */
- if (SDev->host->in_recovery) {
- return;
- }
- /*
- * I am not sure where the best place to do this is. We need
- * to hook in a place where we are likely to come if in user
- * space.
- */
- if (SDev->was_reset) {
- /*
- * We need to relock the door, but we might
- * be in an interrupt handler. Only do this
- * from user space, since we do not want to
- * sleep from an interrupt.
- */
- if (SDev->removable && !in_interrupt()) {
- spin_unlock_irq(&io_request_lock); /* FIXME!!!! */
- scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
- spin_lock_irq(&io_request_lock); /* FIXME!!!! */
- /* scsi_ioctl may allow CURRENT to change, so start over. */
- SDev->was_reset = 0;
- continue;
- }
- SDev->was_reset = 0;
- }
- /* we do lazy blocksize switching (when reading XA sectors,
- * see CDROMREADMODE2 ioctl) */
- if (scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].sector_size > 2048) {
- if (!in_interrupt())
- sr_set_blocklength(DEVICE_NR(CURRENT->rq_dev), 2048);
-#if 1
- else
- printk("sr: can't switch blocksize: in interrupt\n");
-#endif
- }
- if (flag++ == 0)
- SCpnt = scsi_allocate_device(&CURRENT,
- scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device, 0);
- else
- SCpnt = NULL;
-
- /* This is a performance enhancement. We dig down into the request list and
- * try to find a queueable request (i.e. device not busy, and host able to
- * accept another command. If we find one, then we queue it. This can
- * make a big difference on systems with more than one disk drive. We want
- * to have the interrupts off when monkeying with the request list, because
- * otherwise the kernel might try to slip in a request in between somewhere. */
-
- if (!SCpnt && sr_template.nr_dev > 1) {
- struct request *req1;
- req1 = NULL;
- req = CURRENT;
- while (req) {
- SCpnt = scsi_request_queueable(req,
- scsi_CDs[DEVICE_NR(req->rq_dev)].device);
- if (SCpnt)
- break;
- req1 = req;
- req = req->next;
- }
- if (SCpnt && req->rq_status == RQ_INACTIVE) {
- if (req == CURRENT)
- CURRENT = CURRENT->next;
- else
- req1->next = req->next;
- }
- }
- if (!SCpnt)
- return; /* Could not find anything to do */
-
- wake_up(&wait_for_request);
-
- /* Queue command */
- requeue_sr_request(SCpnt);
- } /* While */
-}
-
-void requeue_sr_request(Scsi_Cmnd * SCpnt)
-{
- unsigned int dev, block, realcount;
- unsigned char cmd[10], *buffer, tries;
- int this_count, start, end_rec;
-
- tries = 2;
-
-repeat:
- if (!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
- do_sr_request();
- return;
- }
- dev = MINOR(SCpnt->request.rq_dev);
- block = SCpnt->request.sector;
- buffer = NULL;
- this_count = 0;
-
- if (dev >= sr_template.nr_dev) {
- /* printk("CD-ROM request error: invalid device.\n"); */
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- tries = 2;
- goto repeat;
- }
- if (!scsi_CDs[dev].use) {
- /* printk("CD-ROM request error: device marked not in use.\n"); */
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- tries = 2;
- goto repeat;
- }
- if (!scsi_CDs[dev].device->online) {
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- tries = 2;
- goto repeat;
- }
- if (scsi_CDs[dev].device->changed) {
- /*
- * quietly refuse to do anything to a changed disc
- * until the changed bit has been reset
- */
- /* printk("CD-ROM has been changed. Prohibiting further I/O.\n"); */
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- tries = 2;
- goto repeat;
- }
- switch (SCpnt->request.cmd) {
- case WRITE:
- SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
- goto repeat;
- break;
- case READ:
- cmd[0] = READ_6;
- break;
- default:
- panic("Unknown sr command %d\n", SCpnt->request.cmd);
- }
-
- cmd[1] = (SCpnt->lun << 5) & 0xe0;
-
- /*
- * Now do the grungy work of figuring out which sectors we need, and
- * where in memory we are going to put them.
- *
- * The variables we need are:
- *
- * this_count= number of 512 byte sectors being read
- * block = starting cdrom sector to read.
- * realcount = # of cdrom sectors to read
- *
- * The major difference between a scsi disk and a scsi cdrom
- * is that we will always use scatter-gather if we can, because we can
- * work around the fact that the buffer cache has a block size of 1024,
- * and we have 2048 byte sectors. This code should work for buffers that
- * are any multiple of 512 bytes long.
- */
-
- SCpnt->use_sg = 0;
-
- if (SCpnt->host->sg_tablesize > 0 &&
- (!scsi_need_isa_buffer ||
- scsi_dma_free_sectors >= 10)) {
- struct buffer_head *bh;
- struct scatterlist *sgpnt;
- int count, this_count_max;
- bh = SCpnt->request.bh;
- this_count = 0;
- count = 0;
- this_count_max = (scsi_CDs[dev].ten ? 0xffff : 0xff) << 4;
- /* Calculate how many links we can use. First see if we need
- * a padding record at the start */
- this_count = SCpnt->request.sector % 4;
- if (this_count)
- count++;
- while (bh && count < SCpnt->host->sg_tablesize) {
- if ((this_count + (bh->b_size >> 9)) > this_count_max)
- break;
- this_count += (bh->b_size >> 9);
- count++;
- bh = bh->b_reqnext;
- };
- /* Fix up in case of an odd record at the end */
- end_rec = 0;
- if (this_count % 4) {
- if (count < SCpnt->host->sg_tablesize) {
- count++;
- end_rec = (4 - (this_count % 4)) << 9;
- this_count += 4 - (this_count % 4);
- } else {
- count--;
- this_count -= (this_count % 4);
- };
- };
- SCpnt->use_sg = count; /* Number of chains */
- /* scsi_malloc can only allocate in chunks of 512 bytes */
- count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
-
- SCpnt->sglist_len = count;
- sgpnt = (struct scatterlist *) scsi_malloc(count);
- if (!sgpnt) {
- printk("Warning - running *really* short on DMA buffers\n");
- SCpnt->use_sg = 0; /* No memory left - bail out */
- } else {
- buffer = (unsigned char *) sgpnt;
- count = 0;
- bh = SCpnt->request.bh;
- if (SCpnt->request.sector % 4) {
- sgpnt[count].length = (SCpnt->request.sector % 4) << 9;
- sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
- if (!sgpnt[count].address)
- panic("SCSI DMA pool exhausted.");
- sgpnt[count].alt_address = sgpnt[count].address; /* Flag to delete
- if needed */
- count++;
- };
- for (bh = SCpnt->request.bh; count < SCpnt->use_sg;
- count++, bh = bh->b_reqnext) {
- if (bh) { /* Need a placeholder at the end of the record? */
- sgpnt[count].address = bh->b_data;
- sgpnt[count].length = bh->b_size;
- sgpnt[count].alt_address = NULL;
- } else {
- sgpnt[count].address = (char *) scsi_malloc(end_rec);
- if (!sgpnt[count].address)
- panic("SCSI DMA pool exhausted.");
- sgpnt[count].length = end_rec;
- sgpnt[count].alt_address = sgpnt[count].address;
- if (count + 1 != SCpnt->use_sg)
- panic("Bad sr request list");
- break;
- };
- if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 >
- ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
- sgpnt[count].alt_address = sgpnt[count].address;
- /* We try to avoid exhausting the DMA pool, since it is easier
- * to control usage here. In other places we might have a more
- * pressing need, and we would be screwed if we ran out */
- if (scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 5) {
- sgpnt[count].address = NULL;
- } else {
- sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
- };
- /* If we start running low on DMA buffers, we abort the scatter-gather
- * operation, and free all of the memory we have allocated. We want to
- * ensure that all scsi operations are able to do at least a non-scatter/gather
- * operation */
- if (sgpnt[count].address == NULL) { /* Out of dma memory */
- printk("Warning: Running low on SCSI DMA buffers\n");
- /* Try switching back to a non scatter-gather operation. */
- while (--count >= 0) {
- if (sgpnt[count].alt_address)
- scsi_free(sgpnt[count].address, sgpnt[count].length);
- };
- SCpnt->use_sg = 0;
- scsi_free(buffer, SCpnt->sglist_len);
- break;
- }; /* if address == NULL */
- }; /* if need DMA fixup */
- }; /* for loop to fill list */
-#ifdef DEBUG
- printk("SR: %d %d %d %d %d *** ", SCpnt->use_sg, SCpnt->request.sector,
- this_count,
- SCpnt->request.current_nr_sectors,
- SCpnt->request.nr_sectors);
- for (count = 0; count < SCpnt->use_sg; count++)
- printk("SGlist: %d %x %x %x\n", count,
- sgpnt[count].address,
- sgpnt[count].alt_address,
- sgpnt[count].length);
-#endif
- }; /* Able to allocate scatter-gather list */
- };
-
- if (SCpnt->use_sg == 0) {
- /* We cannot use scatter-gather. Do this the old fashion way */
- if (!SCpnt->request.bh)
- this_count = SCpnt->request.nr_sectors;
- else
- this_count = (SCpnt->request.bh->b_size >> 9);
-
- start = block % 4;
- if (start) {
- this_count = ((this_count > 4 - start) ?
- (4 - start) : (this_count));
- buffer = (unsigned char *) scsi_malloc(2048);
- } else if (this_count < 4) {
- buffer = (unsigned char *) scsi_malloc(2048);
- } else {
- this_count -= this_count % 4;
- buffer = (unsigned char *) SCpnt->request.buffer;
- if (virt_to_phys(buffer) + (this_count << 9) > ISA_DMA_THRESHOLD &&
- SCpnt->host->unchecked_isa_dma)
- buffer = (unsigned char *) scsi_malloc(this_count << 9);
- }
- };
-
- if (scsi_CDs[dev].sector_size == 2048)
- block = block >> 2; /* These are the sectors that the cdrom uses */
- else
- block = block & 0xfffffffc;
-
- realcount = (this_count + 3) / 4;
-
- if (scsi_CDs[dev].sector_size == 512)
- realcount = realcount << 2;
-
- /*
- * Note: The scsi standard says that READ_6 is *optional*, while
- * READ_10 is mandatory. Thus there is no point in using
- * READ_6.
- */
- if (scsi_CDs[dev].ten) {
- if (realcount > 0xffff) {
- realcount = 0xffff;
- this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
- }
- cmd[0] += READ_10 - READ_6;
- cmd[2] = (unsigned char) (block >> 24) & 0xff;
- cmd[3] = (unsigned char) (block >> 16) & 0xff;
- cmd[4] = (unsigned char) (block >> 8) & 0xff;
- cmd[5] = (unsigned char) block & 0xff;
- cmd[6] = cmd[9] = 0;
- cmd[7] = (unsigned char) (realcount >> 8) & 0xff;
- cmd[8] = (unsigned char) realcount & 0xff;
- } else {
- if (realcount > 0xff) {
- realcount = 0xff;
- this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
- }
- cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
- cmd[2] = (unsigned char) ((block >> 8) & 0xff);
- cmd[3] = (unsigned char) block & 0xff;
- cmd[4] = (unsigned char) realcount;
- cmd[5] = 0;
- }
-
-#ifdef DEBUG
- {
- int i;
- printk("ReadCD: %d %d %d %d\n", block, realcount, buffer, this_count);
- printk("Use sg: %d\n", SCpnt->use_sg);
- printk("Dumping command: ");
- for (i = 0; i < 12; i++)
- printk("%2.2x ", cmd[i]);
- printk("\n");
- };
-#endif
-
- /* Some dumb host adapters can speed transfers by knowing the
- * minimum transfersize in advance.
- *
- * We shouldn't disconnect in the middle of a sector, but the cdrom
- * sector size can be larger than the size of a buffer and the
- * transfer may be split to the size of a buffer. So it's safe to
- * assume that we can at least transfer the minimum of the buffer
- * size (1024) and the sector size between each connect / disconnect.
- */
-
- SCpnt->transfersize = (scsi_CDs[dev].sector_size > 1024) ?
- 1024 : scsi_CDs[dev].sector_size;
-
- SCpnt->this_count = this_count;
- scsi_do_cmd(SCpnt, (void *) cmd, buffer,
- realcount * scsi_CDs[dev].sector_size,
- rw_intr, SR_TIMEOUT, MAX_RETRIES);
-}
static int sr_detect(Scsi_Device * SDp)
{
if (i >= sr_template.dev_max)
panic("scsi_devices corrupt (sr)");
- SDp->scsi_request_fn = do_sr_request;
+
scsi_CDs[i].device = SDp;
sr_template.nr_dev++;
unsigned char cmd[10];
unsigned char *buffer;
int the_result, retries;
+ int sector_size;
Scsi_Cmnd *SCpnt;
- spin_lock_irq(&io_request_lock);
buffer = (unsigned char *) scsi_malloc(512);
- SCpnt = scsi_allocate_device(NULL, scsi_CDs[i].device, 1);
- spin_unlock_irq(&io_request_lock);
+
+
+ SCpnt = scsi_allocate_device(scsi_CDs[i].device, 1);
retries = 3;
do {
/* Do the command and wait.. */
- scsi_wait_cmd (SCpnt, (void *) cmd, (void *) buffer,
- 512, sr_init_done, SR_TIMEOUT, MAX_RETRIES);
+ scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
+ 512, sr_init_done, SR_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
retries--;
if (the_result) {
scsi_CDs[i].capacity = 0x1fffff;
- scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ sector_size = 2048; /* A guess, just in case */
scsi_CDs[i].needs_sector_size = 1;
} else {
#if 0
(buffer[1] << 16) |
(buffer[2] << 8) |
buffer[3]);
- scsi_CDs[i].sector_size = (buffer[4] << 24) |
+ sector_size = (buffer[4] << 24) |
(buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
- switch (scsi_CDs[i].sector_size) {
+ switch (sector_size) {
/*
* HP 4020i CD-Recorder reports 2340 byte sectors
* Philips CD-Writers report 2352 byte sectors
case 0:
case 2340:
case 2352:
- scsi_CDs[i].sector_size = 2048;
+ sector_size = 2048;
/* fall through */
case 2048:
scsi_CDs[i].capacity *= 4;
break;
default:
printk("sr%d: unsupported sector size %d.\n",
- i, scsi_CDs[i].sector_size);
+ i, sector_size);
scsi_CDs[i].capacity = 0;
scsi_CDs[i].needs_sector_size = 1;
}
+ scsi_CDs[i].device->sector_size = sector_size;
+
/*
* Add this so that we have the ability to correctly gauge
* what the device is capable of.
""
};
- spin_lock_irq(&io_request_lock);
buffer = (unsigned char *) scsi_malloc(512);
- spin_unlock_irq(&io_request_lock);
cmd[0] = MODE_SENSE;
cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0;
cmd[2] = 0x2a;
if ((buffer[n + 3] & 0x1) == 0)
/* can't write CD-R media */
scsi_CDs[i].cdi.mask |= CDC_CD_R;
- if ((buffer[n+6] & 0x8) == 0)
+ if ((buffer[n + 6] & 0x8) == 0)
/* can't eject */
scsi_CDs[i].cdi.mask |= CDC_OPEN_TRAY;
- if ((buffer[n+6] >> 5) == mechtype_individual_changer ||
- (buffer[n+6] >> 5) == mechtype_cartridge_changer)
- scsi_CDs[i].cdi.capacity =
- cdrom_number_of_slots(&(scsi_CDs[i].cdi));
+ if ((buffer[n + 6] >> 5) == mechtype_individual_changer ||
+ (buffer[n + 6] >> 5) == mechtype_cartridge_changer)
+ scsi_CDs[i].cdi.capacity =
+ cdrom_number_of_slots(&(scsi_CDs[i].cdi));
if (scsi_CDs[i].cdi.capacity <= 1)
- /* not a changer */
+ /* not a changer */
scsi_CDs[i].cdi.mask |= CDC_SELECT_DISC;
/*else I don't think it can close its tray
- scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
+ scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
scsi_free(buffer, 512);
int stat;
/* get the device */
- SCpnt = scsi_allocate_device(NULL, device, 1);
+ SCpnt = scsi_allocate_device(device, 1);
if (SCpnt == NULL)
return -ENODEV; /* this just doesn't seem right /axboe */
/* use buffer for ISA DMA */
buflen = (cgc->buflen + 511) & ~511;
if (cgc->buffer && SCpnt->host->unchecked_isa_dma &&
- (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) {
- spin_lock_irq(&io_request_lock);
+ (virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) {
buffer = scsi_malloc(buflen);
- spin_unlock_irq(&io_request_lock);
if (buffer == NULL) {
printk("sr: SCSI DMA pool exhausted.");
return -ENOMEM;
}
memcpy(buffer, cgc->buffer, cgc->buflen);
}
-
/* set the LUN */
cgc->cmd[1] |= device->lun << 5;
/* scsi_do_cmd sets the command length */
SCpnt->cmd_len = 0;
- scsi_wait_cmd (SCpnt, (void *)cgc->cmd, (void *)buffer, cgc->buflen,
- sr_init_done, SR_TIMEOUT, MAX_RETRIES);
+ scsi_wait_cmd(SCpnt, (void *) cgc->cmd, (void *) buffer, cgc->buflen,
+ sr_init_done, SR_TIMEOUT, MAX_RETRIES);
stat = SCpnt->result;
memcpy(cgc->buffer, buffer, cgc->buflen);
scsi_free(buffer, buflen);
}
-
return stat;
}
sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
sizeof(int), GFP_ATOMIC);
+ sr_hardsizes = (int *) scsi_init_malloc(sr_template.dev_max *
+ sizeof(int), GFP_ATOMIC);
/*
* These are good guesses for the time being.
*/
for (i = 0; i < sr_template.dev_max; i++)
+ {
sr_blocksizes[i] = 2048;
+ sr_hardsizes[i] = 2048;
+ }
blksize_size[MAJOR_NR] = sr_blocksizes;
+ hardsect_size[MAJOR_NR] = sr_hardsizes;
return 0;
}
int i;
char name[6];
- blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_dev[MAJOR_NR].queue = sr_find_queue;
blk_size[MAJOR_NR] = sr_sizes;
for (i = 0; i < sr_template.nr_dev; ++i) {
if (scsi_CDs[i].capacity)
continue;
scsi_CDs[i].capacity = 0x1fffff;
- scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ scsi_CDs[i].device->sector_size = 2048; /* A guess, just in case */
scsi_CDs[i].needs_sector_size = 1;
scsi_CDs[i].device->changed = 1; /* force recheck CD type */
#if 0
printk("Scd sectorsize = %d bytes.\n", scsi_CDs[i].sector_size);
#endif
scsi_CDs[i].use = 1;
- scsi_CDs[i].ten = 1;
- scsi_CDs[i].remap = 1;
+
+ scsi_CDs[i].device->ten = 1;
+ scsi_CDs[i].device->remap = 1;
scsi_CDs[i].readcd_known = 0;
scsi_CDs[i].readcd_cdda = 0;
sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int));
sr_blocksizes = NULL;
+ scsi_init_free((char *) sr_hardsizes, sr_template.dev_max * sizeof(int));
+ sr_hardsizes = NULL;
}
blksize_size[MAJOR_NR] = NULL;
- blk_dev[MAJOR_NR].request_fn = NULL;
+ hardsect_size[MAJOR_NR] = sr_hardsizes;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
blk_size[MAJOR_NR] = NULL;
read_ahead[MAJOR_NR] = 0;
*
* <drew@colorado.edu>
*
- * Modified by Eric Youngdale eric@aib.com to
+ * Modified by Eric Youngdale eric@andante.org to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*/
typedef struct {
unsigned capacity; /* size in blocks */
- unsigned sector_size; /* size in bytes */
Scsi_Device *device;
unsigned int vendor; /* vendor code, see sr_vendor.c */
unsigned long ms_offset; /* for reading multisession-CD's */
unsigned char sector_bit_size; /* sector size = 2^sector_bit_size */
unsigned char sector_bit_shift; /* sectors/FS block = 2^sector_bit_shift */
unsigned needs_sector_size:1; /* needs to get sector size */
- unsigned ten:1; /* support ten byte commands */
- unsigned remap:1; /* support remapping */
unsigned use:1; /* is this device still supportable */
unsigned xa_flag:1; /* CD has XA sectors ? */
unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
#include "sr.h"
#if 0
-# define DEBUG
+#define DEBUG
#endif
/* The sr_is_xa() seems to trigger firmware bugs with some drives :-(
static void sr_ioctl_done(Scsi_Cmnd * SCpnt)
{
- struct request * req;
-
- req = &SCpnt->request;
- req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
-
- if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) {
- memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen);
- scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511);
- SCpnt->buffer = req->buffer;
- }
-
- if (req->sem != NULL) {
- up(req->sem);
- }
+ struct request *req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) {
+ memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen);
+ scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511);
+ SCpnt->buffer = req->buffer;
+ }
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
}
/* We do our own retries because we want to know what the specific
error code is. Normally the UNIT_ATTENTION code will automatically
clear after one error */
-int sr_do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength, int quiet)
+int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet)
{
- Scsi_Cmnd * SCpnt;
- Scsi_Device * SDev;
- int result, err = 0, retries = 0;
- unsigned long flags;
- char * bounce_buffer;
-
- spin_lock_irqsave(&io_request_lock, flags);
- SDev = scsi_CDs[target].device;
- SCpnt = scsi_allocate_device(NULL, scsi_CDs[target].device, 1);
- spin_unlock_irqrestore(&io_request_lock, flags);
-
- /* use ISA DMA buffer if necessary */
- SCpnt->request.buffer=buffer;
- if (buffer && SCpnt->host->unchecked_isa_dma &&
- (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
- bounce_buffer = (char *)scsi_malloc((buflength + 511) & ~511);
- if (bounce_buffer == NULL) {
- printk("SCSI DMA pool exhausted.");
- return -ENOMEM;
- }
- memcpy(bounce_buffer, (char *)buffer, buflength);
- buffer = bounce_buffer;
- }
-
-retry:
- if( !scsi_block_when_processing_errors(SDev) )
- return -ENODEV;
-
- scsi_wait_cmd(SCpnt, (void *)sr_cmd, (void *)buffer, buflength,
- sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES);
-
- result = SCpnt->result;
-
- /* Minimal error checking. Ignore cases we know about, and report the rest. */
- if(driver_byte(result) != 0) {
- switch(SCpnt->sense_buffer[2] & 0xf) {
- case UNIT_ATTENTION:
- scsi_CDs[target].device->changed = 1;
- if (!quiet)
- printk(KERN_INFO "sr%d: disc change detected.\n", target);
- if (retries++ < 10)
- goto retry;
- err = -ENOMEDIUM;
- break;
- case NOT_READY: /* This happens if there is no disc in drive */
- if (SCpnt->sense_buffer[12] == 0x04 &&
- SCpnt->sense_buffer[13] == 0x01) {
- /* sense: Logical unit is in process of becoming ready */
- if (!quiet)
- printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target);
- if (retries++ < 10) {
- /* sleep 2 sec and try again */
- /*
- * The spinlock is silly - we should really lock more of this
- * function, but the minimal locking required to not lock up
- * is around this - scsi_sleep() assumes we hold the spinlock.
- */
- spin_lock_irqsave(&io_request_lock, flags);
- scsi_sleep(2*HZ);
- spin_unlock_irqrestore(&io_request_lock, flags);
- goto retry;
- } else {
- /* 20 secs are enough? */
- err = -ENOMEDIUM;
- break;
+ Scsi_Cmnd *SCpnt;
+ Scsi_Device *SDev;
+ int result, err = 0, retries = 0;
+ unsigned long flags;
+ char *bounce_buffer;
+
+ SDev = scsi_CDs[target].device;
+ SCpnt = scsi_allocate_device(scsi_CDs[target].device, 1);
+
+ /* use ISA DMA buffer if necessary */
+ SCpnt->request.buffer = buffer;
+ if (buffer && SCpnt->host->unchecked_isa_dma &&
+ (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
+ bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511);
+ if (bounce_buffer == NULL) {
+ printk("SCSI DMA pool exhausted.");
+ return -ENOMEM;
}
- }
- if (!quiet)
- printk(KERN_INFO "sr%d: CDROM not ready. Make sure there is a disc in the drive.\n",target);
+ memcpy(bounce_buffer, (char *) buffer, buflength);
+ buffer = bounce_buffer;
+ }
+ retry:
+ if (!scsi_block_when_processing_errors(SDev))
+ return -ENODEV;
+
+
+ scsi_wait_cmd(SCpnt, (void *) sr_cmd, (void *) buffer, buflength,
+ sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES);
+
+ result = SCpnt->result;
+
+ /* Minimal error checking. Ignore cases we know about, and report the rest. */
+ if (driver_byte(result) != 0) {
+ switch (SCpnt->sense_buffer[2] & 0xf) {
+ case UNIT_ATTENTION:
+ scsi_CDs[target].device->changed = 1;
+ if (!quiet)
+ printk(KERN_INFO "sr%d: disc change detected.\n", target);
+ if (retries++ < 10)
+ goto retry;
+ err = -ENOMEDIUM;
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ if (SCpnt->sense_buffer[12] == 0x04 &&
+ SCpnt->sense_buffer[13] == 0x01) {
+ /* sense: Logical unit is in process of becoming ready */
+ if (!quiet)
+ printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target);
+ if (retries++ < 10) {
+ /* sleep 2 sec and try again */
+ scsi_sleep(2 * HZ);
+ goto retry;
+ } else {
+ /* 20 secs are enough? */
+ err = -ENOMEDIUM;
+ break;
+ }
+ }
+ if (!quiet)
+ printk(KERN_INFO "sr%d: CDROM not ready. Make sure there is a disc in the drive.\n", target);
#ifdef DEBUG
- print_sense("sr", SCpnt);
+ print_sense("sr", SCpnt);
#endif
- err = -ENOMEDIUM;
- break;
- case ILLEGAL_REQUEST:
- if (!quiet)
- printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL "
- "REQUEST.\n", target);
- if (SCpnt->sense_buffer[12] == 0x20 &&
- SCpnt->sense_buffer[13] == 0x00) {
- /* sense: Invalid command operation code */
- err = -EDRIVE_CANT_DO_THIS;
- } else {
- err = -EINVAL;
- }
+ err = -ENOMEDIUM;
+ break;
+ case ILLEGAL_REQUEST:
+ if (!quiet)
+ printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL "
+ "REQUEST.\n", target);
+ if (SCpnt->sense_buffer[12] == 0x20 &&
+ SCpnt->sense_buffer[13] == 0x00) {
+ /* sense: Invalid command operation code */
+ err = -EDRIVE_CANT_DO_THIS;
+ } else {
+ err = -EINVAL;
+ }
#ifdef DEBUG
- print_command(sr_cmd);
- print_sense("sr", SCpnt);
+ print_command(sr_cmd);
+ print_sense("sr", SCpnt);
#endif
- break;
- default:
- printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target);
- print_command(sr_cmd);
- print_sense("sr", SCpnt);
- err = -EIO;
+ break;
+ default:
+ printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target);
+ print_command(sr_cmd);
+ print_sense("sr", SCpnt);
+ err = -EIO;
+ }
}
- }
-
- spin_lock_irqsave(&io_request_lock, flags);
- result = SCpnt->result;
- /* Wake up a process waiting for device*/
- wake_up(&SCpnt->device->device_wait);
- scsi_release_command(SCpnt);
- SCpnt = NULL;
- spin_unlock_irqrestore(&io_request_lock, flags);
- return err;
+ result = SCpnt->result;
+ /* Wake up a process waiting for device */
+ wake_up(&SCpnt->device->device_wait);
+ scsi_release_command(SCpnt);
+ SCpnt = NULL;
+ return err;
}
/* ---------------------------------------------------------------------- */
static int test_unit_ready(int minor)
{
- u_char sr_cmd[10];
+ u_char sr_cmd[10];
- sr_cmd[0] = GPCMD_TEST_UNIT_READY;
- sr_cmd[1] = ((scsi_CDs[minor].device -> lun) << 5);
- sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
- return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1);
+ sr_cmd[0] = GPCMD_TEST_UNIT_READY;
+ sr_cmd[1] = ((scsi_CDs[minor].device->lun) << 5);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1);
}
int sr_tray_move(struct cdrom_device_info *cdi, int pos)
{
- u_char sr_cmd[10];
-
- sr_cmd[0] = GPCMD_START_STOP_UNIT;
- sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device -> lun) << 5);
- sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
- sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */;
-
- return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0);
+ u_char sr_cmd[10];
+
+ sr_cmd[0] = GPCMD_START_STOP_UNIT;
+ sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
+
+ return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0);
}
int sr_lock_door(struct cdrom_device_info *cdi, int lock)
{
- return scsi_ioctl (scsi_CDs[MINOR(cdi->dev)].device,
- lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK,
- 0);
+ return scsi_ioctl(scsi_CDs[MINOR(cdi->dev)].device,
+ lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK,
+ 0);
}
int sr_drive_status(struct cdrom_device_info *cdi, int slot)
{
- if (CDSL_CURRENT != slot) {
- /* we have no changer support */
- return -EINVAL;
- }
-
- if (0 == test_unit_ready(MINOR(cdi->dev)))
- return CDS_DISC_OK;
+ if (CDSL_CURRENT != slot) {
+ /* we have no changer support */
+ return -EINVAL;
+ }
+ if (0 == test_unit_ready(MINOR(cdi->dev)))
+ return CDS_DISC_OK;
- return CDS_TRAY_OPEN;
+ return CDS_TRAY_OPEN;
}
int sr_disk_status(struct cdrom_device_info *cdi)
{
- struct cdrom_tochdr toc_h;
- struct cdrom_tocentry toc_e;
- int i,rc,have_datatracks = 0;
-
- /* look for data tracks */
- if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h)))
- return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO;
-
- for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) {
- toc_e.cdte_track = i;
- toc_e.cdte_format = CDROM_LBA;
- if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e))
- return CDS_NO_INFO;
- if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) {
- have_datatracks = 1;
- break;
- }
- }
- if (!have_datatracks)
- return CDS_AUDIO;
-
- if (scsi_CDs[MINOR(cdi->dev)].xa_flag)
- return CDS_XA_2_1;
- else
- return CDS_DATA_1;
+ struct cdrom_tochdr toc_h;
+ struct cdrom_tocentry toc_e;
+ int i, rc, have_datatracks = 0;
+
+ /* look for data tracks */
+ if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h)))
+ return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO;
+
+ for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) {
+ toc_e.cdte_track = i;
+ toc_e.cdte_format = CDROM_LBA;
+ if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e))
+ return CDS_NO_INFO;
+ if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) {
+ have_datatracks = 1;
+ break;
+ }
+ }
+ if (!have_datatracks)
+ return CDS_AUDIO;
+
+ if (scsi_CDs[MINOR(cdi->dev)].xa_flag)
+ return CDS_XA_2_1;
+ else
+ return CDS_DATA_1;
}
int sr_get_last_session(struct cdrom_device_info *cdi,
- struct cdrom_multisession* ms_info)
+ struct cdrom_multisession *ms_info)
{
- ms_info->addr.lba=scsi_CDs[MINOR(cdi->dev)].ms_offset;
- ms_info->xa_flag=scsi_CDs[MINOR(cdi->dev)].xa_flag ||
- (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0);
+ ms_info->addr.lba = scsi_CDs[MINOR(cdi->dev)].ms_offset;
+ ms_info->xa_flag = scsi_CDs[MINOR(cdi->dev)].xa_flag ||
+ (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0);
return 0;
}
-int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn)
+int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
{
- u_char sr_cmd[10];
+ u_char sr_cmd[10];
char buffer[32];
- int result;
-
+ int result;
+
sr_cmd[0] = GPCMD_READ_SUBCHANNEL;
sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5);
- sr_cmd[2] = 0x40; /* I do want the subchannel info */
- sr_cmd[3] = 0x02; /* Give me medium catalog number info */
+ sr_cmd[2] = 0x40; /* I do want the subchannel info */
+ sr_cmd[3] = 0x02; /* Give me medium catalog number info */
sr_cmd[4] = sr_cmd[5] = 0;
sr_cmd[6] = 0;
sr_cmd[7] = 0;
sr_cmd[9] = 0;
result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0);
-
- memcpy (mcn->medium_catalog_number, buffer + 9, 13);
- mcn->medium_catalog_number[13] = 0;
+
+ memcpy(mcn->medium_catalog_number, buffer + 9, 13);
+ mcn->medium_catalog_number[13] = 0;
return result;
}
int sr_reset(struct cdrom_device_info *cdi)
{
invalidate_buffers(cdi->dev);
- return 0;
+ return 0;
}
int sr_select_speed(struct cdrom_device_info *cdi, int speed)
{
- u_char sr_cmd[12];
-
- if (speed == 0)
- speed = 0xffff; /* set to max */
- else
- speed *= 177; /* Nx to kbyte/s */
-
- memset(sr_cmd,0,12);
- sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */
+ u_char sr_cmd[12];
+
+ if (speed == 0)
+ speed = 0xffff; /* set to max */
+ else
+ speed *= 177; /* Nx to kbyte/s */
+
+ memset(sr_cmd, 0, 12);
+ sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */
sr_cmd[1] = (scsi_CDs[MINOR(cdi->dev)].device->lun) << 5;
- sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
- sr_cmd[3] = speed & 0xff; /* LSB */
+ sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
+ sr_cmd[3] = speed & 0xff; /* LSB */
- if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0))
- return -EIO;
+ if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0))
+ return -EIO;
return 0;
}
/* only cdromreadtochdr and cdromreadtocentry are left - for use with the */
/* sr_disk_status interface for the generic cdrom driver. */
-int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void* arg)
+int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
{
- u_char sr_cmd[10];
- int result, target = MINOR(cdi->dev);
- unsigned char buffer[32];
-
- switch (cmd)
- {
- case CDROMREADTOCHDR:
- {
- struct cdrom_tochdr* tochdr = (struct cdrom_tochdr*)arg;
-
- sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
- sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
- sr_cmd[6] = 0;
- sr_cmd[7] = 0; /* MSB of length (12) */
- sr_cmd[8] = 12; /* LSB of length */
- sr_cmd[9] = 0;
-
- result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1);
-
- tochdr->cdth_trk0 = buffer[2];
- tochdr->cdth_trk1 = buffer[3];
-
- break;
- }
-
- case CDROMREADTOCENTRY:
- {
- struct cdrom_tocentry* tocentry = (struct cdrom_tocentry*)arg;
-
- sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) |
- (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
- sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
- sr_cmd[6] = tocentry->cdte_track;
- sr_cmd[7] = 0; /* MSB of length (12) */
- sr_cmd[8] = 12; /* LSB of length */
- sr_cmd[9] = 0;
-
- result = sr_do_ioctl (target, sr_cmd, buffer, 12, 0);
-
- tocentry->cdte_ctrl = buffer[5] & 0xf;
- tocentry->cdte_adr = buffer[5] >> 4;
- tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
- if (tocentry->cdte_format == CDROM_MSF) {
- tocentry->cdte_addr.msf.minute = buffer[9];
- tocentry->cdte_addr.msf.second = buffer[10];
- tocentry->cdte_addr.msf.frame = buffer[11];
- } else
- tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
- + buffer[10]) << 8) + buffer[11];
-
- break;
- }
-
- default:
- return -EINVAL;
- }
+ u_char sr_cmd[10];
+ int result, target = MINOR(cdi->dev);
+ unsigned char buffer[32];
+
+ switch (cmd) {
+ case CDROMREADTOCHDR:
+ {
+ struct cdrom_tochdr *tochdr = (struct cdrom_tochdr *) arg;
+
+ sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1);
+
+ tochdr->cdth_trk0 = buffer[2];
+ tochdr->cdth_trk1 = buffer[3];
+
+ break;
+ }
+
+ case CDROMREADTOCENTRY:
+ {
+ struct cdrom_tocentry *tocentry = (struct cdrom_tocentry *) arg;
+
+ sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) |
+ (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = tocentry->cdte_track;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0);
+
+ tocentry->cdte_ctrl = buffer[5] & 0xf;
+ tocentry->cdte_adr = buffer[5] >> 4;
+ tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
+ if (tocentry->cdte_format == CDROM_MSF) {
+ tocentry->cdte_addr.msf.minute = buffer[9];
+ tocentry->cdte_addr.msf.second = buffer[10];
+ tocentry->cdte_addr.msf.frame = buffer[11];
+ } else
+ tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ + buffer[10]) << 8) + buffer[11];
+
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
#if 0
- if (result)
- printk("DEBUG: sr_audio: result for ioctl %x: %x\n",cmd,result);
+ if (result)
+ printk("DEBUG: sr_audio: result for ioctl %x: %x\n", cmd, result);
#endif
-
- return result;
+
+ return result;
}
/* -----------------------------------------------------------------------
* blksize: 2048 | 2336 | 2340 | 2352
*/
-int
-sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize)
+int sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize)
{
- unsigned char cmd[12];
+ unsigned char cmd[12];
#ifdef DEBUG
- printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n",
- minor,lba,format,blksize);
+ printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n",
+ minor, lba, format, blksize);
#endif
- memset(cmd,0,12);
- cmd[0] = GPCMD_READ_CD; /* READ_CD */
- cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2);
- cmd[2] = (unsigned char)(lba >> 24) & 0xff;
- cmd[3] = (unsigned char)(lba >> 16) & 0xff;
- cmd[4] = (unsigned char)(lba >> 8) & 0xff;
- cmd[5] = (unsigned char) lba & 0xff;
- cmd[8] = 1;
- switch (blksize) {
- case 2336: cmd[9] = 0x58; break;
- case 2340: cmd[9] = 0x78; break;
- case 2352: cmd[9] = 0xf8; break;
- default: cmd[9] = 0x10; break;
- }
- return sr_do_ioctl(minor, cmd, dest, blksize, 0);
+ memset(cmd, 0, 12);
+ cmd[0] = GPCMD_READ_CD; /* READ_CD */
+ cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2);
+ cmd[2] = (unsigned char) (lba >> 24) & 0xff;
+ cmd[3] = (unsigned char) (lba >> 16) & 0xff;
+ cmd[4] = (unsigned char) (lba >> 8) & 0xff;
+ cmd[5] = (unsigned char) lba & 0xff;
+ cmd[8] = 1;
+ switch (blksize) {
+ case 2336:
+ cmd[9] = 0x58;
+ break;
+ case 2340:
+ cmd[9] = 0x78;
+ break;
+ case 2352:
+ cmd[9] = 0xf8;
+ break;
+ default:
+ cmd[9] = 0x10;
+ break;
+ }
+ return sr_do_ioctl(minor, cmd, dest, blksize, 0);
}
/*
* read sectors with blocksizes other than 2048
*/
-int
-sr_read_sector(int minor, int lba, int blksize, unsigned char *dest)
+int sr_read_sector(int minor, int lba, int blksize, unsigned char *dest)
{
- unsigned char cmd[12]; /* the scsi-command */
- int rc;
-
- /* we try the READ CD command first... */
- if (scsi_CDs[minor].readcd_known) {
- rc = sr_read_cd(minor, dest, lba, 0, blksize);
- if (-EDRIVE_CANT_DO_THIS != rc)
- return rc;
- scsi_CDs[minor].readcd_known = 0;
- printk("CDROM does'nt support READ CD (0xbe) command\n");
- /* fall & retry the other way */
- }
-
- /* ... if this fails, we switch the blocksize using MODE SELECT */
- if (blksize != scsi_CDs[minor].sector_size)
- if (0 != (rc = sr_set_blocklength(minor, blksize)))
- return rc;
-
+ unsigned char cmd[12]; /* the scsi-command */
+ int rc;
+
+ /* we try the READ CD command first... */
+ if (scsi_CDs[minor].readcd_known) {
+ rc = sr_read_cd(minor, dest, lba, 0, blksize);
+ if (-EDRIVE_CANT_DO_THIS != rc)
+ return rc;
+ scsi_CDs[minor].readcd_known = 0;
+		printk("CDROM doesn't support READ CD (0xbe) command\n");
+ /* fall & retry the other way */
+ }
+ /* ... if this fails, we switch the blocksize using MODE SELECT */
+ if (blksize != scsi_CDs[minor].device->sector_size) {
+ if (0 != (rc = sr_set_blocklength(minor, blksize)))
+ return rc;
+ }
#ifdef DEBUG
- printk("sr%d: sr_read_sector lba=%d blksize=%d\n",minor,lba,blksize);
+ printk("sr%d: sr_read_sector lba=%d blksize=%d\n", minor, lba, blksize);
#endif
-
- memset(cmd,0,12);
- cmd[0] = GPCMD_READ_10;
- cmd[1] = (scsi_CDs[minor].device->lun << 5);
- cmd[2] = (unsigned char)(lba >> 24) & 0xff;
- cmd[3] = (unsigned char)(lba >> 16) & 0xff;
- cmd[4] = (unsigned char)(lba >> 8) & 0xff;
- cmd[5] = (unsigned char) lba & 0xff;
- cmd[8] = 1;
- rc = sr_do_ioctl(minor, cmd, dest, blksize, 0);
-
- return rc;
+
+ memset(cmd, 0, 12);
+ cmd[0] = GPCMD_READ_10;
+ cmd[1] = (scsi_CDs[minor].device->lun << 5);
+ cmd[2] = (unsigned char) (lba >> 24) & 0xff;
+ cmd[3] = (unsigned char) (lba >> 16) & 0xff;
+ cmd[4] = (unsigned char) (lba >> 8) & 0xff;
+ cmd[5] = (unsigned char) lba & 0xff;
+ cmd[8] = 1;
+ rc = sr_do_ioctl(minor, cmd, dest, blksize, 0);
+
+ return rc;
}
/*
* ret: 1 == mode2 (XA), 0 == mode1, <0 == error
*/
-int
-sr_is_xa(int minor)
+int sr_is_xa(int minor)
{
- unsigned char *raw_sector;
- int is_xa;
- unsigned long flags;
-
- if (!xa_test)
- return 0;
-
- spin_lock_irqsave(&io_request_lock, flags);
- raw_sector = (unsigned char *) scsi_malloc(2048+512);
- spin_unlock_irqrestore(&io_request_lock, flags);
- if (!raw_sector) return -ENOMEM;
- if (0 == sr_read_sector(minor,scsi_CDs[minor].ms_offset+16,
- CD_FRAMESIZE_RAW1,raw_sector)) {
- is_xa = (raw_sector[3] == 0x02) ? 1 : 0;
- } else {
- /* read a raw sector failed for some reason. */
- is_xa = -1;
- }
- spin_lock_irqsave(&io_request_lock, flags);
- scsi_free(raw_sector, 2048+512);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ unsigned char *raw_sector;
+ int is_xa;
+
+ if (!xa_test)
+ return 0;
+
+ raw_sector = (unsigned char *) scsi_malloc(2048 + 512);
+ if (!raw_sector)
+ return -ENOMEM;
+ if (0 == sr_read_sector(minor, scsi_CDs[minor].ms_offset + 16,
+ CD_FRAMESIZE_RAW1, raw_sector)) {
+ is_xa = (raw_sector[3] == 0x02) ? 1 : 0;
+ } else {
+ /* read a raw sector failed for some reason. */
+ is_xa = -1;
+ }
+ scsi_free(raw_sector, 2048 + 512);
#ifdef DEBUG
- printk("sr%d: sr_is_xa: %d\n",minor,is_xa);
+ printk("sr%d: sr_is_xa: %d\n", minor, is_xa);
#endif
- return is_xa;
+ return is_xa;
}
int sr_dev_ioctl(struct cdrom_device_info *cdi,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
- int target;
-
- target = MINOR(cdi->dev);
-
- switch (cmd) {
- case BLKROSET:
- case BLKROGET:
- case BLKRASET:
- case BLKRAGET:
- case BLKFLSBUF:
- case BLKSSZGET:
- return blk_ioctl(cdi->dev, cmd, arg);
-
- default:
- return scsi_ioctl(scsi_CDs[target].device,cmd,(void *) arg);
- }
+ int target;
+
+ target = MINOR(cdi->dev);
+
+ switch (cmd) {
+ case BLKROSET:
+ case BLKROGET:
+ case BLKRASET:
+ case BLKRAGET:
+ case BLKFLSBUF:
+ case BLKSSZGET:
+ return blk_ioctl(cdi->dev, cmd, arg);
+
+ default:
+ return scsi_ioctl(scsi_CDs[target].device, cmd, (void *) arg);
+ }
}
/*
/* -*-linux-c-*-
- *
+
* vendor-specific code for SCSI CD-ROM's goes here.
*
* This is needed becauce most of the new features (multisession and
* - TOSHIBA: Detection and support of multisession CD's.
* Some XA-Sector tweaking, required for older drives.
*
- * - SONY: Detection and support of multisession CD's.
+ * - SONY: Detection and support of multisession CD's.
* added by Thomas Quinot <thomas@cuivre.freenix.fr>
*
* - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to
* work with SONY (SCSI3 now) code.
*
- * - HP: Much like SONY, but a little different... (Thomas)
+ * - HP: Much like SONY, but a little different... (Thomas)
* HP-Writers only ??? Maybe other CD-Writers work with this too ?
- * HP 6020 writers now supported.
+ * HP 6020 writers now supported.
*/
#include <linux/config.h>
#include "sr.h"
#if 0
-# define DEBUG
+#define DEBUG
#endif
/* here are some constants to sort the vendors into groups */
-#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */
+#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */
#define VENDOR_NEC 2
#define VENDOR_TOSHIBA 3
-#define VENDOR_WRITER 4 /* pre-scsi3 writers */
+#define VENDOR_WRITER 4 /* pre-scsi3 writers */
#define VENDOR_ID (scsi_CDs[minor].vendor)
VENDOR_ID = VENDOR_SCSI3;
#else
char *vendor = scsi_CDs[minor].device->vendor;
- char *model = scsi_CDs[minor].device->model;
+ char *model = scsi_CDs[minor].device->model;
/* default */
VENDOR_ID = VENDOR_SCSI3;
if (scsi_CDs[minor].device->type == TYPE_WORM) {
VENDOR_ID = VENDOR_WRITER;
- } else if (!strncmp (vendor, "NEC", 3)) {
+ } else if (!strncmp(vendor, "NEC", 3)) {
VENDOR_ID = VENDOR_NEC;
- if (!strncmp (model,"CD-ROM DRIVE:25", 15) ||
- !strncmp (model,"CD-ROM DRIVE:36", 15) ||
- !strncmp (model,"CD-ROM DRIVE:83", 15) ||
- !strncmp (model,"CD-ROM DRIVE:84 ",16)
+ if (!strncmp(model, "CD-ROM DRIVE:25", 15) ||
+ !strncmp(model, "CD-ROM DRIVE:36", 15) ||
+ !strncmp(model, "CD-ROM DRIVE:83", 15) ||
+ !strncmp(model, "CD-ROM DRIVE:84 ", 16)
#if 0
- /* my NEC 3x returns the read-raw data if a read-raw
- is followed by a read for the same sector - aeb */
- || !strncmp (model,"CD-ROM DRIVE:500",16)
+ /* my NEC 3x returns the read-raw data if a read-raw
+ is followed by a read for the same sector - aeb */
+ || !strncmp(model, "CD-ROM DRIVE:500", 16)
#endif
- )
+ )
/* these can't handle multisession, may hang */
scsi_CDs[minor].cdi.mask |= CDC_MULTI_SESSION;
- } else if (!strncmp (vendor, "TOSHIBA", 7)) {
+ } else if (!strncmp(vendor, "TOSHIBA", 7)) {
VENDOR_ID = VENDOR_TOSHIBA;
-
+
}
#endif
}
int sr_set_blocklength(int minor, int blocklength)
{
- unsigned char *buffer; /* the buffer for the ioctl */
- unsigned char cmd[12]; /* the scsi-command */
- struct ccs_modesel_head *modesel;
- int rc,density = 0;
+ unsigned char *buffer; /* the buffer for the ioctl */
+ unsigned char cmd[12]; /* the scsi-command */
+ struct ccs_modesel_head *modesel;
+ int rc, density = 0;
#ifdef CONFIG_BLK_DEV_SR_VENDOR
if (VENDOR_ID == VENDOR_TOSHIBA)
#endif
buffer = (unsigned char *) scsi_malloc(512);
- if (!buffer) return -ENOMEM;
+ if (!buffer)
+ return -ENOMEM;
#ifdef DEBUG
- printk("sr%d: MODE SELECT 0x%x/%d\n",minor,density,blocklength);
+ printk("sr%d: MODE SELECT 0x%x/%d\n", minor, density, blocklength);
#endif
- memset(cmd,0,12);
+ memset(cmd, 0, 12);
cmd[0] = MODE_SELECT;
cmd[1] = (scsi_CDs[minor].device->lun << 5) | (1 << 4);
cmd[4] = 12;
- modesel = (struct ccs_modesel_head*)buffer;
- memset(modesel,0,sizeof(*modesel));
+ modesel = (struct ccs_modesel_head *) buffer;
+ memset(modesel, 0, sizeof(*modesel));
modesel->block_desc_length = 0x08;
- modesel->density = density;
- modesel->block_length_med = (blocklength >> 8 ) & 0xff;
- modesel->block_length_lo = blocklength & 0xff;
- if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0)))
- scsi_CDs[minor].sector_size = blocklength;
+ modesel->density = density;
+ modesel->block_length_med = (blocklength >> 8) & 0xff;
+ modesel->block_length_lo = blocklength & 0xff;
+ if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0))) {
+ scsi_CDs[minor].device->sector_size = blocklength;
+ }
#ifdef DEBUG
else
printk("sr%d: switching blocklength to %d bytes failed\n",
- minor,blocklength);
+ minor, blocklength);
#endif
scsi_free(buffer, 512);
return rc;
int sr_cd_check(struct cdrom_device_info *cdi)
{
- unsigned long sector;
- unsigned char *buffer; /* the buffer for the ioctl */
- unsigned char cmd[12]; /* the scsi-command */
- int rc,no_multi,minor;
+ unsigned long sector;
+ unsigned char *buffer; /* the buffer for the ioctl */
+ unsigned char cmd[12]; /* the scsi-command */
+ int rc, no_multi, minor;
minor = MINOR(cdi->dev);
if (scsi_CDs[minor].cdi.mask & CDC_MULTI_SESSION)
return 0;
-
- spin_lock_irq(&io_request_lock);
+
buffer = (unsigned char *) scsi_malloc(512);
- spin_unlock_irq(&io_request_lock);
- if(!buffer) return -ENOMEM;
-
- sector = 0; /* the multisession sector offset goes here */
- no_multi = 0; /* flag: the drive can't handle multisession */
- rc = 0;
-
- switch(VENDOR_ID) {
-
+ if (!buffer)
+ return -ENOMEM;
+
+ sector = 0; /* the multisession sector offset goes here */
+ no_multi = 0; /* flag: the drive can't handle multisession */
+ rc = 0;
+
+ switch (VENDOR_ID) {
+
case VENDOR_SCSI3:
- memset(cmd,0,12);
+ memset(cmd, 0, 12);
cmd[0] = READ_TOC;
cmd[1] = (scsi_CDs[minor].device->lun << 5);
cmd[8] = 12;
break;
if ((buffer[0] << 8) + buffer[1] < 0x0a) {
printk(KERN_INFO "sr%d: Hmm, seems the drive "
- "doesn't support multisession CD's\n",minor);
+ "doesn't support multisession CD's\n", minor);
no_multi = 1;
break;
}
sector = buffer[11] + (buffer[10] << 8) +
- (buffer[9] << 16) + (buffer[8] << 24);
+ (buffer[9] << 16) + (buffer[8] << 24);
if (buffer[6] <= 1) {
/* ignore sector offsets from first track */
sector = 0;
}
break;
-
+
#ifdef CONFIG_BLK_DEV_SR_VENDOR
- case VENDOR_NEC: {
- unsigned long min,sec,frame;
- memset(cmd,0,12);
- cmd[0] = 0xde;
- cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03;
- cmd[2] = 0xb0;
- rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1);
- if (rc != 0)
- break;
- if (buffer[14] != 0 && buffer[14] != 0xb0) {
- printk(KERN_INFO "sr%d: Hmm, seems the cdrom "
- "doesn't support multisession CD's\n",minor);
- no_multi = 1;
+ case VENDOR_NEC:{
+ unsigned long min, sec, frame;
+ memset(cmd, 0, 12);
+ cmd[0] = 0xde;
+ cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03;
+ cmd[2] = 0xb0;
+ rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1);
+ if (rc != 0)
+ break;
+ if (buffer[14] != 0 && buffer[14] != 0xb0) {
+ printk(KERN_INFO "sr%d: Hmm, seems the cdrom "
+ "doesn't support multisession CD's\n", minor);
+ no_multi = 1;
+ break;
+ }
+ min = BCD_TO_BIN(buffer[15]);
+ sec = BCD_TO_BIN(buffer[16]);
+ frame = BCD_TO_BIN(buffer[17]);
+ sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
break;
}
- min = BCD_TO_BIN(buffer[15]);
- sec = BCD_TO_BIN(buffer[16]);
- frame = BCD_TO_BIN(buffer[17]);
- sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
- break;
- }
- case VENDOR_TOSHIBA: {
- unsigned long min,sec,frame;
+ case VENDOR_TOSHIBA:{
+ unsigned long min, sec, frame;
- /* we request some disc information (is it a XA-CD ?,
- * where starts the last session ?) */
- memset(cmd,0,12);
- cmd[0] = 0xc7;
- cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3;
- rc = sr_do_ioctl(minor, cmd, buffer, 4, 1);
- if (rc == -EINVAL) {
- printk(KERN_INFO "sr%d: Hmm, seems the drive "
- "doesn't support multisession CD's\n",minor);
- no_multi = 1;
+ /* we request some disc information (is it a XA-CD ?,
+ * where starts the last session ?) */
+ memset(cmd, 0, 12);
+ cmd[0] = 0xc7;
+ cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3;
+ rc = sr_do_ioctl(minor, cmd, buffer, 4, 1);
+ if (rc == -EINVAL) {
+ printk(KERN_INFO "sr%d: Hmm, seems the drive "
+ "doesn't support multisession CD's\n", minor);
+ no_multi = 1;
+ break;
+ }
+ if (rc != 0)
+ break;
+ min = BCD_TO_BIN(buffer[1]);
+ sec = BCD_TO_BIN(buffer[2]);
+ frame = BCD_TO_BIN(buffer[3]);
+ sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
+ if (sector)
+ sector -= CD_MSF_OFFSET;
+ sr_set_blocklength(minor, 2048);
break;
}
- if (rc != 0)
- break;
- min = BCD_TO_BIN(buffer[1]);
- sec = BCD_TO_BIN(buffer[2]);
- frame = BCD_TO_BIN(buffer[3]);
- sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
- if (sector)
- sector -= CD_MSF_OFFSET;
- sr_set_blocklength(minor,2048);
- break;
- }
case VENDOR_WRITER:
- memset(cmd,0,12);
+ memset(cmd, 0, 12);
cmd[0] = READ_TOC;
cmd[1] = (scsi_CDs[minor].device->lun << 5);
cmd[8] = 0x04;
break;
}
if ((rc = buffer[2]) == 0) {
- printk (KERN_WARNING
- "sr%d: No finished session\n",minor);
+ printk(KERN_WARNING
+ "sr%d: No finished session\n", minor);
break;
}
-
- cmd[0] = READ_TOC; /* Read TOC */
+ cmd[0] = READ_TOC; /* Read TOC */
cmd[1] = (scsi_CDs[minor].device->lun << 5);
- cmd[6] = rc & 0x7f; /* number of last session */
+ cmd[6] = rc & 0x7f; /* number of last session */
cmd[8] = 0x0c;
cmd[9] = 0x40;
- rc = sr_do_ioctl(minor, cmd, buffer, 12, 1);
+ rc = sr_do_ioctl(minor, cmd, buffer, 12, 1);
if (rc != 0) {
break;
}
-
sector = buffer[11] + (buffer[10] << 8) +
- (buffer[9] << 16) + (buffer[8] << 24);
+ (buffer[9] << 16) + (buffer[8] << 24);
break;
-#endif /* CONFIG_BLK_DEV_SR_VENDOR */
+#endif /* CONFIG_BLK_DEV_SR_VENDOR */
default:
/* should not happen */
printk(KERN_WARNING
- "sr%d: unknown vendor code (%i), not initialized ?\n",
- minor,VENDOR_ID);
+ "sr%d: unknown vendor code (%i), not initialized ?\n",
+ minor, VENDOR_ID);
sector = 0;
no_multi = 1;
break;
scsi_CDs[minor].xa_flag = 0;
if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(minor))
scsi_CDs[minor].xa_flag = 1;
-
- if (2048 != scsi_CDs[minor].sector_size)
- sr_set_blocklength(minor,2048);
+
+ if (2048 != scsi_CDs[minor].device->sector_size) {
+ sr_set_blocklength(minor, 2048);
+ }
if (no_multi)
cdi->mask |= CDC_MULTI_SESSION;
#ifdef DEBUG
if (sector)
printk(KERN_DEBUG "sr%d: multisession offset=%lu\n",
- minor,sector);
+ minor, sector);
#endif
scsi_free(buffer, 512);
return rc;
st_do_scsi(Scsi_Cmnd * SCpnt, Scsi_Tape * STp, unsigned char *cmd, int bytes,
int timeout, int retries, int do_wait)
{
- unsigned long flags;
unsigned char *bp;
- spin_lock_irqsave(&io_request_lock, flags);
if (SCpnt == NULL)
- if ((SCpnt = scsi_allocate_device(NULL, STp->device, 1)) == NULL) {
+ SCpnt = scsi_allocate_device(STp->device, 1);
+ if (SCpnt == NULL) {
printk(KERN_ERR "st%d: Can't get SCSI request.\n",
TAPE_NR(STp->devt));
- spin_unlock_irqrestore(&io_request_lock, flags);
return NULL;
}
scsi_do_cmd(SCpnt, (void *) cmd, bp, bytes,
st_sleep_done, timeout, retries);
- spin_unlock_irqrestore(&io_request_lock, flags);
if (do_wait) {
down(SCpnt->request.sem);
}
else {
unsigned long flags;
- sh[j]->wish_block = TRUE;
+//FIXME// sh[j]->wish_block = TRUE;
sh[j]->unchecked_isa_dma = TRUE;
flags=claim_dma_lock();
if (mad16 == 0 && trix == 0 && pas2 == 0 && support == 0)
{
#ifdef CONFIG_ISAPNP
- if (sb_probe_isapnp(&config, &config_mpu)<0)
+ if (isapnp == 1 && sb_probe_isapnp(&config, &config_mpu)<0)
{
printk(KERN_ERR "sb_card: No ISAPnP cards found\n");
return -EINVAL;
}
+ else
+ {
+#endif
+ if (io == -1 || dma == -1 || irq == -1)
+ {
+ printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n");
+ return -EINVAL;
+ }
+ config.io_base = io;
+ config.irq = irq;
+ config.dma = dma;
+ config.dma2 = dma16;
+ config.card_subtype = type;
+#ifdef CONFIG_ISAPNP
+ }
#endif
- }
- if (io == -1 || dma == -1 || irq == -1)
- {
- printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n");
- return -EINVAL;
- }
- config.io_base = io;
- config.irq = irq;
- config.dma = dma;
- config.dma2 = dma16;
- config.card_subtype = type;
-
- if (!probe_sb(&config))
- return -ENODEV;
- attach_sb_card(&config);
+ if (!probe_sb(&config))
+ return -ENODEV;
+ attach_sb_card(&config);
- if(config.slots[0]==-1)
- return -ENODEV;
+ if(config.slots[0]==-1)
+ return -ENODEV;
#ifdef CONFIG_MIDI
- if (isapnp == 0)
- config_mpu.io_base = mpu_io;
- if (probe_sbmpu(&config_mpu))
- sbmpu = 1;
- if (sbmpu)
- attach_sbmpu(&config_mpu);
+ if (isapnp == 0)
+ config_mpu.io_base = mpu_io;
+ if (probe_sbmpu(&config_mpu))
+ sbmpu = 1;
+ if (sbmpu)
+ attach_sbmpu(&config_mpu);
#endif
+ }
SOUND_LOCK;
return 0;
}
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <video/fbcon.h>
static int currcon = 0;
fi
tristate 'NFS server support' CONFIG_NFSD
if [ "$CONFIG_NFSD" != "n" ]; then
- bool ' Emulate SUN NFS server' CONFIG_NFSD_SUN
- fi
- if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- bool ' Provide NFSv3 server support (EXPERIMENTAL)' CONFIG_NFSD_V3
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ bool ' Provide NFSv3 server support (EXPERIMENTAL)' CONFIG_NFSD_V3
+ fi
fi
if [ "$CONFIG_NFS_FS" = "y" -o "$CONFIG_NFSD" = "y" ]; then
*
* ------------------------------------------------------------------------- */
-#include <linux/string.h>
-#include <linux/sched.h>
#include "autofs_i.h"
static int autofs_readlink(struct dentry *dentry, char *buffer, int buflen)
{
- struct autofs_symlink *sl;
- int len;
-
- sl = (struct autofs_symlink *)dentry->d_inode->u.generic_ip;
- len = sl->len;
- if (len > buflen) len = buflen;
- copy_to_user(buffer, sl->data, len);
- return len;
+ char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
+ return vfs_readlink(dentry, buffer, buflen, s);
}
-static struct dentry * autofs_follow_link(struct dentry *dentry,
- struct dentry *base,
- unsigned int follow)
+static struct dentry *autofs_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
{
- struct autofs_symlink *sl;
-
- sl = (struct autofs_symlink *)dentry->d_inode->u.generic_ip;
- return lookup_dentry(sl->data, base, follow);
+ char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
+ return vfs_follow_link(dentry, base, flags, s);
}
struct inode_operations autofs_symlink_inode_operations = {
- NULL, /* file operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- autofs_readlink, /* readlink */
- autofs_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
+ readlink: autofs_readlink,
+ follow_link: autofs_follow_link
};
return err;
}
-int block_write_range(struct dentry *dentry, struct page *page,
+int block_write_zero_range(struct inode *inode, struct page *page,
unsigned zerofrom, unsigned from, unsigned to,
const char * buf)
{
- struct inode *inode = dentry->d_inode;
unsigned zeroto = 0, block_start, block_end;
unsigned long block;
int err = 0, partial = 0, need_balance_dirty = 0;
int block_write_partial_page(struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf)
{
- struct dentry *dentry = file->f_dentry;
+ struct inode *inode = file->f_dentry->d_inode;
int err;
if (!PageLocked(page))
if (bytes+offset < 0 || bytes+offset > PAGE_SIZE)
BUG();
- err = block_write_range(dentry, page, offset,offset,offset+bytes, buf);
+ err = block_write_range(inode, page, offset, bytes, buf);
return err ? err : bytes;
}
int block_write_cont_page(struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf)
{
- struct dentry *dentry = file->f_dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file->f_dentry->d_inode;
int err;
unsigned zerofrom = offset;
else if (page->index == (inode->i_size >> PAGE_CACHE_SHIFT) &&
offset > (inode->i_size & ~PAGE_CACHE_MASK))
zerofrom = inode->i_size & ~PAGE_CACHE_MASK;
- err = block_write_range(dentry, page, zerofrom,offset,offset+bytes,buf);
+ err = block_write_zero_range(inode, page, zerofrom,offset,offset+bytes,
+ buf);
return err ? err : bytes;
}
* mark_buffer_uptodate() functions propagate buffer state into the
* page struct once IO has completed.
*/
-int block_read_full_page(struct dentry * dentry, struct page * page)
+static inline int __block_read_full_page(struct inode *inode, struct page *page)
{
- struct inode *inode = dentry->d_inode;
unsigned long iblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
unsigned int blocksize, blocks;
return 0;
}
+int block_read_full_page(struct dentry *dentry, struct page *page)
+{
+ return __block_read_full_page(dentry->d_inode, page);
+}
+
+int block_symlink(struct inode *inode, const char *symname, int len)
+{
+ struct page *page = grab_cache_page(&inode->i_data, 0);
+ mm_segment_t fs;
+ int err = -ENOMEM;
+
+ if (!page)
+ goto fail;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = block_write_range(inode, page, 0, len-1, symname);
+ set_fs(fs);
+ inode->i_size = len-1;
+ if (err)
+ goto fail_write;
+ /*
+ * Notice that we are _not_ going to block here - end of page is
+ * unmapped, so this will only try to map the rest of page, see
+ * that it is unmapped (typically even will not look into inode -
+ * ->i_size will be enough for everything) and zero it out.
+ * OTOH it's obviously correct and should make the page up-to-date.
+ */
+ err = __block_read_full_page(inode, page);
+ wait_on_page(page);
+ page_cache_release(page);
+ if (err < 0)
+ goto fail;
+ mark_inode_dirty(inode);
+ return 0;
+fail_write:
+ UnlockPage(page);
+ page_cache_release(page);
+fail:
+ return err;
+}
+
/*
* Try to increase the number of buffers available: the size argument
* is used to determine what kind of buffers we want.
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/locks.h>
-#include <asm/segment.h>
-#include <asm/uaccess.h>
-#include <linux/string.h>
#include <linux/coda.h>
#include <linux/coda_linux.h>
#include <linux/coda_cache.h>
#include <linux/coda_proc.h>
-static int coda_readlink(struct dentry *de, char *buffer, int length);
-static struct dentry *coda_follow_link(struct dentry *, struct dentry *,
- unsigned int);
-
-struct inode_operations coda_symlink_inode_operations = {
- NULL, /* no file-operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- coda_readlink, /* readlink */
- coda_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
-};
-
-static int coda_readlink(struct dentry *de, char *buffer, int length)
+static int coda_symlink_filler(struct dentry *dentry, struct page *page)
{
- struct inode *inode = de->d_inode;
- int len;
- int error;
- char *buf;
- struct coda_inode_info *cp;
- ENTRY;
-
- cp = ITOC(inode);
- coda_vfs_stat.readlink++;
-
- /* the maximum length we receive is len */
- if ( length > CODA_MAXPATHLEN )
- len = CODA_MAXPATHLEN;
- else
- len = length;
- CODA_ALLOC(buf, char *, len);
- if ( !buf )
- return -ENOMEM;
-
- error = venus_readlink(inode->i_sb, &(cp->c_fid), buf, &len);
-
- CDEBUG(D_INODE, "result %s\n", buf);
- if (! error) {
- copy_to_user(buffer, buf, len);
- put_user('\0', buffer + len);
- error = len;
- }
- if ( buf )
- CODA_FREE(buf, len);
- return error;
-}
-
-static struct dentry *coda_follow_link(struct dentry *de, struct dentry *base,
- unsigned int follow)
-{
- struct inode *inode = de->d_inode;
+ struct inode *inode = dentry->d_inode;
int error;
struct coda_inode_info *cnp;
- unsigned int len;
- char mem[CODA_MAXPATHLEN];
- char *path;
- ENTRY;
- CDEBUG(D_INODE, "(%x/%ld)\n", inode->i_dev, inode->i_ino);
-
+ unsigned int len = PAGE_SIZE;
+ char *p = (char*)kmap(page);
+
cnp = ITOC(inode);
coda_vfs_stat.follow_link++;
- len = CODA_MAXPATHLEN;
- error = venus_readlink(inode->i_sb, &(cnp->c_fid), mem, &len);
-
- if (error) {
- dput(base);
- return ERR_PTR(error);
- }
- len = strlen(mem);
- path = kmalloc(len + 1, GFP_KERNEL);
- if (!path) {
- dput(base);
- return ERR_PTR(-ENOMEM);
- }
- memcpy(path, mem, len);
- path[len] = 0;
-
- base = lookup_dentry(path, base, follow);
- kfree(path);
- return base;
+ error = venus_readlink(inode->i_sb, &(cnp->c_fid), p, &len);
+ if (error)
+ goto fail;
+ SetPageUptodate(page);
+ kunmap(page);
+ UnlockPage(page);
+ return 0;
+
+fail:
+ SetPageError(page);
+ kunmap(page);
+ UnlockPage(page);
+ return error;
}
+
+struct inode_operations coda_symlink_inode_operations = {
+ readlink: page_readlink,
+ follow_link: page_follow_link,
+ readpage: coda_symlink_filler
+};
else if (S_ISDIR(inode->i_mode))
inode->i_op = &ext2_dir_inode_operations;
else if (S_ISLNK(inode->i_mode))
- inode->i_op = &ext2_symlink_inode_operations;
+ inode->i_op = inode->i_blocks
+ ?&ext2_symlink_inode_operations
+ :&ext2_fast_symlink_inode_operations;
else
init_special_inode(inode, inode->i_mode,
le32_to_cpu(raw_inode->i_block[0]));
int ext2_symlink (struct inode * dir, struct dentry *dentry, const char * symname)
{
- struct ext2_dir_entry_2 * de;
struct inode * inode;
- struct buffer_head * bh = NULL, * name_block = NULL;
- char * link;
- int i, l, err = -EIO;
- char c;
+ struct ext2_dir_entry_2 * de;
+ struct buffer_head * bh = NULL;
+ int l, err;
- if (!(inode = ext2_new_inode (dir, S_IFLNK, &err))) {
- return err;
- }
- inode->i_mode = S_IFLNK | S_IRWXUGO;
- inode->i_op = &ext2_symlink_inode_operations;
- for (l = 0; l < inode->i_sb->s_blocksize - 1 &&
- symname [l]; l++)
- ;
- if (l >= sizeof (inode->u.ext2_i.i_data)) {
-
- ext2_debug ("l=%d, normal symlink\n", l);
-
- name_block = ext2_bread (inode, 0, 1, &err);
- if (!name_block) {
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput (inode);
- return err;
- }
- link = name_block->b_data;
- } else {
- link = (char *) inode->u.ext2_i.i_data;
+ err = -ENAMETOOLONG;
+ l = strlen(symname)+1;
+ if (l > dir->i_sb->s_blocksize)
+ goto out;
- ext2_debug ("l=%d, fast symlink\n", l);
+ err = -EIO;
+ if (!(inode = ext2_new_inode (dir, S_IFLNK, &err)))
+ goto out;
+ inode->i_mode = S_IFLNK | S_IRWXUGO;
+
+ if (l > sizeof (inode->u.ext2_i.i_data)) {
+ inode->i_op = &ext2_symlink_inode_operations;
+ err = block_symlink(inode, symname, l);
+ if (err)
+ goto out_no_entry;
+ } else {
+ inode->i_op = &ext2_fast_symlink_inode_operations;
+ memcpy((char*)&inode->u.ext2_i.i_data,symname,l);
+ inode->i_size = l-1;
}
- i = 0;
- while (i < inode->i_sb->s_blocksize - 1 && (c = *(symname++)))
- link[i++] = c;
- link[i] = 0;
- if (name_block) {
- mark_buffer_dirty(name_block, 1);
- brelse (name_block);
- }
- inode->i_size = i;
mark_inode_dirty(inode);
bh = ext2_add_entry (dir, dentry->d_name.name, dentry->d_name.len, &de, &err);
*/
#include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/ext2_fs.h>
-
-
-static int ext2_readlink (struct dentry *, char *, int);
-static struct dentry *ext2_follow_link(struct dentry *, struct dentry *, unsigned int);
-
-/*
- * symlinks can't do much...
- */
-struct inode_operations ext2_symlink_inode_operations = {
- NULL, /* no file-operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- ext2_readlink, /* readlink */
- ext2_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
-};
-
-static struct dentry * ext2_follow_link(struct dentry * dentry,
- struct dentry *base,
- unsigned int follow)
+static int ext2_readlink(struct dentry *dentry, char *buffer, int buflen)
{
- struct inode *inode = dentry->d_inode;
- struct buffer_head * bh = NULL;
- int error;
- char * link;
-
- link = (char *) inode->u.ext2_i.i_data;
- if (inode->i_blocks) {
- if (!(bh = ext2_bread (inode, 0, 0, &error))) {
- dput(base);
- return ERR_PTR(-EIO);
- }
- link = bh->b_data;
- }
- UPDATE_ATIME(inode);
- base = lookup_dentry(link, base, follow);
- if (bh)
- brelse(bh);
- return base;
+ char *s = (char *)dentry->d_inode->u.ext2_i.i_data;
+ return vfs_readlink(dentry, buffer, buflen, s);
}
-static int ext2_readlink (struct dentry * dentry, char * buffer, int buflen)
+static struct dentry *ext2_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
{
- struct inode *inode = dentry->d_inode;
- struct buffer_head * bh = NULL;
- char * link;
- int i;
-
- if (buflen > inode->i_sb->s_blocksize - 1)
- buflen = inode->i_sb->s_blocksize - 1;
+ char *s = (char *)dentry->d_inode->u.ext2_i.i_data;
+ return vfs_follow_link(dentry, base, flags, s);
+}
- link = (char *) inode->u.ext2_i.i_data;
- if (inode->i_blocks) {
- int err;
- bh = ext2_bread (inode, 0, 0, &err);
- if (!bh) {
- if(err < 0) /* indicate type of error */
- return err;
- return 0;
- }
- link = bh->b_data;
- }
+struct inode_operations ext2_fast_symlink_inode_operations = {
+ readlink: ext2_readlink,
+ follow_link: ext2_follow_link,
+};
- i = 0;
- while (i < buflen && link[i])
- i++;
- if (copy_to_user(buffer, link, i))
- i = -EFAULT;
- if (bh)
- brelse (bh);
- return i;
-}
+struct inode_operations ext2_symlink_inode_operations = {
+ readlink: page_readlink,
+ follow_link: page_follow_link,
+ get_block: ext2_get_block,
+ readpage: block_read_full_page,
+};
case F_SETLKW:
err = fcntl_setlk(fd, cmd, (struct flock *) arg);
break;
+#if BITS_PER_LONG == 32	/* LFS only on 32 bit platforms */
+ case F_GETLK64:
+ err = fcntl_getlk64(fd, (struct flock64 *) arg);
+ break;
+ case F_SETLK64:
+ err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
+ break;
+ case F_SETLKW64:
+ err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
+ break;
+#endif
case F_GETOWN:
/*
* XXX If f_owner is a process group, the
struct nlm_block **head, *block;
struct file_lock *fl;
- dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %ld-%ld ty=%d\n",
+ dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
file, lock->fl.fl_pid, lock->fl.fl_start,
lock->fl.fl_end, lock->fl.fl_type);
for (head = &nlm_blocked; (block = *head); head = &block->b_next) {
fl = &block->b_call.a_args.lock.fl;
- dprintk(" check f=%p pd=%d %ld-%ld ty=%d\n",
+ dprintk(" check f=%p pd=%d %Ld-%Ld ty=%d\n",
block->b_file, fl->fl_pid, fl->fl_start,
fl->fl_end, fl->fl_type);
if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
struct nlm_block *block;
int error;
- dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %ld-%ld, bl=%d)\n",
+ dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_type, lock->fl.fl_pid,
{
struct file_lock *fl;
- dprintk("lockd: nlmsvc_testlock(%04x/%ld, ty=%d, %ld-%ld)\n",
+ dprintk("lockd: nlmsvc_testlock(%04x/%ld, ty=%d, %Ld-%Ld)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_type,
lock->fl.fl_end);
if ((fl = posix_test_lock(&file->f_file, &lock->fl)) != NULL) {
- dprintk("lockd: conflicting lock(ty=%d, %ld-%ld)\n",
+ dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
fl->fl_type, fl->fl_start, fl->fl_end);
conflock->caller = "somehost"; /* FIXME */
conflock->oh.len = 0; /* don't return OH info */
{
int error;
- dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %ld-%ld)\n",
+ dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %Ld-%Ld)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_pid,
{
struct nlm_block *block;
- dprintk("lockd: nlmsvc_cancel(%04x/%ld, pi=%d, %ld-%ld)\n",
+ dprintk("lockd: nlmsvc_cancel(%04x/%ld, pi=%d, %Ld-%Ld)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_pid,
fl->fl_pid = ntohl(*p++);
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
- fl->fl_start = ntohl(*p++);
+ fl->fl_start = (u_long)ntohl(*p++); /* up to 4G-1 */
len = ntohl(*p++);
if (len == 0 || (fl->fl_end = fl->fl_start + len - 1) < 0)
fl->fl_end = NLM_OFFSET_MAX;
return NULL;
*p++ = htonl(fl->fl_pid);
- *p++ = htonl(lock->fl.fl_start);
+ *p++ = htonl((u_long)lock->fl.fl_start);
if (lock->fl.fl_end == NLM_OFFSET_MAX)
*p++ = xdr_zero;
else
- *p++ = htonl(lock->fl.fl_end - lock->fl.fl_start + 1);
+ *p++ = htonl((u_long)(lock->fl.fl_end - lock->fl.fl_start + 1));
return p;
}
if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
return 0;
- *p++ = htonl(fl->fl_start);
+ *p++ = htonl((u_long)fl->fl_start);
if (fl->fl_end == NLM_OFFSET_MAX)
*p++ = xdr_zero;
else
- *p++ = htonl(fl->fl_end - fl->fl_start + 1);
+ *p++ = htonl((u_long)(fl->fl_end - fl->fl_start + 1));
}
return p;
fl->fl_flags = FL_POSIX;
fl->fl_type = excl? F_WRLCK : F_RDLCK;
- fl->fl_start = ntohl(*p++);
+ fl->fl_start = (u_long)ntohl(*p++);
len = ntohl(*p++);
if (len == 0 || (fl->fl_end = fl->fl_start + len - 1) < 0)
fl->fl_end = NLM_OFFSET_MAX;
#include <asm/uaccess.h>
-#define OFFSET_MAX ((off_t)LONG_MAX) /* FIXME: move elsewhere? */
+#define OFFSET_MAX (~(loff_t)0ULL >> 1) /* FIXME: move elsewhere? */
static int flock_make_lock(struct file *filp, struct file_lock *fl,
unsigned int cmd);
static int posix_make_lock(struct file *filp, struct file_lock *fl,
- struct flock *l);
+ struct flock64 *l);
static int flock_locks_conflict(struct file_lock *caller_fl,
struct file_lock *sys_fl);
static int posix_locks_conflict(struct file_lock *caller_fl,
if (waiter->fl_prevblock) {
printk(KERN_ERR "locks_insert_block: remove duplicated lock "
- "(pid=%d %ld-%ld type=%d)\n",
+ "(pid=%d %Ld-%Ld type=%d)\n",
waiter->fl_pid, waiter->fl_start,
waiter->fl_end, waiter->fl_type);
locks_delete_block(waiter->fl_prevblock, waiter);
/* Report the first existing lock that would conflict with l.
* This implements the F_GETLK command of fcntl().
*/
-int fcntl_getlk(unsigned int fd, struct flock *l)
+static int do_fcntl_getlk(unsigned int fd, struct flock64 *flock)
{
struct file *filp;
struct file_lock *fl,file_lock;
- struct flock flock;
int error;
- error = -EFAULT;
- if (copy_from_user(&flock, l, sizeof(flock)))
- goto out;
error = -EINVAL;
- if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
+ if ((flock->l_type != F_RDLCK) && (flock->l_type != F_WRLCK))
goto out;
error = -EBADF;
if (!filp->f_dentry || !filp->f_dentry->d_inode)
goto out_putf;
- if (!posix_make_lock(filp, &file_lock, &flock))
+ if (!posix_make_lock(filp, &file_lock, flock))
goto out_putf;
if (filp->f_op->lock) {
fl = posix_test_lock(filp, &file_lock);
}
- flock.l_type = F_UNLCK;
+ flock->l_type = F_UNLCK;
if (fl != NULL) {
- flock.l_pid = fl->fl_pid;
- flock.l_start = fl->fl_start;
- flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
+ flock->l_pid = fl->fl_pid;
+ flock->l_start = fl->fl_start;
+ flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
fl->fl_end - fl->fl_start + 1;
- flock.l_whence = 0;
- flock.l_type = fl->fl_type;
+ flock->l_whence = 0;
+ flock->l_type = fl->fl_type;
}
- error = -EFAULT;
- if (!copy_to_user(l, &flock, sizeof(flock)))
- error = 0;
out_putf:
fput(filp);
/* Apply the lock described by l to an open file descriptor.
* This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/
-int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
+static int do_fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock64 *flock)
{
struct file *filp;
struct file_lock file_lock;
- struct flock flock;
struct dentry * dentry;
struct inode *inode;
int error;
- /*
- * This might block, so we do it before checking the inode.
- */
- error = -EFAULT;
- if (copy_from_user(&flock, l, sizeof(flock)))
- goto out;
-
/* Get arguments and validate them ...
*/
}
error = -EINVAL;
- if (!posix_make_lock(filp, &file_lock, &flock))
+ if (!posix_make_lock(filp, &file_lock, flock))
goto out_putf;
error = -EBADF;
- switch (flock.l_type) {
+ switch (flock->l_type) {
case F_RDLCK:
if (!(filp->f_mode & FMODE_READ))
goto out_putf;
return error;
}
+int fcntl_getlk(unsigned int fd, struct flock *l)
+{
+ struct flock flock;
+ struct flock64 fl64;
+ int error;
+
+ error = -EFAULT;
+ if (copy_from_user(&flock, l, sizeof(flock)))
+ goto out;
+
+ /* Convert to 64-bit offsets for internal use */
+ fl64.l_type = flock.l_type;
+ fl64.l_whence = flock.l_whence;
+ fl64.l_start = (unsigned long)flock.l_start;
+ fl64.l_len = (unsigned long)flock.l_len;
+ fl64.l_pid = flock.l_pid;
+
+ error = do_fcntl_getlk(fd, &fl64);
+ if (error)
+ goto out;
+
+ /* and back again... */
+ flock.l_type = fl64.l_type;
+ flock.l_whence = fl64.l_whence;
+ flock.l_start = (unsigned long)fl64.l_start;
+ flock.l_len = (unsigned long)fl64.l_len;
+ flock.l_pid = fl64.l_pid;
+
+ if (copy_to_user(l, &flock, sizeof(flock)))
+ error = -EFAULT;
+out:
+ return error;
+}
+
+int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
+{
+ struct flock flock;
+ struct flock64 fl64;
+ int error;
+
+ error = -EFAULT;
+ if (copy_from_user(&flock, l, sizeof(flock)))
+ goto out;
+
+ /* Convert to 64-bit offsets for internal use */
+ fl64.l_type = flock.l_type;
+ fl64.l_whence = flock.l_whence;
+ fl64.l_start = (unsigned long)flock.l_start;
+ fl64.l_len = (unsigned long)flock.l_len;
+ fl64.l_pid = flock.l_pid;
+
+ error = do_fcntl_setlk(fd, cmd, &fl64);
+out:
+ return error;
+}
+
+#if BITS_PER_LONG == 32 /* LFS versions for 32 bit platforms */
+int fcntl_getlk64(unsigned int fd, struct flock64 *l)
+{
+ struct flock64 fl64;
+ int error;
+
+ error = -EFAULT;
+ if (copy_from_user(&fl64, l, sizeof(fl64)))
+ goto out;
+
+ error = do_fcntl_getlk(fd, &fl64);
+
+ if (!error && copy_to_user(l, &fl64, sizeof(fl64)))
+ error = -EFAULT;
+out:
+ return error;
+}
+
+int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l)
+{
+ struct flock64 fl64;
+ int error;
+
+ error = -EFAULT;
+ if (copy_from_user(&fl64, l, sizeof(fl64)))
+ goto out;
+
+ error = do_fcntl_setlk(fd, cmd, &fl64);
+out:
+ return error;
+}
+#endif
+
/*
* This function is called when the file is being removed
* from the task's fd array.
* style lock.
*/
static int posix_make_lock(struct file *filp, struct file_lock *fl,
- struct flock *l)
+ struct flock64 *l)
{
off_t start;
p += sprintf(p, "FLOCK ADVISORY ");
}
p += sprintf(p, "%s ", (fl->fl_type == F_RDLCK) ? "READ " : "WRITE");
- p += sprintf(p, "%d %s:%ld %ld %ld ",
+ p += sprintf(p, "%d %s:%ld %Ld %Ld ",
fl->fl_pid,
kdevname(inode->i_dev), inode->i_ino, fl->fl_start,
fl->fl_end);
{
struct minix_dir_entry * de;
struct inode * inode = NULL;
- struct buffer_head * bh = NULL, * name_block = NULL;
+ struct buffer_head * bh = NULL;
int i;
- char c;
-
- inode = minix_new_inode(dir, &i);
- if (i)
- return i;
+ int err;
+
+ err = -ENAMETOOLONG;
+ i = strlen(symname)+1;
+ if (i>1024)
+ goto out;
+ inode = minix_new_inode(dir, &err);
+ if (err)
+ goto out;
+ err = -ENOSPC;
if (!inode)
- return -ENOSPC;
+ goto out;
inode->i_mode = S_IFLNK | 0777;
inode->i_op = &minix_symlink_inode_operations;
- name_block = minix_bread(inode,0,1);
- if (!name_block) {
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput(inode);
- return -ENOSPC;
- }
- i = 0;
- while (i < 1023 && (c=*(symname++)))
- name_block->b_data[i++] = c;
- name_block->b_data[i] = 0;
- mark_buffer_dirty(name_block, 1);
- brelse(name_block);
- inode->i_size = i;
- mark_inode_dirty(inode);
- i = minix_add_entry(dir, dentry->d_name.name,
+ err = block_symlink(inode, symname, i);
+ if (err)
+ goto fail;
+
+ err = minix_add_entry(dir, dentry->d_name.name,
dentry->d_name.len, &bh, &de);
- if (i) {
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput(inode);
- return i;
- }
+ if (err)
+ goto fail;
+
de->inode = inode->i_ino;
mark_buffer_dirty(bh, 1);
brelse(bh);
d_instantiate(dentry, inode);
- return 0;
+out:
+ return err;
+fail:
+ inode->i_nlink--;
+ mark_inode_dirty(inode);
+ iput(inode);
+ goto out;
}
int minix_link(struct dentry * old_dentry, struct inode * dir,
* Copyright (C) 1991, 1992 Linus Torvalds
*
* minix symlink handling code
+ *
+ * Code removed. 1999, AV ;-)
*/
-#include <linux/errno.h>
-#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/minix_fs.h>
-#include <linux/stat.h>
-
-#include <asm/uaccess.h>
-
-static int minix_readlink(struct dentry *, char *, int);
-static struct dentry *minix_follow_link(struct dentry *, struct dentry *, unsigned int);
/*
* symlinks can't do much...
*/
struct inode_operations minix_symlink_inode_operations = {
- NULL, /* no file-operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- minix_readlink, /* readlink */
- minix_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
+ readlink: page_readlink,
+ follow_link: page_follow_link,
+ get_block: minix_get_block,
+ readpage: block_read_full_page
};
-
-static struct dentry * minix_follow_link(struct dentry * dentry,
- struct dentry * base,
- unsigned int follow)
-{
- struct inode *inode = dentry->d_inode;
- struct buffer_head * bh;
-
- bh = minix_bread(inode, 0, 0);
- if (!bh) {
- dput(base);
- return ERR_PTR(-EIO);
- }
- UPDATE_ATIME(inode);
- base = lookup_dentry(bh->b_data, base, follow);
- brelse(bh);
- return base;
-}
-
-static int minix_readlink(struct dentry * dentry, char * buffer, int buflen)
-{
- struct buffer_head * bh;
- int i;
- char c;
-
- if (buflen > 1023)
- buflen = 1023;
- bh = minix_bread(dentry->d_inode, 0, 0);
- if (!bh)
- return 0;
- i = 0;
- while (i<buflen && (c = bh->b_data[i])) {
- i++;
- put_user(c,buffer++);
- }
- brelse(bh);
- return i;
-}
#include <linux/proc_fs.h>
#include <linux/smp_lock.h>
#include <linux/quotaops.h>
+#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
unlock_kernel();
return error;
}
+
+/*
+ * Common helper for ->readlink(): copy an already-resolved link body
+ * ("link", which may be an ERR_PTR handed up from the filesystem's
+ * getlink helper) into the user buffer.  Returns the number of bytes
+ * copied, or a negative errno.
+ */
+int vfs_readlink(struct dentry *dentry, char *buffer, int buflen, char *link)
+{
+	int len;
+
+	/* propagate an ERR_PTR from the caller unchanged */
+	len = PTR_ERR(link);
+	if (IS_ERR(link))
+		goto out;
+
+	len = strlen(link);
+	if (len > buflen)
+		len = buflen;
+	/* don't report success if the copy to user space faulted */
+	if (copy_to_user(buffer, link, len))
+		len = -EFAULT;
+out:
+	return len;
+}
+
+/*
+ * Common helper for ->follow_link(): resolve the already-read link
+ * body "link" relative to "base".  If "link" carries an ERR_PTR it is
+ * returned to the caller (cast to a dentry pointer) after dropping the
+ * reference on "base" that lookup_dentry() would otherwise consume.
+ */
+struct dentry *
+vfs_follow_link(struct dentry *dentry, struct dentry *base,
+unsigned int follow, char *link)
+{
+	struct dentry *result;
+	/* following a symlink counts as an access, even if lookup then fails */
+	UPDATE_ATIME(dentry->d_inode);
+
+	if (IS_ERR(link))
+		goto fail;
+
+	result = lookup_dentry(link, base, follow);
+	return result;
+
+fail:
+	dput(base);
+	return (struct dentry *)link;
+}
+
+/*
+ * Get the link contents into the pagecache.  On success the page is
+ * kmapped, a reference is stored through *ppage and the mapped address
+ * is returned; on failure an ERR_PTR is returned and *ppage is left
+ * untouched, so callers can test *ppage before unmapping/releasing.
+ */
+static char *page_getlink(struct dentry * dentry, struct page **ppage)
+{
+	struct page * page;
+	page = read_cache_page(&dentry->d_inode->i_data, 0,
+				(filler_t *)dentry->d_inode->i_op->readpage,
+				dentry);
+	if (IS_ERR(page))
+		goto sync_fail;
+	wait_on_page(page);
+	if (!Page_Uptodate(page))
+		goto async_fail;
+	*ppage = page;
+	return (char*) kmap(page);
+
+async_fail:
+	/* the read was started but failed: drop our reference, report -EIO */
+	page_cache_release(page);
+	return ERR_PTR(-EIO);
+
+sync_fail:
+	/* read_cache_page() itself failed; "page" already holds the ERR_PTR */
+	return (char*)page;
+}
+
+/*
+ * Generic ->readlink() for filesystems that keep the link body in the
+ * pagecache: fetch the page and hand its contents to vfs_readlink().
+ */
+int page_readlink(struct dentry *dentry, char *buffer, int buflen)
+{
+	struct page *page = NULL;
+	char *s = page_getlink(dentry, &page);
+	int res = vfs_readlink(dentry,buffer,buflen,s);
+	/* page is only set on success; errors travel back through "s" */
+	if (page) {
+		kunmap(page);
+		page_cache_release(page);
+	}
+	return res;
+}
+
+/*
+ * Generic ->follow_link() for filesystems that keep the link body in
+ * the pagecache: fetch the page and resolve it via vfs_follow_link().
+ */
+struct dentry *
+page_follow_link(struct dentry *dentry, struct dentry *base, unsigned int follow)
+{
+	struct page *page = NULL;
+	char *s = page_getlink(dentry, &page);
+	struct dentry *res = vfs_follow_link(dentry,base,follow,s);
+	/* page is only set on success; errors travel back through "s" */
+	if (page) {
+		kunmap(page);
+		page_cache_release(page);
+	}
+	return res;
+}
{
struct dentry *dentry = filp->f_dentry;
struct inode *inode = dentry->d_inode;
- struct page *page, **hash;
+ struct page *page;
long offset;
int res;
if ((offset = nfs_readdir_offset(inode, filp->f_pos)) < 0)
goto no_dirent_page;
- hash = page_hash(&inode->i_data, offset);
- page = __find_get_page(&inode->i_data, offset, hash);
+ page = find_get_page(&inode->i_data, offset);
if (!page)
goto no_dirent_page;
if (!Page_Uptodate(page))
dprintk("NFS: nfs_lock(f=%4x/%ld, t=%x, fl=%x, r=%ld:%ld)\n",
inode->i_dev, inode->i_ino,
fl->fl_type, fl->fl_flags,
- fl->fl_start, fl->fl_end);
+ (unsigned long) fl->fl_start, (unsigned long) fl->fl_end);
if (!inode)
return -EINVAL;
#include <linux/malloc.h>
#include <linux/string.h>
-#include <asm/uaccess.h>
-
-static int nfs_readlink(struct dentry *, char *, int);
-static struct dentry *nfs_follow_link(struct dentry *, struct dentry *, unsigned int);
-
-/*
- * symlinks can't do much...
- */
-struct inode_operations nfs_symlink_inode_operations = {
- NULL, /* no file-operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- nfs_readlink, /* readlink */
- nfs_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
-};
-
/* Symlink caching in the page cache is even more simplistic
* and straight-forward than readdir caching.
*/
if (IS_ERR(page))
goto read_failed;
if (!Page_Uptodate(page))
- goto followlink_read_error;
+ goto getlink_read_error;
*ppage = page;
p = (u32 *) kmap(page);
return (char*)(p+1);
-followlink_read_error:
+getlink_read_error:
page_cache_release(page);
return ERR_PTR(-EIO);
read_failed:
static int nfs_readlink(struct dentry *dentry, char *buffer, int buflen)
{
struct page *page = NULL;
- u32 len;
- char *s = nfs_getlink(dentry, &page);
- UPDATE_ATIME(dentry->d_inode);
-
- len = PTR_ERR(s);
- if (IS_ERR(s))
- goto out;
-
- len = strlen(s);
- if (len > buflen)
- len = buflen;
- copy_to_user(buffer, s, len);
- kunmap(page);
- page_cache_release(page);
-out:
- return len;
+ int res = vfs_readlink(dentry,buffer,buflen,nfs_getlink(dentry,&page));
+ if (page) {
+ kunmap(page);
+ page_cache_release(page);
+ }
+ return res;
}
static struct dentry *
nfs_follow_link(struct dentry *dentry, struct dentry *base, unsigned int follow)
{
- struct dentry *result;
struct page *page = NULL;
- char *s = nfs_getlink(dentry, &page);
- UPDATE_ATIME(dentry->d_inode);
-
- if (IS_ERR(s))
- goto fail;
-
- result = lookup_dentry(s, base, follow);
-
- kunmap(page);
- page_cache_release(page);
- return result;
-
-fail:
- return (struct dentry *)s;
+ struct dentry *res = vfs_follow_link(dentry, base, follow,
+ nfs_getlink(dentry, &page));
+ if (page) {
+ kunmap(page);
+ page_cache_release(page);
+ }
+ return res;
}
+
+/*
+ * symlinks can't do much...
+ */
+struct inode_operations nfs_symlink_inode_operations = {
+ readlink: nfs_readlink,
+ follow_link: nfs_follow_link,
+};
return exp;
}
-/*
- * Check whether there are any exports for a device.
- */
-static int
-exp_device_in_use(kdev_t dev)
-{
- struct svc_client *clp;
-
- for (clp = clients; clp; clp = clp->cl_next) {
- if (exp_find(clp, dev))
- return 1;
- }
- return 0;
-}
/*
* Look up the device of the parent fs.
goto finish;
err = -EINVAL;
+ if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) ||
+ inode->i_sb->s_op->read_inode == NULL) {
+ dprintk("exp_export: export of invalid fs type.\n");
+ goto finish;
+ }
+
if ((parent = exp_child(clp, dev, dentry)) != NULL) {
dprintk("exp_export: export not valid (Rule 3).\n");
goto finish;
S_IFBLK, /* NF3BLK */
S_IFCHR, /* NF3CHR */
S_IFLNK, /* NF3LNK */
- S_IFIFO, /* NF3FIFO */
S_IFSOCK, /* NF3SOCK */
+ S_IFIFO, /* NF3FIFO */
};
/*
return nfserr_inval;
rdev = ((argp->major) << 8) | (argp->minor);
} else
- if (argp->ftype != NF3SOCK || argp->ftype != NF3FIFO)
+ if (argp->ftype != NF3SOCK && argp->ftype != NF3FIFO)
return nfserr_inval;
type = nfs3_ftypes[argp->ftype];
return nfserr_noent;
}
-/*
- * Deny access to certain file systems
- */
-static inline int
-fs_off_limits(struct super_block *sb)
-{
- return !sb || sb->s_magic == NFS_SUPER_MAGIC
- || sb->s_magic == PROC_SUPER_MAGIC;
-}
-
-/*
- * Check whether directory is a mount point, but it is all right if
- * this is precisely the local mount point being exported.
- */
-static inline int
-nfsd_iscovered(struct dentry *dentry, struct svc_export *exp)
-{
- return (dentry != dentry->d_covers &&
- dentry != exp->ex_dentry);
-}
/*
* Look up one component of a pathname.
if (err)
goto out;
#endif
- err = nfserr_noent;
- if (fs_off_limits(dparent->d_sb))
- goto out;
err = nfserr_acces;
- if (nfsd_iscovered(dparent, exp))
- goto out;
/* Lookup the name, but don't follow links */
dchild = lookup_dentry(name, dget(dparent), 0);
dentry = fhp->fh_dentry;
err = nfserr_perm;
- if (nfsd_iscovered(dentry, fhp->fh_export))
- goto out;
dirp = dentry->d_inode;
if (!dirp->i_op || !dirp->i_op->symlink)
goto out;
dold = tfhp->fh_dentry;
dest = dold->d_inode;
- err = nfserr_acces;
- if (nfsd_iscovered(ddir, ffhp->fh_export))
- goto out_unlock;
- /* FIXME: nxdev for NFSv3 */
+ err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
if (dirp->i_dev != dest->i_dev)
goto out_unlock;
tdentry = tfhp->fh_dentry;
tdir = tdentry->d_inode;
+ err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ if (fdir->i_dev != tdir->i_dev)
+ goto out;
+
/* N.B. We shouldn't need this ... dentry layer handles it */
err = nfserr_perm;
if (!flen || (fname[0] == '.' &&
dprintk(" owner %d/%d user %d/%d\n",
inode->i_uid, inode->i_gid, current->fsuid, current->fsgid);
#endif
-#ifndef CONFIG_NFSD_SUN
- if (dentry->d_mounts != dentry) {
- return nfserr_perm;
- }
-#endif
if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
if (EX_RDONLY(exp) || IS_RDONLY(inode))
return nfserr_rofs;
- if (S_ISDIR(inode->i_mode) && nfsd_iscovered(dentry, exp))
- return nfserr_perm;
if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
return nfserr_perm;
}
struct statfs fs;
struct inode *mft;
ntfs_volume *vol;
+ ntfs_u64 size;
int error;
ntfs_debug(DEBUG_OTHER, "ntfs_statfs\n");
fs.f_type=NTFS_SUPER_MAGIC;
fs.f_bsize=vol->clustersize;
- error = ntfs_get_volumesize( NTFS_SB2VOL( sb ), &fs.f_blocks );
+ error = ntfs_get_volumesize( NTFS_SB2VOL( sb ), &size );
if( error )
return -error;
+ fs.f_blocks = size; /* volumesize is in clusters */
fs.f_bfree=ntfs_get_free_cluster_count(vol->bitmap);
fs.f_bavail=fs.f_bfree;
/* Number of files is limited by free space only, so we lie here */
fs.f_ffree=0;
mft=iget(sb,FILE_MFT);
- fs.f_files=mft->i_size >> vol->mft_recordbits;
+ if (!mft)
+ return -EIO;
+ /* So ... we lie... thus this following cast of loff_t value
+ is ok here.. */
+ fs.f_files = (unsigned long)mft->i_size / vol->mft_recordsize;
iput(mft);
/* should be read from volume */
if(vol->mft_clusters_per_record<0 && vol->mft_clusters_per_record!=-10)
ntfs_error("Unexpected data #4 in boot block\n");
- vol->clustersize = vol->blocksize * vol->clusterfactor;
- if (vol->mft_clusters_per_record > 0)
- vol->mft_recordbits = vol->clustersize * vol->mft_clusters_per_record;
+ vol->clustersize=vol->blocksize*vol->clusterfactor;
+ if(vol->mft_clusters_per_record>0)
+ vol->mft_recordsize=
+ vol->clustersize*vol->mft_clusters_per_record;
else
- vol->mft_recordbits = -vol->mft_clusters_per_record;
-
- vol->mft_recordsize = 1 << vol->mft_recordbits;
+ vol->mft_recordsize=1<<(-vol->mft_clusters_per_record);
vol->index_recordsize=vol->clustersize*vol->index_clusters_per_record;
/* FIXME: long long value */
vol->mft_cluster=NTFS_GETU64(boot+0x30);
* Writes the volume size into vol_size. Returns 0 if successful
* or error.
*/
-int ntfs_get_volumesize(ntfs_volume *vol, long *vol_size )
+int ntfs_get_volumesize(ntfs_volume *vol, ntfs_u64 *vol_size )
{
ntfs_io io;
- ntfs_u64 size;
char *cluster0;
if( !vol_size )
io.do_read=1;
io.size=vol->clustersize;
ntfs_getput_clusters(vol,0,0,&io);
- size=NTFS_GETU64(cluster0+0x28);
+ *vol_size = NTFS_GETU64(cluster0+0x28);
ntfs_free(cluster0);
- /* FIXME: more than 2**32 cluster */
- /* FIXME: gcc will emit udivdi3 if we don't truncate it */
- *vol_size = ((unsigned long)size)/vol->clusterfactor;
return 0;
}
#define ALLOC_REQUIRE_SIZE 2
int ntfs_get_free_cluster_count(ntfs_inode *bitmap);
-int ntfs_get_volumesize(ntfs_volume *vol, long *vol_size );
+int ntfs_get_volumesize(ntfs_volume *vol, ntfs_u64 *vol_size );
int ntfs_init_volume(ntfs_volume *vol,char *boot);
int ntfs_load_special_files(ntfs_volume *vol);
int ntfs_release_volume(ntfs_volume *vol);
define_bool CONFIG_MSDOS_PARTITION y
fi
fi
-if [ "$CONFIG_MSDOS_PARTITION" = "y" ]; then
+if [ "$CONFIG_PARTITION_ADVANCED" = "y" -a \
+ "$CONFIG_MSDOS_PARTITION" = "y" ]; then
bool ' BSD disklabel (FreeBSD partition tables) support' CONFIG_BSD_DISKLABEL
bool ' Solaris (x86) partition table support' CONFIG_SOLARIS_X86_PARTITION
bool ' Unixware slices support' CONFIG_UNIXWARE_DISKLABEL
return PROC_DYNAMIC_FIRST + i;
}
-static int proc_readlink(struct dentry * dentry, char * buffer, int buflen)
+/* ->readlink() for procfs symlinks: the target string lives in de->data */
+static int proc_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
-	struct inode *inode = dentry->d_inode;
-	struct proc_dir_entry * de;
-	int len;
-	de = (struct proc_dir_entry *) inode->u.generic_ip;
-	len = de->size+1;
-	if (len > buflen)
-		len = buflen;
-	copy_to_user(buffer, de->data, len);
-	return len;
+	char *s=((struct proc_dir_entry *)dentry->d_inode->u.generic_ip)->data;
+	return vfs_readlink(dentry, buffer, buflen, s);
 }
-struct dentry * proc_follow_link(struct dentry * dentry, struct dentry *base, unsigned int follow)
+/* ->follow_link() for procfs symlinks: resolve de->data via the VFS helper */
+static struct dentry *proc_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
 {
-	struct inode *inode = dentry->d_inode;
-	struct proc_dir_entry * de;
-	de = (struct proc_dir_entry *) inode->u.generic_ip;
-	return lookup_dentry(de->data, base, follow);
+	char *s=((struct proc_dir_entry *)dentry->d_inode->u.generic_ip)->data;
+	return vfs_follow_link(dentry, base, flags, s);
 }
static struct inode_operations proc_link_inode_operations = {
- NULL, /* no file-ops */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- proc_readlink, /* readlink */
- proc_follow_link, /* follow_link */
+ readlink: proc_readlink,
+ follow_link: proc_follow_link
};
/*
*/
static int proc_self_readlink(struct dentry *dentry, char *buffer, int buflen)
{
- int len;
char tmp[30];
-
- len = sprintf(tmp, "%d", current->pid);
- if (buflen < len)
- len = buflen;
- copy_to_user(buffer, tmp, len);
- return len;
+ sprintf(tmp, "%d", current->pid);
+ return vfs_readlink(dentry,buffer,buflen,tmp);
}
static struct dentry * proc_self_follow_link(struct dentry *dentry,
unsigned int follow)
{
char tmp[30];
-
sprintf(tmp, "%d", current->pid);
- return lookup_dentry(tmp, base, follow);
+ return vfs_follow_link(dentry,base,follow,tmp);
}
static struct inode_operations proc_self_inode_operations = {
- NULL, /* no file-ops */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- proc_self_readlink, /* readlink */
- proc_self_follow_link, /* follow_link */
+ readlink: proc_self_readlink,
+ follow_link: proc_self_follow_link
};
static struct proc_dir_entry proc_root_self = {
return result;
}
-static int
-romfs_readlink(struct dentry *dentry, char *buffer, int len)
-{
- struct inode *inode = dentry->d_inode;
- int mylen;
- char buf[ROMFS_MAXFN]; /* XXX dynamic */
-
- if (!inode || !S_ISLNK(inode->i_mode)) {
- mylen = -EBADF;
- goto out;
- }
-
- mylen = min(sizeof(buf), inode->i_size);
-
- if (romfs_copyfrom(inode, buf, inode->u.romfs_i.i_dataoffset, mylen) <= 0) {
- mylen = -EIO;
- goto out;
- }
- copy_to_user(buffer, buf, mylen);
-
-out:
- return mylen;
-}
-
-static struct dentry *romfs_follow_link(struct dentry *dentry,
- struct dentry *base,
- unsigned int follow)
-{
- struct inode *inode = dentry->d_inode;
- char *link;
- int len, cnt;
-
- len = inode->i_size;
-
- dentry = ERR_PTR(-EAGAIN); /* correct? */
- if (!(link = kmalloc(len+1, GFP_KERNEL)))
- goto outnobuf;
-
- cnt = romfs_copyfrom(inode, link, inode->u.romfs_i.i_dataoffset, len);
- if (len != cnt) {
- dentry = ERR_PTR(-EIO);
- goto out;
- } else
- link[len] = 0;
-
- dentry = lookup_dentry(link, base, follow);
- kfree(link);
-
- if (0) {
-out:
- kfree(link);
-outnobuf:
- dput(base);
- }
- return dentry;
-}
-
/* Mapping from our types to the kernel */
static struct file_operations romfs_file_operations = {
};
static struct inode_operations romfs_link_inode_operations = {
- NULL, /* no file operations on symlinks */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- romfs_readlink, /* readlink */
- romfs_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
+ readlink: page_readlink,
+ follow_link: page_follow_link,
+ readpage: romfs_readpage
};
static mode_t romfs_modemap[] =
int max_super_blocks = NR_SUPER;
LIST_HEAD(super_blocks);
-static struct file_system_type *file_systems = (struct file_system_type *) NULL;
-struct vfsmount *vfsmntlist = (struct vfsmount *) NULL;
-static struct vfsmount *vfsmnttail = (struct vfsmount *) NULL,
- *mru_vfsmnt = (struct vfsmount *) NULL;
+static struct file_system_type *file_systems = NULL;
+struct vfsmount *vfsmntlist = NULL;
+static struct vfsmount *vfsmnttail = NULL, *mru_vfsmnt = NULL;
/*
* This part handles the management of the list of mounted filesystems.
{
struct vfsmount *lptr;
- if (vfsmntlist == (struct vfsmount *)NULL)
- return ((struct vfsmount *)NULL);
+ if (vfsmntlist == NULL)
+ return NULL;
- if (mru_vfsmnt != (struct vfsmount *)NULL &&
- mru_vfsmnt->mnt_dev == dev)
+ if (mru_vfsmnt != NULL && mru_vfsmnt->mnt_dev == dev)
return (mru_vfsmnt);
- for (lptr = vfsmntlist;
- lptr != (struct vfsmount *)NULL;
- lptr = lptr->mnt_next)
+ for (lptr = vfsmntlist; lptr != NULL; lptr = lptr->mnt_next)
if (lptr->mnt_dev == dev) {
mru_vfsmnt = lptr;
return (lptr);
}
- return ((struct vfsmount *)NULL);
- /* NOTREACHED */
+ return NULL;
}
static struct vfsmount *add_vfsmnt(struct super_block *sb,
{
struct vfsmount *lptr, *tofree;
- if (vfsmntlist == (struct vfsmount *)NULL)
+ if (vfsmntlist == NULL)
return;
lptr = vfsmntlist;
if (lptr->mnt_dev == dev) {
if (vfsmnttail->mnt_dev == dev)
vfsmnttail = vfsmntlist;
} else {
- while (lptr->mnt_next != (struct vfsmount *)NULL) {
+ while (lptr->mnt_next != NULL) {
if (lptr->mnt_next->mnt_dev == dev)
break;
lptr = lptr->mnt_next;
}
tofree = lptr->mnt_next;
- if (tofree == (struct vfsmount *)NULL)
+ if (tofree == NULL)
return;
lptr->mnt_next = lptr->mnt_next->mnt_next;
if (vfsmnttail->mnt_dev == dev)
int sysv_symlink(struct inode * dir, struct dentry * dentry,
const char * symname)
{
- struct sysv_dir_entry * de;
struct inode * inode;
- struct buffer_head * name_block;
- char * name_block_data;
- struct super_block * sb;
- int i;
- char c;
+ struct sysv_dir_entry * de;
struct buffer_head * bh;
-
+ int err;
+ int l;
+
+ err = -ENAMETOOLONG;
+ l = strlen(symname)+1;
+ if (l > dir->i_sb->sv_block_size_1)
+ goto out;
+ err = -ENOSPC;
if (!(inode = sysv_new_inode(dir)))
- return -ENOSPC;
+ goto out;
inode->i_mode = S_IFLNK | 0777;
inode->i_op = &sysv_symlink_inode_operations;
- name_block = sysv_file_bread(inode, 0, 1);
- if (!name_block) {
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput(inode);
- return -ENOSPC;
- }
- sb = inode->i_sb;
- name_block_data = name_block->b_data;
- i = 0;
- while (i < sb->sv_block_size_1 && (c = *(symname++)))
- name_block_data[i++] = c;
- name_block_data[i] = 0;
- mark_buffer_dirty(name_block, 1);
- brelse(name_block);
- inode->i_size = i;
+ err = block_symlink(inode, symname, l);
+ if (err)
+ goto out_no_entry;
mark_inode_dirty(inode);
- bh = sysv_find_entry(dir, dentry->d_name.name,
- dentry->d_name.len, &de);
- if (bh) {
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput(inode);
- brelse(bh);
- return -EEXIST;
- }
- i = sysv_add_entry(dir, dentry->d_name.name,
+ err = sysv_add_entry(dir, dentry->d_name.name,
dentry->d_name.len, &bh, &de);
- if (i) {
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput(inode);
- return i;
- }
+ if (err)
+ goto out_no_entry;
de->inode = inode->i_ino;
mark_buffer_dirty(bh, 1);
brelse(bh);
d_instantiate(dentry, inode);
- return 0;
+out:
+ return err;
+out_no_entry:
+ inode->i_nlink--;
+ mark_inode_dirty(inode);
+ iput(inode);
+ goto out;
}
int sysv_link(struct dentry * old_dentry, struct inode * dir,
* SystemV/Coherent symlink handling code
*/
-#include <linux/errno.h>
-#include <linux/sched.h>
#include <linux/sysv_fs.h>
-#include <linux/stat.h>
-
-#include <asm/uaccess.h>
-
-static int sysv_readlink(struct dentry *, char *, int);
-static struct dentry *sysv_follow_link(struct dentry *, struct dentry *, unsigned int);
/*
* symlinks can't do much...
*/
struct inode_operations sysv_symlink_inode_operations = {
- NULL, /* no file-operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- sysv_readlink, /* readlink */
- sysv_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
+ readlink: page_readlink,
+ follow_link: page_follow_link,
+ get_block: sysv_get_block,
+ readpage: block_read_full_page
};
-
-static struct dentry *sysv_follow_link(struct dentry * dentry,
- struct dentry * base,
- unsigned int follow)
-{
- struct inode *inode = dentry->d_inode;
- struct buffer_head * bh;
-
- bh = sysv_file_bread(inode, 0, 0);
- if (!bh) {
- dput(base);
- return ERR_PTR(-EIO);
- }
- UPDATE_ATIME(inode);
- base = lookup_dentry(bh->b_data, base, follow);
- brelse(bh);
- return base;
-}
-
-static int sysv_readlink(struct dentry * dentry, char * buffer, int buflen)
-{
- struct inode *inode = dentry->d_inode;
- struct buffer_head * bh;
- char * bh_data;
- int i;
- char c;
-
- if (buflen > inode->i_sb->sv_block_size_1)
- buflen = inode->i_sb->sv_block_size_1;
- bh = sysv_file_bread(inode, 0, 0);
- if (!bh)
- return 0;
- bh_data = bh->b_data;
- i = 0;
- while (i<buflen && (c = bh_data[i])) {
- i++;
- put_user(c,buffer++);
- }
- brelse(bh);
- return i;
-}
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/malloc.h>
+#include <linux/pagemap.h>
#include "udf_i.h"
-static int udf_readlink(struct dentry *, char *, int);
-static struct dentry * udf_follow_link(struct dentry * dentry,
- struct dentry * base, unsigned int follow);
-
-/*
- * symlinks can't do much...
- */
-struct inode_operations udf_symlink_inode_operations = {
- NULL, /* no file-operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- udf_readlink, /* readlink */
- udf_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
-};
-
-int udf_pc_to_char(char *from, int fromlen, char **to)
+/*
+ * Translate an on-disk UDF pathComponent sequence ("from", "fromlen"
+ * bytes) into an ordinary NUL-terminated pathname in "to".  The caller
+ * supplies a buffer at least as large as the source; component types
+ * other than 1/3/4/5 are skipped.
+ */
+static void udf_pc_to_char(char *from, int fromlen, char *to)
 {
	struct PathComponent *pc;
	int elen = 0, len = 0;
+	char *p = to;
 
-	*to = (char *)kmalloc(fromlen, GFP_KERNEL);
-
-	if (!(*to))
-		return -1;
-
-	while (elen < fromlen)
-	{
+	while (elen < fromlen) {
 		pc = (struct PathComponent *)(from + elen);
-		if (pc->componentType == 1 && pc->lengthComponentIdent == 0)
-		{
-			(*to)[0] = '/';
-			len = 1;
-		}
-		else if (pc->componentType == 3)
-		{
-			memcpy(&(*to)[len], "../", 3);
-			len += 3;
-		}
-		else if (pc->componentType == 4)
-		{
-			memcpy(&(*to)[len], "./", 2);
-			len += 2;
-		}
-		else if (pc->componentType == 5)
-		{
-			memcpy(&(*to)[len], pc->componentIdent, pc->lengthComponentIdent);
-			len += pc->lengthComponentIdent + 1;
-			(*to)[len-1] = '/';
+		switch (pc->componentType) {
+			case 1:
+				/* empty type-1 component: absolute path, restart at '/' */
+				if (pc->lengthComponentIdent == 0) {
+					p = to;
+					*p++ = '/';
+				}
+				break;
+			case 3:
+				memcpy(p, "../", 3);
+				p += 3;
+				break;
+			case 4:
+				memcpy(p, "./", 2);
+				p += 2;
+				/* that would be . - just ignore */
+				break;
+			case 5:
+				/* was "p+len": len is never updated here and is
+				 * always 0, so copy straight at "p" */
+				memcpy(p, pc->componentIdent,
+					pc->lengthComponentIdent);
+				p += pc->lengthComponentIdent;
+				*p++ = '/';
 		}
 		elen += sizeof(struct PathComponent) + pc->lengthComponentIdent;
 	}
-	if (len)
-	{
-		len --;
-		(*to)[len] = '\0';
+	if (p>to+1) {
+		p[-1] = '\0';
 	}
-	return len;
 }
-static struct dentry * udf_follow_link(struct dentry * dentry,
- struct dentry * base, unsigned int follow)
+static int udf_symlink_filler(struct dentry * dentry, struct page *page)
{
struct inode *inode = dentry->d_inode;
struct buffer_head *bh = NULL;
- char *symlink, *tmpbuf;
- int len;
-
- if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
- {
- bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
-
- if (!bh)
- return 0;
-
- symlink = bh->b_data + udf_file_entry_alloc_offset(inode);
- }
- else
- {
- bh = bread(inode->i_dev, udf_block_map(inode, 0), inode->i_sb->s_blocksize);
-
- if (!bh)
- return 0;
-
- symlink = bh->b_data;
- }
-
- if ((len = udf_pc_to_char(symlink, inode->i_size, &tmpbuf)) >= 0)
- {
- base = lookup_dentry(tmpbuf, base, follow);
- kfree(tmpbuf);
- return base;
- }
- else
- return ERR_PTR(-ENOMEM);
-}
+ char *symlink;
+ int err;
-static int udf_readlink(struct dentry * dentry, char * buffer, int buflen)
-{
- struct inode *inode = dentry->d_inode;
- struct buffer_head *bh = NULL;
- char *symlink, *tmpbuf;
- int len;
+ char *p = (char*)kmap(page);
- if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
- {
- bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+ err = -EIO;
+ if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB) {
+ bh = udf_tread(inode->i_sb, inode->i_ino,
+ inode->i_sb->s_blocksize);
if (!bh)
- return 0;
+ goto out;
symlink = bh->b_data + udf_file_entry_alloc_offset(inode);
- }
- else
- {
- bh = bread(inode->i_dev, udf_block_map(inode, 0), inode->i_sb->s_blocksize);
+ } else {
+ bh = bread(inode->i_dev, udf_block_map(inode, 0),
+ inode->i_sb->s_blocksize);
if (!bh)
- return 0;
+ goto out;
symlink = bh->b_data;
}
- if ((len = udf_pc_to_char(symlink, inode->i_size, &tmpbuf)) >= 0)
- {
- if (copy_to_user(buffer, tmpbuf, len > buflen ? buflen : len))
- len = -EFAULT;
- kfree(tmpbuf);
- }
- else
- len = -ENOMEM;
-
- UPDATE_ATIME(inode);
- if (bh)
- udf_release_data(bh);
- return len;
+ udf_pc_to_char(symlink, inode->i_size, p);
+ udf_release_data(bh);
+ SetPageUptodate(page);
+ kunmap(page);
+ UnlockPage(page);
+ return 0;
+out:
+ SetPageError(page);
+ kunmap(page);
+ UnlockPage(page);
+ return -EIO;
}
+
+/*
+ * symlinks can't do much...
+ */
+struct inode_operations udf_symlink_inode_operations = {
+ readlink: page_readlink,
+ follow_link: page_follow_link,
+ readpage: udf_symlink_filler,
+};
else if (S_ISDIR(inode->i_mode))
inode->i_op = &ufs_dir_inode_operations;
else if (S_ISLNK(inode->i_mode))
- inode->i_op = &ufs_symlink_inode_operations;
+ inode->i_op = inode->i_blocks
+ ?&ufs_symlink_inode_operations
+ :&ufs_fast_symlink_inode_operations;
else
init_special_inode(inode, inode->i_mode,
SWAB32(ufs_inode->ui_u2.ui_addr.ui_db[0]));
int ufs_symlink (struct inode * dir, struct dentry * dentry,
const char * symname)
{
- struct super_block * sb;
+ struct super_block * sb = dir->i_sb;
struct ufs_dir_entry * de;
struct inode * inode;
- struct buffer_head * bh, * name_block;
- char * link;
- unsigned i, l;
+ struct buffer_head * bh = NULL;
+ unsigned l;
int err;
- char c;
- unsigned swab;
+ unsigned swab = sb->u.ufs_sb.s_swab;
UFSD(("ENTER\n"))
- sb = dir->i_sb;
- swab = sb->u.ufs_sb.s_swab;
- bh = name_block = NULL;
+
+ err = -ENAMETOOLONG;
+ l = strlen(symname)+1;
+ if (l > dir->i_sb->s_blocksize)
+ goto out;
+
err = -EIO;
if (!(inode = ufs_new_inode (dir, S_IFLNK, &err))) {
return err;
}
inode->i_mode = S_IFLNK | S_IRWXUGO;
- inode->i_op = &ufs_symlink_inode_operations;
- for (l = 0; l < sb->s_blocksize - 1 && symname [l]; l++);
- /***if (l >= sizeof (inode->u.ufs_i.i_data)) {***/
+ /***if (l > sizeof (inode->u.ufs_i.i_data)) {***/
if (1) {
/* slow symlink */
- name_block = ufs_bread (inode, 0, 1, &err);
- if (!name_block) {
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput (inode);
- return err;
- }
- link = name_block->b_data;
-
+ inode->i_op = &ufs_symlink_inode_operations;
+ err = block_symlink(inode, symname, l);
+ if (err)
+ goto out_no_entry;
} else {
/* fast symlink */
- link = (char *) inode->u.ufs_i.i_u1.i_data;
- }
- i = 0;
- while (i < sb->s_blocksize - 1 && (c = *(symname++)))
- link[i++] = c;
- link[i] = 0;
- if (name_block) {
- mark_buffer_dirty(name_block, 1);
- brelse (name_block);
+ inode->i_op = &ufs_fast_symlink_inode_operations;
+ memcpy((char*)&inode->u.ufs_i.i_u1.i_data,symname,l);
+ inode->i_size = l-1;
}
- inode->i_size = i;
mark_inode_dirty(inode);
bh = ufs_add_entry (dir, dentry->d_name.name, dentry->d_name.len, &de, &err);
int ufs_link (struct dentry * old_dentry, struct inode * dir,
struct dentry *dentry)
{
- struct super_block * sb;
struct inode *inode = old_dentry->d_inode;
+ struct super_block * sb = inode->i_sb;
struct ufs_dir_entry * de;
struct buffer_head * bh;
int err;
- unsigned swab;
-
- inode = old_dentry->d_inode;
- sb = inode->i_sb;
- swab = sb->u.ufs_sb.s_swab;
+ unsigned swab = sb->u.ufs_sb.s_swab;
if (S_ISDIR(inode->i_mode))
return -EPERM;
* ext2 symlink handling code
*/
-#include <asm/uaccess.h>
-
-#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ufs_fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/stat.h>
-
-
-#undef UFS_SYMLINK_DEBUG
-#ifdef UFS_SYMLINK_DEBUG
-#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
-#else
-#define UFSD(x)
-#endif
-
-
-static struct dentry * ufs_follow_link(struct dentry * dentry,
- struct dentry * base, unsigned int follow)
+static int ufs_readlink(struct dentry *dentry, char *buffer, int buflen)
{
- struct inode * inode;
- struct buffer_head * bh;
- int error;
- char * link;
-
- UFSD(("ENTER\n"))
-
- inode = dentry->d_inode;
- bh = NULL;
- /* slow symlink */
- if (inode->i_blocks) {
- if (!(bh = ufs_bread (inode, 0, 0, &error))) {
- dput(base);
- return ERR_PTR(-EIO);
- }
- link = bh->b_data;
- }
- /* fast symlink */
- else {
- link = (char *) inode->u.ufs_i.i_u1.i_symlink;
- }
- UPDATE_ATIME(inode);
- base = lookup_dentry(link, base, follow);
- if (bh)
- brelse(bh);
- UFSD(("EXIT\n"))
- return base;
+ char *s = (char *)dentry->d_inode->u.ufs_i.i_u1.i_symlink;
+ return vfs_readlink(dentry, buffer, buflen, s);
}
-static int ufs_readlink (struct dentry * dentry, char * buffer, int buflen)
+static struct dentry *ufs_follow_link(struct dentry *dentry, struct dentry *base, unsigned flags)
{
- struct super_block * sb;
- struct inode * inode;
- struct buffer_head * bh;
- char * link;
- int i;
-
- UFSD(("ENTER\n"))
-
- inode = dentry->d_inode;
- sb = inode->i_sb;
- bh = NULL;
- if (buflen > sb->s_blocksize - 1)
- buflen = sb->s_blocksize - 1;
- /* slow symlink */
- if (inode->i_blocks) {
- int err;
- bh = ufs_bread (inode, 0, 0, &err);
- if (!bh) {
- if(err < 0) /* indicate type of error */
- return err;
- return 0;
- }
- link = bh->b_data;
- }
- /* fast symlink */
- else {
- link = (char *) inode->u.ufs_i.i_u1.i_symlink;
- }
- i = 0;
- while (i < buflen && link[i])
- i++;
- if (copy_to_user(buffer, link, i))
- i = -EFAULT;
- UPDATE_ATIME(inode);
- if (bh)
- brelse (bh);
- UFSD(("ENTER\n"))
- return i;
+ char *s = (char *)dentry->d_inode->u.ufs_i.i_u1.i_symlink;
+ return vfs_follow_link(dentry, base, flags, s);
}
+struct inode_operations ufs_fast_symlink_inode_operations = {
+ readlink: ufs_readlink,
+ follow_link: ufs_follow_link,
+};
+
struct inode_operations ufs_symlink_inode_operations = {
- NULL, /* no file-operations */
- NULL, /* create */
- NULL, /* lookup */
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- ufs_readlink, /* readlink */
- ufs_follow_link, /* follow_link */
- NULL, /* get_block */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* truncate */
- NULL, /* permission */
- NULL /* revalidate */
+ readlink: page_readlink,
+ follow_link: page_follow_link,
+ get_block: ufs_getfrag_block,
+ readpage: block_read_full_page
};
come and test it! See notes below for some more information, or if
you are trying to use UMSDOS as root partition.
+Userland NOTE: new umsdos_progs (umssync, umssetup, udosctl & friends) that
+will compile and work on 2.2.x kernels and glibc based systems may be found
+at http://cvs.linux.hr/
+
+Also look at the quick-hack "homepage" for umsdos filesystem at
+http://www.voyager.hr/~mnalis/umsdos
+
+Information below is getting outdated slowly -- I'll fix it one day when I
+get enough time - there are more important things to fix right now.
+
Legend: those lines marked with '+' on the beggining of line indicates it
passed all of my tests, and performed perfect in all of them.
#if UMS_DEBUG
+/*
+ * check for wait queue in 2.3.x
+ */
+inline void uq_log (char *txt, struct inode *inode)
+{
+ printk (KERN_ERR "%s: (%lu) magic=%lu creator=%lu lock=%u\n", txt, inode->i_ino, inode->u.umsdos_i.dir_info.p.__magic, inode->u.umsdos_i.dir_info.p.__creator, inode->u.umsdos_i.dir_info.p.lock.lock);
+}
+
/*
* check a superblock
*/
}
}
#else
+inline void uq_log (char *txt, struct inode *inode) {};
void check_sb (struct super_block *sb, const char c) {};
void check_inode (struct inode *inode) {};
void checkd_inode (struct inode *inode) {};
umsdos_dentry_validate, /* d_revalidate(struct dentry *, int) */
NULL, /* d_hash */
NULL, /* d_compare */
- umsdos_dentry_dput, /* d_delete(struct dentry *) */
- NULL,
- NULL,
+ umsdos_dentry_dput /* d_delete(struct dentry *) */
};
NULL, /* writepage */
NULL, /* truncate */
NULL, /* permission */
- NULL, /* revalidate */
+ NULL /* revalidate */
};
printk(KERN_ERR "umsdos_setup_dir: %s/%s not a dir!\n",
dir->d_parent->d_name.name, dir->d_name.name);
+ init_waitqueue_head (&inode->u.umsdos_i.dir_info.p);
+ inode->u.umsdos_i.dir_info.looking = 0;
+ inode->u.umsdos_i.dir_info.creating = 0;
+ inode->u.umsdos_i.dir_info.pid = 0;
+
inode->i_op = &umsdos_rdir_inode_operations;
if (umsdos_have_emd(dir)) {
Printk((KERN_DEBUG "umsdos_setup_dir: %s/%s using EMD\n",
NULL, /* write_super */
fat_statfs, /* statfs */
NULL, /* remount_fs */
- fat_clear_inode, /* clear_inode */
+ fat_clear_inode /* clear_inode */
};
/*
if (!res)
goto out_fail;
- printk (KERN_INFO "UMSDOS 0.85 "
+ printk (KERN_INFO "UMSDOS 0.86 "
"(compatibility level %d.%d, fast msdos)\n",
UMSDOS_VERSION, UMSDOS_RELEASE);
*
* Return 0 if success.
*/
- extern struct inode_operations umsdos_rdir_inode_operations;
ret = umsdos_make_emd(dentry);
Printk(("UMSDOS_ioctl_dir: INIT_EMD %s/%s, ret=%d\n",
dentry->d_parent->d_name.name, dentry->d_name.name, ret));
- dir->i_op = (ret == 0)
- ? &umsdos_dir_inode_operations
- : &umsdos_rdir_inode_operations;
+ umsdos_setup_dir (dentry);
goto out;
}
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
ret = msdos_rename (dir, old_dentry, dir, new_dentry);
+ d_drop(new_dentry);
+ d_drop(old_dentry);
dput(new_dentry);
}
dput(old_dentry);
#include <linux/umsdos_fs.h>
#include <linux/malloc.h>
-#if 1
+#define UMSDOS_DIR_LOCK
+
+#ifdef UMSDOS_DIR_LOCK
+
+static inline void u_sleep_on (struct inode *dir)
+{
+ sleep_on (&dir->u.umsdos_i.dir_info.p);
+}
+
+static inline void u_wake_up (struct inode *dir)
+{
+ wake_up (&dir->u.umsdos_i.dir_info.p);
+}
+
/*
* Wait for creation exclusivity.
* Return 0 if the dir was already available.
{
int ret = 0;
- if (dir->u.umsdos_i.u.dir_info.creating
- && dir->u.umsdos_i.u.dir_info.pid != current->pid) {
- sleep_on (&dir->u.umsdos_i.u.dir_info.p);
+ if (dir->u.umsdos_i.dir_info.creating
+ && dir->u.umsdos_i.dir_info.pid != current->pid) {
+ PRINTK (("creating && dir_info.pid=%lu, current->pid=%u\n", dir->u.umsdos_i.dir_info.pid, current->pid));
+ u_sleep_on (dir);
ret = 1;
}
return ret;
*/
static void umsdos_waitlookup (struct inode *dir)
{
- while (dir->u.umsdos_i.u.dir_info.looking) {
- sleep_on (&dir->u.umsdos_i.u.dir_info.p);
+ while (dir->u.umsdos_i.dir_info.looking) {
+ u_sleep_on (dir);
}
}
* if we (the process) own the lock
*/
while (umsdos_waitcreate (dir) != 0);
- dir->u.umsdos_i.u.dir_info.creating++;
- dir->u.umsdos_i.u.dir_info.pid = current->pid;
+ dir->u.umsdos_i.dir_info.creating++;
+ dir->u.umsdos_i.dir_info.pid = current->pid;
umsdos_waitlookup (dir);
}
if (umsdos_waitcreate (dir1) == 0
&& umsdos_waitcreate (dir2) == 0) {
/* We own both now */
- dir1->u.umsdos_i.u.dir_info.creating++;
- dir1->u.umsdos_i.u.dir_info.pid = current->pid;
- dir2->u.umsdos_i.u.dir_info.creating++;
- dir2->u.umsdos_i.u.dir_info.pid = current->pid;
+ dir1->u.umsdos_i.dir_info.creating++;
+ dir1->u.umsdos_i.dir_info.pid = current->pid;
+ dir2->u.umsdos_i.dir_info.creating++;
+ dir2->u.umsdos_i.dir_info.pid = current->pid;
break;
}
}
void umsdos_startlookup (struct inode *dir)
{
while (umsdos_waitcreate (dir) != 0);
- dir->u.umsdos_i.u.dir_info.looking++;
+ dir->u.umsdos_i.dir_info.looking++;
}
/*
*/
void umsdos_unlockcreate (struct inode *dir)
{
- dir->u.umsdos_i.u.dir_info.creating--;
- if (dir->u.umsdos_i.u.dir_info.creating < 0) {
- printk ("UMSDOS: dir->u.umsdos_i.u.dir_info.creating < 0: %d"
- ,dir->u.umsdos_i.u.dir_info.creating);
+ dir->u.umsdos_i.dir_info.creating--;
+ if (dir->u.umsdos_i.dir_info.creating < 0) {
+ printk ("UMSDOS: dir->u.umsdos_i.dir_info.creating < 0: %d"
+ ,dir->u.umsdos_i.dir_info.creating);
}
- wake_up (&dir->u.umsdos_i.u.dir_info.p);
+ u_wake_up (dir);
}
/*
*/
void umsdos_endlookup (struct inode *dir)
{
- dir->u.umsdos_i.u.dir_info.looking--;
- if (dir->u.umsdos_i.u.dir_info.looking < 0) {
- printk ("UMSDOS: dir->u.umsdos_i.u.dir_info.looking < 0: %d"
- ,dir->u.umsdos_i.u.dir_info.looking);
+ dir->u.umsdos_i.dir_info.looking--;
+ if (dir->u.umsdos_i.dir_info.looking < 0) {
+ printk ("UMSDOS: dir->u.umsdos_i.dir_info.looking < 0: %d"
+ ,dir->u.umsdos_i.dir_info.looking);
}
- wake_up (&dir->u.umsdos_i.u.dir_info.p);
+ u_wake_up (dir);
}
#else
* Let's go for simplicity...
*/
-extern struct inode_operations umsdos_symlink_inode_operations;
-
/*
* AV. Should be called with dir->i_sem down.
*/
NULL, /* get_block */
NULL, /* truncate */
NULL, /* permission */
- NULL, /* revalidate */
+ NULL /* revalidate */
};
__kernel_pid_t l_pid;
};
+#ifdef __KERNEL__
+#define flock64 flock
+#endif
+
#endif
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
if (ret) {
- init = pgd_offset(&init_mm, 0UL);
memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
- pgd_val(ret[PTRS_PER_PGD])
- = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+ pgd_val(ret[PTRS_PER_PGD]) =
+ pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
}
return ret;
}
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+ pgd_t *init;
+
+ init = pgd_offset(&init_mm, 0UL);
+ memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
+ }
return (pgd_t *)ret;
}
#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- struct task_struct * p;
pgd_t *pgd;
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
+ mmlist_access_lock();
+ mmlist_set_pgdir(address, entry);
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
+ mmlist_access_unlock();
}
#endif /* _ALPHA_PGALLOC_H */
typedef unsigned int __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
typedef long __kernel_off_t;
+typedef long __kernel_loff_t;
typedef int __kernel_pid_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef char * __kernel_caddr_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
typedef struct {
int val[2];
} __kernel_fsid_t;
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned. Fine, it's unsigned, but
+ * we retain the old value for compatibility, especially with DU.
+ * When you run into the 2^63 barrier, you call me.
+ */
+#define RLIM_INFINITY 0x7ffffffffffffffful
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
+struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+};
+
#endif
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
if (!size)
return 0;
- __asm__("cld\n\t"
- "movl $-1,%%eax\n\t"
+ __asm__("movl $-1,%%eax\n\t"
"xorl %%edx,%%edx\n\t"
"repe; scasl\n\t"
"je 1f\n\t"
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
+struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+};
+
#endif
#define __INS(s) \
extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
+{ __asm__ __volatile__ ("rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#define __OUTS(s) \
extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
+{ __asm__ __volatile__ ("rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#define RETURN_TYPE unsigned char
#include <asm/fixmap.h>
#include <linux/threads.h>
-#define pgd_quicklist (current_cpu_data.pgd_quick)
+extern unsigned long *pgd_quicklist;
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
#else
memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
#endif
- memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+ memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
+ }
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
-#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_free(pgd) free_pgd_fast(pgd)
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- struct task_struct * p;
pgd_t *pgd;
-#ifdef __SMP__
- int i;
-#endif
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
-#ifndef __SMP__
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+
+ mmlist_access_lock();
+ mmlist_set_pgdir(address, entry);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned
+ long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
-#else
- /* To pgd_alloc/pgd_free, one holds master kernel lock and so does our callee, so we can
- modify pgd caches of other CPUs as well. -jj */
- for (i = 0; i < NR_CPUS; i++)
- for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-#endif
+ mmlist_access_unlock();
}
/*
int f00f_bug;
int coma_bug;
unsigned long loops_per_sec;
- unsigned long *pgd_quick;
unsigned long *pmd_quick;
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
int d0, d1;
register char * __res;
__asm__ __volatile__(
- "cld\n\t"
"movb %%al,%%ah\n"
"1:\tlodsb\n\t"
"cmpb %%ah,%%al\n\t"
int d0, d1;
register char * __res;
__asm__ __volatile__(
- "cld\n\t"
"movl %6,%%edi\n\t"
"repne\n\t"
"scasb\n\t"
int d0, d1;
register char * __res;
__asm__ __volatile__(
- "cld\n\t"
"movl %6,%%edi\n\t"
"repne\n\t"
"scasb\n\t"
int d0, d1;
register char * __res;
__asm__ __volatile__(
- "cld\n\t"
"movl %6,%%edi\n\t"
"repne\n\t"
"scasb\n\t"
int d0, d1;
register char * __res;
__asm__ __volatile__(
- "cld\n\t" \
"movl %6,%%edi\n\t"
"repne\n\t"
"scasb\n\t"
"1:\txorl %0,%0\n\t"
"movl $-1,%%ecx\n\t"
"xorl %%eax,%%eax\n\t"
- "cld\n\t"
"movl %4,%%edi\n\t"
"repne\n\t"
"scasb\n\t"
int d0, d1, d2;
register void *tmp = (void *)to;
__asm__ __volatile__ (
- "cld\n\t"
"shrl $1,%%ecx\n\t"
"jnc 1f\n\t"
"movsb\n"
register void *tmp = (void *)dest;
if (dest<src)
__asm__ __volatile__ (
- "cld\n\t"
"rep\n\t"
"movsb"
:"=&c" (d0), "=&S" (d1), "=&D" (d2)
int d0, d1, d2;
register int __res;
__asm__ __volatile__(
- "cld\n\t"
"repe\n\t"
"cmpsb\n\t"
"je 1f\n\t"
if (!count)
return NULL;
__asm__ __volatile__(
- "cld\n\t"
"repne\n\t"
"scasb\n\t"
"je 1f\n\t"
{
if (!size)
return addr;
- __asm__("cld
- repnz; scasb
+ __asm__("repnz; scasb
jnz 1f
dec %%edi
1: "
#ifndef _I386_STRING_H_
#define _I386_STRING_H_
+#ifdef __KERNEL__
/*
* On a 486 or Pentium, we are better off not using the
* byte string operations. But on a 386 or a PPro the
{
int d0, d1, d2;
__asm__ __volatile__(
- "cld\n"
"1:\tlodsb\n\t"
"stosb\n\t"
"testb %%al,%%al\n\t"
{
int d0, d1, d2, d3;
__asm__ __volatile__(
- "cld\n"
"1:\tdecl %2\n\t"
"js 2f\n\t"
"lodsb\n\t"
{
int d0, d1, d2, d3;
__asm__ __volatile__(
- "cld\n\t"
"repne\n\t"
"scasb\n\t"
"decl %1\n"
{
int d0, d1, d2, d3;
__asm__ __volatile__(
- "cld\n\t"
"repne\n\t"
"scasb\n\t"
"decl %1\n\t"
int d0, d1;
register int __res;
__asm__ __volatile__(
- "cld\n"
"1:\tlodsb\n\t"
"scasb\n\t"
"jne 2f\n\t"
register int __res;
int d0, d1, d2;
__asm__ __volatile__(
- "cld\n"
"1:\tdecl %3\n\t"
"js 2f\n\t"
"lodsb\n\t"
int d0;
register char * __res;
__asm__ __volatile__(
- "cld\n\t"
"movb %%al,%%ah\n"
"1:\tlodsb\n\t"
"cmpb %%ah,%%al\n\t"
int d0, d1;
register char * __res;
__asm__ __volatile__(
- "cld\n\t"
"movb %%al,%%ah\n"
"1:\tlodsb\n\t"
"cmpb %%ah,%%al\n\t"
int d0;
register int __res;
__asm__ __volatile__(
- "cld\n\t"
"repne\n\t"
"scasb\n\t"
"notl %0\n\t"
{
int d0, d1, d2;
__asm__ __volatile__(
- "cld\n\t"
"rep ; movsl\n\t"
"testb $2,%b4\n\t"
"je 1f\n\t"
}
#define COMMON(x) \
__asm__ __volatile__( \
- "cld\n\t" \
"rep ; movsl" \
x \
: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
#endif
+/*
+ * struct_cpy(x,y), copy structure *x into (matching structure) *y.
+ *
+ * We get link-time errors if the structure sizes do not match.
+ * There is no runtime overhead, it's all optimized away at
+ * compile time.
+ */
+extern void __struct_cpy_bug (void);
+
+#define struct_cpy(x,y) \
+({ \
+ if (sizeof(*(x)) != sizeof(*(y))) \
+ __struct_cpy_bug (); \
+ memcpy(x, y, sizeof(*(x))); \
+})
+
#define __HAVE_ARCH_MEMMOVE
extern inline void * memmove(void * dest,const void * src, size_t n)
{
int d0, d1, d2;
if (dest<src)
__asm__ __volatile__(
- "cld\n\t"
"rep\n\t"
"movsb"
: "=&c" (d0), "=&S" (d1), "=&D" (d2)
if (!count)
return NULL;
__asm__ __volatile__(
- "cld\n\t"
"repne\n\t"
"scasb\n\t"
"je 1f\n\t"
{
int d0, d1;
__asm__ __volatile__(
- "cld\n\t"
"rep\n\t"
"stosb"
: "=&c" (d0), "=&D" (d1)
{
int d0, d1;
__asm__ __volatile__(
- "cld\n\t"
"rep ; stosl\n\t"
"testb $2,%b3\n\t"
"je 1f\n\t"
return s;
}
#define COMMON(x) \
-__asm__ __volatile__("cld\n\t" \
+__asm__ __volatile__( \
"rep ; stosl" \
x \
: "=&c" (d0), "=&D" (d1) \
{
if (!size)
return addr;
- __asm__("cld
- repnz; scasb
+ __asm__("repnz; scasb
jnz 1f
dec %%edi
1: "
return addr;
}
+#endif /* __KERNEL__ */
+
#endif
#endif
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
+struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+};
+
#endif /* _M68K_FCNTL_H */
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
long pad[4]; /* ZZZZZZZZZZZZZZZZZZZZZZZZZZ */
} flock_t;
+typedef struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+} flock64_t;
+
#endif /* __ASM_MIPS_FCNTL_H */
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
- if (ret) {
- init = pgd_offset(&init_mm, 0);
+ if (ret)
pgd_init((unsigned long)ret);
- memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
return ret;
}
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+ pgd_t *init;
+
+ init = pgd_offset(&init_mm, 0);
+ memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
+ }
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_fast(pte)
#define pte_free(pte) free_pte_fast(pte)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- struct task_struct * p;
pgd_t *pgd;
#ifdef __SMP__
int i;
#endif
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
+
+ mmlist_access_lock();
+ mmlist_set_pgdir(address, entry);
#ifndef __SMP__
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#endif
+ mmlist_access_unlock();
}
extern pgd_t swapper_pg_dir[1024];
#define RLIM_NLIMITS 10 /* Number of limit flavors. */
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
+struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+};
+
#endif
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- struct task_struct * p;
pgd_t *pgd;
#ifdef __SMP__
int i;
#endif
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
+
+ mmlist_access_lock();
+ mmlist_set_pgdir(address, entry);
#ifndef __SMP__
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
for (pgd = (pgd_t *)cpu_data[i].pgd_cache; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#endif
+ mmlist_access_unlock();
}
/* We don't use pmd cache, so this is a dummy routine */
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret, *init;
- /*if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )*/
- if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (ret)
memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- if (ret) {
- init = pgd_offset(&init_mm, 0);
- memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
return ret;
}
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+ pgd_t *init;
+
+ init = pgd_offset(&init_mm, 0);
+ memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
+ }
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_fast(pte)
#define pte_free(pte) free_pte_fast(pte)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_GETLK64 12 /* for LFS */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
+struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+};
+
#endif /* __ASM_SH_FCNTL_H */
{
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
- if (ret) {
- /* Clear User space */
+ if (ret)
memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-
- /* XXX: Copy vmalloc-ed space??? */
- memcpy(ret + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
return ret;
}
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+ /* XXX: Copy vmalloc-ed space??? */
+ memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
+ }
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
extern __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- struct task_struct * p;
pgd_t *pgd;
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
+ mmlist_access_lock();
+ mmlist_set_pgdir(address, entry);
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
+ mmlist_access_unlock();
}
extern pgd_t swapper_pg_dir[1024];
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
{ \
__kernel_pid_t32 l_pid;
short __unused;
};
+
+#define flock64 flock
#endif
#endif /* !(_SPARC64_FCNTL_H) */
pgd_quicklist = (unsigned long *)ret->next_hash;
ret = (struct page *)(page_address(ret) + off);
pgd_cache_size--;
- } else {
- ret = (struct page *) __get_free_page(GFP_KERNEL);
- if(ret) {
- struct page *page = mem_map + MAP_NR(ret);
-
- memset(ret, 0, PAGE_SIZE);
- (unsigned long)page->pprev_hash = 2;
- (unsigned long *)page->next_hash = pgd_quicklist;
- pgd_quicklist = (unsigned long *)page;
- pgd_cache_size++;
- }
}
return (pgd_t *)ret;
}
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+
+ if(ret)
+ memset(ret, 0, PAGE_SIZE);
+ return (pgd_t *)ret;
+}
+
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+ struct page *page = mem_map + MAP_NR(pgd);
+
+ (unsigned long)page->pprev_hash = 2;
+ (unsigned long *)page->next_hash = pgd_quicklist;
+ pgd_quicklist = (unsigned long *)page;
+ pgd_cache_size++;
+}
#else /* __SMP__ */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
- } else {
- ret = (unsigned long *) __get_free_page(GFP_KERNEL);
- if(ret)
- memset(ret, 0, PAGE_SIZE);
}
return (pgd_t *)ret;
}
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if(ret)
+ memset(ret, 0, PAGE_SIZE);
+ return(ret);
+}
+
+extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
+{
+}
+
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
{ \
/*
* AGPGART module version 0.99
* Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
extern void agp_free_memory(agp_memory *);
/*
- * void agp_free_memory(agp_memory *curr) :
+ * agp_free_memory :
*
* This function frees memory associated with
* an agp_memory pointer. It is the only function
extern agp_memory *agp_allocate_memory(size_t, u32);
/*
- * agp_memory *agp_allocate_memory(size_t page_count, u32 type) :
+ * agp_allocate_memory :
*
* This function allocates a group of pages of
* a certain type.
extern void agp_copy_info(agp_kern_info *);
/*
- * void agp_copy_info(agp_kern_info *info) :
+ * agp_copy_info :
*
* This function copies information about the
* agp bridge device and the state of the agp
extern int agp_bind_memory(agp_memory *, off_t);
/*
- * int agp_bind_memory(agp_memory *curr, off_t pg_start) :
+ * agp_bind_memory :
*
* This function binds an agp_memory structure
* into the graphics aperture translation table.
extern int agp_unbind_memory(agp_memory *);
/*
- * int agp_unbind_memory(agp_memory *curr) :
+ * agp_unbind_memory :
*
* This function removes an agp_memory structure
* from the graphics aperture translation table.
extern void agp_enable(u32);
/*
- * void agp_enable(u32 mode) :
+ * agp_enable :
*
* This function initializes the agp point-to-point
* connection.
extern int agp_backend_acquire(void);
/*
- * int agp_backend_acquire(void) :
+ * agp_backend_acquire :
*
* This Function attempts to acquire the agp
* backend.
extern void agp_backend_release(void);
/*
- * void agp_backend_release(void) :
+ * agp_backend_release :
*
* This Function releases the lock on the agp
* backend.
/*
* AGPGART module version 0.99
* Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight
- * Copyright (C) 1999 Xi Graphics
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
#elif (SCSI_DISK_MAJOR(MAJOR_NR))
#define DEVICE_NAME "scsidisk"
-#define DEVICE_INTR do_sd
#define TIMEOUT_VALUE (2*HZ)
-#define DEVICE_REQUEST do_sd_request
#define DEVICE_NR(device) (((MAJOR(device) & SD_MAJOR_MASK) << (8 - 4)) + (MINOR(device) >> 4))
#define DEVICE_ON(device)
#define DEVICE_OFF(device)
#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
#define DEVICE_NAME "CD-ROM"
-#define DEVICE_INTR do_sr
-#define DEVICE_REQUEST do_sr_request
#define DEVICE_NR(device) (MINOR(device))
#define DEVICE_ON(device)
#define DEVICE_OFF(device)
#if !defined(IDE_DRIVER)
#ifndef CURRENT
-#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#define CURRENT (blk_dev[MAJOR_NR].request_queue.current_request)
#endif
#ifndef DEVICE_NAME
#endif /* DEVICE_TIMEOUT */
-static void (DEVICE_REQUEST)(void);
+#ifdef DEVICE_REQUEST
+static void (DEVICE_REQUEST)(request_queue_t *);
+#endif
#ifdef DEVICE_INTR
#define CLEAR_INTR SET_INTR(NULL)
unsigned long nr_sectors;
unsigned long nr_segments;
unsigned long current_nr_sectors;
+ void * special;
char * buffer;
struct semaphore * sem;
struct buffer_head * bh;
struct request * next;
};
-typedef void (request_fn_proc) (void);
-typedef struct request ** (queue_proc) (kdev_t dev);
+typedef struct request_queue request_queue_t;
+typedef int (merge_request_fn) (request_queue_t *,
+ struct request * req,
+ struct buffer_head *);
+typedef int (merge_requests_fn) (request_queue_t *,
+ struct request * req,
+ struct request * req2);
+typedef void (request_fn_proc) (request_queue_t *);
+typedef request_queue_t * (queue_proc) (kdev_t dev);
+
+struct request_queue
+{
+ struct request * current_request;
+ request_fn_proc * request_fn;
+ merge_request_fn * merge_fn;
+ merge_requests_fn * merge_requests_fn;
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void * queuedata;
+
+ /*
+ * This is used to remove the plug when tq_disk runs.
+ */
+ struct tq_struct plug_tq;
+ /*
+ * Boolean that indicates whether this queue is plugged or not.
+ */
+ char plugged;
+
+ /*
+ * Boolean that indicates whether current_request is active or
+ * not.
+ */
+ char head_active;
+
+ /*
+ * Boolean that indicates whether we should use plugging on
+ * this queue or not.
+ */
+ char use_plug;
+};
struct blk_dev_struct {
- request_fn_proc *request_fn;
/*
* queue_proc has to be atomic
*/
+ request_queue_t request_queue;
queue_proc *queue;
void *data;
- struct request *current_request;
- struct request plug;
- struct tq_struct plug_tq;
};
struct sec_size {
unsigned block_size_bits;
};
+/*
+ * Used to indicate the default queue for drivers that don't bother
+ * to implement multiple queues. We have this access macro here
+ * so as to eliminate the need for each and every block device
+ * driver to know about the internal structure of blk_dev[].
+ */
+#define BLK_DEFAULT_QUEUE(_MAJOR) &blk_dev[_MAJOR].request_queue
+
extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
extern wait_queue_head_t wait_for_request;
extern void unplug_device(void * data);
extern void make_request(int major,int rw, struct buffer_head * bh);
+/*
+ * Access functions for manipulating queue properties
+ */
+extern void blk_init_queue(request_queue_t *, request_fn_proc *);
+extern void blk_cleanup_queue(request_queue_t *);
+extern void blk_queue_headactive(request_queue_t *, int);
+extern void blk_queue_pluggable(request_queue_t *, int);
+
/* md needs this function to remap requests */
extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
extern int md_make_request (int minor, int rw, struct buffer_head * bh);
/* symlink.c */
extern struct inode_operations ext2_symlink_inode_operations;
+extern struct inode_operations ext2_fast_symlink_inode_operations;
#endif /* __KERNEL__ */
struct file *fl_file;
unsigned char fl_flags;
unsigned char fl_type;
- off_t fl_start;
- off_t fl_end;
+ loff_t fl_start;
+ loff_t fl_end;
void (*fl_notify)(struct file_lock *); /* unblock callback */
extern int fcntl_getlk(unsigned int, struct flock *);
extern int fcntl_setlk(unsigned int, unsigned int, struct flock *);
+extern int fcntl_getlk64(unsigned int fd, struct flock64 *l);
+extern int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l);
+
/* fs/locks.c */
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
extern int block_write_full_page (struct dentry *, struct page *);
extern int block_write_partial_page (struct file *, struct page *, unsigned long, unsigned long, const char *);
extern int block_write_cont_page (struct file *, struct page *, unsigned long, unsigned long, const char *);
+extern int block_write_zero_range(struct inode *, struct page *, unsigned, unsigned, unsigned, const char *);
+extern inline int block_write_range(struct inode *inode, struct page *page,
+ unsigned from, unsigned len,const char *buf)
+{
+ return block_write_zero_range(inode, page, from, from, from+len, buf);
+}
extern int block_flushpage(struct page *, unsigned long);
+extern int block_symlink(struct inode *, const char *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *, writepage_t);
-extern void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor);
+extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t);
+
+extern int vfs_readlink(struct dentry *, char *, int, char *);
+extern struct dentry *vfs_follow_link(struct dentry *, struct dentry *, unsigned, char *);
+extern int page_readlink(struct dentry *, char *, int);
+extern struct dentry *page_follow_link(struct dentry *, struct dentry *, unsigned);
extern struct super_block *get_super(kdev_t);
struct super_block *get_empty_super(void);
} special_t;
typedef struct ide_drive_s {
- struct request *queue; /* request queue */
+ request_queue_t queue; /* request queue */
struct ide_drive_s *next; /* circular list of hwgroup drives */
unsigned long sleep; /* sleep until this time */
unsigned long service_start; /* time we started last request */
/*
* ide_get_queue() returns the queue which corresponds to a given device.
*/
-struct request **ide_get_queue (kdev_t dev);
+request_queue_t *ide_get_queue (kdev_t dev);
/*
* CompactFlash cards and their brethern pretend to be removable hard disks,
void ide_timer_expiry (unsigned long data);
void ide_intr (int irq, void *dev_id, struct pt_regs *regs);
void ide_geninit (struct gendisk *gd);
-void do_ide0_request (void);
+void do_ide0_request (request_queue_t * q);
#if MAX_HWIFS > 1
-void do_ide1_request (void);
+void do_ide1_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 2
-void do_ide2_request (void);
+void do_ide2_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 3
-void do_ide3_request (void);
+void do_ide3_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 4
-void do_ide4_request (void);
+void do_ide4_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 5
-void do_ide5_request (void);
+void do_ide5_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 6
-void do_ide6_request (void);
+void do_ide6_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 7
-void do_ide7_request (void);
+void do_ide7_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 8
-void do_ide8_request (void);
+void do_ide8_request (request_queue_t * q);
#endif
#if MAX_HWIFS > 9
-void do_ide9_request (void);
+void do_ide9_request (request_queue_t * q);
#endif
void ide_init_subdrivers (void);
#define PG_error 1
#define PG_referenced 2
#define PG_uptodate 3
+#define PG__unused_00 4
#define PG_decr_after 5
-#define PG_DMA 7
+#define PG_unused_01 6
+#define PG__unused_02 7
#define PG_slab 8
#define PG_swap_cache 9
#define PG_skip 10
#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
#define PageDecrAfter(page) test_bit(PG_decr_after, &(page)->flags)
-#define PageDMA(page) test_bit(PG_DMA, &(page)->flags)
+#define PageDMA(page) (contig_page_data.node_zones + ZONE_DMA == (page)->zone)
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
#define PageSwapCache(page) test_bit(PG_swap_cache, &(page)->flags)
#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
* PG_reserved is set for a page which must never be accessed (which
* may not even be present).
*
- * PG_DMA is set for those pages which lie in the range of
- * physical addresses capable of carrying DMA transfers.
+ * PG_DMA has been removed, page->zone now tells exactly whether the
+ * page is suited to do DMAing into.
*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
#define vmlist_modify_lock(mm) vmlist_access_lock(mm)
#define vmlist_modify_unlock(mm) vmlist_access_unlock(mm)
+extern spinlock_t mm_lock;
+#define mmlist_access_lock() spin_lock(&mm_lock)
+#define mmlist_access_unlock() spin_unlock(&mm_lock)
+#define mmlist_modify_lock() mmlist_access_lock()
+#define mmlist_modify_unlock() mmlist_access_unlock()
+
+#define for_each_mm(mm) \
+ for (mm = list_entry(init_mm.mmlist.next, struct mm_struct, mmlist); \
+ (mm != &init_mm); \
+ (mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist)))
+
+static inline void mmlist_set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct mm_struct *mm;
+
+ for_each_mm(mm)
+ *pgd_offset(mm,address) = entry;
+}
+
#endif /* __KERNEL__ */
#endif
#define MOD_VISITED 8
#define MOD_USED_ONCE 16
#define MOD_JUST_FREED 32
+#define MOD_INITIALIZING 64
/* Values for query_module's which. */
#define QM_SYMBOLS 4
#define QM_INFO 5
+/* Can the module be queried? */
+#define MOD_CAN_QUERY(mod) (((mod)->flags & (MOD_RUNNING | MOD_INITIALIZING)) && !((mod)->flags & MOD_DELETED))
+
/* When struct module is extended, we must test whether the new member
is present in the header received from insmod before we can use it.
This function returns true if the member is present. */
#ifndef LINUX_NFSD_NFSD_H
#define LINUX_NFSD_NFSD_H
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/unistd.h>
#include <linux/dirent.h>
#include <asm/types.h>
#ifdef __KERNEL__
+# include <linux/config.h>
# include <linux/types.h>
# include <linux/string.h>
# include <linux/fs.h>
int clusterfactor;
int clustersize;
int mft_recordsize;
- int mft_recordbits;
int mft_clusters_per_record;
int index_recordsize;
int index_clusters_per_record;
long ru_nivcsw; /* involuntary " */
};
-/*
- * SuS says limits have to be unsigned.
- *
- * Which makes a ton more sense anyway.
- */
-#define RLIM_INFINITY (~0UL)
-
struct rlimit {
unsigned long rlim_cur;
unsigned long rlim_max;
unsigned long cpu_vm_mask;
unsigned long swap_cnt; /* number of pages to swap on next pass */
unsigned long swap_address;
+ struct list_head mmlist; /* active mm list */
/*
* This is an architecture-specific pointer: the portable
* part of Linux does not know about any segments.
0, 0, 0, \
0, 0, 0, 0, \
0, 0, 0, \
- 0, 0, 0, 0, NULL }
+ 0, 0, 0, 0, \
+ LIST_HEAD_INIT(init_mm.mmlist), NULL }
struct signal_struct {
atomic_t count;
/* symlink.c */
extern struct inode_operations ufs_symlink_inode_operations;
+extern struct inode_operations ufs_fast_symlink_inode_operations;
/* truncate.c */
extern void ufs_truncate (struct inode *);
#define LINUX_UMSDOS_FS_H
-#define UMS_DEBUG 1 /* define for check_* functions */
+/*#define UMS_DEBUG 1 // define for check_* functions */
/*#define UMSDOS_DEBUG 1*/
#define UMSDOS_PARANOIA 1
* Only one at a time, although one
* may recursively lock, so it is a counter
*/
- long pid; /* pid of the process owning the creation */
- /* lock */
+ long pid; /* pid of the process owning the creation
+ * lock */
};
struct umsdos_inode_info {
- union {
- struct msdos_inode_info msdos_info;
- struct dir_locking_info dir_info;
- } u;
+ struct msdos_inode_info msdos_info;
+ struct dir_locking_info dir_info;
int i_patched; /* Inode has been patched */
int i_is_hlink; /* Resolved hardlink inode? */
unsigned long i_emd_owner; /* Is this the EMD file inode? */
#define LOG_SELECT 0x4c
#define LOG_SENSE 0x4d
#define MODE_SELECT_10 0x55
+#define RESERVE_10 0x56
+#define RELEASE_10 0x57
#define MODE_SENSE_10 0x5a
+#define PERSISTENT_RESERVE_IN 0x5e
+#define PERSISTENT_RESERVE_OUT 0x5f
#define MOVE_MEDIUM 0xa5
#define READ_12 0xa8
#define WRITE_12 0xaa
O_TARGET := kernel.o
O_OBJS = sched.o dma.o fork.o exec_domain.o panic.o printk.o sys.o \
module.o exit.o itimer.o info.o time.o softirq.o resource.o \
- sysctl.o acct.o capability.o ptrace.o
+ sysctl.o acct.o capability.o ptrace.o timer.o
OX_OBJS += signal.o
/* SLAB cache for mm_struct's. */
kmem_cache_t *mm_cachep;
+spinlock_t mm_lock = SPIN_LOCK_UNLOCKED;
/* SLAB cache for files structs */
kmem_cache_t *files_cachep;
atomic_set(&mm->mm_count, 1);
init_MUTEX(&mm->mmap_sem);
mm->page_table_lock = SPIN_LOCK_UNLOCKED;
- mm->pgd = pgd_alloc();
- if (mm->pgd)
+ mmlist_modify_lock();
+ if ((mm->pgd = get_pgd_fast())) {
+ list_add_tail(&mm->mmlist, &init_mm.mmlist);
+ mmlist_modify_unlock();
return mm;
+ }
+ mmlist_modify_unlock();
+ if ((mm->pgd = get_pgd_slow())) {
+ mmlist_modify_lock();
+ get_pgd_uptodate(mm->pgd);
+ list_add_tail(&mm->mmlist, &init_mm.mmlist);
+ mmlist_modify_unlock();
+ return mm;
+ }
+ mmlist_modify_unlock();
kmem_cache_free(mm_cachep, mm);
}
return NULL;
inline void __mmdrop(struct mm_struct *mm)
{
if (mm == &init_mm) BUG();
+ mmlist_modify_lock();
pgd_free(mm->pgd);
destroy_context(mm);
+ list_del(&mm->mmlist);
+ mmlist_modify_unlock();
kmem_cache_free(mm_cachep, mm);
}
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(block_write_partial_page);
EXPORT_SYMBOL(block_write_cont_page);
+EXPORT_SYMBOL(block_write_zero_range);
EXPORT_SYMBOL(generic_file_read);
EXPORT_SYMBOL(do_generic_file_read);
EXPORT_SYMBOL(generic_file_write);
EXPORT_SYMBOL(__find_lock_page);
EXPORT_SYMBOL(grab_cache_page);
EXPORT_SYMBOL(read_cache_page);
+EXPORT_SYMBOL(vfs_readlink);
+EXPORT_SYMBOL(vfs_follow_link);
+EXPORT_SYMBOL(page_readlink);
+EXPORT_SYMBOL(page_follow_link);
+EXPORT_SYMBOL(block_symlink);
#if !defined(CONFIG_NFSD) && defined(CONFIG_NFSD_MODULE)
EXPORT_SYMBOL(do_nfsservctl);
* 0.99.14 version by Jon Tombs <jon@gtex02.us.es>,
* Heavily modified by Bjorn Ekwall <bj0rn@blox.se> May 1994 (C)
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
+ * Add MOD_INITIALIZING Keith Owens <kaos@ocs.com.au> Nov 1999
*
* This source is covered by the GNU GPL, the same as all kernel sources.
*/
put_mod_name(name);
/* Initialize the module. */
+ mod->flags |= MOD_INITIALIZING;
atomic_set(&mod->uc.usecount,1);
if (mod->init && mod->init() != 0) {
atomic_set(&mod->uc.usecount,0);
+ mod->flags &= ~MOD_INITIALIZING;
error = -EBUSY;
goto err0;
}
atomic_dec(&mod->uc.usecount);
/* And set it running. */
- mod->flags |= MOD_RUNNING;
+ mod->flags = (mod->flags | MOD_RUNNING) & ~MOD_INITIALIZING;
error = 0;
goto err0;
if (mod == &kernel_module)
return -EINVAL;
- if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+ if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
if (mod == &kernel_module)
return -EINVAL;
- if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+ if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
char *strings;
unsigned long *vals;
- if ((mod->flags & (MOD_RUNNING | MOD_DELETED)) != MOD_RUNNING)
+ if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
struct module_symbol *msym;
unsigned int j;
- if ((mod->flags & (MOD_RUNNING|MOD_DELETED)) != MOD_RUNNING)
+ if (!MOD_CAN_QUERY(mod))
continue;
/* magic: write module info as a pseudo symbol */
safe_copy_cstr(" (autoclean)");
if (!(mod->flags & MOD_USED_ONCE))
safe_copy_cstr(" (unused)");
- } else
+ }
+ else if (mod->flags & MOD_INITIALIZING)
+ safe_copy_cstr(" (initializing)");
+ else
safe_copy_cstr(" (uninitialized)");
if ((ref = mod->refs) != NULL) {
unsigned i;
struct module_symbol *sym;
- if (!(mod->flags & MOD_RUNNING) || (mod->flags & MOD_DELETED))
+ if (!MOD_CAN_QUERY(mod))
continue;
for (i = mod->nsyms, sym = mod->syms; i > 0; --i, ++sym) {
for (mp = module_list; mp; mp = mp->next) {
if (((modname == NULL) || (strcmp(mp->name, modname) == 0)) &&
- (mp->flags & (MOD_RUNNING | MOD_DELETED)) == MOD_RUNNING &&
+ MOD_CAN_QUERY(mp) &&
(mp->nsyms > 0)) {
for (i = mp->nsyms, sym = mp->syms;
i > 0; --i, ++sym) {
/*
* linux/kernel/sched.c
*
+ * Kernel scheduler and related syscalls
+ *
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
* make semaphores SMP safe
- * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
- * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
* 1998-11-19 Implemented schedule_timeout() and related stuff
* by Andrea Arcangeli
- * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
- * serialize accesses to xtime/lost_ticks).
- * Copyright (C) 1998 Andrea Arcangeli
* 1998-12-28 Implemented better SMP scheduling by Ingo Molnar
- * 1999-03-10 Improved NTP compatibility by Ulrich Windl
*/
/*
*/
#include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/fdreg.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
-#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
-#include <linux/timex.h>
+
+extern void timer_bh(void);
+extern void tqueue_bh(void);
+extern void immediate_bh(void);
/*
- * kernel variables
+ * scheduler variables
*/
unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
-long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
-
-/* The current time */
-volatile struct timeval xtime __attribute__ ((aligned (16)));
-
-/* Don't completely fail for HZ > 500. */
-int tickadj = 500/HZ ? : 1; /* microsecs */
-
-DECLARE_TASK_QUEUE(tq_timer);
-DECLARE_TASK_QUEUE(tq_immediate);
-DECLARE_TASK_QUEUE(tq_scheduler);
-
-/*
- * phase-lock loop variables
- */
-/* TIME_ERROR prevents overwriting the CMOS clock */
-int time_state = TIME_OK; /* clock synchronization status */
-int time_status = STA_UNSYNC; /* clock status bits */
-long time_offset = 0; /* time adjustment (us) */
-long time_constant = 2; /* pll time constant */
-long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
-long time_precision = 1; /* clock precision (us) */
-long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
-long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
-long time_phase = 0; /* phase offset (scaled us) */
-long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC; /* frequency offset (scaled ppm) */
-long time_adj = 0; /* tick adjust (scaled 1 / HZ) */
-long time_reftime = 0; /* time at last adjustment (s) */
-
-long time_adjust = 0;
-long time_adjust_step = 0;
-
-unsigned long event = 0;
-
-extern int do_setitimer(int, struct itimerval *, struct itimerval *);
-unsigned int * prof_buffer = NULL;
-unsigned long prof_len = 0;
-unsigned long prof_shift = 0;
-
extern void mem_use(void);
-unsigned long volatile jiffies=0;
-
/*
* Init task must be ok at boot for the ix86 as we will check its signals
* via the SMP irq return path.
wake_up_process(p);
}
-/*
- * Event timer code
- */
-#define TVN_BITS 6
-#define TVR_BITS 8
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-
-struct timer_vec {
- int index;
- struct timer_list *vec[TVN_SIZE];
-};
-
-struct timer_vec_root {
- int index;
- struct timer_list *vec[TVR_SIZE];
-};
-
-static struct timer_vec tv5 = { 0 };
-static struct timer_vec tv4 = { 0 };
-static struct timer_vec tv3 = { 0 };
-static struct timer_vec tv2 = { 0 };
-static struct timer_vec_root tv1 = { 0 };
-
-static struct timer_vec * const tvecs[] = {
- (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
-};
-
-#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
-
-static unsigned long timer_jiffies = 0;
-
-static inline void insert_timer(struct timer_list *timer,
- struct timer_list **vec, int idx)
-{
- if ((timer->next = vec[idx]))
- vec[idx]->prev = timer;
- vec[idx] = timer;
- timer->prev = (struct timer_list *)&vec[idx];
-}
-
-static inline void internal_add_timer(struct timer_list *timer)
-{
- /*
- * must be cli-ed when calling this
- */
- unsigned long expires = timer->expires;
- unsigned long idx = expires - timer_jiffies;
-
- if (idx < TVR_SIZE) {
- int i = expires & TVR_MASK;
- insert_timer(timer, tv1.vec, i);
- } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
- int i = (expires >> TVR_BITS) & TVN_MASK;
- insert_timer(timer, tv2.vec, i);
- } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
- int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
- insert_timer(timer, tv3.vec, i);
- } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
- int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
- insert_timer(timer, tv4.vec, i);
- } else if ((signed long) idx < 0) {
- /* can happen if you add a timer with expires == jiffies,
- * or you set a timer to go off in the past
- */
- insert_timer(timer, tv1.vec, tv1.index);
- } else if (idx <= 0xffffffffUL) {
- int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
- insert_timer(timer, tv5.vec, i);
- } else {
- /* Can only get here on architectures with 64-bit jiffies */
- timer->next = timer->prev = timer;
- }
-}
-
-spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
-
-void add_timer(struct timer_list *timer)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&timerlist_lock, flags);
- if (timer->prev)
- goto bug;
- internal_add_timer(timer);
-out:
- spin_unlock_irqrestore(&timerlist_lock, flags);
- return;
-
-bug:
- printk("bug: kernel timer added twice at %p.\n",
- __builtin_return_address(0));
- goto out;
-}
-
-static inline int detach_timer(struct timer_list *timer)
-{
- struct timer_list *prev = timer->prev;
- if (prev) {
- struct timer_list *next = timer->next;
- prev->next = next;
- if (next)
- next->prev = prev;
- return 1;
- }
- return 0;
-}
-
-void mod_timer(struct timer_list *timer, unsigned long expires)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&timerlist_lock, flags);
- timer->expires = expires;
- detach_timer(timer);
- internal_add_timer(timer);
- spin_unlock_irqrestore(&timerlist_lock, flags);
-}
-
-int del_timer(struct timer_list * timer)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&timerlist_lock, flags);
- ret = detach_timer(timer);
- timer->next = timer->prev = 0;
- spin_unlock_irqrestore(&timerlist_lock, flags);
- return ret;
-}
-
signed long schedule_timeout(signed long timeout)
{
struct timer_list timer;
void scheduling_functions_end_here(void) { }
-static inline void cascade_timers(struct timer_vec *tv)
-{
- /* cascade all the timers from tv up one level */
- struct timer_list *timer;
- timer = tv->vec[tv->index];
- /*
- * We are removing _all_ timers from the list, so we don't have to
- * detach them individually, just clear the list afterwards.
- */
- while (timer) {
- struct timer_list *tmp = timer;
- timer = timer->next;
- internal_add_timer(tmp);
- }
- tv->vec[tv->index] = NULL;
- tv->index = (tv->index + 1) & TVN_MASK;
-}
-
-static inline void run_timer_list(void)
-{
- spin_lock_irq(&timerlist_lock);
- while ((long)(jiffies - timer_jiffies) >= 0) {
- struct timer_list *timer;
- if (!tv1.index) {
- int n = 1;
- do {
- cascade_timers(tvecs[n]);
- } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
- }
- while ((timer = tv1.vec[tv1.index])) {
- void (*fn)(unsigned long) = timer->function;
- unsigned long data = timer->data;
- detach_timer(timer);
- timer->next = timer->prev = NULL;
- spin_unlock_irq(&timerlist_lock);
- fn(data);
- spin_lock_irq(&timerlist_lock);
- }
- ++timer_jiffies;
- tv1.index = (tv1.index + 1) & TVR_MASK;
- }
- spin_unlock_irq(&timerlist_lock);
-}
-
-
-static inline void run_old_timers(void)
-{
- struct timer_struct *tp;
- unsigned long mask;
-
- for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
- if (mask > timer_active)
- break;
- if (!(mask & timer_active))
- continue;
- if (time_after(tp->expires, jiffies))
- continue;
- timer_active &= ~mask;
- tp->fn();
- sti();
- }
-}
-
-spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
-
-void tqueue_bh(void)
-{
- run_task_queue(&tq_timer);
-}
-
-void immediate_bh(void)
-{
- run_task_queue(&tq_immediate);
-}
-
-unsigned long timer_active = 0;
-struct timer_struct timer_table[32];
-
-/*
- * Hmm.. Changed this, as the GNU make sources (load.c) seems to
- * imply that avenrun[] is the standard name for this kind of thing.
- * Nothing else seems to be standardized: the fractional size etc
- * all seem to differ on different machines.
- */
-unsigned long avenrun[3] = { 0,0,0 };
-
-/*
- * Nr of active tasks - counted in fixed-point numbers
- */
-static unsigned long count_active_tasks(void)
-{
- struct task_struct *p;
- unsigned long nr = 0;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if ((p->state == TASK_RUNNING ||
- (p->state & TASK_UNINTERRUPTIBLE) ||
- (p->state & TASK_SWAPPING)))
- nr += FIXED_1;
- }
- read_unlock(&tasklist_lock);
- return nr;
-}
-
-static inline void calc_load(unsigned long ticks)
-{
- unsigned long active_tasks; /* fixed-point */
- static int count = LOAD_FREQ;
-
- count -= ticks;
- if (count < 0) {
- count += LOAD_FREQ;
- active_tasks = count_active_tasks();
- CALC_LOAD(avenrun[0], EXP_1, active_tasks);
- CALC_LOAD(avenrun[1], EXP_5, active_tasks);
- CALC_LOAD(avenrun[2], EXP_15, active_tasks);
- }
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- *
- */
-static void second_overflow(void)
-{
- long ltemp;
-
- /* Bump the maxerror field */
- time_maxerror += time_tolerance >> SHIFT_USEC;
- if ( time_maxerror > NTP_PHASE_LIMIT ) {
- time_maxerror = NTP_PHASE_LIMIT;
- time_status |= STA_UNSYNC;
- }
-
- /*
- * Leap second processing. If in leap-insert state at
- * the end of the day, the system clock is set back one
- * second; if in leap-delete state, the system clock is
- * set ahead one second. The microtime() routine or
- * external clock driver will insure that reported time
- * is always monotonic. The ugly divides should be
- * replaced.
- */
- switch (time_state) {
-
- case TIME_OK:
- if (time_status & STA_INS)
- time_state = TIME_INS;
- else if (time_status & STA_DEL)
- time_state = TIME_DEL;
- break;
-
- case TIME_INS:
- if (xtime.tv_sec % 86400 == 0) {
- xtime.tv_sec--;
- time_state = TIME_OOP;
- printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
- }
- break;
-
- case TIME_DEL:
- if ((xtime.tv_sec + 1) % 86400 == 0) {
- xtime.tv_sec++;
- time_state = TIME_WAIT;
- printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
- }
- break;
-
- case TIME_OOP:
- time_state = TIME_WAIT;
- break;
-
- case TIME_WAIT:
- if (!(time_status & (STA_INS | STA_DEL)))
- time_state = TIME_OK;
- }
-
- /*
- * Compute the phase adjustment for the next second. In
- * PLL mode, the offset is reduced by a fixed factor
- * times the time constant. In FLL mode the offset is
- * used directly. In either mode, the maximum phase
- * adjustment for each second is clamped so as to spread
- * the adjustment over not more than the number of
- * seconds between updates.
- */
- if (time_offset < 0) {
- ltemp = -time_offset;
- if (!(time_status & STA_FLL))
- ltemp >>= SHIFT_KG + time_constant;
- if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
- ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
- time_offset += ltemp;
- time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
- } else {
- ltemp = time_offset;
- if (!(time_status & STA_FLL))
- ltemp >>= SHIFT_KG + time_constant;
- if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
- ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
- time_offset -= ltemp;
- time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
- }
-
- /*
- * Compute the frequency estimate and additional phase
- * adjustment due to frequency error for the next
- * second. When the PPS signal is engaged, gnaw on the
- * watchdog counter and update the frequency computed by
- * the pll and the PPS signal.
- */
- pps_valid++;
- if (pps_valid == PPS_VALID) { /* PPS signal lost */
- pps_jitter = MAXTIME;
- pps_stabil = MAXFREQ;
- time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
- STA_PPSWANDER | STA_PPSERROR);
- }
- ltemp = time_freq + pps_freq;
- if (ltemp < 0)
- time_adj -= -ltemp >>
- (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
- else
- time_adj += ltemp >>
- (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
-
-#if HZ == 100
- /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
- * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
- */
- if (time_adj < 0)
- time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
- else
- time_adj += (time_adj >> 2) + (time_adj >> 5);
-#endif
-}
-
-/* in the NTP reference this is called "hardclock()" */
-static void update_wall_time_one_tick(void)
-{
- if ( (time_adjust_step = time_adjust) != 0 ) {
- /* We are doing an adjtime thing.
- *
- * Prepare time_adjust_step to be within bounds.
- * Note that a positive time_adjust means we want the clock
- * to run faster.
- *
- * Limit the amount of the step to be in the range
- * -tickadj .. +tickadj
- */
- if (time_adjust > tickadj)
- time_adjust_step = tickadj;
- else if (time_adjust < -tickadj)
- time_adjust_step = -tickadj;
-
- /* Reduce by this step the amount of time left */
- time_adjust -= time_adjust_step;
- }
- xtime.tv_usec += tick + time_adjust_step;
- /*
- * Advance the phase, once it gets to one microsecond, then
- * advance the tick more.
- */
- time_phase += time_adj;
- if (time_phase <= -FINEUSEC) {
- long ltemp = -time_phase >> SHIFT_SCALE;
- time_phase += ltemp << SHIFT_SCALE;
- xtime.tv_usec -= ltemp;
- }
- else if (time_phase >= FINEUSEC) {
- long ltemp = time_phase >> SHIFT_SCALE;
- time_phase -= ltemp << SHIFT_SCALE;
- xtime.tv_usec += ltemp;
- }
-}
-
-/*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing this this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks
- */
-static void update_wall_time(unsigned long ticks)
-{
- do {
- ticks--;
- update_wall_time_one_tick();
- } while (ticks);
-
- if (xtime.tv_usec >= 1000000) {
- xtime.tv_usec -= 1000000;
- xtime.tv_sec++;
- second_overflow();
- }
-}
-
-static inline void do_process_times(struct task_struct *p,
- unsigned long user, unsigned long system)
-{
- unsigned long psecs;
-
- psecs = (p->times.tms_utime += user);
- psecs += (p->times.tms_stime += system);
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
- /* Send SIGXCPU every second.. */
- if (!(psecs % HZ))
- send_sig(SIGXCPU, p, 1);
- /* and SIGKILL when we go over max.. */
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
- send_sig(SIGKILL, p, 1);
- }
-}
-
-static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
-{
- unsigned long it_virt = p->it_virt_value;
-
- if (it_virt) {
- if (it_virt <= ticks) {
- it_virt = ticks + p->it_virt_incr;
- send_sig(SIGVTALRM, p, 1);
- }
- p->it_virt_value = it_virt - ticks;
- }
-}
-
-static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
-{
- unsigned long it_prof = p->it_prof_value;
-
- if (it_prof) {
- if (it_prof <= ticks) {
- it_prof = ticks + p->it_prof_incr;
- send_sig(SIGPROF, p, 1);
- }
- p->it_prof_value = it_prof - ticks;
- }
-}
-
-void update_one_process(struct task_struct *p,
- unsigned long ticks, unsigned long user, unsigned long system, int cpu)
-{
- p->per_cpu_utime[cpu] += user;
- p->per_cpu_stime[cpu] += system;
- do_process_times(p, user, system);
- do_it_virt(p, user);
- do_it_prof(p, ticks);
-}
-
-static void update_process_times(unsigned long ticks, unsigned long system)
-{
-/*
- * SMP does this on a per-CPU basis elsewhere
- */
-#ifndef __SMP__
- struct task_struct * p = current;
- unsigned long user = ticks - system;
- if (p->pid) {
- p->counter -= ticks;
- if (p->counter <= 0) {
- p->counter = 0;
- p->need_resched = 1;
- }
- if (p->priority < DEF_PRIORITY)
- kstat.cpu_nice += user;
- else
- kstat.cpu_user += user;
- kstat.cpu_system += system;
- }
- update_one_process(p, ticks, user, system, 0);
-#endif
-}
-
-volatile unsigned long lost_ticks = 0;
-static unsigned long lost_ticks_system = 0;
-
-/*
- * This spinlock protect us from races in SMP while playing with xtime. -arca
- */
-rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
-
-static inline void update_times(void)
-{
- unsigned long ticks;
-
- /*
- * update_times() is run from the raw timer_bh handler so we
- * just know that the irqs are locally enabled and so we don't
- * need to save/restore the flags of the local CPU here. -arca
- */
- write_lock_irq(&xtime_lock);
-
- ticks = lost_ticks;
- lost_ticks = 0;
-
- if (ticks) {
- unsigned long system;
- system = xchg(&lost_ticks_system, 0);
-
- calc_load(ticks);
- update_wall_time(ticks);
- write_unlock_irq(&xtime_lock);
-
- update_process_times(ticks, system);
-
- } else
- write_unlock_irq(&xtime_lock);
-}
-
-static void timer_bh(void)
-{
- update_times();
- run_old_timers();
- run_timer_list();
-}
-
-void do_timer(struct pt_regs * regs)
-{
- (*(unsigned long *)&jiffies)++;
- lost_ticks++;
- mark_bh(TIMER_BH);
- if (!user_mode(regs))
- lost_ticks_system++;
- if (tq_timer)
- mark_bh(TQUEUE_BH);
-}
-
-#if !defined(__alpha__) && !defined(__ia64__)
-
-/*
- * For backwards compatibility? This can be done in libc so Alpha
- * and all newer ports shouldn't need it.
- */
-asmlinkage unsigned long sys_alarm(unsigned int seconds)
-{
- struct itimerval it_new, it_old;
- unsigned int oldalarm;
-
- it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
- it_new.it_value.tv_sec = seconds;
- it_new.it_value.tv_usec = 0;
- do_setitimer(ITIMER_REAL, &it_new, &it_old);
- oldalarm = it_old.it_value.tv_sec;
- /* ehhh.. We can't return 0 if we have an alarm pending.. */
- /* And we'd better return too much than too little anyway */
- if (it_old.it_value.tv_usec)
- oldalarm++;
- return oldalarm;
-}
-
-#endif
-
#ifndef __alpha__
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
- * should be moved into arch/i386 instead?
- */
-
-asmlinkage long sys_getpid(void)
-{
- /* This is SMP safe - current->pid doesn't change */
- return current->pid;
-}
-
-/*
- * This is not strictly SMP safe: p_opptr could change
- * from under us. However, rather than getting any lock
- * we can use an optimistic algorithm: get the parent
- * pid, and go back and check that the parent is still
- * the same. If it has changed (which is extremely unlikely
- * indeed), we just try again..
- *
- * NOTE! This depends on the fact that even if we _do_
- * get an old value of "parent", we can happily dereference
- * the pointer: we just can't necessarily trust the result
- * until we know that the parent pointer is valid.
- *
- * The "mb()" macro is a memory barrier - a synchronizing
- * event. It also makes sure that gcc doesn't optimize
- * away the necessary memory references.. The barrier doesn't
- * have to have all that strong semantics: on x86 we don't
- * really require a synchronizing instruction, for example.
- * The barrier is more important for code generation than
- * for any real memory ordering semantics (even if there is
- * a small window for a race, using the old pointer is
- * harmless for a while).
- */
-asmlinkage long sys_getppid(void)
-{
- int pid;
- struct task_struct * me = current;
- struct task_struct * parent;
-
- parent = me->p_opptr;
- for (;;) {
- pid = parent->pid;
-#if __SMP__
-{
- struct task_struct *old = parent;
- mb();
- parent = me->p_opptr;
- if (old != parent)
- continue;
-}
-#endif
- break;
- }
- return pid;
-}
-
-asmlinkage long sys_getuid(void)
-{
- /* Only we change this so SMP safe */
- return current->uid;
-}
-
-asmlinkage long sys_geteuid(void)
-{
- /* Only we change this so SMP safe */
- return current->euid;
-}
-
-asmlinkage long sys_getgid(void)
-{
- /* Only we change this so SMP safe */
- return current->gid;
-}
-
-asmlinkage long sys_getegid(void)
-{
- /* Only we change this so SMP safe */
- return current->egid;
-}
-
/*
* This has been replaced by sys_setpriority. Maybe it should be
* moved into the arch dependent tree for those ports that require
return 0;
}
-asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
-{
- struct timespec t;
- unsigned long expire;
-
- if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
- return -EFAULT;
-
- if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
- return -EINVAL;
-
-
- if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
- current->policy != SCHED_OTHER)
- {
- /*
- * Short delay requests up to 2 ms will be handled with
- * high precision by a busy wait for all real-time processes.
- *
- * Its important on SMP not to do this holding locks.
- */
- udelay((t.tv_nsec + 999) / 1000);
- return 0;
- }
-
- expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-
- current->state = TASK_INTERRUPTIBLE;
- expire = schedule_timeout(expire);
-
- if (expire) {
- if (rmtp) {
- jiffies_to_timespec(expire, &t);
- if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
- return -EFAULT;
- }
- return -EINTR;
- }
- return 0;
-}
-
static void show_task(struct task_struct * p)
{
unsigned long free = 0;
--- /dev/null
+/*
+ * linux/kernel/ktimer.c
+ *
+ * Kernel internal timers, kernel timekeeping, basic process system calls
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
+ *
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ * serialize accesses to xtime/lost_ticks).
+ * Copyright (C) 1998 Andrea Arcangeli
+ * 1999-03-10 Improved NTP compatibility by Ulrich Windl
+ */
+
+#include <linux/mm.h>
+#include <linux/timex.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * Timekeeping variables
+ */
+
+long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
+
+/* The current time */
+volatile struct timeval xtime __attribute__ ((aligned (16)));
+
+/* Don't completely fail for HZ > 500. */
+int tickadj = 500/HZ ? : 1; /* microsecs */
+
+DECLARE_TASK_QUEUE(tq_timer);
+DECLARE_TASK_QUEUE(tq_immediate);
+DECLARE_TASK_QUEUE(tq_scheduler);
+
+/*
+ * phase-lock loop variables
+ */
+/* TIME_ERROR prevents overwriting the CMOS clock */
+int time_state = TIME_OK; /* clock synchronization status */
+int time_status = STA_UNSYNC; /* clock status bits */
+long time_offset = 0; /* time adjustment (us) */
+long time_constant = 2; /* pll time constant */
+long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
+long time_precision = 1; /* clock precision (us) */
+long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
+long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
+long time_phase = 0; /* phase offset (scaled us) */
+long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC;
+ /* frequency offset (scaled ppm)*/
+long time_adj = 0; /* tick adjust (scaled 1 / HZ) */
+long time_reftime = 0; /* time at last adjustment (s) */
+
+long time_adjust = 0;
+long time_adjust_step = 0;
+
+unsigned long event = 0;
+
+extern int do_setitimer(int, struct itimerval *, struct itimerval *);
+
+unsigned long volatile jiffies = 0;
+
+unsigned int * prof_buffer = NULL;
+unsigned long prof_len = 0;
+unsigned long prof_shift = 0;
+
+/*
+ * Event timer code
+ */
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+struct timer_vec {
+ int index;
+ struct timer_list *vec[TVN_SIZE];
+};
+
+struct timer_vec_root {
+ int index;
+ struct timer_list *vec[TVR_SIZE];
+};
+
+static struct timer_vec tv5 = { 0 };
+static struct timer_vec tv4 = { 0 };
+static struct timer_vec tv3 = { 0 };
+static struct timer_vec tv2 = { 0 };
+static struct timer_vec_root tv1 = { 0 };
+
+static struct timer_vec * const tvecs[] = {
+ (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
+};
+
+#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+
+static unsigned long timer_jiffies = 0;
+
+static inline void insert_timer(struct timer_list *timer,
+ struct timer_list **vec, int idx)
+{
+ if ((timer->next = vec[idx]))
+ vec[idx]->prev = timer;
+ vec[idx] = timer;
+ timer->prev = (struct timer_list *)&vec[idx];
+}
+
+static inline void internal_add_timer(struct timer_list *timer)
+{
+ /*
+ * must be cli-ed when calling this
+ */
+ unsigned long expires = timer->expires;
+ unsigned long idx = expires - timer_jiffies;
+
+ if (idx < TVR_SIZE) {
+ int i = expires & TVR_MASK;
+ insert_timer(timer, tv1.vec, i);
+ } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
+ int i = (expires >> TVR_BITS) & TVN_MASK;
+ insert_timer(timer, tv2.vec, i);
+ } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv3.vec, i);
+ } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv4.vec, i);
+ } else if ((signed long) idx < 0) {
+ /* can happen if you add a timer with expires == jiffies,
+ * or you set a timer to go off in the past
+ */
+ insert_timer(timer, tv1.vec, tv1.index);
+ } else if (idx <= 0xffffffffUL) {
+ int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv5.vec, i);
+ } else {
+ /* Can only get here on architectures with 64-bit jiffies */
+ timer->next = timer->prev = timer;
+ }
+}
+
+spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
+
+void add_timer(struct timer_list *timer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&timerlist_lock, flags);
+ if (timer->prev)
+ goto bug;
+ internal_add_timer(timer);
+out:
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+ return;
+
+bug:
+ printk("bug: kernel timer added twice at %p.\n",
+ __builtin_return_address(0));
+ goto out;
+}
+
+static inline int detach_timer(struct timer_list *timer)
+{
+ struct timer_list *prev = timer->prev;
+ if (prev) {
+ struct timer_list *next = timer->next;
+ prev->next = next;
+ if (next)
+ next->prev = prev;
+ return 1;
+ }
+ return 0;
+}
+
+void mod_timer(struct timer_list *timer, unsigned long expires)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&timerlist_lock, flags);
+ timer->expires = expires;
+ detach_timer(timer);
+ internal_add_timer(timer);
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+}
+
+int del_timer(struct timer_list * timer)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timerlist_lock, flags);
+ ret = detach_timer(timer);
+ timer->next = timer->prev = 0;
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+ return ret;
+}
+
+static inline void cascade_timers(struct timer_vec *tv)
+{
+ /* cascade all the timers from tv up one level */
+ struct timer_list *timer;
+ timer = tv->vec[tv->index];
+ /*
+ * We are removing _all_ timers from the list, so we don't have to
+ * detach them individually, just clear the list afterwards.
+ */
+ while (timer) {
+ struct timer_list *tmp = timer;
+ timer = timer->next;
+ internal_add_timer(tmp);
+ }
+ tv->vec[tv->index] = NULL;
+ tv->index = (tv->index + 1) & TVN_MASK;
+}
+
+static inline void run_timer_list(void)
+{
+ spin_lock_irq(&timerlist_lock);
+ while ((long)(jiffies - timer_jiffies) >= 0) {
+ struct timer_list *timer;
+ if (!tv1.index) {
+ int n = 1;
+ do {
+ cascade_timers(tvecs[n]);
+ } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+ }
+ while ((timer = tv1.vec[tv1.index])) {
+ void (*fn)(unsigned long) = timer->function;
+ unsigned long data = timer->data;
+ detach_timer(timer);
+ timer->next = timer->prev = NULL;
+ spin_unlock_irq(&timerlist_lock);
+ fn(data);
+ spin_lock_irq(&timerlist_lock);
+ }
+ ++timer_jiffies;
+ tv1.index = (tv1.index + 1) & TVR_MASK;
+ }
+ spin_unlock_irq(&timerlist_lock);
+}
+
+
+static inline void run_old_timers(void)
+{
+ struct timer_struct *tp;
+ unsigned long mask;
+
+ for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
+ if (mask > timer_active)
+ break;
+ if (!(mask & timer_active))
+ continue;
+ if (time_after(tp->expires, jiffies))
+ continue;
+ timer_active &= ~mask;
+ tp->fn();
+ sti();
+ }
+}
+
+spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
+
+void tqueue_bh(void)
+{
+ run_task_queue(&tq_timer);
+}
+
+void immediate_bh(void)
+{
+ run_task_queue(&tq_immediate);
+}
+
+unsigned long timer_active = 0;
+struct timer_struct timer_table[32];
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ */
+static void second_overflow(void)
+{
+ long ltemp;
+
+ /* Bump the maxerror field */
+ time_maxerror += time_tolerance >> SHIFT_USEC;
+ if ( time_maxerror > NTP_PHASE_LIMIT ) {
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_status |= STA_UNSYNC;
+ }
+
+ /*
+ * Leap second processing. If in leap-insert state at
+ * the end of the day, the system clock is set back one
+ * second; if in leap-delete state, the system clock is
+ * set ahead one second. The microtime() routine or
+ * external clock driver will ensure that reported time
+ * is always monotonic. The ugly divides should be
+ * replaced.
+ */
+ switch (time_state) {
+
+ case TIME_OK:
+ if (time_status & STA_INS)
+ time_state = TIME_INS;
+ else if (time_status & STA_DEL)
+ time_state = TIME_DEL;
+ break;
+
+ case TIME_INS:
+ if (xtime.tv_sec % 86400 == 0) {
+ xtime.tv_sec--;
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+ }
+ break;
+
+ case TIME_DEL:
+ if ((xtime.tv_sec + 1) % 86400 == 0) {
+ xtime.tv_sec++;
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+ }
+ break;
+
+ case TIME_OOP:
+ time_state = TIME_WAIT;
+ break;
+
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ }
+
+ /*
+ * Compute the phase adjustment for the next second. In
+ * PLL mode, the offset is reduced by a fixed factor
+ * times the time constant. In FLL mode the offset is
+ * used directly. In either mode, the maximum phase
+ * adjustment for each second is clamped so as to spread
+ * the adjustment over not more than the number of
+ * seconds between updates.
+ */
+ if (time_offset < 0) {
+ ltemp = -time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset += ltemp;
+ time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ } else {
+ ltemp = time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset -= ltemp;
+ time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ }
+
+ /*
+ * Compute the frequency estimate and additional phase
+ * adjustment due to frequency error for the next
+ * second. When the PPS signal is engaged, gnaw on the
+ * watchdog counter and update the frequency computed by
+ * the pll and the PPS signal.
+ */
+ pps_valid++;
+ if (pps_valid == PPS_VALID) { /* PPS signal lost */
+ pps_jitter = MAXTIME;
+ pps_stabil = MAXFREQ;
+ time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+ STA_PPSWANDER | STA_PPSERROR);
+ }
+ ltemp = time_freq + pps_freq;
+ if (ltemp < 0)
+ time_adj -= -ltemp >>
+ (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+ else
+ time_adj += ltemp >>
+ (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+
+#if HZ == 100
+ /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
+ * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
+ */
+ if (time_adj < 0)
+ time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
+ else
+ time_adj += (time_adj >> 2) + (time_adj >> 5);
+#endif
+}
+
+/* in the NTP reference this is called "hardclock()" */
+static void update_wall_time_one_tick(void)
+{
+ if ( (time_adjust_step = time_adjust) != 0 ) {
+ /* We are doing an adjtime thing.
+ *
+ * Prepare time_adjust_step to be within bounds.
+ * Note that a positive time_adjust means we want the clock
+ * to run faster.
+ *
+ * Limit the amount of the step to be in the range
+ * -tickadj .. +tickadj
+ */
+ if (time_adjust > tickadj)
+ time_adjust_step = tickadj;
+ else if (time_adjust < -tickadj)
+ time_adjust_step = -tickadj;
+
+ /* Reduce by this step the amount of time left */
+ time_adjust -= time_adjust_step;
+ }
+ xtime.tv_usec += tick + time_adjust_step;
+ /*
+ * Advance the phase, once it gets to one microsecond, then
+ * advance the tick more.
+ */
+ time_phase += time_adj;
+ if (time_phase <= -FINEUSEC) {
+ long ltemp = -time_phase >> SHIFT_SCALE;
+ time_phase += ltemp << SHIFT_SCALE;
+ xtime.tv_usec -= ltemp;
+ }
+ else if (time_phase >= FINEUSEC) {
+ long ltemp = time_phase >> SHIFT_SCALE;
+ time_phase -= ltemp << SHIFT_SCALE;
+ xtime.tv_usec += ltemp;
+ }
+}
+
+/*
+ * Using a loop looks inefficient, but "ticks" is
+ * usually just one (we shouldn't be losing ticks,
+ * we're doing it this way mainly for interrupt
+ * latency reasons, not because we think we'll
+ * have lots of lost timer ticks)
+ */
+static void update_wall_time(unsigned long ticks)
+{
+ do {
+ ticks--;
+ update_wall_time_one_tick();
+ } while (ticks);
+
+ if (xtime.tv_usec >= 1000000) {
+ xtime.tv_usec -= 1000000;
+ xtime.tv_sec++;
+ second_overflow();
+ }
+}
+
+static inline void do_process_times(struct task_struct *p,
+ unsigned long user, unsigned long system)
+{
+ unsigned long psecs;
+
+ psecs = (p->times.tms_utime += user);
+ psecs += (p->times.tms_stime += system);
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
+ /* Send SIGXCPU every second.. */
+ if (!(psecs % HZ))
+ send_sig(SIGXCPU, p, 1);
+ /* and SIGKILL when we go over max.. */
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
+ send_sig(SIGKILL, p, 1);
+ }
+}
+
+static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
+{
+ unsigned long it_virt = p->it_virt_value;
+
+ if (it_virt) {
+ if (it_virt <= ticks) {
+ it_virt = ticks + p->it_virt_incr;
+ send_sig(SIGVTALRM, p, 1);
+ }
+ p->it_virt_value = it_virt - ticks;
+ }
+}
+
+static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
+{
+ unsigned long it_prof = p->it_prof_value;
+
+ if (it_prof) {
+ if (it_prof <= ticks) {
+ it_prof = ticks + p->it_prof_incr;
+ send_sig(SIGPROF, p, 1);
+ }
+ p->it_prof_value = it_prof - ticks;
+ }
+}
+
+void update_one_process(struct task_struct *p,
+ unsigned long ticks, unsigned long user, unsigned long system, int cpu)
+{
+ p->per_cpu_utime[cpu] += user;
+ p->per_cpu_stime[cpu] += system;
+ do_process_times(p, user, system);
+ do_it_virt(p, user);
+ do_it_prof(p, ticks);
+}
+
+static void update_process_times(unsigned long ticks, unsigned long system)
+{
+/*
+ * SMP does this on a per-CPU basis elsewhere
+ */
+#ifndef __SMP__
+ struct task_struct * p = current;
+ unsigned long user = ticks - system;
+ if (p->pid) {
+ p->counter -= ticks;
+ if (p->counter <= 0) {
+ p->counter = 0;
+ p->need_resched = 1;
+ }
+ if (p->priority < DEF_PRIORITY)
+ kstat.cpu_nice += user;
+ else
+ kstat.cpu_user += user;
+ kstat.cpu_system += system;
+ }
+ update_one_process(p, ticks, user, system, 0);
+#endif
+}
+
+/*
+ * Nr of active tasks - counted in fixed-point numbers
+ */
+static unsigned long count_active_tasks(void)
+{
+ struct task_struct *p;
+ unsigned long nr = 0;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if ((p->state == TASK_RUNNING ||
+ (p->state & TASK_UNINTERRUPTIBLE) ||
+ (p->state & TASK_SWAPPING)))
+ nr += FIXED_1;
+ }
+ read_unlock(&tasklist_lock);
+ return nr;
+}
+
+/*
+ * Hmm.. Changed this, as the GNU make sources (load.c) seems to
+ * imply that avenrun[] is the standard name for this kind of thing.
+ * Nothing else seems to be standardized: the fractional size etc
+ * all seem to differ on different machines.
+ */
+unsigned long avenrun[3] = { 0,0,0 };
+
+static inline void calc_load(unsigned long ticks)
+{
+ unsigned long active_tasks; /* fixed-point */
+ static int count = LOAD_FREQ;
+
+ count -= ticks;
+ if (count < 0) {
+ count += LOAD_FREQ;
+ active_tasks = count_active_tasks();
+ CALC_LOAD(avenrun[0], EXP_1, active_tasks);
+ CALC_LOAD(avenrun[1], EXP_5, active_tasks);
+ CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+ }
+}
+
+volatile unsigned long lost_ticks = 0;
+static unsigned long lost_ticks_system = 0;
+
+/*
+ * This spinlock protects us from races in SMP while playing with xtime. -arca
+ */
+rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
+
+static inline void update_times(void)
+{
+ unsigned long ticks;
+
+ /*
+ * update_times() is run from the raw timer_bh handler so we
+ * just know that the irqs are locally enabled and so we don't
+ * need to save/restore the flags of the local CPU here. -arca
+ */
+ write_lock_irq(&xtime_lock);
+
+ ticks = lost_ticks;
+ lost_ticks = 0;
+
+ if (ticks) {
+ unsigned long system;
+ system = xchg(&lost_ticks_system, 0);
+
+ calc_load(ticks);
+ update_wall_time(ticks);
+ write_unlock_irq(&xtime_lock);
+
+ update_process_times(ticks, system);
+
+ } else
+ write_unlock_irq(&xtime_lock);
+}
+
+void timer_bh(void)
+{
+ update_times();
+ run_old_timers();
+ run_timer_list();
+}
+
+void do_timer(struct pt_regs * regs)
+{
+ (*(unsigned long *)&jiffies)++;
+ lost_ticks++;
+ mark_bh(TIMER_BH);
+ if (!user_mode(regs))
+ lost_ticks_system++;
+ if (tq_timer)
+ mark_bh(TQUEUE_BH);
+}
+
+#if !defined(__alpha__) && !defined(__ia64__)
+
+/*
+ * For backwards compatibility? This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+asmlinkage unsigned long sys_alarm(unsigned int seconds)
+{
+ struct itimerval it_new, it_old;
+ unsigned int oldalarm;
+
+ it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+ it_new.it_value.tv_sec = seconds;
+ it_new.it_value.tv_usec = 0;
+ do_setitimer(ITIMER_REAL, &it_new, &it_old);
+ oldalarm = it_old.it_value.tv_sec;
+ /* ehhh.. We can't return 0 if we have an alarm pending.. */
+ /* And we'd better return too much than too little anyway */
+ if (it_old.it_value.tv_usec)
+ oldalarm++;
+ return oldalarm;
+}
+
+#endif
+
+#ifndef __alpha__
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
+ * should be moved into arch/i386 instead?
+ */
+
+asmlinkage long sys_getpid(void)
+{
+ /* This is SMP safe - current->pid doesn't change */
+ return current->pid;
+}
+
+/*
+ * This is not strictly SMP safe: p_opptr could change
+ * from under us. However, rather than getting any lock
+ * we can use an optimistic algorithm: get the parent
+ * pid, and go back and check that the parent is still
+ * the same. If it has changed (which is extremely unlikely
+ * indeed), we just try again..
+ *
+ * NOTE! This depends on the fact that even if we _do_
+ * get an old value of "parent", we can happily dereference
+ * the pointer: we just can't necessarily trust the result
+ * until we know that the parent pointer is valid.
+ *
+ * The "mb()" macro is a memory barrier - a synchronizing
+ * event. It also makes sure that gcc doesn't optimize
+ * away the necessary memory references.. The barrier doesn't
+ * have to have all that strong semantics: on x86 we don't
+ * really require a synchronizing instruction, for example.
+ * The barrier is more important for code generation than
+ * for any real memory ordering semantics (even if there is
+ * a small window for a race, using the old pointer is
+ * harmless for a while).
+ */
+asmlinkage long sys_getppid(void)
+{
+ int pid;
+ struct task_struct * me = current;
+ struct task_struct * parent;
+
+ parent = me->p_opptr;
+ for (;;) {
+ pid = parent->pid;
+#if __SMP__
+{
+ struct task_struct *old = parent;
+ mb();
+ parent = me->p_opptr;
+ if (old != parent)
+ continue;
+}
+#endif
+ break;
+ }
+ return pid;
+}
+
+asmlinkage long sys_getuid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->uid;
+}
+
+asmlinkage long sys_geteuid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->euid;
+}
+
+asmlinkage long sys_getgid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->gid;
+}
+
+asmlinkage long sys_getegid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->egid;
+}
+
+#endif
+
+asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
+{
+ struct timespec t;
+ unsigned long expire;
+
+ if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
+ return -EFAULT;
+
+ if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
+ return -EINVAL;
+
+
+ if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
+ current->policy != SCHED_OTHER)
+ {
+ /*
+ * Short delay requests up to 2 ms will be handled with
+ * high precision by a busy wait for all real-time processes.
+ *
+ * Its important on SMP not to do this holding locks.
+ */
+ udelay((t.tv_nsec + 999) / 1000);
+ return 0;
+ }
+
+ expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
+
+ current->state = TASK_INTERRUPTIBLE;
+ expire = schedule_timeout(expire);
+
+ if (expire) {
+ if (rmtp) {
+ jiffies_to_timespec(expire, &t);
+ if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
+ return -EFAULT;
+ }
+ return -EINTR;
+ }
+ return 0;
+}
+
count++;
ClearPageReserved(page);
set_page_count(page, 1);
- if ((i+(bdata->node_boot_start >> PAGE_SHIFT)) >= (virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT))
- clear_bit(PG_DMA, &page->flags);
__free_page(page);
}
}
mpnt->vm_page_prot = area->vm_page_prot;
mpnt->vm_flags = area->vm_flags;
mpnt->vm_ops = area->vm_ops;
- mpnt->vm_pgoff = area->vm_pgoff;
- area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
+ mpnt->vm_pgoff = area->vm_pgoff + ((end - area->vm_start) >> PAGE_SHIFT);
mpnt->vm_file = area->vm_file;
mpnt->vm_private_data = area->vm_private_data;
if (mpnt->vm_file)
*/
for (p = lmem_map; p < lmem_map + totalpages; p++) {
set_page_count(p, 0);
- p->flags = (1 << PG_DMA);
SetPageReserved(p);
init_waitqueue_head(&p->wait);
memlist_init(&p->list);
if(!sk->dead) {
wake_up_interruptible(sk->sleep);
- sock_wake_async(sk->socket, 0, POLL_IN);
+ sock_wake_async(sk->socket, 0, POLL_OUT);
}
return -1;
}
void __init packet_proto_init(struct net_proto *pro)
#endif
{
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *ent;
-#endif
sock_register(&packet_family_ops);
register_netdevice_notifier(&packet_netdev_notifier);
#ifdef CONFIG_PROC_FS
#! /bin/sh
# Script to apply kernel patches.
-# usage: patch-kernel [ sourcedir [ patchdir ] ]
+# usage: patch-kernel [ sourcedir [ patchdir [ stopversion ] ] ]
# The source directory defaults to /usr/src/linux, and the patch
# directory defaults to the current directory.
#
# gzip, bzip, bzip2, zip, compress, and plaintext.
#
# Adam Sulmicki <adam@cfar.umd.edu>, 1st January 1997.
+#
+# Added ability to stop at a given version number
+# Put the full version number (e.g. 2.3.31) as the last parameter
+# Dave Gilbert <linux@treblig.org>, 11th December 1999.
# Set directories from arguments, or use defaults.
sourcedir=${1-/usr/src/linux}
patchdir=${2-.}
+stopvers=${3-imnotaversion}
# set current VERSION, PATCHLEVEL, SUBLEVEL
eval `sed -n 's/^\([A-Z]*\) = \([0-9]*\)$/\1=\2/p' $sourcedir/Makefile`
while :
do
SUBLEVEL=`expr $SUBLEVEL + 1`
- patch=patch-$VERSION.$PATCHLEVEL.$SUBLEVEL
+ FULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL"
+
+ patch=patch-$FULLVERSION
if [ -r $patchdir/${patch}.gz ]; then
ext=".gz"
name="gzip"
fi
# Remove backup files
find $sourcedir/ '(' -name '*.orig' -o -name '.*.orig' ')' -exec rm -f {} \;
+
+ if [ $stopvers = $FULLVERSION ]
+ then
+ echo "Stopping at $FULLVERSION as requested. Enjoy."
+ break
+ fi
done