git.neil.brown.name Git - history.git/commitdiff
Import 2.3.46pre4 2.3.46pre4
author Linus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:31:35 +0000 (15:31 -0500)
committer Linus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:31:35 +0000 (15:31 -0500)
49 files changed:
arch/i386/defconfig
arch/m68k/atari/stram.c
drivers/acorn/block/fd1772.c
drivers/acorn/block/mfmhd.c
drivers/block/DAC960.c
drivers/block/acsi.c
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/cpqarray.c
drivers/block/floppy.c
drivers/block/hd.c
drivers/block/ide.c
drivers/block/ll_rw_blk.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/ps2esdi.c
drivers/block/swim3.c
drivers/block/swim_iop.c
drivers/block/xd.c
drivers/cdrom/aztcd.c
drivers/cdrom/cdu31a.c
drivers/cdrom/cm206.c
drivers/cdrom/gscd.c
drivers/cdrom/mcd.c
drivers/cdrom/mcdx.c
drivers/cdrom/optcd.c
drivers/cdrom/sbpcd.c
drivers/cdrom/sjcd.c
drivers/cdrom/sonycd535.c
drivers/char/lp.c
drivers/i2o/i2o_block.c
drivers/scsi/Config.in
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_merge.c
drivers/usb/scanner.c
drivers/usb/scanner.h
drivers/usb/usb.c
drivers/usb/usb_storage.c
fs/nfsd/vfs.c
fs/open.c
include/linux/blk.h
include/linux/blkdev.h
include/linux/fs.h
include/linux/lp.h
include/linux/nbd.h

index 66f9b5ff1f63f277e8af36bba26e25f4c7fa4751..712698adba153fe45ffe52713dbab48ab3468572 100644 (file)
@@ -175,7 +175,6 @@ CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SD_EXTRA_DEVS=40
 # CONFIG_CHR_DEV_ST is not set
-CONFIG_ST_EXTRA_DEVS=2
 # CONFIG_BLK_DEV_SR is not set
 # CONFIG_CHR_DEV_SG is not set
 
index 77053421eb9276710de3d8c12d7949b80537ac95..7406a8d02e3ae4f6f3d271e92768db12801d214c 100644 (file)
@@ -1168,7 +1168,7 @@ static void do_stram_request( void )
 {
        unsigned long start, len;
 
-       while( CURRENT ) {
+       while( !QUEUE_EMPTY ) {
                if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
                        panic("stram: request list destroyed");
                if (CURRENT->bh) {
index 54ed9ac6b38a5204712fb61580d96854b320fadb..95836eb29544ef01cded04b9e955e67f5b533698 100644 (file)
@@ -591,7 +591,7 @@ static void fd_error(void)
 {
        printk("FDC1772: fd_error\n");
        /*panic("fd1772: fd_error"); *//* DAG tmp */
-       if (!CURRENT)
+       if (QUEUE_EMPTY)
                return;
        CURRENT->errors++;
        if (CURRENT->errors >= MAX_ERRORS) {
@@ -1230,14 +1230,14 @@ static void redo_fd_request(void)
 
        DPRINT(("redo_fd_request: CURRENT=%08lx CURRENT->rq_dev=%04x CURRENT->sector=%ld\n",
                (unsigned long) CURRENT, CURRENT ? CURRENT->rq_dev : 0,
-               CURRENT ? CURRENT->sector : 0));
+               !QUEUE_EMPTY ? CURRENT->sector : 0));
 
-       if (CURRENT && CURRENT->rq_status == RQ_INACTIVE)
+       if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE)
                goto the_end;
 
       repeat:
 
-       if (!CURRENT)
+       if (QUEUE_EMPTY)
                goto the_end;
 
        if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
index d30251fbb258c2828f2d3dee6960c7fa92b23315..0d65a1493734b08918e25076b12d11da9356c132 100644 (file)
@@ -758,7 +758,7 @@ static void request_done(int uptodate)
                /* No - its the end of the line */
                /* end_request's should have happened at the end of sector DMAs */
                /* Turns Drive LEDs off - may slow it down? */
-               if (!CURRENT)
+               if (QUEUE_EMPTY)
                        issue_command(CMD_CKV, block, 2);
 
                Busy = 0;
@@ -891,7 +891,7 @@ static void mfm_request(void)
 {
        DBG("mfm_request CURRENT=%p Busy=%d\n", CURRENT, Busy);
 
-       if (!CURRENT) {
+       if (QUEUE_EMPTY) {
                DBG("mfm_request: Exited due to NULL Current 1\n");
                return;
        }
@@ -918,7 +918,7 @@ static void mfm_request(void)
 
                DBG("mfm_request: before INIT_REQUEST\n");
 
-               if (!CURRENT) {
+               if (QUEUE_EMPTY) {
                        printk("mfm_request: Exiting due to !CURRENT (pre)\n");
                        CLEAR_INTR;
                        Busy = 0;
index 45e86000ae30aa123e515e1685491de35fe19c18..5a9f2216250c042152600cb661f8e2fcbe5b8c8c 100644 (file)
@@ -1009,37 +1009,58 @@ static boolean DAC960_ReportDeviceConfiguration(DAC960_Controller_T *Controller)
 }
 
 
-static int DAC_merge_fn(request_queue_t *q, struct request *req, 
-                       struct buffer_head *bh) 
+static inline int DAC_new_segment(request_queue_t *q, struct request *req,
+                                 int __max_segments)
 {
        int max_segments;
        DAC960_Controller_T * Controller = q->queuedata;
 
        max_segments = Controller->MaxSegmentsPerRequest[MINOR(req->rq_dev)];
+       if (__max_segments < max_segments)
+               max_segments = __max_segments;
 
-       if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) {
-               if (req->nr_segments < max_segments) {
-                       req->nr_segments++;
-                       return 1;
-               }
-               return 0;
+       if (req->nr_segments < max_segments) {
+               req->nr_segments++;
+               q->nr_segments++;
+               return 1;
        }
+       return 0;
+}
 
-       return 1;
+static int DAC_back_merge_fn(request_queue_t *q, struct request *req, 
+                            struct buffer_head *bh, int __max_segments)
+{
+       if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
+               return 1;
+       return DAC_new_segment(q, req, __max_segments);
+}
+
+static int DAC_front_merge_fn(request_queue_t *q, struct request *req, 
+                             struct buffer_head *bh, int __max_segments)
+{
+       if (bh->b_data + bh->b_size == req->bh->b_data)
+               return 1;
+       return DAC_new_segment(q, req, __max_segments);
 }
 
 static int DAC_merge_requests_fn(request_queue_t *q,
                                 struct request *req,
-                                struct request *next)
+                                struct request *next,
+                                int __max_segments)
 {
        int max_segments;
        DAC960_Controller_T * Controller = q->queuedata;
        int total_segments = req->nr_segments + next->nr_segments;
 
        max_segments = Controller->MaxSegmentsPerRequest[MINOR(req->rq_dev)];
+       if (__max_segments < max_segments)
+               max_segments = __max_segments;
 
        if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
+       {
                total_segments--;
+               q->nr_segments--;
+       }
     
        if (total_segments > max_segments)
                return 0;
@@ -1080,7 +1101,8 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
   q = BLK_DEFAULT_QUEUE(MajorNumber);
   blk_init_queue(q, RequestFunctions[Controller->ControllerNumber]);
   blk_queue_headactive(q, 0);
-  q->merge_fn = DAC_merge_fn;
+  q->back_merge_fn = DAC_back_merge_fn;
+  q->front_merge_fn = DAC_front_merge_fn;
   q->merge_requests_fn = DAC_merge_requests_fn;
   q->queuedata = (void *) Controller;
 
@@ -1156,7 +1178,6 @@ static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller)
   blk_size[MajorNumber] = NULL;
   blksize_size[MajorNumber] = NULL;
   max_sectors[MajorNumber] = NULL;
-  max_segments[MajorNumber] = NULL;
   /*
     Remove the Generic Disk Information structure from the list.
   */
@@ -1305,15 +1326,17 @@ static int DAC960_Finalize(NotifierBlock_T *NotifierBlock,
 static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
                                     boolean WaitForCommand)
 {
-  IO_Request_T **RequestQueuePointer =
-    &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.current_request;
+  struct list_head * queue_head;
   IO_Request_T *Request;
   DAC960_Command_T *Command;
   char *RequestBuffer;
+
+  queue_head = &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.queue_head;
   while (true)
     {
-      Request = *RequestQueuePointer;
-      if (Request == NULL || Request->rq_status == RQ_INACTIVE) return false;
+      if (list_empty(queue_head)) return false;
+      Request = blkdev_entry_next_request(queue_head);
+      if (Request->rq_status == RQ_INACTIVE) return false;
       Command = DAC960_AllocateCommand(Controller);
       if (Command != NULL) break;
       if (!WaitForCommand) return false;
@@ -1335,7 +1358,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
   Command->BufferHeader = Request->bh;
   RequestBuffer = Request->buffer;
   Request->rq_status = RQ_INACTIVE;
-  *RequestQueuePointer = Request->next;
+  blkdev_dequeue_request(Request);
   wake_up(&wait_for_request);
   if (Command->SegmentCount == 1)
     {
index ef9e3fa7c270fcd40dbcb48275638dd76eb0b83f..41578b7b2c1efd275037f4053d9baecce0e300e4 100644 (file)
@@ -769,7 +769,7 @@ static void unexpected_acsi_interrupt( void )
 static void bad_rw_intr( void )
 
 {
-       if (!CURRENT)
+       if (QUEUE_EMPTY)
                return;
 
        if (++CURRENT->errors >= MAX_ERRORS)
@@ -843,7 +843,7 @@ static void acsi_times_out( unsigned long dummy )
 
        DEVICE_INTR = NULL;
        printk( KERN_ERR "ACSI timeout\n" );
-       if (!CURRENT) return;
+       if (QUEUE_EMPTY) return;
        if (++CURRENT->errors >= MAX_ERRORS) {
 #ifdef DEBUG
                printk( KERN_ERR "ACSI: too many errors.\n" );
@@ -953,7 +953,7 @@ static void redo_acsi_request( void )
        unsigned long           pbuffer;
        struct buffer_head      *bh;
        
-       if (CURRENT && CURRENT->rq_status == RQ_INACTIVE) {
+       if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE) {
                if (!DEVICE_INTR) {
                        ENABLE_IRQ();
                        stdma_release();
@@ -969,7 +969,7 @@ static void redo_acsi_request( void )
        /* Another check here: An interrupt or timer event could have
         * happened since the last check!
         */
-       if (CURRENT && CURRENT->rq_status == RQ_INACTIVE) {
+       if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE) {
                if (!DEVICE_INTR) {
                        ENABLE_IRQ();
                        stdma_release();
@@ -979,7 +979,7 @@ static void redo_acsi_request( void )
        if (DEVICE_INTR)
                return;
 
-       if (!CURRENT) {
+       if (QUEUE_EMPTY) {
                CLEAR_INTR;
                ENABLE_IRQ();
                stdma_release();
index e6bf5fa0c1829843f37f019ee1930070b270ffd8..0c7af176e75ea75133d3f4de487ab2136146e585 100644 (file)
@@ -1385,12 +1385,12 @@ static void redo_fd_request(void)
        char *data;
        unsigned long flags;
 
-       if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+       if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE){
                return;
        }
 
  repeat:
-       if (!CURRENT) {
+       if (QUEUE_EMPTY) {
                /* Nothing left to do */
                return;
        }
index b1e20b7d3fecdd5d67cdaa8be7f07559b515b086..b7aa4241eb8c3276c8752f2187de993b6c55bae0 100644 (file)
@@ -624,7 +624,7 @@ static void fd_error( void )
                return;
        }
                
-       if (!CURRENT) return;
+       if (QUEUE_EMPTY) return;
        CURRENT->errors++;
        if (CURRENT->errors >= MAX_ERRORS) {
                printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
@@ -1450,18 +1450,18 @@ static void redo_fd_request(void)
        int device, drive, type;
   
        DPRINT(("redo_fd_request: CURRENT=%08lx CURRENT->dev=%04x CURRENT->sector=%ld\n",
-               (unsigned long)CURRENT, CURRENT ? CURRENT->rq_dev : 0,
-               CURRENT ? CURRENT->sector : 0 ));
+               (unsigned long)CURRENT, !QUEUE_EMPTY ? CURRENT->rq_dev : 0,
+               !QUEUE_EMPTY ? CURRENT->sector : 0 ));
 
        IsFormatting = 0;
 
-       if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+       if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE){
                return;
        }
 
 repeat:
     
-       if (!CURRENT)
+       if (QUEUE_EMPTY)
                goto the_end;
 
        if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
index 78269edf3a5cdb62d27a416e8d8d7ccc6ad9e12d..47291bef1065f8f0bff25fadabf3fc6601df52a7 100644 (file)
@@ -880,14 +880,16 @@ static void do_ida_request(int ctlr)
        cmdlist_t *c;
        int seg, sect;
        char *lastdataend;
-       request_queue_t * q;
+       struct list_head * queue_head;
        struct buffer_head *bh;
        struct request *creq;
 
-       q = &blk_dev[MAJOR_NR+ctlr].request_queue;
+       queue_head = &blk_dev[MAJOR_NR+ctlr].request_queue.queue_head;
 
-       creq = q->current_request;
-       if (creq == NULL || creq->rq_status == RQ_INACTIVE)
+       if (list_empty(queue_head))
+               goto doreq_done;
+       creq = blkdev_entry_next_request(queue_head);
+       if (creq->rq_status == RQ_INACTIVE)
                goto doreq_done;
 
        if (ctlr != MAJOR(creq->rq_dev)-MAJOR_NR ||
@@ -961,10 +963,9 @@ DBGPX(
                bh->b_reqnext = NULL;
 DBGPX(         printk("More to do on same request %p\n", creq); );
        } else {
-DBGPX(         printk("Done with %p, queueing %p\n", creq, creq->next); );
-               creq->rq_status = RQ_INACTIVE;
-               q->current_request = creq->next;
-               wake_up(&wait_for_request);
+DBGPX(         printk("Done with %p\n", creq); );
+               blkdev_dequeue_request(creq);
+               end_that_request_last(creq);
        }
 
        c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
index 51241640dcc4a7f46c94bad771b831506968e8c6..44abae8779c444505ef2a079eca999b0ca004f1e 100644 (file)
@@ -2259,7 +2259,7 @@ static void request_done(int uptodate)
        probing = 0;
        reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
 
-       if (!CURRENT){
+       if (QUEUE_EMPTY){
                DPRINT("request list destroyed in floppy request done\n");
                return;
        }
@@ -2273,14 +2273,14 @@ static void request_done(int uptodate)
                        DRS->maxtrack = 1;
 
                /* unlock chained buffers */
-               while (current_count_sectors && CURRENT &&
+               while (current_count_sectors && !QUEUE_EMPTY &&
                       current_count_sectors >= CURRENT->current_nr_sectors){
                        current_count_sectors -= CURRENT->current_nr_sectors;
                        CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
                        CURRENT->sector += CURRENT->current_nr_sectors;
                        end_request(1);
                }
-               if (current_count_sectors && CURRENT){
+               if (current_count_sectors && !QUEUE_EMPTY){
                        /* "unlock" last subsector */
                        CURRENT->buffer += current_count_sectors <<9;
                        CURRENT->current_nr_sectors -= current_count_sectors;
@@ -2289,7 +2289,7 @@ static void request_done(int uptodate)
                        return;
                }
 
-               if (current_count_sectors && !CURRENT)
+               if (current_count_sectors && QUEUE_EMPTY)
                        DPRINT("request list destroyed in floppy request done\n");
 
        } else {
@@ -2852,14 +2852,14 @@ static void redo_fd_request(void)
        if (current_drive < N_DRIVE)
                floppy_off(current_drive);
 
-       if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+       if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE){
                CLEAR_INTR;
                unlock_fdc();
                return;
        }
 
        while(1){
-               if (!CURRENT) {
+               if (QUEUE_EMPTY) {
                        CLEAR_INTR;
                        unlock_fdc();
                        return;
index 05a17a0c1907932e55f7f8937b5b00ea5e3393ab..111b899fe54e60be90602f73521bf075705eb2df 100644 (file)
@@ -145,7 +145,7 @@ static void dump_status (const char *msg, unsigned int stat)
        unsigned long flags;
        char devc;
 
-       devc = CURRENT ? 'a' + DEVICE_NR(CURRENT->rq_dev) : '?';
+       devc = !QUEUE_EMPTY ? 'a' + DEVICE_NR(CURRENT->rq_dev) : '?';
        save_flags (flags);
        sti();
 #ifdef VERBOSE_ERRORS
@@ -174,7 +174,7 @@ static void dump_status (const char *msg, unsigned int stat)
                if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
                        printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
                                inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
-                       if (CURRENT)
+                       if (!QUEUE_EMPTY)
                                printk(", sector=%ld", CURRENT->sector);
                }
                printk("\n");
@@ -351,7 +351,7 @@ static void bad_rw_intr(void)
 {
        int dev;
 
-       if (!CURRENT)
+       if (QUEUE_EMPTY)
                return;
        dev = DEVICE_NR(CURRENT->rq_dev);
        if (++CURRENT->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
@@ -414,7 +414,7 @@ ok_to_read:
 #if (HD_DELAY > 0)
        last_req = read_timer();
 #endif
-       if (CURRENT)
+       if (!QUEUE_EMPTY)
                hd_request();
        return;
 }
@@ -475,7 +475,7 @@ static void hd_times_out(void)
        unsigned int dev;
 
        DEVICE_INTR = NULL;
-       if (!CURRENT)
+       if (QUEUE_EMPTY)
                return;
        disable_irq(HD_IRQ);
        sti();
@@ -522,7 +522,7 @@ static void hd_request(void)
 {
        unsigned int dev, block, nsect, sec, track, head, cyl;
 
-       if (CURRENT && CURRENT->rq_status == RQ_INACTIVE) return;
+       if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE) return;
        if (DEVICE_INTR)
                return;
 repeat:
index 06e1bbcc6fc96e86a582c6a818661d58559f7436..13e2c481924c5cf11ecaada6a6339ad64340ea1f 100644 (file)
@@ -501,8 +501,7 @@ void ide_end_request (byte uptodate, ide_hwgroup_t *hwgroup)
 
        if (!end_that_request_first(rq, uptodate, hwgroup->drive->name)) {
                add_blkdev_randomness(MAJOR(rq->rq_dev));
-               hwgroup->drive->queue.current_request = rq->next;
-               blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
+               blkdev_dequeue_request(rq);
                hwgroup->rq = NULL;
                end_that_request_last(rq);
        }
@@ -772,8 +771,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err)
                }
        }
        spin_lock_irqsave(&io_request_lock, flags);
-       drive->queue.current_request = rq->next;
-       blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
+       blkdev_dequeue_request(rq);
        HWGROUP(drive)->rq = NULL;
        rq->rq_status = RQ_INACTIVE;
        spin_unlock_irqrestore(&io_request_lock, flags);
@@ -1076,7 +1074,7 @@ static ide_startstop_t start_request (ide_drive_t *drive)
 {
        ide_startstop_t startstop;
        unsigned long block, blockend;
-       struct request *rq = drive->queue.current_request;
+       struct request *rq = blkdev_entry_next_request(&drive->queue.queue_head);
        unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
        ide_hwif_t *hwif = HWIF(drive);
 
@@ -1159,13 +1157,12 @@ repeat:
        best = NULL;
        drive = hwgroup->drive;
        do {
-               if (drive->queue.current_request && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
+               if (!list_empty(&drive->queue.queue_head) && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
                        if (!best
                         || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
                         || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
                        {
-                               struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major];
-                               if( !bdev->request_queue.plugged )
+                               if( !drive->queue.plugged )
                                        best = drive;
                        }
                }
@@ -1229,7 +1226,6 @@ repeat:
  */
 static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 {
-       struct blk_dev_struct *bdev;
        ide_drive_t     *drive;
        ide_hwif_t      *hwif;
        ide_startstop_t startstop;
@@ -1246,9 +1242,6 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
                        hwgroup->rq = NULL;
                        drive = hwgroup->drive;
                        do {
-                               bdev = &blk_dev[HWIF(drive)->major];
-                               if( !bdev->request_queue.plugged )
-                                       bdev->request_queue.current_request = NULL;             /* (broken since patch-2.1.15) */
                                if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
                                        sleep = drive->sleep;
                        } while ((drive = drive->next) != hwgroup->drive);
@@ -1285,10 +1278,9 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
                drive->sleep = 0;
                drive->service_start = jiffies;
 
-               bdev = &blk_dev[hwif->major];
-               if ( bdev->request_queue.plugged )      /* FIXME: paranoia */
+               if ( drive->queue.plugged )     /* paranoia */
                        printk("%s: Huh? nuking plugged queue\n", drive->name);
-               bdev->request_queue.current_request = hwgroup->rq = drive->queue.current_request;
+               hwgroup->rq = blkdev_entry_next_request(&drive->queue.queue_head);
                /*
                 * Some systems have trouble with IDE IRQs arriving while
                 * the driver is still setting things up.  So, here we disable
@@ -1670,7 +1662,7 @@ void ide_init_drive_cmd (struct request *rq)
        rq->sem = NULL;
        rq->bh = NULL;
        rq->bhtail = NULL;
-       rq->next = NULL;
+       rq->q = NULL;
 }
 
 /*
@@ -1703,7 +1695,7 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
        unsigned long flags;
        ide_hwgroup_t *hwgroup = HWGROUP(drive);
        unsigned int major = HWIF(drive)->major;
-       struct request *cur_rq;
+       struct list_head * queue_head;
        DECLARE_MUTEX_LOCKED(sem);
 
 #ifdef CONFIG_BLK_DEV_PDC4030
@@ -1716,20 +1708,17 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
        if (action == ide_wait)
                rq->sem = &sem;
        spin_lock_irqsave(&io_request_lock, flags);
-       cur_rq = drive->queue.current_request;
-       if (cur_rq == NULL || action == ide_preempt) {
-               rq->next = cur_rq;
-               drive->queue.current_request = rq;
+       queue_head = &drive->queue.queue_head;
+       if (list_empty(queue_head) || action == ide_preempt) {
                if (action == ide_preempt)
                        hwgroup->rq = NULL;
        } else {
                if (action == ide_wait || action == ide_end) {
-                       while (cur_rq->next != NULL)    /* find end of list */
-                               cur_rq = cur_rq->next;
-               }
-               rq->next = cur_rq->next;
-               cur_rq->next = rq;
+                       queue_head = queue_head->prev;
+               } else
+                       queue_head = queue_head->next;
        }
+       list_add(&rq->queue, queue_head);
        ide_do_request(hwgroup, 0);
        spin_unlock_irqrestore(&io_request_lock, flags);
        if (action == ide_wait) {
index 783d644c4eb9bac418b2155ffbfeb9b26b5da563..976803e2bbbbadb8b9f4cd26ea0c415419723246 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
+ * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
 /*
@@ -27,6 +28,8 @@
 
 #include <linux/module.h>
 
+#define DEBUG_ELEVATOR
+
 /*
  * MAC Floppy IWM hooks
  */
@@ -147,6 +150,18 @@ request_queue_t * blk_get_queue (kdev_t dev)
        return ret;
 }
 
+static inline int get_request_latency(elevator_t * elevator, int rw)
+{
+       int latency;
+
+       if (rw != READ)
+               latency = elevator->write_latency;
+       else
+               latency = elevator->read_latency;
+
+       return latency;
+}
+
 void blk_cleanup_queue(request_queue_t * q)
 {
        memset(q, 0, sizeof(*q));
@@ -167,28 +182,44 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        q->make_request_fn = mfn;
 }
 
-static int ll_merge_fn(request_queue_t *q, struct request *req, 
-                      struct buffer_head *bh) 
+static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
 {
-       if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) {
-               if (req->nr_segments < MAX_SEGMENTS) {
-                       req->nr_segments++;
-                       return 1;
-               }
-               return 0;
+       if (req->nr_segments < max_segments) {
+               req->nr_segments++;
+               q->nr_segments++;
+               return 1;
        }
-       return 1;
+       return 0;
+}
+
+static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
+                           struct buffer_head *bh, int max_segments)
+{
+       if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
+               return 1;
+       return ll_new_segment(q, req, max_segments);
+}
+
+static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
+                            struct buffer_head *bh, int max_segments)
+{
+       if (bh->b_data + bh->b_size == req->bh->b_data)
+               return 1;
+       return ll_new_segment(q, req, max_segments);
 }
 
 static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
-                               struct request *next)
+                               struct request *next, int max_segments)
 {
        int total_segments = req->nr_segments + next->nr_segments;
 
        if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
+       {
                total_segments--;
+               q->nr_segments--;
+       }
     
-       if (total_segments > MAX_SEGMENTS)
+       if (total_segments > max_segments)
                return 0;
 
        req->nr_segments = total_segments;
@@ -197,9 +228,11 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
 
 void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 {
+       INIT_LIST_HEAD(&q->queue_head);
+       q->elevator = ELEVATOR_DEFAULTS;
        q->request_fn           = rfn;
-       q->current_request      = NULL;
-       q->merge_fn             = ll_merge_fn;
+       q->back_merge_fn        = ll_back_merge_fn;
+       q->front_merge_fn       = ll_front_merge_fn;
        q->merge_requests_fn    = ll_merge_requests_fn;
        q->make_request_fn      = NULL;
        q->plug_tq.sync         = 0;
@@ -226,11 +259,13 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
  */
 inline void generic_plug_device (request_queue_t *q, kdev_t dev)
 {
+#ifdef CONFIG_BLK_DEV_MD
        if (MAJOR(dev) == MD_MAJOR) {
                spin_unlock_irq(&io_request_lock);
                BUG();
        }
-       if (q->current_request)
+#endif
+       if (!list_empty(&q->queue_head))
                return;
 
        q->plugged = 1;
@@ -248,7 +283,7 @@ void generic_unplug_device(void * data)
        spin_lock_irqsave(&io_request_lock,flags);
        if (q->plugged) {
                q->plugged = 0;
-               if (q->current_request)
+               if (!list_empty(&q->queue_head))
                        (q->request_fn)(q);
        }
        spin_unlock_irqrestore(&io_request_lock,flags);
@@ -388,6 +423,125 @@ static inline void drive_stat_acct(struct request *req,
                printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
 }
 
+/* elevator */
+
+#define elevator_sequence_after(a,b) ((int)((b)-(a)) < 0)
+#define elevator_sequence_before(a,b) elevator_sequence_after(b,a)
+#define elevator_sequence_after_eq(a,b) ((int)((b)-(a)) <= 0)
+#define elevator_sequence_before_eq(a,b) elevator_sequence_after_eq(b,a)
+
+static inline struct list_head * seek_to_not_starving_chunk(request_queue_t * q,
+                                                           int * lat, int * starving)
+{
+       int sequence = q->elevator.sequence;
+       struct list_head * entry = q->queue_head.prev;
+       int pos = 0;
+
+       do {
+               struct request * req = blkdev_entry_to_request(entry);
+               if (elevator_sequence_before(req->elevator_sequence, sequence))
+               {
+                       *lat -= q->nr_segments - pos;
+                       *starving = 1;
+                       return entry;
+               }
+               pos += req->nr_segments;
+       } while ((entry = entry->prev) != &q->queue_head);
+
+       *starving = 0;
+
+       return entry->next;
+}
+
+static inline void elevator_merge_requests(elevator_t * e, struct request * req, struct request * next)
+{
+       if (elevator_sequence_before(next->elevator_sequence, req->elevator_sequence))
+               req->elevator_sequence = next->elevator_sequence;
+       if (req->cmd == READ)
+               e->read_pendings--;
+
+}
+
+static inline int elevator_sequence(elevator_t * e, int latency)
+{
+       return latency + e->sequence;
+}
+
+#define elevator_merge_before(q, req, lat)     __elevator_merge((q), (req), (lat), 0)
+#define elevator_merge_after(q, req, lat)      __elevator_merge((q), (req), (lat), 1)
+static inline void __elevator_merge(request_queue_t * q, struct request * req, int latency, int after)
+{
+#ifdef DEBUG_ELEVATOR
+       int sequence = elevator_sequence(&q->elevator, latency);
+       if (after)
+               sequence -= req->nr_segments;
+       if (elevator_sequence_before(sequence, req->elevator_sequence)) {
+               static int warned = 0;
+               if (!warned) {
+                       printk(KERN_WARNING __FUNCTION__
+                              ": req latency %d req latency %d\n",
+                              req->elevator_sequence - q->elevator.sequence,
+                              sequence - q->elevator.sequence);
+                       warned = 1;
+               }
+               req->elevator_sequence = sequence;
+       }
+#endif
+}
+
+static inline void elevator_queue(request_queue_t * q,
+                                 struct request * req,
+                                 struct list_head * entry,
+                                 int latency, int starving)
+{
+       struct request * tmp, * __tmp;
+       int __latency = latency;
+
+       __tmp = tmp = blkdev_entry_to_request(entry);
+
+       for (;; tmp = blkdev_next_request(tmp))
+       {
+               if ((latency -= tmp->nr_segments) <= 0)
+               {
+                       tmp = __tmp;
+                       latency = __latency;
+
+                       if (starving)
+                               break;
+
+                       if (q->head_active && !q->plugged)
+                       {
+                               latency -= tmp->nr_segments;
+                               break;
+                       }
+
+                       list_add(&req->queue, &q->queue_head);
+                       goto after_link;
+               }
+
+               if (tmp->queue.next == &q->queue_head)
+                       break;
+
+               {
+                       const int after_current = IN_ORDER(tmp,req);
+                       const int before_next = IN_ORDER(req,blkdev_next_request(tmp));
+
+                       if (!IN_ORDER(tmp,blkdev_next_request(tmp))) {
+                               if (after_current || before_next)
+                                       break;
+                       } else {
+                               if (after_current && before_next)
+                                       break;
+                       }
+               }
+       }
+
+       list_add(&req->queue, &tmp->queue);
+
+ after_link:
+       req->elevator_sequence = elevator_sequence(&q->elevator, latency);
+}
+
 /*
  * add-request adds a request to the linked list.
  * It disables interrupts (aquires the request spinlock) so that it can muck
@@ -398,32 +552,20 @@ static inline void drive_stat_acct(struct request *req,
  * which is important for drive_stat_acct() above.
  */
 
-static inline void __add_request(request_queue_t * q, struct request * req)
+static inline void __add_request(request_queue_t * q, struct request * req,
+                                int empty, struct list_head * entry,
+                                int latency, int starving)
 {
-       int major = MAJOR(req->rq_dev);
-       struct request * tmp;
+       int major;
 
        drive_stat_acct(req, req->nr_sectors, 1);
-       req->next = NULL;
 
-       if (!(tmp = q->current_request)) {
-               q->current_request = req;
+       if (empty) {
+               req->elevator_sequence = elevator_sequence(&q->elevator, latency);
+               list_add(&req->queue, &q->queue_head);
                return;
        }
-       for ( ; tmp->next ; tmp = tmp->next) {
-               const int after_current = IN_ORDER(tmp,req);
-               const int before_next = IN_ORDER(req,tmp->next);
-
-               if (!IN_ORDER(tmp,tmp->next)) {
-                       if (after_current || before_next)
-                               break;
-               } else {
-                       if (after_current && before_next)
-                               break;
-               }
-       }
-       req->next = tmp->next;
-       tmp->next = req;
+       elevator_queue(q, req, entry, latency, starving);
 
        /*
         * FIXME(eric) I don't understand why there is a need for this
@@ -432,6 +574,7 @@ static inline void __add_request(request_queue_t * q, struct request * req)
         * I am leaving this in here until I hear back from the COMPAQ
         * people.
         */
+       major = MAJOR(req->rq_dev);
        if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
        {
                (q->request_fn)(q);
@@ -448,12 +591,14 @@ static inline void __add_request(request_queue_t * q, struct request * req)
  */
 static inline void attempt_merge (request_queue_t * q,
                                  struct request *req,
-                                 int max_sectors)
+                                 int max_sectors,
+                                 int max_segments)
 {
-       struct request *next = req->next;
-
-       if (!next)
+       struct request *next;
+  
+       if (req->queue.next == &q->queue_head)
                return;
+       next = blkdev_next_request(req);
        if (req->sector + req->nr_sectors != next->sector)
                return;
        if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors)
@@ -464,25 +609,79 @@ static inline void attempt_merge (request_queue_t * q,
         * will have been updated to the appropriate number,
         * and we shouldn't do it here too.
         */
-       if(!(q->merge_requests_fn)(q, req, next))
+       if(!(q->merge_requests_fn)(q, req, next, max_segments))
                return;
 
+       elevator_merge_requests(&q->elevator, req, next);
        req->bhtail->b_reqnext = next->bh;
        req->bhtail = next->bhtail;
        req->nr_sectors += next->nr_sectors;
        next->rq_status = RQ_INACTIVE;
-       req->next = next->next;
+       list_del(&next->queue);
        wake_up (&wait_for_request);
 }
 
+static inline void elevator_debug(request_queue_t * q, kdev_t dev)
+{
+#ifdef DEBUG_ELEVATOR
+       int read_pendings = 0, nr_segments = 0;
+       elevator_t * elevator = &q->elevator;
+       struct list_head * entry = &q->queue_head;
+       static int counter;
+
+       if (counter++ % 100)
+               return;
+
+       while ((entry = entry->next) != &q->queue_head)
+       {
+               struct request * req;
+
+               req = blkdev_entry_to_request(entry);
+               if (!req->q)
+                       continue;
+               if (req->cmd == READ)
+                       read_pendings++;
+               nr_segments += req->nr_segments;
+       }
+
+       if (read_pendings != elevator->read_pendings)
+       {
+               printk(KERN_WARNING
+                      "%s: elevator read_pendings %d should be %d\n",
+                      kdevname(dev), elevator->read_pendings,
+                      read_pendings);
+               elevator->read_pendings = read_pendings;
+       }
+       if (nr_segments != q->nr_segments)
+       {
+               printk(KERN_WARNING
+                      "%s: elevator nr_segments %d should be %d\n",
+                      kdevname(dev), q->nr_segments,
+                      nr_segments);
+               q->nr_segments = nr_segments;
+       }
+#endif
+}
+
+static inline void elevator_account_request(request_queue_t * q, struct request * req)
+{
+       q->elevator.sequence++;
+       if (req->cmd == READ)
+               q->elevator.read_pendings++;
+       q->nr_segments++;
+}
+
 static inline void __make_request(request_queue_t * q, int rw,
                           struct buffer_head * bh)
 {
        int major = MAJOR(bh->b_rdev);
        unsigned int sector, count;
-       struct request * req;
+       int max_segments = MAX_SEGMENTS;
+       struct request * req, * prev;
        int rw_ahead, max_req, max_sectors;
        unsigned long flags;
+       int orig_latency, latency, __latency, starving, __starving, empty;
+       struct list_head * entry, * __entry;
 
        count = bh->b_size >> 9;
        sector = bh->b_rsector;
@@ -569,13 +768,18 @@ static inline void __make_request(request_queue_t * q, int rw,
         */
        max_sectors = get_max_sectors(bh->b_rdev);
 
+       __latency = orig_latency = get_request_latency(&q->elevator, rw);
+
        /*
         * Now we acquire the request spinlock, we have to be mega careful
         * not to schedule or do something nonatomic
         */
        spin_lock_irqsave(&io_request_lock,flags);
-       req = q->current_request;
-       if (!req) {
+       elevator_debug(q, bh->b_rdev);
+
+       empty = 0;
+       if (list_empty(&q->queue_head)) {
+               empty = 1;
                /* MD and loop can't handle plugging without deadlocking */
                if (q->plug_device_fn)
                        q->plug_device_fn(q, bh->b_rdev); /* is atomic */
@@ -584,6 +788,17 @@ static inline void __make_request(request_queue_t * q, int rw,
                goto get_rq;
        }
 
+       /* avoid write-bombs to not hurt iteractiveness of reads */
+       if (rw != READ && q->elevator.read_pendings)
+               max_segments = q->elevator.max_bomb_segments;
+
+       entry = seek_to_not_starving_chunk(q, &__latency, &starving);
+
+       __entry = entry;
+       __starving = starving;
+
+       latency = __latency;
+
        if (q->head_active && !q->plugged) {
                /*
                 * The scsi disk and cdrom drivers completely remove the request
@@ -595,11 +810,18 @@ static inline void __make_request(request_queue_t * q, int rw,
                 * entry may be busy being processed and we thus can't change
                 * it.
                 */
-               if ((req = req->next) == NULL)
-                       goto get_rq;
+               if (entry == q->queue_head.next) {
+                       latency -= blkdev_entry_to_request(entry)->nr_segments;
+                       if ((entry = entry->next) == &q->queue_head)
+                               goto get_rq;
+                       starving = 0;
+               }
        }
 
+       prev = NULL;
        do {
+               req = blkdev_entry_to_request(entry);
+
                if (req->sem)
                        continue;
                if (req->cmd != rw)
@@ -610,6 +832,8 @@ static inline void __make_request(request_queue_t * q, int rw,
                        continue;
                /* Can we add it to the end of this request? */
                if (req->sector + req->nr_sectors == sector) {
+                       if (latency - req->nr_segments < 0)
+                               break;
                        /*
                         * The merge_fn is a more advanced way
                         * of accomplishing the same task.  Instead
@@ -622,16 +846,21 @@ static inline void __make_request(request_queue_t * q, int rw,
                         * may suggest that we shouldn't merge
                         * this 
                         */
-                       if(!(q->merge_fn)(q, req, bh))
+                       if(!(q->back_merge_fn)(q, req, bh, max_segments))
                                continue;
                        req->bhtail->b_reqnext = bh;
                        req->bhtail = bh;
                        req->nr_sectors += count;
                        drive_stat_acct(req, count, 0);
+
+                       elevator_merge_after(q, req, latency);
+
                        /* Can we now merge this req with the next? */
-                       attempt_merge(q, req, max_sectors);
+                       attempt_merge(q, req, max_sectors, max_segments);
                /* or to the beginning? */
                } else if (req->sector - count == sector) {
+                       if (!prev && starving)
+                               continue;
                        /*
                         * The merge_fn is a more advanced way
                         * of accomplishing the same task.  Instead
@@ -644,7 +873,7 @@ static inline void __make_request(request_queue_t * q, int rw,
                         * may suggest that we shouldn't merge
                         * this 
                         */
-                       if(!(q->merge_fn)(q, req, bh))
+                       if(!(q->front_merge_fn)(q, req, bh, max_segments))
                                continue;
                        bh->b_reqnext = req->bh;
                        req->bh = bh;
@@ -653,13 +882,21 @@ static inline void __make_request(request_queue_t * q, int rw,
                        req->sector = sector;
                        req->nr_sectors += count;
                        drive_stat_acct(req, count, 0);
+
+                       elevator_merge_before(q, req, latency);
+
+                       if (prev)
+                               attempt_merge(q, prev, max_sectors, max_segments);
                } else
                        continue;
 
+               q->elevator.sequence++;
                spin_unlock_irqrestore(&io_request_lock,flags);
                return;
 
-       } while ((req = req->next) != NULL);
+       } while (prev = req,
+                (latency -= req->nr_segments) >= 0 &&
+                (entry = entry->next) != &q->queue_head);
 
 /* find an unused request. */
 get_rq:
@@ -675,6 +912,14 @@ get_rq:
                        goto end_io;
                req = __get_request_wait(max_req, bh->b_rdev);
                spin_lock_irqsave(&io_request_lock,flags);
+
+               /* lock got dropped so revalidate elevator */
+               empty = 1;
+               if (!list_empty(&q->queue_head)) {
+                       empty = 0;
+                       __latency = orig_latency;
+                       __entry = seek_to_not_starving_chunk(q, &__latency, &__starving);
+               }
        }
        /*
         * Dont start the IO if the buffer has been
@@ -707,8 +952,10 @@ get_rq:
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
-       req->next = NULL;
-       __add_request(q, req);
+       req->q = q;
+       __add_request(q, req, empty, __entry, __latency, __starving);
+       elevator_account_request(q, req);
+
        spin_unlock_irqrestore(&io_request_lock, flags);
        return;
 
@@ -867,6 +1114,8 @@ int end_that_request_first (struct request *req, int uptodate, char *name)
 
 void end_that_request_last(struct request *req)
 {
+       if (req->q)
+               BUG();
        if (req->sem != NULL)
                up(req->sem);
        req->rq_status = RQ_INACTIVE;
@@ -886,7 +1135,6 @@ int __init blk_dev_init(void)
        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
-               req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
        memset(max_readahead, 0, sizeof(max_readahead));
index a717929e6d01a3770b9988537b16ba26c577b8e1..122c7a9c5fcae1677b77cdc76be1da760cef5166 100644 (file)
@@ -198,8 +198,6 @@ static int lo_send(struct loop_device *lo, char *data, int len, loff_t pos,
                offset = 0;
                index++;
                pos += size;
-               if (pos > lo->lo_dentry->d_inode->i_size)
-                       lo->lo_dentry->d_inode->i_size = pos;
                UnlockPage(page);
                page_cache_release(page);
        }
@@ -277,7 +275,7 @@ static void do_lo_request(request_queue_t * q)
 repeat:
        INIT_REQUEST;
        current_request=CURRENT;
-       CURRENT=current_request->next;
+       blkdev_dequeue_request(current_request);
        if (MINOR(current_request->rq_dev) >= max_loop)
                goto error_out;
        lo = &loop_dev[MINOR(current_request->rq_dev)];
@@ -375,15 +373,13 @@ done:
        spin_lock_irq(&io_request_lock);
        current_request->sector += current_request->current_nr_sectors;
        current_request->nr_sectors -= current_request->current_nr_sectors;
-       current_request->next=CURRENT;
-       CURRENT=current_request;
+       list_add(&current_request->queue, &current_request->q->queue_head);
        end_request(1);
        goto repeat;
 error_out_lock:
        spin_lock_irq(&io_request_lock);
 error_out:
-       current_request->next=CURRENT;
-       CURRENT=current_request;
+       list_add(&current_request->queue, &current_request->q->queue_head);
        end_request(0);
        goto repeat;
 }
@@ -790,6 +786,7 @@ int __init loop_init(void)
        }               
 
        blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
        for (i=0; i < max_loop; i++) {
                memset(&loop_dev[i], 0, sizeof(struct loop_device));
                loop_dev[i].lo_number = i;
index 0efcce8ed4433b745ab6b465ea35b43ca8518f60..2eae5822144d18de7f917e8168b258a665830141 100644 (file)
@@ -184,10 +184,10 @@ struct request *nbd_read_stat(struct nbd_device *lo)
        DEBUG("reading control, ");
        reply.magic = 0;
        result = nbd_xmit(0, lo->sock, (char *) &reply, sizeof(reply));
-       req = lo->tail;
        if (result <= 0)
                HARDFAIL("Recv control failed.");
        memcpy(&xreq, reply.handle, sizeof(xreq));
+       req = blkdev_entry_prev_request(&lo->queue_head);
 
        if (xreq != req)
                FAIL("Unexpected handle received.\n");
@@ -216,47 +216,42 @@ void nbd_do_it(struct nbd_device *lo)
 {
        struct request *req;
 
-       while (1) {
+       down (&lo->queue_lock);
+       while (!list_empty(&lo->queue_head)) {
                req = nbd_read_stat(lo);
                if (!req)
-                       return;
-               down (&lo->queue_lock);
+                       goto out;
 #ifdef PARANOIA
-               if (req != lo->tail) {
+               if (req != blkdev_entry_prev_request(&lo->queue_head)) {
                        printk(KERN_ALERT "NBD: I have problem...\n");
                }
                if (lo != &nbd_dev[MINOR(req->rq_dev)]) {
                        printk(KERN_ALERT "NBD: request corrupted!\n");
-                       goto next;
+                       continue;
                }
                if (lo->magic != LO_MAGIC) {
                        printk(KERN_ALERT "NBD: nbd_dev[] corrupted: Not enough magic\n");
-                       up (&lo->queue_lock);
-                       return;
+                       goto out;
                }
 #endif
-               nbd_end_request(req);
-               if (lo->tail == lo->head) {
-#ifdef PARANOIA
-                       if (lo->tail->next)
-                               printk(KERN_ERR "NBD: I did not expect this\n");
-#endif
-                       lo->head = NULL;
-               }
-               lo->tail = lo->tail->next;
-       next:
+               list_del(&req->queue);
                up (&lo->queue_lock);
+               
+               nbd_end_request(req);
+
+               down (&lo->queue_lock);
        }
+ out:
+       up (&lo->queue_lock);
 }
 
 void nbd_clear_que(struct nbd_device *lo)
 {
        struct request *req;
+       unsigned long flags;
 
-       while (1) {
-               req = lo->tail;
-               if (!req)
-                       return;
+       while (!list_empty(&lo->queue_head)) {
+               req = blkdev_entry_prev_request(&lo->queue_head);
 #ifdef PARANOIA
                if (lo != &nbd_dev[MINOR(req->rq_dev)]) {
                        printk(KERN_ALERT "NBD: request corrupted when clearing!\n");
@@ -268,15 +263,12 @@ void nbd_clear_que(struct nbd_device *lo)
                }
 #endif
                req->errors++;
+               list_del(&req->queue);
+               up(&lo->queue_lock);
+
                nbd_end_request(req);
-               if (lo->tail == lo->head) {
-#ifdef PARANOIA
-                       if (lo->tail->next)
-                               printk(KERN_ERR "NBD: I did not assume this\n");
-#endif
-                       lo->head = NULL;
-               }
-               lo->tail = lo->tail->next;
+
+               down(&lo->queue_lock);
        }
 }
 
@@ -296,7 +288,7 @@ static void do_nbd_request(request_queue_t * q)
        int dev;
        struct nbd_device *lo;
 
-       while (CURRENT) {
+       while (!QUEUE_EMPTY) {
                req = CURRENT;
                dev = MINOR(req->rq_dev);
 #ifdef PARANOIA
@@ -314,28 +306,23 @@ static void do_nbd_request(request_queue_t * q)
                requests_in++;
 #endif
                req->errors = 0;
-               CURRENT = CURRENT->next;
-               req->next = NULL;
-
+               blkdev_dequeue_request(req);
                spin_unlock_irq(&io_request_lock);
-               down (&lo->queue_lock);
-               if (lo->head == NULL) {
-                       lo->head = req;
-                       lo->tail = req;
-               } else {
-                       lo->head->next = req;
-                       lo->head = req;
-               }
 
+               down (&lo->queue_lock);
+               list_add(&req->queue, &lo->queue_head);
                nbd_send_req(lo->sock, req);    /* Why does this block?         */
                up (&lo->queue_lock);
+
                spin_lock_irq(&io_request_lock);
                continue;
 
              error_out:
                req->errors++;
+               blkdev_dequeue_request(req);
+               spin_unlock(&io_request_lock);
                nbd_end_request(req);
-               CURRENT = CURRENT->next;
+               spin_lock(&io_request_lock);
        }
        return;
 }
@@ -359,11 +346,14 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
        lo = &nbd_dev[dev];
        switch (cmd) {
        case NBD_CLEAR_SOCK:
+               down(&lo->queue_lock);
                nbd_clear_que(lo);
-               if (lo->head || lo->tail) {
+               if (!list_empty(&lo->queue_head)) {
+                       up(&lo->queue_lock);
                        printk(KERN_ERR "nbd: Some requests are in progress -> can not turn off.\n");
                        return -EBUSY;
                }
+               up(&lo->queue_lock);
                file = lo->file;
                if (!file)
                        return -EINVAL;
@@ -415,8 +405,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
                return 0;
 #ifdef PARANOIA
        case NBD_PRINT_DEBUG:
-               printk(KERN_INFO "NBD device %d: head = %lx, tail = %lx. Global: in %d, out %d\n",
-                      dev, (long) lo->head, (long) lo->tail, requests_in, requests_out);
+               printk(KERN_INFO "NBD device %d: queue_head = %p. Global: in %d, out %d\n",
+                      dev, lo->queue_head, requests_in, requests_out);
                return 0;
 #endif
        case BLKGETSIZE:
@@ -480,6 +470,7 @@ int nbd_init(void)
        blksize_size[MAJOR_NR] = nbd_blksizes;
        blk_size[MAJOR_NR] = nbd_sizes;
        blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_nbd_request);
+       blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
        for (i = 0; i < MAX_NBD; i++) {
                nbd_dev[i].refcnt = 0;
                nbd_dev[i].file = NULL;
index 7db6626f46026ce949a653eff69c33bfb922ce53..878709944250e4e18931d2faa27435c2bbf37540 100644 (file)
@@ -756,7 +756,7 @@ static void do_pcd_request (request_queue_t * q)
 
        if (pcd_busy) return;
         while (1) {
-           if ((!CURRENT) || (CURRENT->rq_status == RQ_INACTIVE)) return;
+           if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
            INIT_REQUEST;
            if (CURRENT->cmd == READ) {
                unit = MINOR(CURRENT->rq_dev);
index 577d1354cc291fceb8b928779d4fb3146e93fc04..ffd5c1c3f37ecc97725b14012cab31b13b01f2b4 100644 (file)
@@ -868,7 +868,7 @@ static void do_pd_request (request_queue_t * q)
 
         if (pd_busy) return;
 repeat:
-        if ((!CURRENT) || (CURRENT->rq_status == RQ_INACTIVE)) return;
+        if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
         INIT_REQUEST;
 
         pd_dev = MINOR(CURRENT->rq_dev);
@@ -890,7 +890,7 @@ repeat:
        pd_cmd = CURRENT->cmd;
        pd_run = pd_count;
         while ((pd_run <= cluster) &&
-              (req = req->next) && 
+              (req = blkdev_next_request(req)) && 
               (pd_block+pd_run == req->sector) &&
               (pd_cmd == req->cmd) &&
               (pd_dev == MINOR(req->rq_dev)))
@@ -922,7 +922,7 @@ static void pd_next_buf( int unit )
        
 /* paranoia */
 
-       if ((!CURRENT) ||
+       if (QUEUE_EMPTY ||
            (CURRENT->cmd != pd_cmd) ||
            (MINOR(CURRENT->rq_dev) != pd_dev) ||
            (CURRENT->rq_status == RQ_INACTIVE) ||
index d658a0369d360bcc1d77dda277bf5b8b772e4a09..4e7a5aaf4e541bd3e643d7f414f932d39ddefa99 100644 (file)
@@ -854,7 +854,7 @@ static void do_pf_request (request_queue_t * q)
 
         if (pf_busy) return;
 repeat:
-        if ((!CURRENT) || (CURRENT->rq_status == RQ_INACTIVE)) return;
+        if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
         INIT_REQUEST;
 
         pf_unit = unit = DEVICE_NR(CURRENT->rq_dev);
@@ -874,7 +874,7 @@ repeat:
        pf_cmd = CURRENT->cmd;
        pf_run = pf_count;
         while ((pf_run <= cluster) &&
-              (req = req->next) && 
+              (req = blkdev_next_request(req)) && 
               (pf_block+pf_run == req->sector) &&
               (pf_cmd == req->cmd) &&
               (pf_unit == DEVICE_NR(req->rq_dev)))
@@ -904,7 +904,7 @@ static void pf_next_buf( int unit )
        
 /* paranoia */
 
-       if ((!CURRENT) ||
+       if (QUEUE_EMPTY ||
            (CURRENT->cmd != pf_cmd) ||
            (DEVICE_NR(CURRENT->rq_dev) != pf_unit) ||
            (CURRENT->rq_status == RQ_INACTIVE) ||
index 9f68ebbfc492983817e764c5abf3e08b3d9ad3e3..b31f6e2d92210d83337b475e496707cc05245261 100644 (file)
@@ -476,7 +476,7 @@ static void do_ps2esdi_request(request_queue_t * q)
        if (virt_to_bus(CURRENT->buffer + CURRENT->nr_sectors * 512) > 16 * MB) {
                printk("%s: DMA above 16MB not supported\n", DEVICE_NAME);
                end_request(FAIL);
-               if (CURRENT)
+               if (!QUEUE_EMPTY)
                        do_ps2esdi_request(q);
                return;
        }                       /* check for above 16Mb dmas */
@@ -510,7 +510,7 @@ static void do_ps2esdi_request(request_queue_t * q)
                default:
                        printk("%s: Unknown command\n", DEVICE_NAME);
                        end_request(FAIL);
-                       if (CURRENT)
+                       if (!QUEUE_EMPTY)
                                do_ps2esdi_request(q);
                        break;
                }               /* handle different commands */
@@ -520,7 +520,7 @@ static void do_ps2esdi_request(request_queue_t * q)
                printk("Grrr. error. ps2esdi_drives: %d, %lu %lu\n", ps2esdi_drives,
                       CURRENT->sector, ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects);
                end_request(FAIL);
-               if (CURRENT)
+               if (!QUEUE_EMPTY)
                        do_ps2esdi_request(q);
        }
 
@@ -591,7 +591,7 @@ static void ps2esdi_readwrite(int cmd, u_char drive, u_int block, u_int count)
                        return do_ps2esdi_request(NULL);
                else {
                        end_request(FAIL);
-                       if (CURRENT)
+                       if (!QUEUE_EMPTY)
                                do_ps2esdi_request(NULL);
                }
        }
@@ -894,7 +894,7 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
                                do_ps2esdi_request(NULL);
                        else {
                                end_request(FAIL);
-                               if (CURRENT)
+                               if (!QUEUE_EMPTY)
                                        do_ps2esdi_request(NULL);
                        }
                        break;
@@ -940,7 +940,7 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
                        do_ps2esdi_request(NULL);
                else {
                        end_request(FAIL);
-                       if (CURRENT)
+                       if (!QUEUE_EMPTY)
                                do_ps2esdi_request(NULL);
                }
                break;
@@ -950,7 +950,7 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
                outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
                outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
                end_request(FAIL);
-               if (CURRENT)
+               if (!QUEUE_EMPTY)
                        do_ps2esdi_request(NULL);
                break;
 
@@ -986,7 +986,7 @@ static void ps2esdi_continue_request(void)
                do_ps2esdi_request(NULL);
        } else {
                end_request(SUCCES);
-               if (CURRENT)
+               if (!QUEUE_EMPTY)
                        do_ps2esdi_request(NULL);
        }
 }
index 911bafe23855fac8cf6dede1c45dbb973e5e39c0..f38e102093bf833dc55ccc21d12065f919f50105 100644 (file)
@@ -305,7 +305,7 @@ static void start_request(struct floppy_state *fs)
                wake_up(&fs->wait);
                return;
        }
-       while (CURRENT && fs->state == idle) {
+       while (!QUEUE_EMPTY && fs->state == idle) {
                if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
                        panic(DEVICE_NAME ": request list destroyed");
                if (CURRENT->bh && !buffer_locked(CURRENT->bh))
index 467cda26d8b4991433169a58b1e5dedeed9cc8b9..d37059d388a655a03ea4286e657db1d229658b4b 100644 (file)
@@ -550,7 +550,7 @@ static void start_request(struct floppy_state *fs)
                wake_up(&fs->wait);
                return;
        }
-       while (CURRENT && fs->state == idle) {
+       while (!QUEUE_EMPTY && fs->state == idle) {
                if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
                        panic(DEVICE_NAME ": request list destroyed");
                if (CURRENT->bh && !buffer_locked(CURRENT->bh))
index aa4c3394911420ab7376e6b9514b12f4a259eaa3..2e074338976e998fe9687ab229d886f58d8b8258 100644 (file)
@@ -274,7 +274,7 @@ static void do_xd_request (request_queue_t * q)
        sti();
        if (xdc_busy)
                return;
-       while (code = 0, CURRENT) {
+       while (code = 0, !QUEUE_EMPTY) {
                INIT_REQUEST;   /* do some checking on the request structure */
 
                if (CURRENT_DEV < xd_drives
index ef9cbd3983ebcf648a20fc851b4c1083c8b7e0ec..74f24070b1eead2197acd43dad23c0e235ab5063 100644 (file)
@@ -234,7 +234,7 @@ static int aztcd_blocksizes[1] = {2048};
 #endif
 
 #define CURRENT_VALID \
-  (CURRENT && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
+  (!QUEUE_EMPTY && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
    && CURRENT -> sector != -1)
 
 #define AFL_STATUSorDATA (AFL_STATUS | AFL_DATA)
index 24e7a5c26197c09324ef4c8704afaea00f327154..d22cbd92185b1c4b2810fdb577b535137172a974 100644 (file)
@@ -1672,7 +1672,7 @@ do_cdu31a_request(request_queue_t * q)
       if (signal_pending(current))
       {
          restore_flags(flags);
-         if (CURRENT && CURRENT->rq_status != RQ_INACTIVE)
+         if (!QUEUE_EMPTY && CURRENT->rq_status != RQ_INACTIVE)
          {
             end_request(0);
          }
@@ -1705,7 +1705,7 @@ cdu31a_request_startover:
        * The beginning here is stolen from the hard disk driver.  I hope
        * it's right.
        */
-      if (!(CURRENT) || CURRENT->rq_status == RQ_INACTIVE)
+      if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE)
       {
          goto end_do_cdu31a_request;
       }
index e600d81d0d1a5c49063f7e913af5af5412cf166a..9a4d6dcf6bc515934ec1b84fa01957626353c10e 100644 (file)
@@ -816,7 +816,7 @@ static void do_cm206_request(request_queue_t * q)
   
   while(1) {    /* repeat until all requests have been satisfied */
     INIT_REQUEST;
-    if (CURRENT == NULL || CURRENT->rq_status == RQ_INACTIVE)
+    if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE)
       return;
     if (CURRENT->cmd != READ) {
       debug(("Non-read command %d on cdrom\n", CURRENT->cmd));
index 6047df0c70877e944404855daf3d0558d292717f..b5da453f30daaaae7b0ed89dd80c1f1076ca4ce9 100644 (file)
@@ -279,13 +279,13 @@ unsigned int block,dev;
 unsigned int nsect;
 
 repeat:
-       if (!(CURRENT) || CURRENT->rq_status == RQ_INACTIVE) return;
+       if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE) return;
        INIT_REQUEST;
        dev = MINOR(CURRENT->rq_dev);
        block = CURRENT->sector;
        nsect = CURRENT->nr_sectors;
 
-       if (CURRENT == NULL || CURRENT -> sector == -1)
+       if (QUEUE_EMPTY || CURRENT -> sector == -1)
                return;
 
        if (CURRENT -> cmd != READ)
index c882ae30ae040e5663ed33ec2c7d6aeca25e6406..88f57c3ce1f8dfe7948ec58faff57abaabde8693 100644 (file)
@@ -134,7 +134,7 @@ static int mcdPresent = 0;
 /* #define DOUBLE_QUICK_ONLY */
 
 #define CURRENT_VALID \
-(CURRENT && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
+(!QUEUE_EMPTY && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
 && CURRENT -> sector != -1)
 
 #define MFL_STATUSorDATA (MFL_STATUS | MFL_DATA)
index 32ae52497a1e89d09d29d7e6f3121a2664a570cf..38247218332d40ee37c3d864c67c0acf77d3394e 100644 (file)
@@ -530,7 +530,7 @@ void do_mcdx_request(request_queue_t * q)
 
   again:
 
-       if (CURRENT == NULL) {
+       if (QUEUE_EMPTY) {
                xtrace(REQUEST, "end_request(0): CURRENT == NULL\n");
                return;
        }
index b0bdc833aa036acd136ee2f05292d5e38c539eef..3bd1ce6db34716d3069aacfc998bf6b3d94419f4 100644 (file)
@@ -980,7 +980,7 @@ static int update_toc(void)
 
 
 #define CURRENT_VALID \
-       (CURRENT && MAJOR(CURRENT -> rq_dev) == MAJOR_NR \
+       (!QUEUE_EMPTY && MAJOR(CURRENT -> rq_dev) == MAJOR_NR \
         && CURRENT -> cmd == READ && CURRENT -> sector != -1)
 
 
index 42fa4d62b8744c1deafd3e87f0ee80f3733807dd..d8127febf9b5f494f346ef506a8aae740f58860f 100644 (file)
@@ -4791,9 +4791,7 @@ static void sbp_transfer(struct request *req)
  */
 #undef DEBUG_GTL
 static inline void sbpcd_end_request(struct request *req, int uptodate) {
-       req->next=CURRENT;
-       CURRENT=req;
-       up(&ioctl_read_sem);
+       list_add(&req->queue, &req->q->queue_head);
        end_request(uptodate);
 }
 /*==========================================================================*/
@@ -4815,7 +4813,7 @@ static void DO_SBPCD_REQUEST(request_queue_t * q)
 #ifdef DEBUG_GTL
        xnr=++xx_nr;
 
-       if(!CURRENT)
+       if(QUEUE_EMPTY)
        {
                printk( "do_sbpcd_request[%di](NULL), Pid:%d, Time:%li\n",
                        xnr, current->pid, jiffies);
@@ -4830,15 +4828,15 @@ static void DO_SBPCD_REQUEST(request_queue_t * q)
 #endif
        INIT_REQUEST;
        req=CURRENT;            /* take out our request so no other */
-       CURRENT=req->next;      /* task can fuck it up         GTL  */
-       spin_unlock_irq(&io_request_lock);              /* FIXME!!!! */
+       blkdev_dequeue_request(req);    /* task can fuck it up         GTL  */
        
-       down(&ioctl_read_sem);
        if (req->rq_status == RQ_INACTIVE)
                sbpcd_end_request(req, 0);
        if (req -> sector == -1)
                sbpcd_end_request(req, 0);
+       spin_unlock_irq(&io_request_lock);
 
+       down(&ioctl_read_sem);
        if (req->cmd != READ)
        {
                msg(DBG_INF, "bad cmd %d\n", req->cmd);
@@ -4875,8 +4873,9 @@ static void DO_SBPCD_REQUEST(request_queue_t * q)
                printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 2, Time:%li\n",
                        xnr, req, req->sector, req->nr_sectors, jiffies);
 #endif
+               up(&ioctl_read_sem);
+               spin_lock_irq(&io_request_lock);
                sbpcd_end_request(req, 1);
-               spin_lock_irq(&io_request_lock);                /* FIXME!!!! */
                goto request_loop;
        }
 
@@ -4915,8 +4914,9 @@ static void DO_SBPCD_REQUEST(request_queue_t * q)
                        printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n",
                                xnr, req, req->sector, req->nr_sectors, jiffies);
 #endif
+                       up(&ioctl_read_sem);
+                       spin_lock_irq(&io_request_lock);
                        sbpcd_end_request(req, 1);
-                       spin_lock_irq(&io_request_lock);        /* FIXME!!!! */
                        goto request_loop;
                }
        }
@@ -4929,9 +4929,10 @@ static void DO_SBPCD_REQUEST(request_queue_t * q)
        printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 4 (error), Time:%li\n",
                xnr, req, req->sector, req->nr_sectors, jiffies);
 #endif
-       sbpcd_end_request(req, 0);
+       up(&ioctl_read_sem);
        sbp_sleep(0);    /* wait a bit, try again */
-       spin_lock_irq(&io_request_lock);                /* FIXME!!!! */
+       spin_lock_irq(&io_request_lock);
+       sbpcd_end_request(req, 0);
        goto request_loop;
 }
 /*==========================================================================*/
@@ -5741,6 +5742,7 @@ int __init SBPCD_INIT(void)
 #endif MODULE
        }
        blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
        read_ahead[MAJOR_NR] = buffers * (CD_FRAMESIZE / 512);
        
        request_region(CDo_command,4,major_name);
index f13deeac80d2e3a8923aaae57d49ae0e5497be8a..64394fc60e151eda3ab09dbde6e716c8affcda67 100644 (file)
@@ -938,7 +938,7 @@ static void sjcd_invalidate_buffers( void ){
  */
 
 #define CURRENT_IS_VALID                                      \
-    ( CURRENT != NULL && MAJOR( CURRENT->rq_dev ) == MAJOR_NR && \
+    ( !QUEUE_EMPTY && MAJOR( CURRENT->rq_dev ) == MAJOR_NR && \
       CURRENT->cmd == READ && CURRENT->sector != -1 )
 
 static void sjcd_transfer( void ){
index a9b82571e183c4451037969d6dea4b3325f924d6..689b26a74547656c67d5d12b59fc6ebcc0261104 100644 (file)
@@ -803,7 +803,7 @@ do_cdu535_request(request_queue_t * q)
                 * The beginning here is stolen from the hard disk driver.  I hope
                 * it's right.
                 */
-               if (!(CURRENT) || CURRENT->rq_status == RQ_INACTIVE) {
+               if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE) {
                        return;
                }
                INIT_REQUEST;
index 72377382f7b1ab287df28b863a7c5763bb3315a5..0a0288410be712ef29b63559f2037745e6c27f42 100644 (file)
@@ -151,66 +151,6 @@ static unsigned int lp_count = 0;
 
 #undef LP_DEBUG
 
-/* If you want to see if you can get lp_poll working, define this. */
-#undef SUPPORT_POLL
-
-/* --- parport support ----------------------------------------- */
-
-static int lp_preempt(void *handle)
-{
-       struct lp_struct *lps = (struct lp_struct *)handle;
-
-       if (!(lps->flags & LP_PORT_BUSY)) {
-              /* Let the port go. */
-              clear_bit (LP_HAVE_PORT_BIT, &lps->flags);
-              return 0;
-       }
-
-       /* Don't actually release the port now */
-       return 1;
-}
-
-static void lp_check_data (struct lp_struct *lp)
-{
-#if !defined(CONFIG_PARPORT_1284) || !defined (SUPPORT_POLL)
-       return;
-#else
-       struct pardevice *dev = lp->dev;
-       if (!(lp->flags & LP_NO_REVERSE)) {
-               int err = parport_negotiate (dev->port, IEEE1284_MODE_NIBBLE);
-               if (err)
-                       lp->flags |= LP_NO_REVERSE;
-               else {
-                       unsigned char s = parport_read_status (dev->port);
-                       if (s & PARPORT_STATUS_ERROR)
-                               lp->flags &= ~LP_DATA_AVAIL;
-                       else {
-                               lp->flags |= LP_DATA_AVAIL;
-                               if (waitqueue_active (&lp->dataq))
-                                       wake_up_interruptible (&lp->dataq);
-                       }
-               }
-       }
-#endif /* IEEE 1284 support */
-}
-
-static void lp_parport_release (int minor)
-{
-       lp_check_data (&lp_table[minor]);
-       if (test_and_clear_bit (LP_HAVE_PORT_BIT, &lp_table[minor].flags))
-               parport_release (lp_table[minor].dev);
-
-       lp_table[minor].flags &= ~LP_PORT_BUSY;
-}
-
-static void lp_parport_claim (int minor)
-{
-       if (!test_and_set_bit (LP_HAVE_PORT_BIT, &lp_table[minor].flags))
-               parport_claim_or_block (lp_table[minor].dev);
-
-       lp_table[minor].flags |= LP_PORT_BUSY;
-}
-
 /* --- low-level port access ----------------------------------- */
 
 #define r_dtr(x)       (parport_read_data(lp_table[(x)].dev->port))
@@ -221,42 +161,15 @@ static void lp_parport_claim (int minor)
 static int lp_reset(int minor)
 {
        int retval;
-       lp_parport_claim (minor);
+       parport_claim_or_block (lp_table[minor].dev);
        w_ctr(minor, LP_PSELECP);
        udelay (LP_DELAY);
        w_ctr(minor, LP_PSELECP | LP_PINITP);
        retval = r_str(minor);
-       lp_parport_release (minor);
+       parport_release (lp_table[minor].dev);
        return retval;
 }
 
-static void lp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-       struct lp_struct *lp_dev = (struct lp_struct *) dev_id;
-       if (!(lp_dev->flags & LP_PORT_BUSY))
-               /* We must have the port since we got an interrupt. */
-               lp_check_data (lp_dev);
-       if (waitqueue_active (&lp_dev->waitq))
-               wake_up_interruptible (&lp_dev->waitq);
-}
-
-static void lp_wakeup (void *handle)
-{
-       struct lp_struct *lp_dev = handle;
-
-       if (lp_dev->flags & LP_PORT_BUSY)
-               return;
-
-       /* Grab the port if it can help (i.e. reverse mode is possible). */
-       if (!(lp_dev->flags & LP_NO_REVERSE)) {
-               parport_claim (lp_dev->dev);
-               set_bit (LP_HAVE_PORT_BIT, &lp_dev->flags);
-               lp_check_data (lp_dev);
-               if (waitqueue_active (&lp_dev->waitq))
-                       wake_up_interruptible (&lp_dev->waitq);
-       }
-}
-
 static void lp_error (int minor)
 {
        int polling;
@@ -265,10 +178,10 @@ static void lp_error (int minor)
                return;
 
        polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
-       if (polling) lp_parport_release (minor);
+       if (polling) parport_release (lp_table[minor].dev);
        interruptible_sleep_on_timeout (&lp_table[minor].waitq,
                                        LP_TIMEOUT_POLLED);
-       if (polling) lp_parport_claim (minor);
+       if (polling) parport_claim_or_block (lp_table[minor].dev);
        else parport_yield_blocking (lp_table[minor].dev);
 }
 
@@ -341,7 +254,7 @@ static ssize_t lp_write(struct file * file, const char * buf,
 
        /* Claim Parport or sleep until it becomes available
         */
-       lp_parport_claim (minor);
+       parport_claim_or_block (lp_table[minor].dev);
 
        /* Go to compatibility mode. */
        parport_negotiate (port, IEEE1284_MODE_COMPAT);
@@ -396,7 +309,7 @@ static ssize_t lp_write(struct file * file, const char * buf,
        /* Not really necessary, but polite. */
        parport_set_timeout (lp_table[minor].dev, old_to);
 
-       lp_parport_release (minor);
+       parport_release (lp_table[minor].dev);
 
        up (&lp_table[minor].port_mutex);
 
@@ -420,7 +333,7 @@ static ssize_t lp_read(struct file * file, char * buf,
        if (down_interruptible (&lp_table[minor].port_mutex))
                return -EINTR;
 
-       lp_parport_claim (minor);
+       parport_claim_or_block (lp_table[minor].dev);
 
        for (;;) {
                retval = parport_read (port, kbuf, count);
@@ -441,7 +354,7 @@ static ssize_t lp_read(struct file * file, char * buf,
                }
        }
 
-       lp_parport_release (minor);
+       parport_release (lp_table[minor].dev);
 
        if (retval > 0 && copy_to_user (buf, kbuf, retval))
                retval = -EFAULT;
@@ -473,9 +386,9 @@ static int lp_open(struct inode * inode, struct file * file)
           should most likely only ever be used by the tunelp application. */
        if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) {
                int status;
-               lp_parport_claim (minor);
+               parport_claim_or_block (lp_table[minor].dev);
                status = r_str(minor);
-               lp_parport_release (minor);
+               parport_release (lp_table[minor].dev);
                if (status & LP_POUTPA) {
                        printk(KERN_INFO "lp%d out of paper\n", minor);
                        MOD_DEC_USE_COUNT;
@@ -567,9 +480,9 @@ static int lp_ioctl(struct inode *inode, struct file *file,
                                return -EFAULT;
                        break;
                case LPGETSTATUS:
-                       lp_parport_claim(minor);
+                       parport_claim_or_block (lp_table[minor].dev);
                        status = r_str(minor);
-                       lp_parport_release(minor);
+                       parport_release (lp_table[minor].dev);
 
                        if (copy_to_user((int *) arg, &status, sizeof(int)))
                                return -EFAULT;
@@ -618,21 +531,6 @@ static int lp_ioctl(struct inode *inode, struct file *file,
        return retval;
 }
 
-#ifdef CONFIG_PARPORT_1284
-static unsigned int lp_poll (struct file *filp, struct poll_table_struct *wait)
-{
-       unsigned int minor = MINOR (filp->f_dentry->d_inode->i_rdev);
-       unsigned int mask = POLLOUT | POLLWRNORM; /* always writable */
-
-       poll_wait (filp, &lp_table[minor].dataq, wait);
-
-       if (lp_table[minor].flags & LP_DATA_AVAIL)
-               mask |= POLLIN | POLLRDNORM;
-
-       return mask;
-}
-#endif /* IEEE 1284 support */
-
 static struct file_operations lp_fops = {
        write:          lp_write,
        ioctl:          lp_ioctl,
@@ -640,7 +538,6 @@ static struct file_operations lp_fops = {
        release:        lp_release,
 #ifdef CONFIG_PARPORT_1284
        read:           lp_read,
-       poll:           lp_poll,
 #endif
 };
 
@@ -665,12 +562,9 @@ static void lp_console_write (struct console *co, const char *s,
        ssize_t written;
        signed long old_to;
 
-       if (!(lp_table[CONSOLE_LP].flags & (1<<LP_HAVE_PORT_BIT))) {
-               if (parport_claim (dev))
-                       /* Nothing we can do. */
-                       return;
-               set_bit (LP_HAVE_PORT_BIT, &lp_table[CONSOLE_LP].flags);
-       }
+       if (parport_claim (dev))
+               /* Nothing we can do. */
+               return;
 
        old_to = parport_set_timeout (dev, 0);
 
@@ -711,6 +605,7 @@ static void lp_console_write (struct console *co, const char *s,
        } while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
 
        parport_set_timeout (dev, old_to);
+       parport_release (dev);
 }
 
 static kdev_t lp_console_device (struct console *c)
@@ -782,9 +677,7 @@ void __init lp_setup(char *str, int *ints)
 static int lp_register(int nr, struct parport *port)
 {
        lp_table[nr].dev = parport_register_device(port, "lp", 
-                                                  lp_preempt, lp_wakeup,
-                                                  lp_interrupt, 
-                                                  0,
+                                                  NULL, NULL, NULL, 0,
                                                   (void *) &lp_table[nr]);
        if (lp_table[nr].dev == NULL)
                return 1;
@@ -943,8 +836,6 @@ void cleanup_module(void)
        for (offset = 0; offset < LP_NO; offset++) {
                if (lp_table[offset].dev == NULL)
                        continue;
-               if (lp_table[offset].flags & (1<<LP_HAVE_PORT_BIT))
-                       parport_release (lp_table[offset].dev);
                parport_unregister_device(lp_table[offset].dev);
        }
 }
index a0cab3015d4ea4834c14bed4ab56abc5a859363d..8afad596f9ff9ed69714f1cc41634140b3bdb202 100644 (file)
@@ -461,7 +461,7 @@ static void i2ob_request(request_queue_t * q)
        struct i2ob_device *dev;
        u32 m;
 
-       while (CURRENT) {
+       while (!QUEUE_EMPTY) {
                /*
                 *      On an IRQ completion if there is an inactive
                 *      request on the queue head it means it isnt yet
@@ -515,8 +515,7 @@ static void i2ob_request(request_queue_t * q)
                        }
                }
                req->errors = 0;
-               CURRENT = CURRENT->next;
-               req->next = NULL;
+               blkdev_dequeue_request(req);
                req->sem = NULL;
                
                ireq = i2ob_qhead;
index 3b94d70a6594cf8dc8427eeac90209e4329958d8..5695e5ef2441171819b72849483c94fd906d263f 100644 (file)
@@ -8,7 +8,7 @@ fi
 
 dep_tristate '  SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI
 
-if [ "$CONFIG_BLK_DEV_ST" != "n" ]; then
+if [ "$CONFIG_CHR_DEV_ST" != "n" ]; then
    int  'Maximum number of SCSI tapes that can be loaded as modules' CONFIG_ST_EXTRA_DEVS 2
 fi
 
index 2d86cbf95c62f9b90642fdad833caf999ce5ef89..fece6484d33d1dc8a32b263209d6c6e5142e4c4e 100644 (file)
@@ -2193,19 +2193,24 @@ static void scsi_dump_status(int level)
                        /* Now dump the request lists for each block device */
                        printk("Dump of pending block device requests\n");
                        for (i = 0; i < MAX_BLKDEV; i++) {
-                               if (blk_dev[i].request_queue.current_request) {
+                               struct list_head * queue_head;
+
+                               queue_head = &blk_dev[i].request_queue.queue_head;
+                               if (!list_empty(queue_head)) {
                                        struct request *req;
+                                       struct list_head * entry;
+
                                        printk("%d: ", i);
-                                       req = blk_dev[i].request_queue.current_request;
-                                       while (req) {
+                                       entry = queue_head->next;
+                                       do {
+                                               req = blkdev_entry_to_request(entry);
                                                printk("(%s %d %ld %ld %ld) ",
                                                   kdevname(req->rq_dev),
                                                       req->cmd,
                                                       req->sector,
                                                       req->nr_sectors,
                                                req->current_nr_sectors);
-                                               req = req->next;
-                                       }
+                                       } while ((entry = entry->next) != queue_head);
                                        printk("\n");
                                }
                        }
@@ -2220,8 +2225,6 @@ static void scsi_dump_status(int level)
 
 int init_module(void)
 {
-       unsigned long size;
-       int has_space = 0;
        struct proc_dir_entry *generic;
 
         if( scsi_init_minimal_dma_pool() != 0 )
index 9ec2fe2813c8b1b1d5e702231f8b8f8185d6c86e..7f07000894e88138b7cef9576bc871a09d772261 100644 (file)
@@ -86,6 +86,7 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
        q = &SCpnt->device->request_queue;
        SCpnt->request.cmd = SPECIAL;
        SCpnt->request.special = (void *) SCpnt;
+       SCpnt->request.q = NULL;
 
        /*
         * We have the option of inserting the head or the tail of the queue.
@@ -96,8 +97,7 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
        spin_lock_irqsave(&io_request_lock, flags);
 
        if (at_head) {
-               SCpnt->request.next = q->current_request;
-               q->current_request = &SCpnt->request;
+               list_add(&SCpnt->request.queue, &q->queue_head);
        } else {
                /*
                 * FIXME(eric) - we always insert at the tail of the
@@ -107,19 +107,7 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
                 * request might not float high enough in the queue
                 * to be scheduled.
                 */
-               SCpnt->request.next = NULL;
-               if (q->current_request == NULL) {
-                       q->current_request = &SCpnt->request;
-               } else {
-                       struct request *req;
-
-                       for (req = q->current_request; req; req = req->next) {
-                               if (req->next == NULL) {
-                                       req->next = &SCpnt->request;
-                                       break;
-                               }
-                       }
-               }
+               list_add_tail(&SCpnt->request.queue, &q->queue_head);
        }
 
        /*
@@ -239,9 +227,8 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
                 * in which case we need to request the blocks that come after
                 * the bad sector.
                 */
-               SCpnt->request.next = q->current_request;
-               q->current_request = &SCpnt->request;
                SCpnt->request.special = (void *) SCpnt;
+               list_add(&SCpnt->request.queue, &q->queue_head);
        }
 
        /*
@@ -260,7 +247,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
         * use function pointers to pick the right one.
         */
        if (SDpnt->single_lun
-           && q->current_request == NULL
+           && list_empty(&q->queue_head)
            && SDpnt->device_busy == 0) {
                request_queue_t *q;
 
@@ -849,19 +836,19 @@ void scsi_request_fn(request_queue_t * q)
                        }
                }
 
-               /*
-                * Loop through all of the requests in this queue, and find
-                * one that is queueable.
-                */
-               req = q->current_request;
-
                /*
                 * If we couldn't find a request that could be queued, then we
                 * can also quit.
                 */
-               if (!req) {
+               if (list_empty(&q->queue_head))
                        break;
-               }
+
+               /*
+                * Loop through all of the requests in this queue, and find
+                * one that is queueable.
+                */
+               req = blkdev_entry_next_request(&q->queue_head);
+
                /*
                 * Find the actual device driver associated with this command.
                 * The SPECIAL requests are things like character device or
@@ -922,8 +909,7 @@ void scsi_request_fn(request_queue_t * q)
                 * reason to search the list, because all of the commands
                 * in this queue are for the same device.
                 */
-               q->current_request = req->next;
-               SCpnt->request.next = NULL;
+               blkdev_dequeue_request(req);
 
                if (req != &SCpnt->request) {
                        memcpy(&SCpnt->request, req, sizeof(struct request));
@@ -932,7 +918,6 @@ void scsi_request_fn(request_queue_t * q)
                         * We have copied the data out of the request block - it is now in
                         * a field in SCpnt.  Release the request block.
                         */
-                       req->next = NULL;
                        req->rq_status = RQ_INACTIVE;
                        wake_up(&wait_for_request);
                }
index c9a1edb60a11d9259510334e5a615cd432d3878e..d917d93062e258402497f969de0f6a4efd33447c 100644 (file)
@@ -307,6 +307,69 @@ recount_segments(Scsi_Cmnd * SCpnt)
 (((((long)(X)->b_data+(X)->b_size)|((long)(Y)->b_data)) & \
   (DMA_CHUNK_SIZE - 1)) == 0)
 
+#ifdef DMA_CHUNK_SIZE
+static inline int scsi_new_mergeable(request_queue_t * q,
+                                    struct request * req,
+                                    struct Scsi_Host *SHpnt,
+                                    int max_segments)
+{
+       /*
+        * pci_map_sg will be able to merge these two
+        * into a single hardware sg entry, check if
+        * we'll have enough memory for the sg list.
+        * scsi.c allocates for this purpose
+        * min(64,sg_tablesize) entries.
+        */
+       if (req->nr_segments >= max_segments &&
+           req->nr_segments >= SHpnt->sg_tablesize)
+               return 0;
+       req->nr_segments++;
+       q->nr_segments++;
+       return 1;
+}
+
+static inline int scsi_new_segment(request_queue_t * q,
+                                  struct request * req,
+                                  struct Scsi_Host *SHpnt,
+                                  int max_segments)
+{
+       /*
+        * pci_map_sg won't be able to map these two
+        * into a single hardware sg entry, so we have to
+        * check if things fit into sg_tablesize.
+        */
+       if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
+           (req->nr_segments >= max_segments &&
+            req->nr_segments >= SHpnt->sg_tablesize))
+               return 0;
+       if (req->nr_segments >= max_segments)
+               return 0;
+       req->nr_hw_segments++;
+       req->nr_segments++;
+       q->nr_segments++;
+       return 1;
+}
+#else
+static inline int scsi_new_segment(request_queue_t * q,
+                                  struct request * req,
+                                  struct Scsi_Host *SHpnt,
+                                  int max_segments)
+{
+       if (req->nr_segments < SHpnt->sg_tablesize &&
+           req->nr_segments < max_segments) {
+               /*
+                * This will form the start of a new segment.  Bump the 
+                * counter.
+                */
+               req->nr_segments++;
+               q->nr_segments++;
+               return 1;
+       } else {
+               return 0;
+       }
+}
+#endif
+
 /*
  * Function:    __scsi_merge_fn()
  *
@@ -340,13 +403,14 @@ recount_segments(Scsi_Cmnd * SCpnt)
  *              than to have 4 separate functions all doing roughly the
  *              same thing.
  */
-__inline static int __scsi_merge_fn(request_queue_t * q,
-                                   struct request *req,
-                                   struct buffer_head *bh,
-                                   int use_clustering,
-                                   int dma_host)
+__inline static int __scsi_back_merge_fn(request_queue_t * q,
+                                        struct request *req,
+                                        struct buffer_head *bh,
+                                        int max_segments,
+                                        int use_clustering,
+                                        int dma_host)
 {
-       unsigned int sector, count;
+       unsigned int count;
        unsigned int segment_size = 0;
        Scsi_Device *SDpnt;
        struct Scsi_Host *SHpnt;
@@ -354,130 +418,97 @@ __inline static int __scsi_merge_fn(request_queue_t * q,
        SDpnt = (Scsi_Device *) q->queuedata;
        SHpnt = SDpnt->host;
 
-       count = bh->b_size >> 9;
-       sector = bh->b_rsector;
+       if (max_segments > 64)
+               max_segments = 64;
 
-       /*
-        * We come in here in one of two cases.   The first is that we
-        * are checking to see if we can add the buffer to the end of the
-        * request, the other is to see if we should add the request to the
-        * start.
-        */
-       if (req->sector + req->nr_sectors == sector) {
-               if (use_clustering) {
-                       /* 
-                        * See if we can do this without creating another
-                        * scatter-gather segment.  In the event that this is a
-                        * DMA capable host, make sure that a segment doesn't span
-                        * the DMA threshold boundary.  
-                        */
-                       if (dma_host &&
-                           virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
-                               goto new_end_segment;
-                       }
-                       if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
+       if (use_clustering) {
+               /* 
+                * See if we can do this without creating another
+                * scatter-gather segment.  In the event that this is a
+                * DMA capable host, make sure that a segment doesn't span
+                * the DMA threshold boundary.  
+                */
+               if (dma_host &&
+                   virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
+                       goto new_end_segment;
+               }
+               if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
 #ifdef DMA_SEGMENT_SIZE_LIMITED
-                               if( dma_host
-                                   && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
-                                       segment_size = 0;
-                                       count = __count_segments(req, use_clustering, dma_host, &segment_size);
-                                       if( segment_size + bh->b_size > PAGE_SIZE ) {
-                                               goto new_end_segment;
-                                       }
+                       if( dma_host
+                           && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+                               segment_size = 0;
+                               count = __count_segments(req, use_clustering, dma_host, &segment_size);
+                               if( segment_size + bh->b_size > PAGE_SIZE ) {
+                                       goto new_end_segment;
                                }
-#endif
-                               /*
-                                * This one is OK.  Let it go.
-                                */
-                               return 1;
                        }
+#endif
+                       /*
+                        * This one is OK.  Let it go.
+                        */
+                       return 1;
                }
-             new_end_segment:
+       }
+ new_end_segment:
 #ifdef DMA_CHUNK_SIZE
-               if (MERGEABLE_BUFFERS(req->bhtail, bh))
-                       goto new_mergeable;
+       if (MERGEABLE_BUFFERS(req->bhtail, bh))
+               return scsi_new_mergeable(q, req, SHpnt, max_segments);
 #endif
-               goto new_segment;
-       } else {
-               if (req->sector - count != sector) {
-                       /* Attempt to merge sector that doesn't belong */
-                       BUG();
+       return scsi_new_segment(q, req, SHpnt, max_segments);
+}
+
+__inline static int __scsi_front_merge_fn(request_queue_t * q,
+                                         struct request *req,
+                                         struct buffer_head *bh,
+                                         int max_segments,
+                                         int use_clustering,
+                                         int dma_host)
+{
+       unsigned int count;
+       unsigned int segment_size = 0;
+       Scsi_Device *SDpnt;
+       struct Scsi_Host *SHpnt;
+
+       SDpnt = (Scsi_Device *) q->queuedata;
+       SHpnt = SDpnt->host;
+
+       if (max_segments > 64)
+               max_segments = 64;
+
+       if (use_clustering) {
+               /* 
+                * See if we can do this without creating another
+                * scatter-gather segment.  In the event that this is a
+                * DMA capable host, make sure that a segment doesn't span
+                * the DMA threshold boundary. 
+                */
+               if (dma_host &&
+                   virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
+                       goto new_start_segment;
                }
-               if (use_clustering) {
-                       /* 
-                        * See if we can do this without creating another
-                        * scatter-gather segment.  In the event that this is a
-                        * DMA capable host, make sure that a segment doesn't span
-                        * the DMA threshold boundary. 
-                        */
-                       if (dma_host &&
-                           virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
-                               goto new_start_segment;
-                       }
-                       if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
+               if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
 #ifdef DMA_SEGMENT_SIZE_LIMITED
-                               if( dma_host
-                                   && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
-                                       segment_size = bh->b_size;
-                                       count = __count_segments(req, use_clustering, dma_host, &segment_size);
-                                       if( count != req->nr_segments ) {
-                                               goto new_start_segment;
-                                       }
+                       if( dma_host
+                           && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+                               segment_size = bh->b_size;
+                               count = __count_segments(req, use_clustering, dma_host, &segment_size);
+                               if( count != req->nr_segments ) {
+                                       goto new_start_segment;
                                }
-#endif
-                               /*
-                                * This one is OK.  Let it go.
-                                */
-                               return 1;
                        }
-               }
-             new_start_segment:
-#ifdef DMA_CHUNK_SIZE
-               if (MERGEABLE_BUFFERS(bh, req->bh))
-                       goto new_mergeable;
 #endif
-               goto new_segment;
+                       /*
+                        * This one is OK.  Let it go.
+                        */
+                       return 1;
+               }
        }
+ new_start_segment:
 #ifdef DMA_CHUNK_SIZE
-      new_mergeable:
-       /*
-        * pci_map_sg will be able to merge these two
-        * into a single hardware sg entry, check if
-        * we'll have enough memory for the sg list.
-        * scsi.c allocates for this purpose
-        * min(64,sg_tablesize) entries.
-        */
-       if (req->nr_segments >= 64 &&
-           req->nr_segments >= SHpnt->sg_tablesize)
-               return 0;
-       req->nr_segments++;
-       return 1;
-      new_segment:
-       /*
-        * pci_map_sg won't be able to map these two
-        * into a single hardware sg entry, so we have to
-        * check if things fit into sg_tablesize.
-        */
-       if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
-           (req->nr_segments >= 64 &&
-            req->nr_segments >= SHpnt->sg_tablesize))
-               return 0;
-       req->nr_hw_segments++;
-       req->nr_segments++;
-       return 1;
-#else
-      new_segment:
-       if (req->nr_segments < SHpnt->sg_tablesize) {
-               /*
-                * This will form the start of a new segment.  Bump the 
-                * counter.
-                */
-               req->nr_segments++;
-               return 1;
-       } else {
-               return 0;
-       }
+       if (MERGEABLE_BUFFERS(bh, req->bh))
+               return scsi_new_mergeable(q, req, SHpnt, max_segments);
 #endif
+       return scsi_new_segment(q, req, SHpnt, max_segments);
 }
 
 /*
@@ -497,23 +528,34 @@ __inline static int __scsi_merge_fn(request_queue_t * q,
  * Notes:       Optimized for different cases depending upon whether
  *              ISA DMA is in use and whether clustering should be used.
  */
-#define MERGEFCT(_FUNCTION, _CLUSTER, _DMA)            \
-static int _FUNCTION(request_queue_t * q,              \
-              struct request * req,                    \
-              struct buffer_head * bh)                 \
-{                                                      \
-    int ret;                                           \
-    SANITY_CHECK(req, _CLUSTER, _DMA);                 \
-    ret =  __scsi_merge_fn(q, req, bh, _CLUSTER, _DMA); \
-    return ret;                                                \
+#define MERGEFCT(_FUNCTION, _BACK_FRONT, _CLUSTER, _DMA)               \
+static int _FUNCTION(request_queue_t * q,                              \
+                    struct request * req,                              \
+                    struct buffer_head * bh,                           \
+                    int max_segments)                                  \
+{                                                                      \
+    int ret;                                                           \
+    SANITY_CHECK(req, _CLUSTER, _DMA);                                 \
+    ret =  __scsi_ ## _BACK_FRONT ## _merge_fn(q,                      \
+                                              req,                     \
+                                              bh,                      \
+                                              max_segments,            \
+                                              _CLUSTER,                \
+                                              _DMA);                   \
+    return ret;                                                                \
 }
 
 /* Version with use_clustering 0 and dma_host 1 is not necessary,
  * since the only use of dma_host above is protected by use_clustering.
  */
-MERGEFCT(scsi_merge_fn_, 0, 0)
-MERGEFCT(scsi_merge_fn_c, 1, 0)
-MERGEFCT(scsi_merge_fn_dc, 1, 1)
+MERGEFCT(scsi_back_merge_fn_, back, 0, 0)
+MERGEFCT(scsi_back_merge_fn_c, back, 1, 0)
+MERGEFCT(scsi_back_merge_fn_dc, back, 1, 1)
+
+MERGEFCT(scsi_front_merge_fn_, front, 0, 0)
+MERGEFCT(scsi_front_merge_fn_c, front, 1, 0)
+MERGEFCT(scsi_front_merge_fn_dc, front, 1, 1)
+
 /*
  * Function:    __scsi_merge_requests_fn()
  *
@@ -550,6 +592,7 @@ MERGEFCT(scsi_merge_fn_dc, 1, 1)
 __inline static int __scsi_merge_requests_fn(request_queue_t * q,
                                             struct request *req,
                                             struct request *next,
+                                            int max_segments,
                                             int use_clustering,
                                             int dma_host)
 {
@@ -559,11 +602,14 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
        SDpnt = (Scsi_Device *) q->queuedata;
        SHpnt = SDpnt->host;
 
+       if (max_segments > 64)
+               max_segments = 64;
+
 #ifdef DMA_CHUNK_SIZE
        /* If it would not fit into prepared memory space for sg chain,
         * then don't allow the merge.
         */
-       if (req->nr_segments + next->nr_segments - 1 > 64 &&
+       if (req->nr_segments + next->nr_segments - 1 > max_segments &&
            req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) {
                return 0;
        }
@@ -619,6 +665,7 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
                         * This one is OK.  Let it go.
                         */
                        req->nr_segments += next->nr_segments - 1;
+                       q->nr_segments--;
 #ifdef DMA_CHUNK_SIZE
                        req->nr_hw_segments += next->nr_hw_segments - 1;
 #endif
@@ -627,7 +674,7 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
        }
       dont_combine:
 #ifdef DMA_CHUNK_SIZE
-       if (req->nr_segments + next->nr_segments > 64 &&
+       if (req->nr_segments + next->nr_segments > max_segments &&
            req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
                return 0;
        }
@@ -650,7 +697,8 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
         * Make sure we can fix something that is the sum of the two.
         * A slightly stricter test than we had above.
         */
-       if (req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
+       if (req->nr_segments + next->nr_segments > max_segments &&
+           req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
                return 0;
        } else {
                /*
@@ -683,11 +731,12 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
 #define MERGEREQFCT(_FUNCTION, _CLUSTER, _DMA)         \
 static int _FUNCTION(request_queue_t * q,              \
                     struct request * req,              \
-                    struct request * next)             \
+                    struct request * next,             \
+                    int max_segments)                  \
 {                                                      \
     int ret;                                           \
     SANITY_CHECK(req, _CLUSTER, _DMA);                 \
-    ret =  __scsi_merge_requests_fn(q, req, next, _CLUSTER, _DMA); \
+    ret =  __scsi_merge_requests_fn(q, req, next, max_segments, _CLUSTER, _DMA); \
     return ret;                                                \
 }
 
@@ -1068,7 +1117,7 @@ void initialize_merge_fn(Scsi_Device * SDpnt)
         * pick a new one.
         */
 #if 0
-       if (q->merge_fn != NULL)
+       if (q->back_merge_fn && q->front_merge_fn)
                return;
 #endif
        /*
@@ -1083,19 +1132,23 @@ void initialize_merge_fn(Scsi_Device * SDpnt)
         * rather than rely upon the default behavior of ll_rw_blk.
         */
        if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
-               q->merge_fn = scsi_merge_fn_;
+               q->back_merge_fn = scsi_back_merge_fn_;
+               q->front_merge_fn = scsi_front_merge_fn_;
                q->merge_requests_fn = scsi_merge_requests_fn_;
                SDpnt->scsi_init_io_fn = scsi_init_io_v;
        } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
-               q->merge_fn = scsi_merge_fn_;
+               q->back_merge_fn = scsi_back_merge_fn_;
+               q->front_merge_fn = scsi_front_merge_fn_;
                q->merge_requests_fn = scsi_merge_requests_fn_;
                SDpnt->scsi_init_io_fn = scsi_init_io_vd;
        } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
-               q->merge_fn = scsi_merge_fn_c;
+               q->back_merge_fn = scsi_back_merge_fn_c;
+               q->front_merge_fn = scsi_front_merge_fn_c;
                q->merge_requests_fn = scsi_merge_requests_fn_c;
                SDpnt->scsi_init_io_fn = scsi_init_io_vc;
        } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
-               q->merge_fn = scsi_merge_fn_dc;
+               q->back_merge_fn = scsi_back_merge_fn_dc;
+               q->front_merge_fn = scsi_front_merge_fn_dc;
                q->merge_requests_fn = scsi_merge_requests_fn_dc;
                SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
        }
index b5cd41d857cd11b5a242f6f0109be0e7044ef7d5..d55e54206fb9dfd8f9cd545c1274c8b086a8ebd3 100644 (file)
  *    - Added Microtek X6 ID's. Thanks to Oliver Neukum
  *      <Oliver.Neukum@lrz.uni-muenchen.de>.
  *
+ * 
+ *  0.4.1  2/15/2000
+ *  
+ *    - Fixed 'count' bug in read_scanner(). Thanks to Henrik
+ *      Johansson <henrikjo@post.utfors.se> for identifying it.  Amazing
+ *      it has worked this long.
+ *    - Fixed '>=' bug in both read/write_scanner methods.
+ *    - Cleaned up both read/write_scanner() methods so that they are 
+ *      a little more readable.
+ *    - Added a lot of Microtek ID's.  Thanks to Adrian Perez Jorge.
+ *    - Adopted the __initcall().
+ *    - Added #include <linux/init.h> to scanner.h for __initcall().
+ *    - Added one liner in irq_scanner() to keep gcc from complaining 
+ *      about an unused variable (data) if debugging was disabled
+ *      in scanner.c.
+ *    - Increased the timeout parameter in read_scanner() to 120 Secs.
+ *
  *
  *  TODO
  *
  *    - Select/poll methods
+ *    - More testing
+ *    - Proper registry/assignment for LM9830 ioctl's
  *
  *
  *  Thanks to:
  *    - To Linus Torvalds for this great OS.
  *    - The GNU folks.
  *    - The folks that forwarded Vendor:Product ID's to me.
+ *    - Johannes Erdfelt for the loaning of a USB analyzer for tracking an
+ *      issue with HP-4100 and uhci.
+ *    - Adolfo Montero for his assistance.
  *    - And anybody else who chimed in with reports and suggestions.
  *
  *  Performance:
@@ -167,6 +189,7 @@ irq_scanner(struct urb *urb)
 
        struct scn_usb_data *scn = urb->context;
        unsigned char *data = &scn->button;
+       data += 0;              /* Keep gcc from complaining about unused var */
 
        if (urb->status) {
                return;
@@ -253,11 +276,11 @@ write_scanner(struct file * file, const char * buffer,
        struct scn_usb_data *scn;
        struct usb_device *dev;
        
-       ssize_t bytes_written = 0;
+       ssize_t bytes_written = 0; /* Overall count of bytes written */
        ssize_t ret = 0;
 
-       int copy_size;
-       int partial;
+       int this_write;         /* Number of bytes to write */
+       int partial;            /* Number of bytes successfully written */
        int result = 0;
        
        char *obuf;
@@ -275,15 +298,15 @@ write_scanner(struct file * file, const char * buffer,
                        break;
                }
 
-               copy_size = (count > OBUF_SIZE) ? OBUF_SIZE : count;
+               this_write = (count >= OBUF_SIZE) ? OBUF_SIZE : count;
                
-               if (copy_from_user(scn->obuf, buffer, copy_size)) {
+               if (copy_from_user(scn->obuf, buffer, this_write)) {
                        ret = -EFAULT;
                        break;
                }
 
-               result = usb_bulk_msg(dev,usb_sndbulkpipe(dev, scn->bulk_out_ep), obuf, copy_size, &partial, 60*HZ);
-               dbg("write stats(%d): result:%d copy_size:%d partial:%d", scn->scn_minor, result, copy_size, partial);
+               result = usb_bulk_msg(dev,usb_sndbulkpipe(dev, scn->bulk_out_ep), obuf, this_write, &partial, 60*HZ);
+               dbg("write stats(%d): result:%d this_write:%d partial:%d", scn->scn_minor, result, this_write, partial);
 
                if (result == USB_ST_TIMEOUT) { /* NAK -- shouldn't happen */
                        warn("write_scanner: NAK recieved.");
@@ -306,7 +329,7 @@ write_scanner(struct file * file, const char * buffer,
                        printk("\n");
                }
 #endif
-               if (partial != copy_size) { /* Unable to write complete amount */
+               if (partial != this_write) { /* Unable to write all contents of obuf */
                        ret = -EIO;
                        break;
                }
@@ -332,10 +355,11 @@ read_scanner(struct file * file, char * buffer,
        struct scn_usb_data *scn;
        struct usb_device *dev;
 
-       ssize_t read_count, ret = 0;
+       ssize_t bytes_read = 0; /* Overall count of bytes_read */
+       ssize_t ret = 0;
 
-       int partial;
-       int this_read;
+       int partial;            /* Number of bytes successfully read */
+       int this_read;          /* Max number of bytes to read */
        int result;
 
        char *ibuf;
@@ -346,7 +370,7 @@ read_scanner(struct file * file, char * buffer,
 
        dev = scn->scn_dev;
 
-       read_count = 0;
+       bytes_read = 0;
 
        while (count) {
                if (signal_pending(current)) {
@@ -354,9 +378,9 @@ read_scanner(struct file * file, char * buffer,
                        break;
                }
 
-               this_read = (count > IBUF_SIZE) ? IBUF_SIZE : count;
+               this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
                
-               result = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, scn->bulk_in_ep), ibuf, this_read, &partial, 60*HZ);
+               result = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, scn->bulk_in_ep), ibuf, this_read, &partial, 120*HZ);
                dbg("read stats(%d): result:%d this_read:%d partial:%d", scn->scn_minor, result, this_read, partial);
 
                if (result == USB_ST_TIMEOUT) { /* NAK -- shouldn't happen */
@@ -382,24 +406,22 @@ read_scanner(struct file * file, char * buffer,
 #endif
 
                if (partial) { /* Data returned */
-                       count = this_read = partial;
-               } else {
-                       ret = 0;
-                       read_count = 0;
-                       break;
-               }
-
-               if (this_read) {
                        if (copy_to_user(buffer, ibuf, this_read)) {
                                ret = -EFAULT;
                                break;
                        }
-                       count -= this_read;
-                       read_count += this_read;
-                       buffer += this_read;
+                       count -= partial;
+                       bytes_read += partial;
+                       buffer += partial;
+                       
+               } else {
+                       ret = 0;
+                       break;
                }
+               
        }
-       return ret ? ret : read_count;
+       
+       return ret ? ret : bytes_read;
 }
 
 static void *
@@ -442,7 +464,8 @@ probe_scanner(struct usb_device *dev, unsigned int ifnum)
  * that this will allow developers a means to produce applications
  * that will support USB products.
  *
- * Until we detect a device which is pleasing, we silently punt.  */
+ * Until we detect a device which is pleasing, we silently punt.
+ */
 
        do {
                if (dev->descriptor.idVendor == 0x03f0) {          /* Hewlett Packard */
@@ -460,6 +483,7 @@ probe_scanner(struct usb_device *dev, unsigned int ifnum)
                
                if (dev->descriptor.idVendor == 0x06bd) {          /* Agfa */
                        if (dev->descriptor.idProduct == 0x0001 || /* SnapScan 1212U */
+                           dev->descriptor.idProduct == 0x2061 || /* Another SnapScan 1212U (?) */
                            dev->descriptor.idProduct == 0x0100) { /* SnapScan Touch */
                                valid_device = 1;
                                break;
@@ -490,7 +514,13 @@ probe_scanner(struct usb_device *dev, unsigned int ifnum)
                }
 
                if (dev->descriptor.idVendor == 0x05da) {          /* Microtek */
-                       if (dev->descriptor.idProduct == 0x0099) { /* X6 */
+                       if (dev->descriptor.idProduct == 0x0099 || /* ScanMaker X6 - X6U */
+                           dev->descriptor.idProduct == 0x0094 || /* Phantom 336CX - C3 */
+                           dev->descriptor.idProduct == 0x00a0 || /* Phantom 336CX - C3 #2 */
+                           dev->descriptor.idProduct == 0x009a || /* Phantom C6 */
+                           dev->descriptor.idProduct == 0x00a3 || /* ScanMaker V6USL */
+                           dev->descriptor.idProduct == 0x80a3 || /* ScanMaker V6USL #2 */
+                           dev->descriptor.idProduct == 0x80ac) { /* ScanMaker V6UL - SpicyU */
                                valid_device = 1;
                                break;
                        }
@@ -763,11 +793,18 @@ ioctl_scanner(struct inode *inode, struct file *file,
 
 static struct
 file_operations usb_scanner_fops = {
-       read:           read_scanner,
-       write:          write_scanner,
-       ioctl:          ioctl_scanner,
-       open:           open_scanner,
-       release:        close_scanner,
+       NULL,           /* seek */
+       read_scanner,
+       write_scanner,
+       NULL,           /* readdir */
+       NULL,           /* poll */
+       ioctl_scanner,
+       NULL,           /* mmap */
+       open_scanner,
+       NULL,           /* flush */
+       close_scanner,
+       NULL,         
+       NULL,           /* fasync */
 };
 
 static struct
@@ -780,8 +817,15 @@ usb_driver scanner_driver = {
        SCN_BASE_MNR
 };
 
-int
-usb_scanner_init(void)
+#ifdef MODULE
+void cleanup_module(void)
+{
+       usb_deregister(&scanner_driver);
+}
+int init_module(void)
+#else
+int usb_scanner_init(void)
+#endif
 {
         if (usb_register(&scanner_driver) < 0)
                 return -1;
@@ -790,18 +834,4 @@ usb_scanner_init(void)
        return 0;
 }
 
-#ifdef MODULE
-
-int
-init_module(void)
-{
-       return usb_scanner_init();
-}
-
-void
-cleanup_module(void)
-{
-       usb_deregister(&scanner_driver);
-}
-#endif
-
+__initcall(usb_scanner_init);
index b1bac60187be68dc73094b2e2266ac36d0491bdd..9ba1bf0d577ffdd4b3f231bcac7dce7b8cfd4daf 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <asm/uaccess.h>
+#include <linux/init.h>
 #include <linux/malloc.h>
 #include <linux/delay.h>
 #include <linux/ioctl.h>
@@ -10,6 +11,7 @@
 
 #include "usb.h"
 
+/* WARNING: These DATA_DUMP's can produce a lot of data. Caveat Emptor. */
 // #define RD_DATA_DUMP /* Enable to dump data - limited to 24 bytes */
 // #define WR_DATA_DUMP /* DEBUG does not have to be defined. */
 
index e8981965950c81bd37dcf993a3b1560ef5c30857..ebd64b6c553a7a9cf3752de55c57ed8ccbe4b931 100644 (file)
@@ -588,7 +588,7 @@ static int usb_start_wait_urb(urb_t *urb, int timeout, int* actual_length)
 }
 
 /*-------------------------------------------------------------------*/
-// returns status (negative) are length (positive)
+// returns status (negative) or length (positive)
 int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, 
                            devrequest *cmd,  void *data, int len, int timeout)
 {
index ecff62a7d70314c082ae3288f01ac8d30c3b0934..eea25b9a0c558947d74e61e5462d26ae9d5d1b80 100644 (file)
@@ -1021,7 +1021,7 @@ static int Bulk_transport(Scsi_Cmnd *srb, struct us_data *us)
                  bcb.Flags, bcb.Length);
        result = usb_bulk_msg(us->pusb_dev, pipe, &bcb,
                              US_BULK_CB_WRAP_LEN, &partial, HZ*5);
-       US_DEBUGP("Bulk command transfer result 0x%x\n", result);
+       US_DEBUGP("Bulk command transfer result=%d\n", result);
 
        /* if we stall, we need to clear it before we go on */
        if (result == -EPIPE) {
@@ -1820,7 +1820,7 @@ static void storage_disconnect(struct usb_device *dev, void *ptr)
  * Initialization and registration
  ***********************************************************************/
 
-static int __init usb_stor_init(void)
+int __init usb_stor_init(void)
 {
        //  MOD_INC_USE_COUNT;
 
@@ -1840,7 +1840,7 @@ static int __init usb_stor_init(void)
        return 0;
 }
 
-static void __exit usb_stor_exit(void)
+void __exit usb_stor_exit(void)
 {
        usb_deregister(&storage_driver) ;
 }
index 1cfa0515ba2e2357654bda03813c4924caf614ae..15fd77b22411a55ba53409fa297e3a26efbc43d7 100644 (file)
@@ -272,10 +272,7 @@ printk("nfsd_setattr: size change??\n");
                if (err)
                        goto out_nfserr;
 
-               err = locks_verify_area(FLOCK_VERIFY_WRITE, inode, NULL,
-                                 iap->ia_size<inode->i_size ? iap->ia_size : inode->i_size,
-                                 abs(inode->i_size - iap->ia_size));
-
+               err = locks_verify_truncate(inode, NULL, iap->ia_size);
                if (err)
                        goto out_nfserr;
                DQUOT_INIT(inode);
@@ -1061,9 +1058,7 @@ nfsd_truncate(struct svc_rqst *rqstp, struct svc_fh *fhp, unsigned long size)
        err = get_write_access(inode);
        if (err)
                goto out_nfserr;
-       err = locks_verify_area(FLOCK_VERIFY_WRITE, inode, NULL,
-                                 size<inode->i_size ? size : inode->i_size,
-                                 abs(inode->i_size - size));
+       err = locks_verify_truncate(inode, NULL, size);
        if (err)
                goto out_nfserr;
 
index 808ddcaf8c303905e70af7301a7cb8ba43505b7d..7896b0a2e58890eddb6cc7e5ff7ce13a62dfaa24 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -112,9 +112,7 @@ static inline long do_sys_truncate(const char * path, loff_t length)
        if (error)
                goto dput_and_out;
 
-       error = locks_verify_area(FLOCK_VERIFY_WRITE, inode, NULL,
-                                 length < inode->i_size ? length : inode->i_size,
-                                 abs(inode->i_size - length));
+       error = locks_verify_truncate(inode, NULL, length);
        if (!error) {
                DQUOT_INIT(inode);
                error = do_truncate(dentry, length);
@@ -157,9 +155,7 @@ static inline long do_sys_ftruncate(unsigned int fd, loff_t length)
        error = -EPERM;
        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
                goto out_putf;
-       error = locks_verify_area(FLOCK_VERIFY_WRITE, inode, file,
-                                 length<inode->i_size ? length : inode->i_size,
-                                 abs(inode->i_size - length));
+       error = locks_verify_truncate(inode, file, length);
        lock_kernel();
        if (!error)
                error = do_truncate(dentry, length);
index aba8430a246ccc1ea33a1ef275c5f598f789968f..1a818c4fd3a63497e3fb29af7afb8e6ab42b6091 100644 (file)
@@ -96,6 +96,18 @@ void initrd_init(void);
  * code duplication in drivers.
  */
 
+extern inline void blkdev_dequeue_request(struct request * req)
+{
+       if (req->q)
+       {
+               if (req->cmd == READ)
+                       req->q->elevator.read_pendings--;
+               req->q->nr_segments -= req->nr_segments;
+               req->q = NULL;
+       }
+       list_del(&req->queue);
+}
+
 int end_that_request_first(struct request *req, int uptodate, char *name);
 void end_that_request_last(struct request *req);
 
@@ -373,7 +385,10 @@ static void floppy_off(unsigned int nr);
 #if !defined(IDE_DRIVER)
 
 #ifndef CURRENT
-#define CURRENT (blk_dev[MAJOR_NR].request_queue.current_request)
+#define CURRENT blkdev_entry_next_request(&blk_dev[MAJOR_NR].request_queue.queue_head)
+#endif
+#ifndef QUEUE_EMPTY
+#define QUEUE_EMPTY list_empty(&blk_dev[MAJOR_NR].request_queue.queue_head)
 #endif
 
 #ifndef DEVICE_NAME
@@ -418,7 +433,7 @@ static void (DEVICE_REQUEST)(request_queue_t *);
 #endif
 
 #define INIT_REQUEST \
-       if (!CURRENT) {\
+       if (QUEUE_EMPTY) {\
                CLEAR_INTR; \
                return; \
        } \
@@ -446,7 +461,7 @@ static void end_request(int uptodate) {
        add_blkdev_randomness(MAJOR(req->rq_dev));
 #endif
        DEVICE_OFF(req->rq_dev);
-       CURRENT = req->next;
+       blkdev_dequeue_request(req);
        end_that_request_last(req);
 }
 
index 6eed225b6ec7bc4b1b89742e8fa3f592110aed1d..ebadfcf06c2f668a9d592c1d65eaaf91eb5ac70e 100644 (file)
@@ -5,6 +5,10 @@
 #include <linux/sched.h>
 #include <linux/genhd.h>
 #include <linux/tqueue.h>
+#include <linux/list.h>
+
+struct request_queue;
+typedef struct request_queue request_queue_t;
 
 /*
  * Ok, this is an expanded form so that we can use the same
@@ -13,6 +17,9 @@
  * for read/write completion.
  */
 struct request {
+       struct list_head queue;
+       int elevator_sequence;
+
        volatile int rq_status; /* should split this into a few status bits */
 #define RQ_INACTIVE            (-1)
 #define RQ_ACTIVE              1
@@ -33,27 +40,42 @@ struct request {
        struct semaphore * sem;
        struct buffer_head * bh;
        struct buffer_head * bhtail;
-       struct request * next;
+       request_queue_t * q;
 };
 
-typedef struct request_queue request_queue_t;
 typedef int (merge_request_fn) (request_queue_t *q, 
                                struct request  *req,
-                               struct buffer_head *bh);
+                               struct buffer_head *bh,
+                               int);
 typedef int (merge_requests_fn) (request_queue_t *q, 
                                 struct request  *req,
-                                struct request  *req2);
+                                struct request  *req2,
+                                int);
 typedef void (request_fn_proc) (request_queue_t *q);
 typedef request_queue_t * (queue_proc) (kdev_t dev);
 typedef void (make_request_fn) (int rw, struct buffer_head *bh);
 typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
 typedef void (unplug_device_fn) (void *q);
 
+typedef struct elevator_s
+{
+       int sequence;
+       int read_latency;
+       int write_latency;
+       int max_bomb_segments;
+       int read_pendings;
+} elevator_t;
+
 struct request_queue
 {
-       struct request          * current_request;
+       struct list_head queue_head;
+       /* together with queue_head for cacheline sharing */
+       elevator_t elevator;
+       unsigned int nr_segments;
+
        request_fn_proc         * request_fn;
-       merge_request_fn        * merge_fn;
+       merge_request_fn        * back_merge_fn;
+       merge_request_fn        * front_merge_fn;
        merge_requests_fn       * merge_requests_fn;
        make_request_fn         * make_request_fn;
        plug_device_fn          * plug_device_fn;
@@ -142,4 +164,12 @@ extern int * max_segments[MAX_BLKDEV];
 #define MAX_READAHEAD  31
 #define MIN_READAHEAD  3
 
+#define ELEVATOR_DEFAULTS ((elevator_t) { 0, NR_REQUEST>>1, NR_REQUEST<<5, 4, 0, })
+
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
+#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
+#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
+#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
+#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)
+
 #endif
index 38cbb296073348e12a1fbfa1c29d114159ca41cd..4e607707c8b9c8fc6ee79b2b5286b37439d9f6ff 100644 (file)
@@ -744,6 +744,19 @@ extern inline int locks_verify_area(int read_write, struct inode *inode,
        return 0;
 }
 
+extern inline int locks_verify_truncate(struct inode *inode,
+                                   struct file *filp,
+                                   loff_t size)
+{
+       if (inode->i_flock && MANDATORY_LOCK(inode))
+               return locks_mandatory_area(
+                       FLOCK_VERIFY_WRITE, inode, filp,
+                       size < inode->i_size ? size : inode->i_size,
+                       abs(inode->i_size - size)
+               );
+       return 0;
+}
+
 
 /* fs/open.c */
 
index 4bc121a307b55afef5b39a256e5d99508159bc55..e688137e7901240afa9cc4c4af6b5b47d568798d 100644 (file)
@@ -26,8 +26,6 @@
 #define LP_TRUST_IRQ_  0x0200 /* obsolete */
 #define LP_NO_REVERSE  0x0400 /* No reverse mode available. */
 #define LP_DATA_AVAIL  0x0800 /* Data is available. */
-#define LP_HAVE_PORT_BIT   12 /* (0x1000) Port is claimed. */
-#define LP_PORT_BUSY   (1<<13) /* Reading or writing. */
 
 /* 
  * bit defines for 8255 status port
index 934e873986434b36bd384ac01aa7a9f1042b1dcb..e5194b28f84f9719085e8ff9cdd25444563f851e 100644 (file)
@@ -60,8 +60,7 @@ struct nbd_device {
        struct socket * sock;
        struct file * file;             /* If == NULL, device is not ready, yet */
        int magic;                      /* FIXME: not if debugging is off       */
-       struct request *head;   /* Requests are added here...                   */
-       struct request *tail;
+       struct list_head queue_head;    /* Requests are added here...                   */
        struct semaphore queue_lock;
 };
 #endif