CONFIG_BLK_DEV_SD=y
CONFIG_SD_EXTRA_DEVS=40
# CONFIG_CHR_DEV_ST is not set
-CONFIG_ST_EXTRA_DEVS=2
# CONFIG_BLK_DEV_SR is not set
# CONFIG_CHR_DEV_SG is not set
{
unsigned long start, len;
- while( CURRENT ) {
+ while( !QUEUE_EMPTY ) {
if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
panic("stram: request list destroyed");
if (CURRENT->bh) {
{
printk("FDC1772: fd_error\n");
/*panic("fd1772: fd_error"); *//* DAG tmp */
- if (!CURRENT)
+ if (QUEUE_EMPTY)
return;
CURRENT->errors++;
if (CURRENT->errors >= MAX_ERRORS) {
DPRINT(("redo_fd_request: CURRENT=%08lx CURRENT->rq_dev=%04x CURRENT->sector=%ld\n",
(unsigned long) CURRENT, CURRENT ? CURRENT->rq_dev : 0,
- CURRENT ? CURRENT->sector : 0));
+ !QUEUE_EMPTY ? CURRENT->sector : 0));
- if (CURRENT && CURRENT->rq_status == RQ_INACTIVE)
+ if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE)
goto the_end;
repeat:
- if (!CURRENT)
+ if (QUEUE_EMPTY)
goto the_end;
if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
/* No - its the end of the line */
/* end_request's should have happened at the end of sector DMAs */
/* Turns Drive LEDs off - may slow it down? */
- if (!CURRENT)
+ if (QUEUE_EMPTY)
issue_command(CMD_CKV, block, 2);
Busy = 0;
{
DBG("mfm_request CURRENT=%p Busy=%d\n", CURRENT, Busy);
- if (!CURRENT) {
+ if (QUEUE_EMPTY) {
DBG("mfm_request: Exited due to NULL Current 1\n");
return;
}
DBG("mfm_request: before INIT_REQUEST\n");
- if (!CURRENT) {
+ if (QUEUE_EMPTY) {
printk("mfm_request: Exiting due to !CURRENT (pre)\n");
CLEAR_INTR;
Busy = 0;
}
-static int DAC_merge_fn(request_queue_t *q, struct request *req,
- struct buffer_head *bh)
+static inline int DAC_new_segment(request_queue_t *q, struct request *req,
+ int __max_segments)
{
int max_segments;
DAC960_Controller_T * Controller = q->queuedata;
max_segments = Controller->MaxSegmentsPerRequest[MINOR(req->rq_dev)];
+ if (__max_segments < max_segments)
+ max_segments = __max_segments;
- if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) {
- if (req->nr_segments < max_segments) {
- req->nr_segments++;
- return 1;
- }
- return 0;
+ if (req->nr_segments < max_segments) {
+ req->nr_segments++;
+ q->nr_segments++;
+ return 1;
}
+ return 0;
+}
- return 1;
+static int DAC_back_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int __max_segments)
+{
+ if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
+ return 1;
+ return DAC_new_segment(q, req, __max_segments);
+}
+
+static int DAC_front_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int __max_segments)
+{
+ if (bh->b_data + bh->b_size == req->bh->b_data)
+ return 1;
+ return DAC_new_segment(q, req, __max_segments);
}
static int DAC_merge_requests_fn(request_queue_t *q,
struct request *req,
- struct request *next)
+ struct request *next,
+ int __max_segments)
{
int max_segments;
DAC960_Controller_T * Controller = q->queuedata;
int total_segments = req->nr_segments + next->nr_segments;
max_segments = Controller->MaxSegmentsPerRequest[MINOR(req->rq_dev)];
+ if (__max_segments < max_segments)
+ max_segments = __max_segments;
if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
+ {
total_segments--;
+ q->nr_segments--;
+ }
if (total_segments > max_segments)
return 0;
q = BLK_DEFAULT_QUEUE(MajorNumber);
blk_init_queue(q, RequestFunctions[Controller->ControllerNumber]);
blk_queue_headactive(q, 0);
- q->merge_fn = DAC_merge_fn;
+ q->back_merge_fn = DAC_back_merge_fn;
+ q->front_merge_fn = DAC_front_merge_fn;
q->merge_requests_fn = DAC_merge_requests_fn;
q->queuedata = (void *) Controller;
blk_size[MajorNumber] = NULL;
blksize_size[MajorNumber] = NULL;
max_sectors[MajorNumber] = NULL;
- max_segments[MajorNumber] = NULL;
/*
Remove the Generic Disk Information structure from the list.
*/
static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
boolean WaitForCommand)
{
- IO_Request_T **RequestQueuePointer =
- &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.current_request;
+ struct list_head * queue_head;
IO_Request_T *Request;
DAC960_Command_T *Command;
char *RequestBuffer;
+
+ queue_head = &blk_dev[DAC960_MAJOR + Controller->ControllerNumber].request_queue.queue_head;
while (true)
{
- Request = *RequestQueuePointer;
- if (Request == NULL || Request->rq_status == RQ_INACTIVE) return false;
+ if (list_empty(queue_head)) return false;
+ Request = blkdev_entry_next_request(queue_head);
+ if (Request->rq_status == RQ_INACTIVE) return false;
Command = DAC960_AllocateCommand(Controller);
if (Command != NULL) break;
if (!WaitForCommand) return false;
Command->BufferHeader = Request->bh;
RequestBuffer = Request->buffer;
Request->rq_status = RQ_INACTIVE;
- *RequestQueuePointer = Request->next;
+ blkdev_dequeue_request(Request);
wake_up(&wait_for_request);
if (Command->SegmentCount == 1)
{
static void bad_rw_intr( void )
{
- if (!CURRENT)
+ if (QUEUE_EMPTY)
return;
if (++CURRENT->errors >= MAX_ERRORS)
DEVICE_INTR = NULL;
printk( KERN_ERR "ACSI timeout\n" );
- if (!CURRENT) return;
+ if (QUEUE_EMPTY) return;
if (++CURRENT->errors >= MAX_ERRORS) {
#ifdef DEBUG
printk( KERN_ERR "ACSI: too many errors.\n" );
unsigned long pbuffer;
struct buffer_head *bh;
- if (CURRENT && CURRENT->rq_status == RQ_INACTIVE) {
+ if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE) {
if (!DEVICE_INTR) {
ENABLE_IRQ();
stdma_release();
/* Another check here: An interrupt or timer event could have
* happened since the last check!
*/
- if (CURRENT && CURRENT->rq_status == RQ_INACTIVE) {
+ if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE) {
if (!DEVICE_INTR) {
ENABLE_IRQ();
stdma_release();
if (DEVICE_INTR)
return;
- if (!CURRENT) {
+ if (QUEUE_EMPTY) {
CLEAR_INTR;
ENABLE_IRQ();
stdma_release();
char *data;
unsigned long flags;
- if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+ if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE){
return;
}
repeat:
- if (!CURRENT) {
+ if (QUEUE_EMPTY) {
/* Nothing left to do */
return;
}
return;
}
- if (!CURRENT) return;
+ if (QUEUE_EMPTY) return;
CURRENT->errors++;
if (CURRENT->errors >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
int device, drive, type;
DPRINT(("redo_fd_request: CURRENT=%08lx CURRENT->dev=%04x CURRENT->sector=%ld\n",
- (unsigned long)CURRENT, CURRENT ? CURRENT->rq_dev : 0,
- CURRENT ? CURRENT->sector : 0 ));
+ (unsigned long)CURRENT, !QUEUE_EMPTY ? CURRENT->rq_dev : 0,
+ !QUEUE_EMPTY ? CURRENT->sector : 0 ));
IsFormatting = 0;
- if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+ if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE){
return;
}
repeat:
- if (!CURRENT)
+ if (QUEUE_EMPTY)
goto the_end;
if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
cmdlist_t *c;
int seg, sect;
char *lastdataend;
- request_queue_t * q;
+ struct list_head * queue_head;
struct buffer_head *bh;
struct request *creq;
- q = &blk_dev[MAJOR_NR+ctlr].request_queue;
+ queue_head = &blk_dev[MAJOR_NR+ctlr].request_queue.queue_head;
- creq = q->current_request;
- if (creq == NULL || creq->rq_status == RQ_INACTIVE)
+ if (list_empty(queue_head))
+ goto doreq_done;
+ creq = blkdev_entry_next_request(queue_head);
+ if (creq->rq_status == RQ_INACTIVE)
goto doreq_done;
if (ctlr != MAJOR(creq->rq_dev)-MAJOR_NR ||
bh->b_reqnext = NULL;
DBGPX( printk("More to do on same request %p\n", creq); );
} else {
-DBGPX( printk("Done with %p, queueing %p\n", creq, creq->next); );
- creq->rq_status = RQ_INACTIVE;
- q->current_request = creq->next;
- wake_up(&wait_for_request);
+DBGPX( printk("Done with %p\n", creq); );
+ blkdev_dequeue_request(creq);
+ end_that_request_last(creq);
}
c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
probing = 0;
reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
- if (!CURRENT){
+ if (QUEUE_EMPTY){
DPRINT("request list destroyed in floppy request done\n");
return;
}
DRS->maxtrack = 1;
/* unlock chained buffers */
- while (current_count_sectors && CURRENT &&
+ while (current_count_sectors && !QUEUE_EMPTY &&
current_count_sectors >= CURRENT->current_nr_sectors){
current_count_sectors -= CURRENT->current_nr_sectors;
CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
CURRENT->sector += CURRENT->current_nr_sectors;
end_request(1);
}
- if (current_count_sectors && CURRENT){
+ if (current_count_sectors && !QUEUE_EMPTY){
/* "unlock" last subsector */
CURRENT->buffer += current_count_sectors <<9;
CURRENT->current_nr_sectors -= current_count_sectors;
return;
}
- if (current_count_sectors && !CURRENT)
+ if (current_count_sectors && QUEUE_EMPTY)
DPRINT("request list destroyed in floppy request done\n");
} else {
if (current_drive < N_DRIVE)
floppy_off(current_drive);
- if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+ if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE){
CLEAR_INTR;
unlock_fdc();
return;
}
while(1){
- if (!CURRENT) {
+ if (QUEUE_EMPTY) {
CLEAR_INTR;
unlock_fdc();
return;
unsigned long flags;
char devc;
- devc = CURRENT ? 'a' + DEVICE_NR(CURRENT->rq_dev) : '?';
+ devc = !QUEUE_EMPTY ? 'a' + DEVICE_NR(CURRENT->rq_dev) : '?';
save_flags (flags);
sti();
#ifdef VERBOSE_ERRORS
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
- if (CURRENT)
+ if (!QUEUE_EMPTY)
printk(", sector=%ld", CURRENT->sector);
}
printk("\n");
{
int dev;
- if (!CURRENT)
+ if (QUEUE_EMPTY)
return;
dev = DEVICE_NR(CURRENT->rq_dev);
if (++CURRENT->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
- if (CURRENT)
+ if (!QUEUE_EMPTY)
hd_request();
return;
}
unsigned int dev;
DEVICE_INTR = NULL;
- if (!CURRENT)
+ if (QUEUE_EMPTY)
return;
disable_irq(HD_IRQ);
sti();
{
unsigned int dev, block, nsect, sec, track, head, cyl;
- if (CURRENT && CURRENT->rq_status == RQ_INACTIVE) return;
+ if (!QUEUE_EMPTY && CURRENT->rq_status == RQ_INACTIVE) return;
if (DEVICE_INTR)
return;
repeat:
if (!end_that_request_first(rq, uptodate, hwgroup->drive->name)) {
add_blkdev_randomness(MAJOR(rq->rq_dev));
- hwgroup->drive->queue.current_request = rq->next;
- blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
+ blkdev_dequeue_request(rq);
hwgroup->rq = NULL;
end_that_request_last(rq);
}
}
}
spin_lock_irqsave(&io_request_lock, flags);
- drive->queue.current_request = rq->next;
- blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL;
+ blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
rq->rq_status = RQ_INACTIVE;
spin_unlock_irqrestore(&io_request_lock, flags);
{
ide_startstop_t startstop;
unsigned long block, blockend;
- struct request *rq = drive->queue.current_request;
+ struct request *rq = blkdev_entry_next_request(&drive->queue.queue_head);
unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
ide_hwif_t *hwif = HWIF(drive);
best = NULL;
drive = hwgroup->drive;
do {
- if (drive->queue.current_request && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
+ if (!list_empty(&drive->queue.queue_head) && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
if (!best
|| (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
|| (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
{
- struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major];
- if( !bdev->request_queue.plugged )
+ if( !drive->queue.plugged )
best = drive;
}
}
*/
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
- struct blk_dev_struct *bdev;
ide_drive_t *drive;
ide_hwif_t *hwif;
ide_startstop_t startstop;
hwgroup->rq = NULL;
drive = hwgroup->drive;
do {
- bdev = &blk_dev[HWIF(drive)->major];
- if( !bdev->request_queue.plugged )
- bdev->request_queue.current_request = NULL; /* (broken since patch-2.1.15) */
if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
sleep = drive->sleep;
} while ((drive = drive->next) != hwgroup->drive);
drive->sleep = 0;
drive->service_start = jiffies;
- bdev = &blk_dev[hwif->major];
- if ( bdev->request_queue.plugged ) /* FIXME: paranoia */
+ if ( drive->queue.plugged ) /* paranoia */
printk("%s: Huh? nuking plugged queue\n", drive->name);
- bdev->request_queue.current_request = hwgroup->rq = drive->queue.current_request;
+ hwgroup->rq = blkdev_entry_next_request(&drive->queue.queue_head);
/*
* Some systems have trouble with IDE IRQs arriving while
* the driver is still setting things up. So, here we disable
rq->sem = NULL;
rq->bh = NULL;
rq->bhtail = NULL;
- rq->next = NULL;
+ rq->q = NULL;
}
/*
unsigned long flags;
ide_hwgroup_t *hwgroup = HWGROUP(drive);
unsigned int major = HWIF(drive)->major;
- struct request *cur_rq;
+ struct list_head * queue_head;
DECLARE_MUTEX_LOCKED(sem);
#ifdef CONFIG_BLK_DEV_PDC4030
if (action == ide_wait)
rq->sem = &sem;
spin_lock_irqsave(&io_request_lock, flags);
- cur_rq = drive->queue.current_request;
- if (cur_rq == NULL || action == ide_preempt) {
- rq->next = cur_rq;
- drive->queue.current_request = rq;
+ queue_head = &drive->queue.queue_head;
+ if (list_empty(queue_head) || action == ide_preempt) {
if (action == ide_preempt)
hwgroup->rq = NULL;
} else {
if (action == ide_wait || action == ide_end) {
- while (cur_rq->next != NULL) /* find end of list */
- cur_rq = cur_rq->next;
- }
- rq->next = cur_rq->next;
- cur_rq->next = rq;
+ queue_head = queue_head->prev;
+ } else
+ queue_head = queue_head->next;
}
+ list_add(&rq->queue, queue_head);
ide_do_request(hwgroup, 0);
spin_unlock_irqrestore(&io_request_lock, flags);
if (action == ide_wait) {
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+ * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
/*
#include <linux/module.h>
+#define DEBUG_ELEVATOR
+
/*
* MAC Floppy IWM hooks
*/
return ret;
}
+static inline int get_request_latency(elevator_t * elevator, int rw)
+{
+ int latency;
+
+ if (rw != READ)
+ latency = elevator->write_latency;
+ else
+ latency = elevator->read_latency;
+
+ return latency;
+}
+
void blk_cleanup_queue(request_queue_t * q)
{
memset(q, 0, sizeof(*q));
q->make_request_fn = mfn;
}
-static int ll_merge_fn(request_queue_t *q, struct request *req,
- struct buffer_head *bh)
+static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
- if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) {
- if (req->nr_segments < MAX_SEGMENTS) {
- req->nr_segments++;
- return 1;
- }
- return 0;
+ if (req->nr_segments < max_segments) {
+ req->nr_segments++;
+ q->nr_segments++;
+ return 1;
}
- return 1;
+ return 0;
+}
+
+static int ll_back_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
+ return 1;
+ return ll_new_segment(q, req, max_segments);
+}
+
+static int ll_front_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (bh->b_data + bh->b_size == req->bh->b_data)
+ return 1;
+ return ll_new_segment(q, req, max_segments);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
- struct request *next)
+ struct request *next, int max_segments)
{
int total_segments = req->nr_segments + next->nr_segments;
if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
+ {
total_segments--;
+ q->nr_segments--;
+ }
- if (total_segments > MAX_SEGMENTS)
+ if (total_segments > max_segments)
return 0;
req->nr_segments = total_segments;
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
+ INIT_LIST_HEAD(&q->queue_head);
+ q->elevator = ELEVATOR_DEFAULTS;
q->request_fn = rfn;
- q->current_request = NULL;
- q->merge_fn = ll_merge_fn;
+ q->back_merge_fn = ll_back_merge_fn;
+ q->front_merge_fn = ll_front_merge_fn;
q->merge_requests_fn = ll_merge_requests_fn;
q->make_request_fn = NULL;
q->plug_tq.sync = 0;
*/
inline void generic_plug_device (request_queue_t *q, kdev_t dev)
{
+#ifdef CONFIG_BLK_DEV_MD
if (MAJOR(dev) == MD_MAJOR) {
spin_unlock_irq(&io_request_lock);
BUG();
}
- if (q->current_request)
+#endif
+ if (!list_empty(&q->queue_head))
return;
q->plugged = 1;
spin_lock_irqsave(&io_request_lock,flags);
if (q->plugged) {
q->plugged = 0;
- if (q->current_request)
+ if (!list_empty(&q->queue_head))
(q->request_fn)(q);
}
spin_unlock_irqrestore(&io_request_lock,flags);
printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}
+/* elevator */
+
+#define elevator_sequence_after(a,b) ((int)((b)-(a)) < 0)
+#define elevator_sequence_before(a,b) elevator_sequence_after(b,a)
+#define elevator_sequence_after_eq(a,b) ((int)((b)-(a)) <= 0)
+#define elevator_sequence_before_eq(a,b) elevator_sequence_after_eq(b,a)
+
+static inline struct list_head * seek_to_not_starving_chunk(request_queue_t * q,
+ int * lat, int * starving)
+{
+ int sequence = q->elevator.sequence;
+ struct list_head * entry = q->queue_head.prev;
+ int pos = 0;
+
+ do {
+ struct request * req = blkdev_entry_to_request(entry);
+ if (elevator_sequence_before(req->elevator_sequence, sequence))
+ {
+ *lat -= q->nr_segments - pos;
+ *starving = 1;
+ return entry;
+ }
+ pos += req->nr_segments;
+ } while ((entry = entry->prev) != &q->queue_head);
+
+ *starving = 0;
+
+ return entry->next;
+}
+
+static inline void elevator_merge_requests(elevator_t * e, struct request * req, struct request * next)
+{
+ if (elevator_sequence_before(next->elevator_sequence, req->elevator_sequence))
+ req->elevator_sequence = next->elevator_sequence;
+ if (req->cmd == READ)
+ e->read_pendings--;
+
+}
+
+static inline int elevator_sequence(elevator_t * e, int latency)
+{
+ return latency + e->sequence;
+}
+
+#define elevator_merge_before(q, req, lat) __elevator_merge((q), (req), (lat), 0)
+#define elevator_merge_after(q, req, lat) __elevator_merge((q), (req), (lat), 1)
+static inline void __elevator_merge(request_queue_t * q, struct request * req, int latency, int after)
+{
+#ifdef DEBUG_ELEVATOR
+ int sequence = elevator_sequence(&q->elevator, latency);
+ if (after)
+ sequence -= req->nr_segments;
+ if (elevator_sequence_before(sequence, req->elevator_sequence)) {
+ static int warned = 0;
+ if (!warned) {
+ printk(KERN_WARNING __FUNCTION__
+			       ": req latency %d new latency %d\n",
+ req->elevator_sequence - q->elevator.sequence,
+ sequence - q->elevator.sequence);
+ warned = 1;
+ }
+ req->elevator_sequence = sequence;
+ }
+#endif
+}
+
+static inline void elevator_queue(request_queue_t * q,
+ struct request * req,
+ struct list_head * entry,
+ int latency, int starving)
+{
+ struct request * tmp, * __tmp;
+ int __latency = latency;
+
+ __tmp = tmp = blkdev_entry_to_request(entry);
+
+ for (;; tmp = blkdev_next_request(tmp))
+ {
+ if ((latency -= tmp->nr_segments) <= 0)
+ {
+ tmp = __tmp;
+ latency = __latency;
+
+ if (starving)
+ break;
+
+ if (q->head_active && !q->plugged)
+ {
+ latency -= tmp->nr_segments;
+ break;
+ }
+
+ list_add(&req->queue, &q->queue_head);
+ goto after_link;
+ }
+
+ if (tmp->queue.next == &q->queue_head)
+ break;
+
+ {
+ const int after_current = IN_ORDER(tmp,req);
+ const int before_next = IN_ORDER(req,blkdev_next_request(tmp));
+
+ if (!IN_ORDER(tmp,blkdev_next_request(tmp))) {
+ if (after_current || before_next)
+ break;
+ } else {
+ if (after_current && before_next)
+ break;
+ }
+ }
+ }
+
+ list_add(&req->queue, &tmp->queue);
+
+ after_link:
+ req->elevator_sequence = elevator_sequence(&q->elevator, latency);
+}
+
/*
* add-request adds a request to the linked list.
* It disables interrupts (aquires the request spinlock) so that it can muck
* which is important for drive_stat_acct() above.
*/
-static inline void __add_request(request_queue_t * q, struct request * req)
+static inline void __add_request(request_queue_t * q, struct request * req,
+ int empty, struct list_head * entry,
+ int latency, int starving)
{
- int major = MAJOR(req->rq_dev);
- struct request * tmp;
+ int major;
drive_stat_acct(req, req->nr_sectors, 1);
- req->next = NULL;
- if (!(tmp = q->current_request)) {
- q->current_request = req;
+ if (empty) {
+ req->elevator_sequence = elevator_sequence(&q->elevator, latency);
+ list_add(&req->queue, &q->queue_head);
return;
}
- for ( ; tmp->next ; tmp = tmp->next) {
- const int after_current = IN_ORDER(tmp,req);
- const int before_next = IN_ORDER(req,tmp->next);
-
- if (!IN_ORDER(tmp,tmp->next)) {
- if (after_current || before_next)
- break;
- } else {
- if (after_current && before_next)
- break;
- }
- }
- req->next = tmp->next;
- tmp->next = req;
+ elevator_queue(q, req, entry, latency, starving);
/*
* FIXME(eric) I don't understand why there is a need for this
* I am leaving this in here until I hear back from the COMPAQ
* people.
*/
+ major = MAJOR(req->rq_dev);
if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
{
(q->request_fn)(q);
*/
static inline void attempt_merge (request_queue_t * q,
struct request *req,
- int max_sectors)
+ int max_sectors,
+ int max_segments)
{
- struct request *next = req->next;
-
- if (!next)
+ struct request *next;
+
+ if (req->queue.next == &q->queue_head)
return;
+ next = blkdev_next_request(req);
if (req->sector + req->nr_sectors != next->sector)
return;
if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors)
* will have been updated to the appropriate number,
* and we shouldn't do it here too.
*/
- if(!(q->merge_requests_fn)(q, req, next))
+ if(!(q->merge_requests_fn)(q, req, next, max_segments))
return;
+ elevator_merge_requests(&q->elevator, req, next);
req->bhtail->b_reqnext = next->bh;
req->bhtail = next->bhtail;
req->nr_sectors += next->nr_sectors;
next->rq_status = RQ_INACTIVE;
- req->next = next->next;
+ list_del(&next->queue);
wake_up (&wait_for_request);
}
+static inline void elevator_debug(request_queue_t * q, kdev_t dev)
+{
+#ifdef DEBUG_ELEVATOR
+ int read_pendings = 0, nr_segments = 0;
+ elevator_t * elevator = &q->elevator;
+ struct list_head * entry = &q->queue_head;
+ static int counter;
+
+ if (counter++ % 100)
+ return;
+
+ while ((entry = entry->next) != &q->queue_head)
+ {
+ struct request * req;
+
+ req = blkdev_entry_to_request(entry);
+ if (!req->q)
+ continue;
+ if (req->cmd == READ)
+ read_pendings++;
+ nr_segments += req->nr_segments;
+ }
+
+ if (read_pendings != elevator->read_pendings)
+ {
+ printk(KERN_WARNING
+ "%s: elevator read_pendings %d should be %d\n",
+ kdevname(dev), elevator->read_pendings,
+ read_pendings);
+ elevator->read_pendings = read_pendings;
+ }
+ if (nr_segments != q->nr_segments)
+ {
+ printk(KERN_WARNING
+ "%s: elevator nr_segments %d should be %d\n",
+ kdevname(dev), q->nr_segments,
+ nr_segments);
+ q->nr_segments = nr_segments;
+ }
+#endif
+}
+
+static inline void elevator_account_request(request_queue_t * q, struct request * req)
+{
+ q->elevator.sequence++;
+ if (req->cmd == READ)
+ q->elevator.read_pendings++;
+ q->nr_segments++;
+}
+
static inline void __make_request(request_queue_t * q, int rw,
struct buffer_head * bh)
{
int major = MAJOR(bh->b_rdev);
unsigned int sector, count;
- struct request * req;
+ int max_segments = MAX_SEGMENTS;
+ struct request * req, * prev;
int rw_ahead, max_req, max_sectors;
unsigned long flags;
+ int orig_latency, latency, __latency, starving, __starving, empty;
+ struct list_head * entry, * __entry;
count = bh->b_size >> 9;
sector = bh->b_rsector;
*/
max_sectors = get_max_sectors(bh->b_rdev);
+ __latency = orig_latency = get_request_latency(&q->elevator, rw);
+
/*
* Now we acquire the request spinlock, we have to be mega careful
* not to schedule or do something nonatomic
*/
spin_lock_irqsave(&io_request_lock,flags);
- req = q->current_request;
- if (!req) {
+ elevator_debug(q, bh->b_rdev);
+
+ empty = 0;
+ if (list_empty(&q->queue_head)) {
+ empty = 1;
/* MD and loop can't handle plugging without deadlocking */
if (q->plug_device_fn)
q->plug_device_fn(q, bh->b_rdev); /* is atomic */
goto get_rq;
}
+ /* avoid write-bombs to not hurt iteractiveness of reads */
+ if (rw != READ && q->elevator.read_pendings)
+ max_segments = q->elevator.max_bomb_segments;
+
+ entry = seek_to_not_starving_chunk(q, &__latency, &starving);
+
+ __entry = entry;
+ __starving = starving;
+
+ latency = __latency;
+
if (q->head_active && !q->plugged) {
/*
* The scsi disk and cdrom drivers completely remove the request
* entry may be busy being processed and we thus can't change
* it.
*/
- if ((req = req->next) == NULL)
- goto get_rq;
+ if (entry == q->queue_head.next) {
+ latency -= blkdev_entry_to_request(entry)->nr_segments;
+ if ((entry = entry->next) == &q->queue_head)
+ goto get_rq;
+ starving = 0;
+ }
}
+ prev = NULL;
do {
+ req = blkdev_entry_to_request(entry);
+
if (req->sem)
continue;
if (req->cmd != rw)
continue;
/* Can we add it to the end of this request? */
if (req->sector + req->nr_sectors == sector) {
+ if (latency - req->nr_segments < 0)
+ break;
/*
* The merge_fn is a more advanced way
* of accomplishing the same task. Instead
* may suggest that we shouldn't merge
* this
*/
- if(!(q->merge_fn)(q, req, bh))
+ if(!(q->back_merge_fn)(q, req, bh, max_segments))
continue;
req->bhtail->b_reqnext = bh;
req->bhtail = bh;
req->nr_sectors += count;
drive_stat_acct(req, count, 0);
+
+ elevator_merge_after(q, req, latency);
+
/* Can we now merge this req with the next? */
- attempt_merge(q, req, max_sectors);
+ attempt_merge(q, req, max_sectors, max_segments);
/* or to the beginning? */
} else if (req->sector - count == sector) {
+ if (!prev && starving)
+ continue;
/*
* The merge_fn is a more advanced way
* of accomplishing the same task. Instead
* may suggest that we shouldn't merge
* this
*/
- if(!(q->merge_fn)(q, req, bh))
+ if(!(q->front_merge_fn)(q, req, bh, max_segments))
continue;
bh->b_reqnext = req->bh;
req->bh = bh;
req->sector = sector;
req->nr_sectors += count;
drive_stat_acct(req, count, 0);
+
+ elevator_merge_before(q, req, latency);
+
+ if (prev)
+ attempt_merge(q, prev, max_sectors, max_segments);
} else
continue;
+ q->elevator.sequence++;
spin_unlock_irqrestore(&io_request_lock,flags);
return;
- } while ((req = req->next) != NULL);
+ } while (prev = req,
+ (latency -= req->nr_segments) >= 0 &&
+ (entry = entry->next) != &q->queue_head);
/* find an unused request. */
get_rq:
goto end_io;
req = __get_request_wait(max_req, bh->b_rdev);
spin_lock_irqsave(&io_request_lock,flags);
+
+ /* lock got dropped so revalidate elevator */
+ empty = 1;
+ if (!list_empty(&q->queue_head)) {
+ empty = 0;
+ __latency = orig_latency;
+ __entry = seek_to_not_starving_chunk(q, &__latency, &__starving);
+ }
}
/*
* Dont start the IO if the buffer has been
req->sem = NULL;
req->bh = bh;
req->bhtail = bh;
- req->next = NULL;
- __add_request(q, req);
+ req->q = q;
+ __add_request(q, req, empty, __entry, __latency, __starving);
+ elevator_account_request(q, req);
+
spin_unlock_irqrestore(&io_request_lock, flags);
return;
void end_that_request_last(struct request *req)
{
+ if (req->q)
+ BUG();
if (req->sem != NULL)
up(req->sem);
req->rq_status = RQ_INACTIVE;
req = all_requests + NR_REQUEST;
while (--req >= all_requests) {
req->rq_status = RQ_INACTIVE;
- req->next = NULL;
}
memset(ro_bits,0,sizeof(ro_bits));
memset(max_readahead, 0, sizeof(max_readahead));
offset = 0;
index++;
pos += size;
- if (pos > lo->lo_dentry->d_inode->i_size)
- lo->lo_dentry->d_inode->i_size = pos;
UnlockPage(page);
page_cache_release(page);
}
repeat:
INIT_REQUEST;
current_request=CURRENT;
- CURRENT=current_request->next;
+ blkdev_dequeue_request(current_request);
if (MINOR(current_request->rq_dev) >= max_loop)
goto error_out;
lo = &loop_dev[MINOR(current_request->rq_dev)];
spin_lock_irq(&io_request_lock);
current_request->sector += current_request->current_nr_sectors;
current_request->nr_sectors -= current_request->current_nr_sectors;
- current_request->next=CURRENT;
- CURRENT=current_request;
+	list_add(&current_request->queue, &current_request->q->queue_head);
end_request(1);
goto repeat;
error_out_lock:
spin_lock_irq(&io_request_lock);
error_out:
- current_request->next=CURRENT;
- CURRENT=current_request;
+	list_add(&current_request->queue, &current_request->q->queue_head);
end_request(0);
goto repeat;
}
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
for (i=0; i < max_loop; i++) {
memset(&loop_dev[i], 0, sizeof(struct loop_device));
loop_dev[i].lo_number = i;
DEBUG("reading control, ");
reply.magic = 0;
result = nbd_xmit(0, lo->sock, (char *) &reply, sizeof(reply));
- req = lo->tail;
if (result <= 0)
HARDFAIL("Recv control failed.");
memcpy(&xreq, reply.handle, sizeof(xreq));
+ req = blkdev_entry_prev_request(&lo->queue_head);
if (xreq != req)
FAIL("Unexpected handle received.\n");
{
struct request *req;
- while (1) {
+ down (&lo->queue_lock);
+ while (!list_empty(&lo->queue_head)) {
req = nbd_read_stat(lo);
if (!req)
- return;
- down (&lo->queue_lock);
+ goto out;
#ifdef PARANOIA
- if (req != lo->tail) {
+ if (req != blkdev_entry_prev_request(&lo->queue_head)) {
printk(KERN_ALERT "NBD: I have problem...\n");
}
if (lo != &nbd_dev[MINOR(req->rq_dev)]) {
printk(KERN_ALERT "NBD: request corrupted!\n");
- goto next;
+ continue;
}
if (lo->magic != LO_MAGIC) {
printk(KERN_ALERT "NBD: nbd_dev[] corrupted: Not enough magic\n");
- up (&lo->queue_lock);
- return;
+ goto out;
}
#endif
- nbd_end_request(req);
- if (lo->tail == lo->head) {
-#ifdef PARANOIA
- if (lo->tail->next)
- printk(KERN_ERR "NBD: I did not expect this\n");
-#endif
- lo->head = NULL;
- }
- lo->tail = lo->tail->next;
- next:
+ list_del(&req->queue);
up (&lo->queue_lock);
+
+ nbd_end_request(req);
+
+ down (&lo->queue_lock);
}
+ out:
+ up (&lo->queue_lock);
}
void nbd_clear_que(struct nbd_device *lo)
{
struct request *req;
+ unsigned long flags;
- while (1) {
- req = lo->tail;
- if (!req)
- return;
+ while (!list_empty(&lo->queue_head)) {
+ req = blkdev_entry_prev_request(&lo->queue_head);
#ifdef PARANOIA
if (lo != &nbd_dev[MINOR(req->rq_dev)]) {
printk(KERN_ALERT "NBD: request corrupted when clearing!\n");
}
#endif
req->errors++;
+ list_del(&req->queue);
+ up(&lo->queue_lock);
+
nbd_end_request(req);
- if (lo->tail == lo->head) {
-#ifdef PARANOIA
- if (lo->tail->next)
- printk(KERN_ERR "NBD: I did not assume this\n");
-#endif
- lo->head = NULL;
- }
- lo->tail = lo->tail->next;
+
+ down(&lo->queue_lock);
}
}
int dev;
struct nbd_device *lo;
- while (CURRENT) {
+ while (!QUEUE_EMPTY) {
req = CURRENT;
dev = MINOR(req->rq_dev);
#ifdef PARANOIA
requests_in++;
#endif
req->errors = 0;
- CURRENT = CURRENT->next;
- req->next = NULL;
-
+ blkdev_dequeue_request(req);
spin_unlock_irq(&io_request_lock);
- down (&lo->queue_lock);
- if (lo->head == NULL) {
- lo->head = req;
- lo->tail = req;
- } else {
- lo->head->next = req;
- lo->head = req;
- }
+ down (&lo->queue_lock);
+ list_add(&req->queue, &lo->queue_head);
nbd_send_req(lo->sock, req); /* Why does this block? */
up (&lo->queue_lock);
+
spin_lock_irq(&io_request_lock);
continue;
error_out:
req->errors++;
+ blkdev_dequeue_request(req);
+ spin_unlock(&io_request_lock);
nbd_end_request(req);
- CURRENT = CURRENT->next;
+ spin_lock(&io_request_lock);
}
return;
}
lo = &nbd_dev[dev];
switch (cmd) {
case NBD_CLEAR_SOCK:
+ down(&lo->queue_lock);
nbd_clear_que(lo);
- if (lo->head || lo->tail) {
+ if (!list_empty(&lo->queue_head)) {
+ up(&lo->queue_lock);
printk(KERN_ERR "nbd: Some requests are in progress -> can not turn off.\n");
return -EBUSY;
}
+ up(&lo->queue_lock);
file = lo->file;
if (!file)
return -EINVAL;
return 0;
#ifdef PARANOIA
case NBD_PRINT_DEBUG:
- printk(KERN_INFO "NBD device %d: head = %lx, tail = %lx. Global: in %d, out %d\n",
- dev, (long) lo->head, (long) lo->tail, requests_in, requests_out);
+		printk(KERN_INFO "NBD device %d: queue_head = %p. Global: in %d, out %d\n",
+		       dev, &lo->queue_head, requests_in, requests_out);
return 0;
#endif
case BLKGETSIZE:
blksize_size[MAJOR_NR] = nbd_blksizes;
blk_size[MAJOR_NR] = nbd_sizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_nbd_request);
+ blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
for (i = 0; i < MAX_NBD; i++) {
nbd_dev[i].refcnt = 0;
nbd_dev[i].file = NULL;
if (pcd_busy) return;
while (1) {
- if ((!CURRENT) || (CURRENT->rq_status == RQ_INACTIVE)) return;
+ if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
INIT_REQUEST;
if (CURRENT->cmd == READ) {
unit = MINOR(CURRENT->rq_dev);
if (pd_busy) return;
repeat:
- if ((!CURRENT) || (CURRENT->rq_status == RQ_INACTIVE)) return;
+ if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
INIT_REQUEST;
pd_dev = MINOR(CURRENT->rq_dev);
pd_cmd = CURRENT->cmd;
pd_run = pd_count;
while ((pd_run <= cluster) &&
- (req = req->next) &&
+ (req = blkdev_next_request(req)) &&
(pd_block+pd_run == req->sector) &&
(pd_cmd == req->cmd) &&
(pd_dev == MINOR(req->rq_dev)))
/* paranoia */
- if ((!CURRENT) ||
+ if (QUEUE_EMPTY ||
(CURRENT->cmd != pd_cmd) ||
(MINOR(CURRENT->rq_dev) != pd_dev) ||
(CURRENT->rq_status == RQ_INACTIVE) ||
if (pf_busy) return;
repeat:
- if ((!CURRENT) || (CURRENT->rq_status == RQ_INACTIVE)) return;
+ if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
INIT_REQUEST;
pf_unit = unit = DEVICE_NR(CURRENT->rq_dev);
pf_cmd = CURRENT->cmd;
pf_run = pf_count;
while ((pf_run <= cluster) &&
- (req = req->next) &&
+ (req = blkdev_next_request(req)) &&
(pf_block+pf_run == req->sector) &&
(pf_cmd == req->cmd) &&
(pf_unit == DEVICE_NR(req->rq_dev)))
/* paranoia */
- if ((!CURRENT) ||
+ if (QUEUE_EMPTY ||
(CURRENT->cmd != pf_cmd) ||
(DEVICE_NR(CURRENT->rq_dev) != pf_unit) ||
(CURRENT->rq_status == RQ_INACTIVE) ||
if (virt_to_bus(CURRENT->buffer + CURRENT->nr_sectors * 512) > 16 * MB) {
printk("%s: DMA above 16MB not supported\n", DEVICE_NAME);
end_request(FAIL);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(q);
return;
} /* check for above 16Mb dmas */
default:
printk("%s: Unknown command\n", DEVICE_NAME);
end_request(FAIL);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(q);
break;
} /* handle different commands */
printk("Grrr. error. ps2esdi_drives: %d, %lu %lu\n", ps2esdi_drives,
CURRENT->sector, ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects);
end_request(FAIL);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(q);
}
return do_ps2esdi_request(NULL);
else {
end_request(FAIL);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(NULL);
}
}
do_ps2esdi_request(NULL);
else {
end_request(FAIL);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(NULL);
}
break;
do_ps2esdi_request(NULL);
else {
end_request(FAIL);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(NULL);
}
break;
outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
end_request(FAIL);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(NULL);
break;
do_ps2esdi_request(NULL);
} else {
end_request(SUCCES);
- if (CURRENT)
+ if (!QUEUE_EMPTY)
do_ps2esdi_request(NULL);
}
}
wake_up(&fs->wait);
return;
}
- while (CURRENT && fs->state == idle) {
+ while (!QUEUE_EMPTY && fs->state == idle) {
if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
panic(DEVICE_NAME ": request list destroyed");
if (CURRENT->bh && !buffer_locked(CURRENT->bh))
wake_up(&fs->wait);
return;
}
- while (CURRENT && fs->state == idle) {
+ while (!QUEUE_EMPTY && fs->state == idle) {
if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
panic(DEVICE_NAME ": request list destroyed");
if (CURRENT->bh && !buffer_locked(CURRENT->bh))
sti();
if (xdc_busy)
return;
- while (code = 0, CURRENT) {
+ while (code = 0, !QUEUE_EMPTY) {
INIT_REQUEST; /* do some checking on the request structure */
if (CURRENT_DEV < xd_drives
#endif
#define CURRENT_VALID \
- (CURRENT && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
+ (!QUEUE_EMPTY && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
&& CURRENT -> sector != -1)
#define AFL_STATUSorDATA (AFL_STATUS | AFL_DATA)
if (signal_pending(current))
{
restore_flags(flags);
- if (CURRENT && CURRENT->rq_status != RQ_INACTIVE)
+ if (!QUEUE_EMPTY && CURRENT->rq_status != RQ_INACTIVE)
{
end_request(0);
}
* The beginning here is stolen from the hard disk driver. I hope
* it's right.
*/
- if (!(CURRENT) || CURRENT->rq_status == RQ_INACTIVE)
+ if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE)
{
goto end_do_cdu31a_request;
}
while(1) { /* repeat until all requests have been satisfied */
INIT_REQUEST;
- if (CURRENT == NULL || CURRENT->rq_status == RQ_INACTIVE)
+ if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE)
return;
if (CURRENT->cmd != READ) {
debug(("Non-read command %d on cdrom\n", CURRENT->cmd));
unsigned int nsect;
repeat:
- if (!(CURRENT) || CURRENT->rq_status == RQ_INACTIVE) return;
+ if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE) return;
INIT_REQUEST;
dev = MINOR(CURRENT->rq_dev);
block = CURRENT->sector;
nsect = CURRENT->nr_sectors;
- if (CURRENT == NULL || CURRENT -> sector == -1)
+ if (QUEUE_EMPTY || CURRENT -> sector == -1)
return;
if (CURRENT -> cmd != READ)
/* #define DOUBLE_QUICK_ONLY */
#define CURRENT_VALID \
-(CURRENT && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
+(!QUEUE_EMPTY && MAJOR(CURRENT -> rq_dev) == MAJOR_NR && CURRENT -> cmd == READ \
&& CURRENT -> sector != -1)
#define MFL_STATUSorDATA (MFL_STATUS | MFL_DATA)
again:
- if (CURRENT == NULL) {
+ if (QUEUE_EMPTY) {
xtrace(REQUEST, "end_request(0): CURRENT == NULL\n");
return;
}
#define CURRENT_VALID \
- (CURRENT && MAJOR(CURRENT -> rq_dev) == MAJOR_NR \
+ (!QUEUE_EMPTY && MAJOR(CURRENT -> rq_dev) == MAJOR_NR \
&& CURRENT -> cmd == READ && CURRENT -> sector != -1)
*/
#undef DEBUG_GTL
static inline void sbpcd_end_request(struct request *req, int uptodate) {
- req->next=CURRENT;
- CURRENT=req;
- up(&ioctl_read_sem);
+ list_add(&req->queue, &req->q->queue_head);
end_request(uptodate);
}
/*==========================================================================*/
#ifdef DEBUG_GTL
xnr=++xx_nr;
- if(!CURRENT)
+ if(QUEUE_EMPTY)
{
printk( "do_sbpcd_request[%di](NULL), Pid:%d, Time:%li\n",
xnr, current->pid, jiffies);
#endif
INIT_REQUEST;
req=CURRENT; /* take out our request so no other */
- CURRENT=req->next; /* task can fuck it up GTL */
- spin_unlock_irq(&io_request_lock); /* FIXME!!!! */
+ blkdev_dequeue_request(req); /* task can fuck it up GTL */
- down(&ioctl_read_sem);
if (req->rq_status == RQ_INACTIVE)
sbpcd_end_request(req, 0);
if (req -> sector == -1)
sbpcd_end_request(req, 0);
+ spin_unlock_irq(&io_request_lock);
+ down(&ioctl_read_sem);
if (req->cmd != READ)
{
msg(DBG_INF, "bad cmd %d\n", req->cmd);
printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 2, Time:%li\n",
xnr, req, req->sector, req->nr_sectors, jiffies);
#endif
+ up(&ioctl_read_sem);
+ spin_lock_irq(&io_request_lock);
sbpcd_end_request(req, 1);
- spin_lock_irq(&io_request_lock); /* FIXME!!!! */
goto request_loop;
}
printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n",
xnr, req, req->sector, req->nr_sectors, jiffies);
#endif
+ up(&ioctl_read_sem);
+ spin_lock_irq(&io_request_lock);
sbpcd_end_request(req, 1);
- spin_lock_irq(&io_request_lock); /* FIXME!!!! */
goto request_loop;
}
}
printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 4 (error), Time:%li\n",
xnr, req, req->sector, req->nr_sectors, jiffies);
#endif
- sbpcd_end_request(req, 0);
+ up(&ioctl_read_sem);
sbp_sleep(0); /* wait a bit, try again */
- spin_lock_irq(&io_request_lock); /* FIXME!!!! */
+ spin_lock_irq(&io_request_lock);
+ sbpcd_end_request(req, 0);
goto request_loop;
}
/*==========================================================================*/
#endif MODULE
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
read_ahead[MAJOR_NR] = buffers * (CD_FRAMESIZE / 512);
request_region(CDo_command,4,major_name);
*/
#define CURRENT_IS_VALID \
- ( CURRENT != NULL && MAJOR( CURRENT->rq_dev ) == MAJOR_NR && \
+ ( !QUEUE_EMPTY && MAJOR( CURRENT->rq_dev ) == MAJOR_NR && \
CURRENT->cmd == READ && CURRENT->sector != -1 )
static void sjcd_transfer( void ){
* The beginning here is stolen from the hard disk driver. I hope
* it's right.
*/
- if (!(CURRENT) || CURRENT->rq_status == RQ_INACTIVE) {
+ if (QUEUE_EMPTY || CURRENT->rq_status == RQ_INACTIVE) {
return;
}
INIT_REQUEST;
#undef LP_DEBUG
-/* If you want to see if you can get lp_poll working, define this. */
-#undef SUPPORT_POLL
-
-/* --- parport support ----------------------------------------- */
-
-static int lp_preempt(void *handle)
-{
- struct lp_struct *lps = (struct lp_struct *)handle;
-
- if (!(lps->flags & LP_PORT_BUSY)) {
- /* Let the port go. */
- clear_bit (LP_HAVE_PORT_BIT, &lps->flags);
- return 0;
- }
-
- /* Don't actually release the port now */
- return 1;
-}
-
-static void lp_check_data (struct lp_struct *lp)
-{
-#if !defined(CONFIG_PARPORT_1284) || !defined (SUPPORT_POLL)
- return;
-#else
- struct pardevice *dev = lp->dev;
- if (!(lp->flags & LP_NO_REVERSE)) {
- int err = parport_negotiate (dev->port, IEEE1284_MODE_NIBBLE);
- if (err)
- lp->flags |= LP_NO_REVERSE;
- else {
- unsigned char s = parport_read_status (dev->port);
- if (s & PARPORT_STATUS_ERROR)
- lp->flags &= ~LP_DATA_AVAIL;
- else {
- lp->flags |= LP_DATA_AVAIL;
- if (waitqueue_active (&lp->dataq))
- wake_up_interruptible (&lp->dataq);
- }
- }
- }
-#endif /* IEEE 1284 support */
-}
-
-static void lp_parport_release (int minor)
-{
- lp_check_data (&lp_table[minor]);
- if (test_and_clear_bit (LP_HAVE_PORT_BIT, &lp_table[minor].flags))
- parport_release (lp_table[minor].dev);
-
- lp_table[minor].flags &= ~LP_PORT_BUSY;
-}
-
-static void lp_parport_claim (int minor)
-{
- if (!test_and_set_bit (LP_HAVE_PORT_BIT, &lp_table[minor].flags))
- parport_claim_or_block (lp_table[minor].dev);
-
- lp_table[minor].flags |= LP_PORT_BUSY;
-}
-
/* --- low-level port access ----------------------------------- */
#define r_dtr(x) (parport_read_data(lp_table[(x)].dev->port))
static int lp_reset(int minor)
{
int retval;
- lp_parport_claim (minor);
+ parport_claim_or_block (lp_table[minor].dev);
w_ctr(minor, LP_PSELECP);
udelay (LP_DELAY);
w_ctr(minor, LP_PSELECP | LP_PINITP);
retval = r_str(minor);
- lp_parport_release (minor);
+ parport_release (lp_table[minor].dev);
return retval;
}
-static void lp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct lp_struct *lp_dev = (struct lp_struct *) dev_id;
- if (!(lp_dev->flags & LP_PORT_BUSY))
- /* We must have the port since we got an interrupt. */
- lp_check_data (lp_dev);
- if (waitqueue_active (&lp_dev->waitq))
- wake_up_interruptible (&lp_dev->waitq);
-}
-
-static void lp_wakeup (void *handle)
-{
- struct lp_struct *lp_dev = handle;
-
- if (lp_dev->flags & LP_PORT_BUSY)
- return;
-
- /* Grab the port if it can help (i.e. reverse mode is possible). */
- if (!(lp_dev->flags & LP_NO_REVERSE)) {
- parport_claim (lp_dev->dev);
- set_bit (LP_HAVE_PORT_BIT, &lp_dev->flags);
- lp_check_data (lp_dev);
- if (waitqueue_active (&lp_dev->waitq))
- wake_up_interruptible (&lp_dev->waitq);
- }
-}
-
static void lp_error (int minor)
{
int polling;
return;
polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
- if (polling) lp_parport_release (minor);
+ if (polling) parport_release (lp_table[minor].dev);
interruptible_sleep_on_timeout (&lp_table[minor].waitq,
LP_TIMEOUT_POLLED);
- if (polling) lp_parport_claim (minor);
+ if (polling) parport_claim_or_block (lp_table[minor].dev);
else parport_yield_blocking (lp_table[minor].dev);
}
/* Claim Parport or sleep until it becomes available
*/
- lp_parport_claim (minor);
+ parport_claim_or_block (lp_table[minor].dev);
/* Go to compatibility mode. */
parport_negotiate (port, IEEE1284_MODE_COMPAT);
/* Not really necessary, but polite. */
parport_set_timeout (lp_table[minor].dev, old_to);
- lp_parport_release (minor);
+ parport_release (lp_table[minor].dev);
up (&lp_table[minor].port_mutex);
if (down_interruptible (&lp_table[minor].port_mutex))
return -EINTR;
- lp_parport_claim (minor);
+ parport_claim_or_block (lp_table[minor].dev);
for (;;) {
retval = parport_read (port, kbuf, count);
}
}
- lp_parport_release (minor);
+ parport_release (lp_table[minor].dev);
if (retval > 0 && copy_to_user (buf, kbuf, retval))
retval = -EFAULT;
should most likely only ever be used by the tunelp application. */
if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) {
int status;
- lp_parport_claim (minor);
+ parport_claim_or_block (lp_table[minor].dev);
status = r_str(minor);
- lp_parport_release (minor);
+ parport_release (lp_table[minor].dev);
if (status & LP_POUTPA) {
printk(KERN_INFO "lp%d out of paper\n", minor);
MOD_DEC_USE_COUNT;
return -EFAULT;
break;
case LPGETSTATUS:
- lp_parport_claim(minor);
+ parport_claim_or_block (lp_table[minor].dev);
status = r_str(minor);
- lp_parport_release(minor);
+ parport_release (lp_table[minor].dev);
if (copy_to_user((int *) arg, &status, sizeof(int)))
return -EFAULT;
return retval;
}
-#ifdef CONFIG_PARPORT_1284
-static unsigned int lp_poll (struct file *filp, struct poll_table_struct *wait)
-{
- unsigned int minor = MINOR (filp->f_dentry->d_inode->i_rdev);
- unsigned int mask = POLLOUT | POLLWRNORM; /* always writable */
-
- poll_wait (filp, &lp_table[minor].dataq, wait);
-
- if (lp_table[minor].flags & LP_DATA_AVAIL)
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-#endif /* IEEE 1284 support */
-
static struct file_operations lp_fops = {
write: lp_write,
ioctl: lp_ioctl,
release: lp_release,
#ifdef CONFIG_PARPORT_1284
read: lp_read,
- poll: lp_poll,
#endif
};
ssize_t written;
signed long old_to;
- if (!(lp_table[CONSOLE_LP].flags & (1<<LP_HAVE_PORT_BIT))) {
- if (parport_claim (dev))
- /* Nothing we can do. */
- return;
- set_bit (LP_HAVE_PORT_BIT, &lp_table[CONSOLE_LP].flags);
- }
+ if (parport_claim (dev))
+ /* Nothing we can do. */
+ return;
old_to = parport_set_timeout (dev, 0);
} while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
parport_set_timeout (dev, old_to);
+ parport_release (dev);
}
static kdev_t lp_console_device (struct console *c)
static int lp_register(int nr, struct parport *port)
{
lp_table[nr].dev = parport_register_device(port, "lp",
- lp_preempt, lp_wakeup,
- lp_interrupt,
- 0,
+ NULL, NULL, NULL, 0,
(void *) &lp_table[nr]);
if (lp_table[nr].dev == NULL)
return 1;
for (offset = 0; offset < LP_NO; offset++) {
if (lp_table[offset].dev == NULL)
continue;
- if (lp_table[offset].flags & (1<<LP_HAVE_PORT_BIT))
- parport_release (lp_table[offset].dev);
parport_unregister_device(lp_table[offset].dev);
}
}
struct i2ob_device *dev;
u32 m;
- while (CURRENT) {
+ while (!QUEUE_EMPTY) {
/*
* On an IRQ completion if there is an inactive
* request on the queue head it means it isnt yet
}
}
req->errors = 0;
- CURRENT = CURRENT->next;
- req->next = NULL;
+ blkdev_dequeue_request(req);
req->sem = NULL;
ireq = i2ob_qhead;
dep_tristate ' SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI
-if [ "$CONFIG_BLK_DEV_ST" != "n" ]; then
+if [ "$CONFIG_CHR_DEV_ST" != "n" ]; then
int 'Maximum number of SCSI tapes that can be loaded as modules' CONFIG_ST_EXTRA_DEVS 2
fi
/* Now dump the request lists for each block device */
printk("Dump of pending block device requests\n");
for (i = 0; i < MAX_BLKDEV; i++) {
- if (blk_dev[i].request_queue.current_request) {
+ struct list_head * queue_head;
+
+ queue_head = &blk_dev[i].request_queue.queue_head;
+ if (!list_empty(queue_head)) {
struct request *req;
+ struct list_head * entry;
+
printk("%d: ", i);
- req = blk_dev[i].request_queue.current_request;
- while (req) {
+ entry = queue_head->next;
+ do {
+ req = blkdev_entry_to_request(entry);
printk("(%s %d %ld %ld %ld) ",
kdevname(req->rq_dev),
req->cmd,
req->sector,
req->nr_sectors,
req->current_nr_sectors);
- req = req->next;
- }
+ } while ((entry = entry->next) != queue_head);
printk("\n");
}
}
int init_module(void)
{
- unsigned long size;
- int has_space = 0;
struct proc_dir_entry *generic;
if( scsi_init_minimal_dma_pool() != 0 )
q = &SCpnt->device->request_queue;
SCpnt->request.cmd = SPECIAL;
SCpnt->request.special = (void *) SCpnt;
+ SCpnt->request.q = NULL;
/*
* We have the option of inserting the head or the tail of the queue.
spin_lock_irqsave(&io_request_lock, flags);
if (at_head) {
- SCpnt->request.next = q->current_request;
- q->current_request = &SCpnt->request;
+ list_add(&SCpnt->request.queue, &q->queue_head);
} else {
/*
* FIXME(eric) - we always insert at the tail of the
* request might not float high enough in the queue
* to be scheduled.
*/
- SCpnt->request.next = NULL;
- if (q->current_request == NULL) {
- q->current_request = &SCpnt->request;
- } else {
- struct request *req;
-
- for (req = q->current_request; req; req = req->next) {
- if (req->next == NULL) {
- req->next = &SCpnt->request;
- break;
- }
- }
- }
+ list_add_tail(&SCpnt->request.queue, &q->queue_head);
}
/*
* in which case we need to request the blocks that come after
* the bad sector.
*/
- SCpnt->request.next = q->current_request;
- q->current_request = &SCpnt->request;
SCpnt->request.special = (void *) SCpnt;
+ list_add(&SCpnt->request.queue, &q->queue_head);
}
/*
* use function pointers to pick the right one.
*/
if (SDpnt->single_lun
- && q->current_request == NULL
+ && list_empty(&q->queue_head)
&& SDpnt->device_busy == 0) {
request_queue_t *q;
}
}
- /*
- * Loop through all of the requests in this queue, and find
- * one that is queueable.
- */
- req = q->current_request;
-
/*
* If we couldn't find a request that could be queued, then we
* can also quit.
*/
- if (!req) {
+ if (list_empty(&q->queue_head))
break;
- }
+
+ /*
+ * Loop through all of the requests in this queue, and find
+ * one that is queueable.
+ */
+ req = blkdev_entry_next_request(&q->queue_head);
+
/*
* Find the actual device driver associated with this command.
* The SPECIAL requests are things like character device or
* reason to search the list, because all of the commands
* in this queue are for the same device.
*/
- q->current_request = req->next;
- SCpnt->request.next = NULL;
+ blkdev_dequeue_request(req);
if (req != &SCpnt->request) {
memcpy(&SCpnt->request, req, sizeof(struct request));
* We have copied the data out of the request block - it is now in
* a field in SCpnt. Release the request block.
*/
- req->next = NULL;
req->rq_status = RQ_INACTIVE;
wake_up(&wait_for_request);
}
(((((long)(X)->b_data+(X)->b_size)|((long)(Y)->b_data)) & \
(DMA_CHUNK_SIZE - 1)) == 0)
+#ifdef DMA_CHUNK_SIZE
+static inline int scsi_new_mergeable(request_queue_t * q,
+ struct request * req,
+ struct Scsi_Host *SHpnt,
+ int max_segments)
+{
+ /*
+ * pci_map_sg will be able to merge these two
+ * into a single hardware sg entry, check if
+ * we'll have enough memory for the sg list.
+ * scsi.c allocates for this purpose
+ * min(64,sg_tablesize) entries.
+ */
+ if (req->nr_segments >= max_segments &&
+ req->nr_segments >= SHpnt->sg_tablesize)
+ return 0;
+ req->nr_segments++;
+ q->nr_segments++;
+ return 1;
+}
+
+static inline int scsi_new_segment(request_queue_t * q,
+ struct request * req,
+ struct Scsi_Host *SHpnt,
+ int max_segments)
+{
+ /*
+ * pci_map_sg won't be able to map these two
+ * into a single hardware sg entry, so we have to
+ * check if things fit into sg_tablesize.
+ */
+ if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
+ (req->nr_segments >= max_segments &&
+ req->nr_segments >= SHpnt->sg_tablesize))
+ return 0;
+ if (req->nr_segments >= max_segments)
+ return 0;
+ req->nr_hw_segments++;
+ req->nr_segments++;
+ q->nr_segments++;
+ return 1;
+}
+#else
+static inline int scsi_new_segment(request_queue_t * q,
+ struct request * req,
+ struct Scsi_Host *SHpnt,
+ int max_segments)
+{
+ if (req->nr_segments < SHpnt->sg_tablesize &&
+ req->nr_segments < max_segments) {
+ /*
+ * This will form the start of a new segment. Bump the
+ * counter.
+ */
+ req->nr_segments++;
+ q->nr_segments++;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+#endif
+
/*
* Function: __scsi_merge_fn()
*
* than to have 4 separate functions all doing roughly the
* same thing.
*/
-__inline static int __scsi_merge_fn(request_queue_t * q,
- struct request *req,
- struct buffer_head *bh,
- int use_clustering,
- int dma_host)
+__inline static int __scsi_back_merge_fn(request_queue_t * q,
+ struct request *req,
+ struct buffer_head *bh,
+ int max_segments,
+ int use_clustering,
+ int dma_host)
{
- unsigned int sector, count;
+ unsigned int count;
unsigned int segment_size = 0;
Scsi_Device *SDpnt;
struct Scsi_Host *SHpnt;
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
- count = bh->b_size >> 9;
- sector = bh->b_rsector;
+ if (max_segments > 64)
+ max_segments = 64;
- /*
- * We come in here in one of two cases. The first is that we
- * are checking to see if we can add the buffer to the end of the
- * request, the other is to see if we should add the request to the
- * start.
- */
- if (req->sector + req->nr_sectors == sector) {
- if (use_clustering) {
- /*
- * See if we can do this without creating another
- * scatter-gather segment. In the event that this is a
- * DMA capable host, make sure that a segment doesn't span
- * the DMA threshold boundary.
- */
- if (dma_host &&
- virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
- goto new_end_segment;
- }
- if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
+ if (use_clustering) {
+ /*
+ * See if we can do this without creating another
+ * scatter-gather segment. In the event that this is a
+ * DMA capable host, make sure that a segment doesn't span
+ * the DMA threshold boundary.
+ */
+ if (dma_host &&
+ virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
+ goto new_end_segment;
+ }
+ if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
#ifdef DMA_SEGMENT_SIZE_LIMITED
- if( dma_host
- && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
- segment_size = 0;
- count = __count_segments(req, use_clustering, dma_host, &segment_size);
- if( segment_size + bh->b_size > PAGE_SIZE ) {
- goto new_end_segment;
- }
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+ segment_size = 0;
+ count = __count_segments(req, use_clustering, dma_host, &segment_size);
+ if( segment_size + bh->b_size > PAGE_SIZE ) {
+ goto new_end_segment;
}
-#endif
- /*
- * This one is OK. Let it go.
- */
- return 1;
}
+#endif
+ /*
+ * This one is OK. Let it go.
+ */
+ return 1;
}
- new_end_segment:
+ }
+ new_end_segment:
#ifdef DMA_CHUNK_SIZE
- if (MERGEABLE_BUFFERS(req->bhtail, bh))
- goto new_mergeable;
+ if (MERGEABLE_BUFFERS(req->bhtail, bh))
+ return scsi_new_mergeable(q, req, SHpnt, max_segments);
#endif
- goto new_segment;
- } else {
- if (req->sector - count != sector) {
- /* Attempt to merge sector that doesn't belong */
- BUG();
+ return scsi_new_segment(q, req, SHpnt, max_segments);
+}
+
+__inline static int __scsi_front_merge_fn(request_queue_t * q,
+ struct request *req,
+ struct buffer_head *bh,
+ int max_segments,
+ int use_clustering,
+ int dma_host)
+{
+ unsigned int count;
+ unsigned int segment_size = 0;
+ Scsi_Device *SDpnt;
+ struct Scsi_Host *SHpnt;
+
+ SDpnt = (Scsi_Device *) q->queuedata;
+ SHpnt = SDpnt->host;
+
+ if (max_segments > 64)
+ max_segments = 64;
+
+ if (use_clustering) {
+ /*
+ * See if we can do this without creating another
+ * scatter-gather segment. In the event that this is a
+ * DMA capable host, make sure that a segment doesn't span
+ * the DMA threshold boundary.
+ */
+ if (dma_host &&
+ virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
+ goto new_start_segment;
}
- if (use_clustering) {
- /*
- * See if we can do this without creating another
- * scatter-gather segment. In the event that this is a
- * DMA capable host, make sure that a segment doesn't span
- * the DMA threshold boundary.
- */
- if (dma_host &&
- virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
- goto new_start_segment;
- }
- if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
+ if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
#ifdef DMA_SEGMENT_SIZE_LIMITED
- if( dma_host
- && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
- segment_size = bh->b_size;
- count = __count_segments(req, use_clustering, dma_host, &segment_size);
- if( count != req->nr_segments ) {
- goto new_start_segment;
- }
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+ segment_size = bh->b_size;
+ count = __count_segments(req, use_clustering, dma_host, &segment_size);
+ if( count != req->nr_segments ) {
+ goto new_start_segment;
}
-#endif
- /*
- * This one is OK. Let it go.
- */
- return 1;
}
- }
- new_start_segment:
-#ifdef DMA_CHUNK_SIZE
- if (MERGEABLE_BUFFERS(bh, req->bh))
- goto new_mergeable;
#endif
- goto new_segment;
+ /*
+ * This one is OK. Let it go.
+ */
+ return 1;
+ }
}
+ new_start_segment:
#ifdef DMA_CHUNK_SIZE
- new_mergeable:
- /*
- * pci_map_sg will be able to merge these two
- * into a single hardware sg entry, check if
- * we'll have enough memory for the sg list.
- * scsi.c allocates for this purpose
- * min(64,sg_tablesize) entries.
- */
- if (req->nr_segments >= 64 &&
- req->nr_segments >= SHpnt->sg_tablesize)
- return 0;
- req->nr_segments++;
- return 1;
- new_segment:
- /*
- * pci_map_sg won't be able to map these two
- * into a single hardware sg entry, so we have to
- * check if things fit into sg_tablesize.
- */
- if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
- (req->nr_segments >= 64 &&
- req->nr_segments >= SHpnt->sg_tablesize))
- return 0;
- req->nr_hw_segments++;
- req->nr_segments++;
- return 1;
-#else
- new_segment:
- if (req->nr_segments < SHpnt->sg_tablesize) {
- /*
- * This will form the start of a new segment. Bump the
- * counter.
- */
- req->nr_segments++;
- return 1;
- } else {
- return 0;
- }
+ if (MERGEABLE_BUFFERS(bh, req->bh))
+ return scsi_new_mergeable(q, req, SHpnt, max_segments);
#endif
+ return scsi_new_segment(q, req, SHpnt, max_segments);
}
/*
* Notes: Optimized for different cases depending upon whether
* ISA DMA is in use and whether clustering should be used.
*/
-#define MERGEFCT(_FUNCTION, _CLUSTER, _DMA) \
-static int _FUNCTION(request_queue_t * q, \
- struct request * req, \
- struct buffer_head * bh) \
-{ \
- int ret; \
- SANITY_CHECK(req, _CLUSTER, _DMA); \
- ret = __scsi_merge_fn(q, req, bh, _CLUSTER, _DMA); \
- return ret; \
+#define MERGEFCT(_FUNCTION, _BACK_FRONT, _CLUSTER, _DMA) \
+static int _FUNCTION(request_queue_t * q, \
+ struct request * req, \
+ struct buffer_head * bh, \
+ int max_segments) \
+{ \
+ int ret; \
+ SANITY_CHECK(req, _CLUSTER, _DMA); \
+ ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q, \
+ req, \
+ bh, \
+ max_segments, \
+ _CLUSTER, \
+ _DMA); \
+ return ret; \
}
/* Version with use_clustering 0 and dma_host 1 is not necessary,
* since the only use of dma_host above is protected by use_clustering.
*/
-MERGEFCT(scsi_merge_fn_, 0, 0)
-MERGEFCT(scsi_merge_fn_c, 1, 0)
-MERGEFCT(scsi_merge_fn_dc, 1, 1)
+MERGEFCT(scsi_back_merge_fn_, back, 0, 0)
+MERGEFCT(scsi_back_merge_fn_c, back, 1, 0)
+MERGEFCT(scsi_back_merge_fn_dc, back, 1, 1)
+
+MERGEFCT(scsi_front_merge_fn_, front, 0, 0)
+MERGEFCT(scsi_front_merge_fn_c, front, 1, 0)
+MERGEFCT(scsi_front_merge_fn_dc, front, 1, 1)
+
/*
* Function: __scsi_merge_requests_fn()
*
__inline static int __scsi_merge_requests_fn(request_queue_t * q,
struct request *req,
struct request *next,
+ int max_segments,
int use_clustering,
int dma_host)
{
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
+ if (max_segments > 64)
+ max_segments = 64;
+
#ifdef DMA_CHUNK_SIZE
/* If it would not fit into prepared memory space for sg chain,
* then don't allow the merge.
*/
- if (req->nr_segments + next->nr_segments - 1 > 64 &&
+ if (req->nr_segments + next->nr_segments - 1 > max_segments &&
req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) {
return 0;
}
* This one is OK. Let it go.
*/
req->nr_segments += next->nr_segments - 1;
+ q->nr_segments--;
#ifdef DMA_CHUNK_SIZE
req->nr_hw_segments += next->nr_hw_segments - 1;
#endif
}
dont_combine:
#ifdef DMA_CHUNK_SIZE
- if (req->nr_segments + next->nr_segments > 64 &&
+ if (req->nr_segments + next->nr_segments > max_segments &&
req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
return 0;
}
* Make sure we can fix something that is the sum of the two.
* A slightly stricter test than we had above.
*/
- if (req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
+ if (req->nr_segments + next->nr_segments > max_segments &&
+ req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
return 0;
} else {
/*
#define MERGEREQFCT(_FUNCTION, _CLUSTER, _DMA) \
static int _FUNCTION(request_queue_t * q, \
struct request * req, \
- struct request * next) \
+ struct request * next, \
+ int max_segments) \
{ \
int ret; \
SANITY_CHECK(req, _CLUSTER, _DMA); \
- ret = __scsi_merge_requests_fn(q, req, next, _CLUSTER, _DMA); \
+ ret = __scsi_merge_requests_fn(q, req, next, max_segments, _CLUSTER, _DMA); \
return ret; \
}
* pick a new one.
*/
#if 0
- if (q->merge_fn != NULL)
+ if (q->back_merge_fn && q->front_merge_fn)
return;
#endif
/*
* rather than rely upon the default behavior of ll_rw_blk.
*/
if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
- q->merge_fn = scsi_merge_fn_;
+ q->back_merge_fn = scsi_back_merge_fn_;
+ q->front_merge_fn = scsi_front_merge_fn_;
q->merge_requests_fn = scsi_merge_requests_fn_;
SDpnt->scsi_init_io_fn = scsi_init_io_v;
} else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
- q->merge_fn = scsi_merge_fn_;
+ q->back_merge_fn = scsi_back_merge_fn_;
+ q->front_merge_fn = scsi_front_merge_fn_;
q->merge_requests_fn = scsi_merge_requests_fn_;
SDpnt->scsi_init_io_fn = scsi_init_io_vd;
} else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
- q->merge_fn = scsi_merge_fn_c;
+ q->back_merge_fn = scsi_back_merge_fn_c;
+ q->front_merge_fn = scsi_front_merge_fn_c;
q->merge_requests_fn = scsi_merge_requests_fn_c;
SDpnt->scsi_init_io_fn = scsi_init_io_vc;
} else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
- q->merge_fn = scsi_merge_fn_dc;
+ q->back_merge_fn = scsi_back_merge_fn_dc;
+ q->front_merge_fn = scsi_front_merge_fn_dc;
q->merge_requests_fn = scsi_merge_requests_fn_dc;
SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
}
* - Added Microtek X6 ID's. Thanks to Oliver Neukum
* <Oliver.Neukum@lrz.uni-muenchen.de>.
*
+ *
+ * 0.4.1 2/15/2000
+ *
+ * - Fixed 'count' bug in read_scanner(). Thanks to Henrik
+ * Johansson <henrikjo@post.utfors.se> for identifying it. Amazing
+ * it has worked this long.
+ * - Fixed '>=' bug in both read/write_scanner methods.
+ * - Cleaned up both read/write_scanner() methods so that they are
+ * a little more readable.
+ * - Added a lot of Microtek ID's. Thanks to Adrian Perez Jorge.
+ * - Adopted the __initcall().
+ * - Added #include <linux/init.h> to scanner.h for __initcall().
+ * - Added one liner in irq_scanner() to keep gcc from complaining
+ * about an unused variable (data) if debugging was disabled
+ * in scanner.c.
+ * - Increased the timeout parameter in read_scanner() to 120 Secs.
+ *
*
* TODO
*
* - Select/poll methods
+ * - More testing
+ * - Proper registry/assignment for LM9830 ioctl's
*
*
* Thanks to:
* - To Linus Torvalds for this great OS.
* - The GNU folks.
* - The folks that forwarded Vendor:Product ID's to me.
+ * - Johannes Erdfelt for the loaning of a USB analyzer for tracking an
+ * issue with HP-4100 and uhci.
+ * - Adolfo Montero for his assistance.
* - And anybody else who chimed in with reports and suggestions.
*
* Performance:
struct scn_usb_data *scn = urb->context;
unsigned char *data = &scn->button;
+ data += 0; /* Keep gcc from complaining about unused var */
if (urb->status) {
return;
struct scn_usb_data *scn;
struct usb_device *dev;
- ssize_t bytes_written = 0;
+ ssize_t bytes_written = 0; /* Overall count of bytes written */
ssize_t ret = 0;
- int copy_size;
- int partial;
+ int this_write; /* Number of bytes to write */
+ int partial; /* Number of bytes successfully written */
int result = 0;
char *obuf;
break;
}
- copy_size = (count > OBUF_SIZE) ? OBUF_SIZE : count;
+ this_write = (count >= OBUF_SIZE) ? OBUF_SIZE : count;
- if (copy_from_user(scn->obuf, buffer, copy_size)) {
+ if (copy_from_user(scn->obuf, buffer, this_write)) {
ret = -EFAULT;
break;
}
- result = usb_bulk_msg(dev,usb_sndbulkpipe(dev, scn->bulk_out_ep), obuf, copy_size, &partial, 60*HZ);
- dbg("write stats(%d): result:%d copy_size:%d partial:%d", scn->scn_minor, result, copy_size, partial);
+ result = usb_bulk_msg(dev,usb_sndbulkpipe(dev, scn->bulk_out_ep), obuf, this_write, &partial, 60*HZ);
+ dbg("write stats(%d): result:%d this_write:%d partial:%d", scn->scn_minor, result, this_write, partial);
if (result == USB_ST_TIMEOUT) { /* NAK -- shouldn't happen */
warn("write_scanner: NAK recieved.");
printk("\n");
}
#endif
- if (partial != copy_size) { /* Unable to write complete amount */
+ if (partial != this_write) { /* Unable to write all contents of obuf */
ret = -EIO;
break;
}
struct scn_usb_data *scn;
struct usb_device *dev;
- ssize_t read_count, ret = 0;
+	ssize_t bytes_read = 0; /* Overall count of bytes read */
+ ssize_t ret = 0;
- int partial;
- int this_read;
+ int partial; /* Number of bytes successfully read */
+ int this_read; /* Max number of bytes to read */
int result;
char *ibuf;
dev = scn->scn_dev;
- read_count = 0;
+ bytes_read = 0;
while (count) {
if (signal_pending(current)) {
break;
}
- this_read = (count > IBUF_SIZE) ? IBUF_SIZE : count;
+ this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
- result = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, scn->bulk_in_ep), ibuf, this_read, &partial, 60*HZ);
+ result = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, scn->bulk_in_ep), ibuf, this_read, &partial, 120*HZ);
dbg("read stats(%d): result:%d this_read:%d partial:%d", scn->scn_minor, result, this_read, partial);
if (result == USB_ST_TIMEOUT) { /* NAK -- shouldn't happen */
#endif
if (partial) { /* Data returned */
- count = this_read = partial;
- } else {
- ret = 0;
- read_count = 0;
- break;
- }
-
- if (this_read) {
if (copy_to_user(buffer, ibuf, this_read)) {
ret = -EFAULT;
break;
}
- count -= this_read;
- read_count += this_read;
- buffer += this_read;
+ count -= partial;
+ bytes_read += partial;
+ buffer += partial;
+
+ } else {
+ ret = 0;
+ break;
}
+
}
- return ret ? ret : read_count;
+
+ return ret ? ret : bytes_read;
}
static void *
* that this will allow developers a means to produce applications
* that will support USB products.
*
- * Until we detect a device which is pleasing, we silently punt. */
+ * Until we detect a device which is pleasing, we silently punt.
+ */
do {
if (dev->descriptor.idVendor == 0x03f0) { /* Hewlett Packard */
if (dev->descriptor.idVendor == 0x06bd) { /* Agfa */
if (dev->descriptor.idProduct == 0x0001 || /* SnapScan 1212U */
+ dev->descriptor.idProduct == 0x2061 || /* Another SnapScan 1212U (?) */
dev->descriptor.idProduct == 0x0100) { /* SnapScan Touch */
valid_device = 1;
break;
}
if (dev->descriptor.idVendor == 0x05da) { /* Microtek */
- if (dev->descriptor.idProduct == 0x0099) { /* X6 */
+ if (dev->descriptor.idProduct == 0x0099 || /* ScanMaker X6 - X6U */
+ dev->descriptor.idProduct == 0x0094 || /* Phantom 336CX - C3 */
+ dev->descriptor.idProduct == 0x00a0 || /* Phantom 336CX - C3 #2 */
+ dev->descriptor.idProduct == 0x009a || /* Phantom C6 */
+ dev->descriptor.idProduct == 0x00a3 || /* ScanMaker V6USL */
+ dev->descriptor.idProduct == 0x80a3 || /* ScanMaker V6USL #2 */
+ dev->descriptor.idProduct == 0x80ac) { /* ScanMaker V6UL - SpicyU */
valid_device = 1;
break;
}
static struct
file_operations usb_scanner_fops = {
- read: read_scanner,
- write: write_scanner,
- ioctl: ioctl_scanner,
- open: open_scanner,
- release: close_scanner,
+ NULL, /* seek */
+ read_scanner,
+ write_scanner,
+ NULL, /* readdir */
+ NULL, /* poll */
+ ioctl_scanner,
+ NULL, /* mmap */
+ open_scanner,
+ NULL, /* flush */
+ close_scanner,
+ NULL,
+ NULL, /* fasync */
};
static struct
SCN_BASE_MNR
};
-int
-usb_scanner_init(void)
+#ifdef MODULE
+void cleanup_module(void)
+{
+ usb_deregister(&scanner_driver);
+}
+int init_module(void)
+#else
+int usb_scanner_init(void)
+#endif
{
if (usb_register(&scanner_driver) < 0)
return -1;
return 0;
}
-#ifdef MODULE
-
-int
-init_module(void)
-{
- return usb_scanner_init();
-}
-
-void
-cleanup_module(void)
-{
- usb_deregister(&scanner_driver);
-}
-#endif
-
+__initcall(usb_scanner_init);
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
+#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/delay.h>
#include <linux/ioctl.h>
#include "usb.h"
+/* WARNING: These DATA_DUMP's can produce a lot of data. Caveat Emptor. */
// #define RD_DATA_DUMP /* Enable to dump data - limited to 24 bytes */
// #define WR_DATA_DUMP /* DEBUG does not have to be defined. */
}
/*-------------------------------------------------------------------*/
-// returns status (negative) are length (positive)
+// returns status (negative) or length (positive)
int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
devrequest *cmd, void *data, int len, int timeout)
{
bcb.Flags, bcb.Length);
result = usb_bulk_msg(us->pusb_dev, pipe, &bcb,
US_BULK_CB_WRAP_LEN, &partial, HZ*5);
- US_DEBUGP("Bulk command transfer result 0x%x\n", result);
+ US_DEBUGP("Bulk command transfer result=%d\n", result);
/* if we stall, we need to clear it before we go on */
if (result == -EPIPE) {
* Initialization and registration
***********************************************************************/
-static int __init usb_stor_init(void)
+int __init usb_stor_init(void)
{
// MOD_INC_USE_COUNT;
return 0;
}
-static void __exit usb_stor_exit(void)
+void __exit usb_stor_exit(void)
{
usb_deregister(&storage_driver) ;
}
if (err)
goto out_nfserr;
- err = locks_verify_area(FLOCK_VERIFY_WRITE, inode, NULL,
- iap->ia_size<inode->i_size ? iap->ia_size : inode->i_size,
- abs(inode->i_size - iap->ia_size));
-
+ err = locks_verify_truncate(inode, NULL, iap->ia_size);
if (err)
goto out_nfserr;
DQUOT_INIT(inode);
err = get_write_access(inode);
if (err)
goto out_nfserr;
- err = locks_verify_area(FLOCK_VERIFY_WRITE, inode, NULL,
- size<inode->i_size ? size : inode->i_size,
- abs(inode->i_size - size));
+ err = locks_verify_truncate(inode, NULL, size);
if (err)
goto out_nfserr;
if (error)
goto dput_and_out;
- error = locks_verify_area(FLOCK_VERIFY_WRITE, inode, NULL,
- length < inode->i_size ? length : inode->i_size,
- abs(inode->i_size - length));
+ error = locks_verify_truncate(inode, NULL, length);
if (!error) {
DQUOT_INIT(inode);
error = do_truncate(dentry, length);
error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto out_putf;
- error = locks_verify_area(FLOCK_VERIFY_WRITE, inode, file,
- length<inode->i_size ? length : inode->i_size,
- abs(inode->i_size - length));
+ error = locks_verify_truncate(inode, file, length);
lock_kernel();
if (!error)
error = do_truncate(dentry, length);
* code duplication in drivers.
*/
+/*
+ * Unlink a request from its queue's list and give back the queue-level
+ * accounting it held: one pending read (elevator bookkeeping) and its
+ * segment count.  A request that is not on a queue (req->q == NULL)
+ * only gets the list unlink.
+ */
+extern inline void blkdev_dequeue_request(struct request * req)
+{
+	if (req->q)
+	{
+		if (req->cmd == READ)
+			req->q->elevator.read_pendings--;
+		/* return the segments this request contributed to the queue */
+		req->q->nr_segments -= req->nr_segments;
+		req->q = NULL;	/* mark as no longer queued */
+	}
+	list_del(&req->queue);
+}
+
int end_that_request_first(struct request *req, int uptodate, char *name);
void end_that_request_last(struct request *req);
#if !defined(IDE_DRIVER)
#ifndef CURRENT
-#define CURRENT (blk_dev[MAJOR_NR].request_queue.current_request)
+#define CURRENT blkdev_entry_next_request(&blk_dev[MAJOR_NR].request_queue.queue_head)
+#endif
+#ifndef QUEUE_EMPTY
+#define QUEUE_EMPTY list_empty(&blk_dev[MAJOR_NR].request_queue.queue_head)
#endif
#ifndef DEVICE_NAME
#endif
#define INIT_REQUEST \
- if (!CURRENT) {\
+ if (QUEUE_EMPTY) {\
CLEAR_INTR; \
return; \
} \
add_blkdev_randomness(MAJOR(req->rq_dev));
#endif
DEVICE_OFF(req->rq_dev);
- CURRENT = req->next;
+ blkdev_dequeue_request(req);
end_that_request_last(req);
}
#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/tqueue.h>
+#include <linux/list.h>
+
+struct request_queue;
+typedef struct request_queue request_queue_t;
/*
* Ok, this is an expanded form so that we can use the same
* for read/write completion.
*/
struct request {
+ struct list_head queue;
+ int elevator_sequence;
+
volatile int rq_status; /* should split this into a few status bits */
#define RQ_INACTIVE (-1)
#define RQ_ACTIVE 1
struct semaphore * sem;
struct buffer_head * bh;
struct buffer_head * bhtail;
- struct request * next;
+ request_queue_t * q;
};
-typedef struct request_queue request_queue_t;
typedef int (merge_request_fn) (request_queue_t *q,
struct request *req,
- struct buffer_head *bh);
+ struct buffer_head *bh,
+ int);
typedef int (merge_requests_fn) (request_queue_t *q,
struct request *req,
- struct request *req2);
+ struct request *req2,
+ int);
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
typedef void (make_request_fn) (int rw, struct buffer_head *bh);
typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
typedef void (unplug_device_fn) (void *q);
+typedef struct elevator_s
+{
+ int sequence;
+ int read_latency;
+ int write_latency;
+ int max_bomb_segments;
+ int read_pendings;
+} elevator_t;
+
struct request_queue
{
- struct request * current_request;
+ struct list_head queue_head;
+ /* together with queue_head for cacheline sharing */
+ elevator_t elevator;
+ unsigned int nr_segments;
+
request_fn_proc * request_fn;
- merge_request_fn * merge_fn;
+ merge_request_fn * back_merge_fn;
+ merge_request_fn * front_merge_fn;
merge_requests_fn * merge_requests_fn;
make_request_fn * make_request_fn;
plug_device_fn * plug_device_fn;
#define MAX_READAHEAD 31
#define MIN_READAHEAD 3
+#define ELEVATOR_DEFAULTS ((elevator_t) { 0, NR_REQUEST>>1, NR_REQUEST<<5, 4, 0, })
+
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
+#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
+#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
+#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
+#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)
+
#endif
return 0;
}
+/*
+ * Check for conflicting mandatory locks before truncating 'inode' to
+ * 'size'.  The region checked starts at the smaller of the old and new
+ * sizes and spans their difference, i.e. exactly the bytes the truncate
+ * will add or remove.  Returns 0 when no mandatory locking applies,
+ * otherwise the result of locks_mandatory_area().
+ */
+extern inline int locks_verify_truncate(struct inode *inode,
+				    struct file *filp,
+				    loff_t size)
+{
+	if (inode->i_flock && MANDATORY_LOCK(inode))
+		return locks_mandatory_area(
+			FLOCK_VERIFY_WRITE, inode, filp,
+			size < inode->i_size ? size : inode->i_size,
+			abs(inode->i_size - size)
+		);
+	return 0;
+}
+
/* fs/open.c */
#define LP_TRUST_IRQ_ 0x0200 /* obsolete */
#define LP_NO_REVERSE 0x0400 /* No reverse mode available. */
#define LP_DATA_AVAIL 0x0800 /* Data is available. */
-#define LP_HAVE_PORT_BIT 12 /* (0x1000) Port is claimed. */
-#define LP_PORT_BUSY (1<<13) /* Reading or writing. */
/*
* bit defines for 8255 status port
struct socket * sock;
struct file * file; /* If == NULL, device is not ready, yet */
int magic; /* FIXME: not if debugging is off */
- struct request *head; /* Requests are added here... */
- struct request *tail;
+ struct list_head queue_head; /* Requests are added here... */
struct semaphore queue_lock;
};
#endif