};
static struct gendisk *stram_disk;
-static struct request_queue stram_queue;
+static struct request_queue *stram_queue;
static spinlock_t stram_lock = SPIN_LOCK_UNLOCKED;
int __init stram_device_init(void)
return -ENXIO;
}
- blk_init_queue(&stram_queue, do_stram_request, &stram_lock);
+ stram_queue = blk_init_queue(do_stram_request, &stram_lock);
+ if (!stram_queue) {
+ unregister_blkdev(STRAM_MAJOR, "stram");
+ put_disk(stram_disk);
+ return -ENOMEM;
+ }
+
stram_disk->major = STRAM_MAJOR;
stram_disk->first_minor = STRAM_MINOR;
stram_disk->fops = &stram_fops;
- stram_disk->queue = &stram_queue;
+ stram_disk->queue = stram_queue;
sprintf(stram_disk->disk_name, "stram");
set_capacity(stram_disk, (swap_end - swap_start)/512);
add_disk(stram_disk);
};
/* Protected by the queue_lock */
-static request_queue_t ubd_queue;
+static request_queue_t *ubd_queue;
/* Protected by ubd_lock */
static int fake_major = 0;
static void ubd_handler(void)
{
struct io_thread_req req;
- struct request *rq = elv_next_request(&ubd_queue);
+ struct request *rq = elv_next_request(ubd_queue);
int n;
do_ubd = NULL;
ubd_finish(rq, req.error);
reactivate_fd(thread_fd, UBD_IRQ);
- do_ubd_request(&ubd_queue);
+ do_ubd_request(ubd_queue);
}
static void ubd_intr(int irq, void *dev, struct pt_regs *unused)
sprintf(disk->devfs_name, "ubd/disc%d", unit);
disk->private_data = &ubd_dev[unit];
- disk->queue = &ubd_queue;
+ disk->queue = ubd_queue;
add_disk(disk);
*disk_out = disk;
if (register_blkdev(MAJOR_NR, "ubd"))
return -1;
- blk_init_queue(&ubd_queue, do_ubd_request, &ubd_io_lock);
- elevator_init(&ubd_queue, &elevator_noop);
+ ubd_queue = blk_init_queue(do_ubd_request, &ubd_io_lock);
+ if (!ubd_queue) {
+ unregister_blkdev(MAJOR_NR, "ubd");
+ return -1;
+ }
+
+ elevator_init(ubd_queue, &elevator_noop);
if (fake_major != 0) {
char name[sizeof("ubd_nnn\0")];
#define DPRINT(a)
#endif
-static struct request_queue floppy_queue;
+static struct request_queue *floppy_queue;
#define MAJOR_NR FLOPPY_MAJOR
#define FLOPPY_DMA 0
#define DEVICE_NAME "floppy"
-#define QUEUE (&floppy_queue)
-#define CURRENT elv_next_request(&floppy_queue)
+#define QUEUE (floppy_queue)
+#define CURRENT elv_next_request(floppy_queue)
/* Disk types: DD */
static struct archy_disk_type {
enable_dma(FIQ_FD1772); /* This inserts a call to our command end routine */
- blk_init_queue(&floppy_queue, do_fd_request, &lock);
+ floppy_queue = blk_init_queue(do_fd_request, &lock);
+ if (!floppy_queue)
+ goto err_queue;
+
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
disks[i]->major = MAJOR_NR;
disks[i]->fops = &floppy_fops;
sprintf(disks[i]->disk_name, "fd%d", i);
disks[i]->private_data = &unit[i];
- disks[i]->queue = &floppy_queue;
+ disks[i]->queue = floppy_queue;
set_capacity(disks[i], MAX_DISK_SIZE * 2);
}
blk_register_region(MKDEV(MAJOR_NR, 0), 256, THIS_MODULE,
return 0;
+ err_queue:
+ kfree(DMAbuffer);
err_dma2:
free_dma(FIQ_FD1772);
#include <asm/hardware/ioc.h>
static void (*do_mfm)(void) = NULL;
-static struct request_queue mfm_queue;
+static struct request_queue *mfm_queue;
static spinlock_t mfm_lock = SPIN_LOCK_UNLOCKED;
#define MAJOR_NR MFM_ACORN_MAJOR
-#define QUEUE (&mfm_queue)
-#define CURRENT elv_next_request(&mfm_queue)
+#define QUEUE (mfm_queue)
+#define CURRENT elv_next_request(mfm_queue)
/*
* This sort of stuff should be in a header file shared with ide.c, hd.c, xd.c etc
*/
hdc63463_irqpolladdress = mfm_IRQPollLoc;
hdc63463_irqpollmask = irqmask;
- blk_init_queue(&mfm_queue, do_mfm_request, &mfm_lock);
+ mfm_queue = blk_init_queue(do_mfm_request, &mfm_lock);
+ if (!mfm_queue)
+ goto out2a;
Busy = 0;
lastspecifieddrive = -1;
for (i = 0; i < mfm_drives; i++) {
mfm_geometry(i);
- mfm_gendisk[i]->queue = &mfm_queue;
+ mfm_gendisk[i]->queue = mfm_queue;
add_disk(mfm_gendisk[i]);
}
return 0;
for (i = 0; i < mfm_drives; i++)
put_disk(mfm_gendisk[i]);
out3:
- blk_cleanup_queue(&mfm_queue);
+ blk_cleanup_queue(mfm_queue);
+out2a:
unregister_blkdev(MAJOR_NR, "mfm");
out2:
release_region(mfm_addr, 10);
del_gendisk(mfm_gendisk[i]);
put_disk(mfm_gendisk[i]);
}
- blk_cleanup_queue(&mfm_queue);
+ blk_cleanup_queue(mfm_queue);
unregister_blkdev(MAJOR_NR, "mfm");
if (mfm_addr)
release_region(mfm_addr, 10);
/*
Initialize the I/O Request Queue.
*/
- RequestQueue = &Controller->RequestQueue;
- blk_init_queue(RequestQueue, DAC960_RequestFunction, &Controller->queue_lock);
+ RequestQueue = blk_init_queue(DAC960_RequestFunction,&Controller->queue_lock);
+ if (!RequestQueue) {
+ unregister_blkdev(MajorNumber, "dac960");
+ return false;
+ }
+ Controller->RequestQueue = RequestQueue;
blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
RequestQueue->queuedata = Controller;
blk_queue_max_hw_segments(RequestQueue,
/*
Remove the I/O Request Queue.
*/
- blk_cleanup_queue(&Controller->RequestQueue);
+ blk_cleanup_queue(Controller->RequestQueue);
}
/*
if (!Controller->disks[i])
goto Failure;
Controller->disks[i]->private_data = (void *)i;
- Controller->disks[i]->queue = &Controller->RequestQueue;
+ Controller->disks[i]->queue = Controller->RequestQueue;
}
init_waitqueue_head(&Controller->CommandWaitQueue);
init_waitqueue_head(&Controller->HealthStatusWaitQueue);
static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
boolean WaitForCommand)
{
- struct request_queue *RequestQueue = &Controller->RequestQueue;
+ struct request_queue *RequestQueue = Controller->RequestQueue;
struct request *Request;
DAC960_Command_T *Command;
Command->BlockCount = Request->nr_sectors;
Command->Request = Request;
blkdev_dequeue_request(Request);
- Command->SegmentCount = blk_rq_map_sg(&Controller->RequestQueue,
+ Command->SegmentCount = blk_rq_map_sg(Controller->RequestQueue,
Command->Request, Command->cmd_sglist);
/* pci_map_sg MAY change the value of SegCount */
Command->SegmentCount = pci_map_sg(Command->PciDevice, Command->cmd_sglist,
* code should almost never be called, just go with a
* simple coding.
*/
- (void)blk_rq_map_sg(&Controller->RequestQueue, Command->Request,
+ (void)blk_rq_map_sg(Controller->RequestQueue, Command->Request,
Command->cmd_sglist);
(void)pci_map_sg(Command->PciDevice, Command->cmd_sglist, 1,
DAC960_Command_T *FreeCommands;
unsigned char *CombinedStatusBuffer;
unsigned char *CurrentStatusBuffer;
- struct request_queue RequestQueue;
+ struct request_queue *RequestQueue;
spinlock_t queue_lock;
wait_queue_head_t CommandWaitQueue;
wait_queue_head_t HealthStatusWaitQueue;
#include <asm/atari_stram.h>
static void (*do_acsi)(void) = NULL;
-static struct request_queue acsi_queue;
-#define QUEUE (&acsi_queue)
-#define CURRENT elv_next_request(&acsi_queue)
+static struct request_queue *acsi_queue;
+#define QUEUE (acsi_queue)
+#define CURRENT elv_next_request(acsi_queue)
#define DEBUG
#undef DEBUG_DETECT
phys_acsi_buffer = virt_to_phys( acsi_buffer );
STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
- blk_init_queue(&acsi_queue, do_acsi_request, &acsi_lock);
+ acsi_queue = blk_init_queue(do_acsi_request, &acsi_lock);
+ if (!acsi_queue) {
+ err = -ENOMEM;
+ goto out2a;
+ }
#ifdef CONFIG_ATARI_SLM
err = slm_init();
#endif
disk->fops = &acsi_fops;
disk->private_data = &acsi_info[i];
set_capacity(disk, acsi_info[i].size);
- disk->queue = &acsi_queue;
+ disk->queue = acsi_queue;
add_disk(disk);
}
return 0;
while (i--)
put_disk(acsi_gendisk[i]);
out3:
- blk_cleanup_queue(&acsi_queue);
+ blk_cleanup_queue(acsi_queue);
+out2a:
atari_stram_free( acsi_buffer );
out2:
unregister_blkdev( ACSI_MAJOR, "ad" );
{
int i;
del_timer( &acsi_timer );
- blk_cleanup_queue(&acsi_queue);
+ blk_cleanup_queue(acsi_queue);
atari_stram_free( acsi_buffer );
if (unregister_blkdev( ACSI_MAJOR, "ad" ) != 0)
MODULE_PARM(fd_def_df0,"l");
MODULE_LICENSE("GPL");
-static struct request_queue floppy_queue;
-#define QUEUE (&floppy_queue)
-#define CURRENT elv_next_request(&floppy_queue)
+static struct request_queue *floppy_queue;
+#define QUEUE (floppy_queue)
+#define CURRENT elv_next_request(floppy_queue)
/*
* Macros
disk->fops = &floppy_fops;
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
- disk->queue = &floppy_queue;
+ disk->queue = floppy_queue;
set_capacity(disk, 880*2);
add_disk(disk);
}
post_write_timer.data = 0;
post_write_timer.function = post_write;
- blk_init_queue(&floppy_queue, do_fd_request, &amiflop_lock);
+ floppy_queue = blk_init_queue(do_fd_request, &amiflop_lock);
+ if (!floppy_queue) {
+ free_irq(IRQ_AMIGA_CIAA_TB, NULL);
+ free_irq(IRQ_AMIGA_DSKBLK, NULL);
+ amiga_chip_free(raw_buf);
+ release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
+ unregister_blkdev(FLOPPY_MAJOR,"fd");
+ blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+ return -ENOMEM;
+ }
+
for (i = 0; i < 128; i++)
mfmdecode[i]=255;
for (i = 0; i < 16; i++)
free_irq(IRQ_AMIGA_DSKBLK, NULL);
custom.dmacon = DMAF_DISK; /* disable DMA */
amiga_chip_free(raw_buf);
- blk_cleanup_queue(&floppy_queue);
+ blk_cleanup_queue(floppy_queue);
release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
unregister_blkdev(FLOPPY_MAJOR, "fd");
}
#undef DEBUG
-static struct request_queue floppy_queue;
+static struct request_queue *floppy_queue;
-#define QUEUE (&floppy_queue)
-#define CURRENT elv_next_request(&floppy_queue)
+#define QUEUE (floppy_queue)
+#define CURRENT elv_next_request(floppy_queue)
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
PhysTrackBuffer = virt_to_phys(TrackBuffer);
BufferDrive = BufferSide = BufferTrack = -1;
- blk_init_queue(&floppy_queue, do_fd_request, &ataflop_lock);
+ floppy_queue = blk_init_queue(do_fd_request, &ataflop_lock);
+ if (!floppy_queue)
+ goto Enomem;
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
unit[i].disk->private_data = &unit[i];
- unit[i].disk->queue = &floppy_queue;
+ unit[i].disk->queue = floppy_queue;
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
}
Enomem:
while (i--)
put_disk(unit[i].disk);
+ if (floppy_queue)
+ blk_cleanup_queue(floppy_queue);
unregister_blkdev(FLOPPY_MAJOR, "fd");
return -ENOMEM;
}
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
- blk_cleanup_queue(&floppy_queue);
+ blk_cleanup_queue(floppy_queue);
del_timer_sync(&fd_timer);
atari_stram_free( DMABuffer );
}
drive_info_struct *drv = &(hba[ctlr]->drv[i]);
if (!drv->nr_blocks)
continue;
- hba[ctlr]->queue.hardsect_size = drv->block_size;
+ blk_queue_hardsect_size(hba[ctlr]->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
add_disk(disk);
}
/*
* See if we can queue up some more IO
*/
- blk_start_queue(&h->queue);
+ blk_start_queue(h->queue);
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
return IRQ_HANDLED;
}
|| (hba[i]->cmd_pool == NULL)
|| (hba[i]->errinfo_pool == NULL))
{
+err_all:
if(hba[i]->cmd_pool_bits)
kfree(hba[i]->cmd_pool_bits);
if(hba[i]->cmd_pool)
return(-1);
}
+ /*
+ * someone needs to clean up this failure handling mess
+ */
+ spin_lock_init(&hba[i]->lock);
+ q = blk_init_queue(do_cciss_request, &hba[i]->lock);
+ if (!q)
+ goto err_all;
+
/* Initialize the pdev driver private data.
have it point to hba[i]. */
pci_set_drvdata(pdev, hba[i]);
cciss_procinit(i);
- q = &hba[i]->queue;
q->queuedata = hba[i];
- spin_lock_init(&hba[i]->lock);
- blk_init_queue(q, do_cciss_request, &hba[i]->lock);
blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
/* This is a hardware imposed limit. */
disk->major = COMPAQ_CISS_MAJOR + i;
disk->first_minor = j << NWD_SHIFT;
disk->fops = &cciss_fops;
- disk->queue = &hba[i]->queue;
+ disk->queue = hba[i]->queue;
disk->private_data = drv;
if( !(drv->nr_blocks))
continue;
- hba[i]->queue.hardsect_size = drv->block_size;
+ blk_queue_hardsect_size(hba[i]->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
add_disk(disk);
}
pci_set_drvdata(pdev, NULL);
iounmap((void*)hba[i]->vaddr);
cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
+ blk_cleanup_queue(hba[i]->queue);
unregister_blkdev(COMPAQ_CISS_MAJOR+i, hba[i]->devname);
remove_proc_entry(hba[i]->devname, proc_cciss);
unsigned int maxQsinceinit;
unsigned int maxSG;
spinlock_t lock;
- struct request_queue queue;
+ struct request_queue *queue;
//* pointers to command and error info pool */
CommandList_struct *cmd_pool;
struct access_method *access;
};
-#define CCISS_LOCK(i) (hba[i]->queue.queue_lock)
+#define CCISS_LOCK(i) (hba[i]->queue->queue_lock)
#endif /* CCISS_H */
iounmap(hba[i]->vaddr);
unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
del_timer(&hba[i]->timer);
- blk_cleanup_queue(&hba[i]->queue);
+ blk_cleanup_queue(hba[i]->queue);
remove_proc_entry(hba[i]->devname, proc_array);
pci_free_consistent(hba[i]->pci_dev,
NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
printk(KERN_INFO "cpqarray: Finding drives on %s",
hba[i]->devname);
+
+ spin_lock_init(&hba[i]->lock);
+ q = blk_init_queue(do_ida_request, &hba[i]->lock);
+ if (!q)
+ goto Enomem1;
+
+ hba[i]->queue = q;
+ q->queuedata = hba[i];
+
getgeometry(i);
start_fwbk(i);
ida_procinit(i);
- q = &hba[i]->queue;
- q->queuedata = hba[i];
- spin_lock_init(&hba[i]->lock);
- blk_init_queue(q, do_ida_request, &hba[i]->lock);
blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
/* This is a hardware imposed limit. */
disk->fops = &ida_fops;
if (j && !drv->nr_blks)
continue;
- hba[i]->queue.hardsect_size = drv->blk_size;
+ blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
- disk->queue = &hba[i]->queue;
+ disk->queue = hba[i]->queue;
disk->private_data = drv;
add_disk(disk);
}
/*
* See if we can queue up some more IO
*/
- do_ida_request(&h->queue);
+ do_ida_request(h->queue);
spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
return IRQ_HANDLED;
}
drv_info_t *drv = &host->drv[i];
if (i && !drv->nr_blks)
continue;
- host->queue.hardsect_size = drv->blk_size;
+ blk_queue_hardsect_size(host->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
- disk->queue = &host->queue;
+ disk->queue = host->queue;
disk->private_data = drv;
if (i)
add_disk(disk);
cmdlist_t *cmd_pool;
dma_addr_t cmd_pool_dhandle;
unsigned long *cmd_pool_bits;
- struct request_queue queue;
+ struct request_queue *queue;
spinlock_t lock;
unsigned int Qdepth;
#include <linux/completion.h>
static struct request *current_req;
-static struct request_queue floppy_queue;
+static struct request_queue *floppy_queue;
#ifndef fd_get_dma_residue
#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
* logical buffer */
static void request_done(int uptodate)
{
- struct request_queue *q = &floppy_queue;
+ struct request_queue *q = floppy_queue;
struct request *req = current_req;
unsigned long flags;
int block;
if (!current_req) {
struct request *req;
- spin_lock_irq(floppy_queue.queue_lock);
- req = elv_next_request(&floppy_queue);
- spin_unlock_irq(floppy_queue.queue_lock);
+ spin_lock_irq(floppy_queue->queue_lock);
+ req = elv_next_request(floppy_queue);
+ spin_unlock_irq(floppy_queue->queue_lock);
if (!req) {
do_floppy = NULL;
unlock_fdc();
goto out;
}
+ floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
+ if (!floppy_queue) {
+ err = -ENOMEM;
+ goto fail_queue;
+ }
+
for (i=0; i<N_DRIVE; i++) {
disks[i]->major = FLOPPY_MAJOR;
disks[i]->first_minor = TOMINOR(i);
else
floppy_sizes[i] = MAX_DISK_SIZE << 1;
- blk_init_queue(&floppy_queue, do_fd_request, &floppy_lock);
reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
config_types();
continue;
/* to be cleaned up... */
disks[drive]->private_data = (void*)(long)drive;
- disks[drive]->queue = &floppy_queue;
+ disks[drive]->queue = floppy_queue;
add_disk(disks[drive]);
}
del_timer(&fd_timeout);
out2:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+ blk_cleanup_queue(floppy_queue);
+fail_queue:
unregister_blkdev(FLOPPY_MAJOR,"fd");
- blk_cleanup_queue(&floppy_queue);
out:
for (i=0; i<N_DRIVE; i++)
put_disk(disks[i]);
}
devfs_remove("floppy");
- blk_cleanup_queue(&floppy_queue);
+ blk_cleanup_queue(floppy_queue);
/* eject disk, if any */
fd_eject(0);
}
#include <linux/completion.h>
static struct request *current_req;
-static struct request_queue floppy_queue;
+static struct request_queue *floppy_queue;
#ifndef fd_get_dma_residue
#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
* logical buffer */
static void request_done(int uptodate)
{
- struct request_queue *q = &floppy_queue;
+ struct request_queue *q = floppy_queue;
struct request *req = current_req;
unsigned long flags;
int block;
if (!current_req) {
struct request *req;
- spin_lock_irq(floppy_queue.queue_lock);
- req = elv_next_request(&floppy_queue);
- spin_unlock_irq(floppy_queue.queue_lock);
+ spin_lock_irq(floppy_queue->queue_lock);
+ req = elv_next_request(floppy_queue);
+ spin_unlock_irq(floppy_queue->queue_lock);
if (!req) {
do_floppy = NULL;
unlock_fdc();
else
floppy_sizes[i] = MAX_DISK_SIZE << 1;
- blk_init_queue(&floppy_queue, do_fd_request, &floppy_lock);
+	floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
+ if (!floppy_queue)
+ goto out_queue;
+
reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
config_types();
continue;
/* to be cleaned up... */
disks[drive]->private_data = (void*)(long)drive;
- disks[drive]->queue = &floppy_queue;
+ disks[drive]->queue = floppy_queue;
add_disk(disks[drive]);
}
out1:
del_timer_sync(&fd_timeout);
out2:
+ blk_cleanup_queue(floppy_queue);
+out_queue:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR,"fd");
- blk_cleanup_queue(&floppy_queue);
out:
for (i=0; i<N_DRIVE; i++)
put_disk(disks[i]);
}
devfs_remove("floppy");
- blk_cleanup_queue(&floppy_queue);
+ blk_cleanup_queue(floppy_queue);
/* eject disk, if any */
fd_eject(0);
}
* @q: the request queue to be released
*
* Description:
- * blk_cleanup_queue is the pair to blk_init_queue(). It should
- * be called when a request queue is being released; typically
- * when a block device is being de-registered. Currently, its
- * primary task it to free all the &struct request structures that
- * were allocated to the queue.
+ * blk_cleanup_queue is the pair to blk_init_queue() or
+ * blk_queue_make_request(). It should be called when a request queue is
+ * being released; typically when a block device is being de-registered.
+ * Currently, its primary task is to free all the &struct request
+ * structures that were allocated to the queue and the queue itself.
+ *
* Caveat:
* Hopefully the low level driver will have finished any
* outstanding requests first...
{
struct request_list *rl = &q->rq;
+ if (!atomic_dec_and_test(&q->refcnt))
+ return;
+
elevator_exit(q);
del_timer_sync(&q->unplug_timer);
kblockd_flush();
- mempool_destroy(rl->rq_pool);
+ if (rl->rq_pool)
+ mempool_destroy(rl->rq_pool);
if (blk_queue_tagged(q))
blk_queue_free_tags(q);
- memset(q, 0, sizeof(*q));
+ kfree(q);
}
static int blk_init_free_list(request_queue_t *q)
__setup("elevator=", elevator_setup);
#endif /* CONFIG_IOSCHED_AS || CONFIG_IOSCHED_DEADLINE */
+request_queue_t *blk_alloc_queue(int gfp_mask)
+{
+ request_queue_t *q = kmalloc(sizeof(*q), gfp_mask);
+
+ if (!q)
+ return NULL;
+
+ memset(q, 0, sizeof(*q));
+ atomic_set(&q->refcnt, 1);
+ return q;
+}
+
/**
* blk_init_queue - prepare a request queue for use with a block device
* @q: The &request_queue_t to be initialised
* blk_init_queue() must be paired with a blk_cleanup_queue() call
* when the block device is deactivated (such as at module unload).
**/
-int blk_init_queue(request_queue_t *q, request_fn_proc *rfn, spinlock_t *lock)
+request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
- int ret;
+ request_queue_t *q;
static int printed;
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
+ return NULL;
+
if (blk_init_free_list(q))
- return -ENOMEM;
+ goto out_init;
if (!printed) {
printed = 1;
printk("Using %s elevator\n", chosen_elevator->elevator_name);
}
- if ((ret = elevator_init(q, chosen_elevator))) {
- blk_cleanup_queue(q);
- return ret;
- }
+ if (elevator_init(q, chosen_elevator))
+ goto out_elv;
q->request_fn = rfn;
q->back_merge_fn = ll_back_merge_fn;
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
- return 0;
+ return q;
+out_elv:
+	mempool_destroy(q->rq.rq_pool);
+out_init:
+ kfree(q);
+ return NULL;
+
+}
+
+int blk_get_queue(request_queue_t *q)
+{
+ if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+ atomic_inc(&q->refcnt);
+ return 0;
+ }
+
+ return 1;
}
static inline void blk_free_request(request_queue_t *q, struct request *rq)
goto end_io;
}
+ if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
+ goto end_io;
+
/*
* If this device has partitions, remap block n
* of partition p to block n+start(p) of the disk.
EXPORT_SYMBOL(end_request);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
+EXPORT_SYMBOL(blk_get_queue);
+EXPORT_SYMBOL(blk_alloc_queue);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(blk_queue_bounce_limit);
EXPORT_SYMBOL(generic_make_request);
lo->lo_bio = lo->lo_biotail = NULL;
+ lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
+ if (!lo->lo_queue) {
+ error = -ENOMEM;
+ fput(file);
+ goto out_putf;
+ }
+
+ disks[lo->lo_number]->queue = lo->lo_queue;
+
/*
* set queue make_request_fn, and add limits based on lower level
* device
*/
- blk_queue_make_request(&lo->lo_queue, loop_make_request);
- lo->lo_queue.queuedata = lo;
+ blk_queue_make_request(lo->lo_queue, loop_make_request);
+ lo->lo_queue->queuedata = lo;
/*
* we remap to a block device, make sure we correctly stack limits
if (S_ISBLK(inode->i_mode)) {
request_queue_t *q = bdev_get_queue(lo_device);
- blk_queue_max_sectors(&lo->lo_queue, q->max_sectors);
- blk_queue_max_phys_segments(&lo->lo_queue,q->max_phys_segments);
- blk_queue_max_hw_segments(&lo->lo_queue, q->max_hw_segments);
- blk_queue_max_segment_size(&lo->lo_queue, q->max_segment_size);
- blk_queue_segment_boundary(&lo->lo_queue, q->seg_boundary_mask);
- blk_queue_merge_bvec(&lo->lo_queue, q->merge_bvec_fn);
+ blk_queue_max_sectors(lo->lo_queue, q->max_sectors);
+ blk_queue_max_phys_segments(lo->lo_queue,q->max_phys_segments);
+ blk_queue_max_hw_segments(lo->lo_queue, q->max_hw_segments);
+ blk_queue_max_segment_size(lo->lo_queue, q->max_segment_size);
+ blk_queue_segment_boundary(lo->lo_queue, q->seg_boundary_mask);
+ blk_queue_merge_bvec(lo->lo_queue, q->merge_bvec_fn);
}
kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
lo->lo_sizelimit = 0;
lo->lo_encrypt_key_size = 0;
lo->lo_flags = 0;
- lo->lo_queue.queuedata = NULL;
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
lo->lo_state = Lo_unbound;
fput(filp);
+ blk_put_queue(lo->lo_queue);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return 0;
sprintf(disk->disk_name, "loop%d", i);
sprintf(disk->devfs_name, "loop/%d", i);
disk->private_data = lo;
- disk->queue = &lo->lo_queue;
+ disk->queue = lo->lo_queue;
add_disk(disk);
}
printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
- disk->queue = kmalloc(sizeof(struct request_queue), GFP_KERNEL);
+ disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
if (!disk->queue) {
put_disk(disk);
goto out;
}
- memset(disk->queue, 0, sizeof(struct request_queue));
- blk_init_queue(disk->queue, do_nbd_request, &nbd_lock);
}
if (register_blkdev(NBD_MAJOR, "nbd")) {
return 0;
out:
while (i--) {
- kfree(nbd_dev[i].disk->queue);
+ if (nbd_dev[i].disk->queue)
+ blk_cleanup_queue(nbd_dev[i].disk->queue);
put_disk(nbd_dev[i].disk);
}
return err;
for (i = 0; i < MAX_NBD; i++) {
struct gendisk *disk = nbd_dev[i].disk;
if (disk) {
- if (disk->queue) {
+ if (disk->queue)
blk_cleanup_queue(disk->queue);
- kfree(disk->queue);
- disk->queue = NULL;
- }
del_gendisk(disk);
put_disk(disk);
}
}
/* I/O request processing */
-static struct request_queue pcd_queue;
+static struct request_queue *pcd_queue;
static void do_pcd_request(request_queue_t * q)
{
spin_lock_irqsave(&pcd_lock, saved_flags);
end_request(pcd_req, success);
pcd_busy = 0;
- do_pcd_request(&pcd_queue);
+ do_pcd_request(pcd_queue);
spin_unlock_irqrestore(&pcd_lock, saved_flags);
}
do_pcd_read();
spin_lock_irqsave(&pcd_lock, saved_flags);
- do_pcd_request(&pcd_queue);
+ do_pcd_request(pcd_queue);
spin_unlock_irqrestore(&pcd_lock, saved_flags);
}
return -1;
}
- blk_init_queue(&pcd_queue, do_pcd_request, &pcd_lock);
+ pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock);
+ if (!pcd_queue) {
+ unregister_blkdev(major, name);
+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+ put_disk(cd->disk);
+ return -1;
+ }
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (cd->present) {
register_cdrom(&cd->info);
cd->disk->private_data = cd;
- cd->disk->queue = &pcd_queue;
+ cd->disk->queue = pcd_queue;
add_disk(cd->disk);
}
}
}
put_disk(cd->disk);
}
- blk_cleanup_queue(&pcd_queue);
+ blk_cleanup_queue(pcd_queue);
unregister_blkdev(major, name);
}
return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
}
-static struct request_queue pf_queue;
+static struct request_queue *pf_queue;
static void do_pf_request(request_queue_t * q)
{
spin_lock_irqsave(&pf_spin_lock, saved_flags);
end_request(pf_req, success);
pf_busy = 0;
- do_pf_request(&pf_queue);
+ do_pf_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
}
put_disk(pf->disk);
return -1;
}
- blk_init_queue(&pf_queue, do_pf_request, &pf_spin_lock);
- blk_queue_max_phys_segments(&pf_queue, cluster);
- blk_queue_max_hw_segments(&pf_queue, cluster);
+ pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock);
+ if (!pf_queue) {
+ unregister_blkdev(major, name);
+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+ put_disk(pf->disk);
+ return -1;
+ }
+
+ blk_queue_max_phys_segments(pf_queue, cluster);
+ blk_queue_max_hw_segments(pf_queue, cluster);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
struct gendisk *disk = pf->disk;
if (!pf->present)
continue;
disk->private_data = pf;
- disk->queue = &pf_queue;
+ disk->queue = pf_queue;
add_disk(disk);
}
return 0;
put_disk(pf->disk);
pi_release(pf->pi);
}
- blk_cleanup_queue(&pf_queue);
+ blk_cleanup_queue(pf_queue);
}
MODULE_LICENSE("GPL");
unsigned int head, sect, cyl, wpcom, lzone, ctl;
};
static spinlock_t ps2esdi_lock = SPIN_LOCK_UNLOCKED;
-static struct request_queue ps2esdi_queue;
+static struct request_queue *ps2esdi_queue;
static struct request *current_req;
#if 0
/* register the device - pass the name and major number */
if (register_blkdev(PS2ESDI_MAJOR, "ed"))
- return -1;
+ return -EBUSY;
/* set up some global information - indicating device specific info */
- blk_init_queue(&ps2esdi_queue, do_ps2esdi_request, &ps2esdi_lock);
+ ps2esdi_queue = blk_init_queue(do_ps2esdi_request, &ps2esdi_lock);
+ if (!ps2esdi_queue) {
+ unregister_blkdev(PS2ESDI_MAJOR, "ed");
+ return -ENOMEM;
+ }
/* some minor housekeeping - setup the global gendisk structure */
error = ps2esdi_geninit();
printk(KERN_WARNING "PS2ESDI: error initialising"
" device, releasing resources\n");
unregister_blkdev(PS2ESDI_MAJOR, "ed");
- blk_cleanup_queue(&ps2esdi_queue);
+ blk_cleanup_queue(ps2esdi_queue);
return error;
}
return 0;
free_dma(dma_arb_level);
free_irq(PS2ESDI_IRQ, &ps2esdi_gendisk);
unregister_blkdev(PS2ESDI_MAJOR, "ed");
- blk_cleanup_queue(&ps2esdi_queue);
+ blk_cleanup_queue(ps2esdi_queue);
for (i = 0; i < ps2esdi_drives; i++) {
del_gendisk(ps2esdi_gendisk[i]);
put_disk(ps2esdi_gendisk[i]);
error = -EBUSY;
goto err_out3;
}
- blk_queue_max_sectors(&ps2esdi_queue, 128);
+ blk_queue_max_sectors(ps2esdi_queue, 128);
error = -ENOMEM;
for (i = 0; i < ps2esdi_drives; i++) {
struct gendisk *disk = ps2esdi_gendisk[i];
set_capacity(disk, ps2esdi_info[i].head * ps2esdi_info[i].sect *
ps2esdi_info[i].cyl);
- disk->queue = &ps2esdi_queue;
+ disk->queue = ps2esdi_queue;
disk->private_data = &ps2esdi_info[i];
add_disk(disk);
}
spin_lock_irqsave(&ps2esdi_lock, flags);
end_request(current_req, ending);
current_req = NULL;
- do_ps2esdi_request(&ps2esdi_queue);
+ do_ps2esdi_request(ps2esdi_queue);
spin_unlock_irqrestore(&ps2esdi_lock, flags);
}
} /* handle interrupts */
static struct gendisk *rd_disks[NUM_RAMDISKS];
static struct block_device *rd_bdev[NUM_RAMDISKS];/* Protected device data */
-static struct request_queue *rd_queue;
+static struct request_queue *rd_queue[NUM_RAMDISKS];
/*
* Parameters for the boot-loading of the RAM disk. These are set by
del_gendisk(rd_disks[i]);
put_disk(rd_disks[i]);
}
- kfree(rd_queue);
devfs_remove("rd");
unregister_blkdev(RAMDISK_MAJOR, "ramdisk" );
}
goto out;
}
- rd_queue = kmalloc(NUM_RAMDISKS * sizeof(struct request_queue),
- GFP_KERNEL);
- if (!rd_queue)
- goto out;
- memset(rd_queue, 0, NUM_RAMDISKS * sizeof(struct request_queue));
if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) {
err = -EIO;
- goto out_queue;
+ goto out;
}
devfs_mk_dir("rd");
for (i = 0; i < NUM_RAMDISKS; i++) {
struct gendisk *disk = rd_disks[i];
- blk_queue_make_request(&rd_queue[i], &rd_make_request);
+ rd_queue[i] = blk_alloc_queue(GFP_KERNEL);
+ if (!rd_queue[i])
+ goto out_queue;
+
+ blk_queue_make_request(rd_queue[i], &rd_make_request);
/* rd_size is given in kB */
disk->major = RAMDISK_MAJOR;
disk->first_minor = i;
disk->fops = &rd_bd_op;
- disk->queue = &rd_queue[i];
+ disk->queue = rd_queue[i];
sprintf(disk->disk_name, "ram%d", i);
sprintf(disk->devfs_name, "rd/%d", i);
set_capacity(disk, rd_size * 2);
return 0;
out_queue:
- kfree(rd_queue);
+ unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
out:
while (i--)
put_disk(rd_disks[i]);
case CDROMCLOSETRAY:
close = 1;
case CDROMEJECT:
+ if (blk_get_queue(q)) {
+ err = -ENXIO;
+ break;
+ }
+
rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->flags |= REQ_BLOCK_PC;
rq->data = NULL;
blk_put_request(rq);
break;
default:
- err = -ENOTTY;
+ return -ENOTTY;
}
blk_put_queue(q);
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static struct request_queue swim3_queue;
+static struct request_queue *swim3_queue;
static struct gendisk *disks[2];
static struct request *fd_req;
wake_up(&fs->wait);
return;
}
- while (fs->state == idle && (req = elv_next_request(&swim3_queue))) {
+ while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
#if 0
printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
req->rq_disk->disk_name, req->cmd,
err = -EBUSY;
goto out;
}
- blk_init_queue(&swim3_queue, do_fd_request, &swim3_lock);
+
+ swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (!swim3_queue) {
+ err = -ENOMEM;
+ goto out_queue;
+ }
+
for (i = 0; i < floppy_count; i++) {
struct gendisk *disk = disks[i];
disk->major = FLOPPY_MAJOR;
disk->first_minor = i;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[i];
- disk->queue = &swim3_queue;
+ disk->queue = swim3_queue;
sprintf(disk->disk_name, "fd%d", i);
sprintf(disk->devfs_name, "floppy/%d", i);
set_capacity(disk, 2880);
}
return 0;
+out_queue:
+ unregister_blkdev(FLOPPY_MAJOR, "fd");
out:
while (i--)
put_disk(disks[i]);
static struct floppy_state floppy_states[MAX_FLOPPIES];
static spinlock_t swim_iop_lock = SPIN_LOCK_UNLOCKED;
-#define CURRENT elv_next_request(&swim_queue)
+#define CURRENT elv_next_request(swim_queue)
static char *drive_names[7] = {
"not installed", /* DRV_NONE */
.revalidate_disk= floppy_revalidate,
};
-static struct request_queue swim_queue;
+static struct request_queue *swim_queue;
/*
* SWIM IOP initialization
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return -EBUSY;
- blk_init_queue(&swim_queue, do_fd_request, &swim_iop_lock);
+ swim_queue = blk_init_queue(do_fd_request, &swim_iop_lock);
+ if (!swim_queue) {
+ unregister_blkdev(FLOPPY_MAJOR, "fd");
+ return -ENOMEM;
+ }
+
printk("SWIM-IOP: %s by Joshua M. Thompson (funaho@jurai.org)\n",
DRIVER_VERSION);
if (iop_listen(SWIM_IOP, SWIM_CHAN, swimiop_receive, "SWIM") != 0) {
printk(KERN_ERR "SWIM-IOP: IOP channel already in use; can't initialize.\n");
+ unregister_blkdev(FLOPPY_MAJOR, "fd");
+ blk_cleanup_queue(swim_queue);
return -EBUSY;
}
disk->fops = &floppy_fops;
sprintf(disk->disk_name, "fd%d", i);
disk->private_data = &floppy_states[i];
- disk->queue = &swim_queue;
+ disk->queue = swim_queue;
set_capacity(disk, 2880 * 2);
add_disk(disk);
}
*/
struct bio *bio, *currentbio, **biotail;
- request_queue_t queue;
+ request_queue_t *queue;
struct mm_page {
dma_addr_t page_dma;
card->bio = NULL;
card->biotail = &card->bio;
- blk_queue_make_request(&card->queue, mm_make_request);
- card->queue.queuedata = card;
- card->queue.unplug_fn = mm_unplug_device;
+ card->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!card->queue)
+ goto failed_alloc;
+
+ blk_queue_make_request(card->queue, mm_make_request);
+ card->queue->queuedata = card;
+ card->queue->unplug_fn = mm_unplug_device;
tasklet_init(&card->tasklet, process_page, (unsigned long)card);
pci_free_consistent(card->dev, PAGE_SIZE*2,
card->mm_pages[1].desc,
card->mm_pages[1].page_dma);
+ blk_put_queue(card->queue);
}
static const struct pci_device_id mm_pci_ids[] = { {
disk->first_minor = i << MM_SHIFT;
disk->fops = &mm_fops;
disk->private_data = &cards[i];
- disk->queue = &cards[i].queue;
+ disk->queue = cards[i].queue;
set_capacity(disk, cards[i].mm_size << 1);
add_disk(disk);
}
static volatile u_char xd_error;
static int nodma = XD_DONT_USE_DMA;
-static struct request_queue xd_queue;
+static struct request_queue *xd_queue;
/* xd_init: register the block device number and set up pointer tables */
static int __init xd_init(void)
if (register_blkdev(XT_DISK_MAJOR, "xd"))
goto out1;
+ err = -ENOMEM;
+ xd_queue = blk_init_queue(do_xd_request, &xd_lock);
+ if (!xd_queue)
+ goto out1a;
+
devfs_mk_dir("xd");
- blk_init_queue(&xd_queue, do_xd_request, &xd_lock);
if (xd_detect(&controller,&address)) {
printk("Detected a%s controller (type %d) at address %06x\n",
sprintf(disk->disk_name, "xd%c", i+'a');
disk->fops = &xd_fops;
disk->private_data = p;
- disk->queue = &xd_queue;
+ disk->queue = xd_queue;
set_capacity(disk, p->heads * p->cylinders * p->sectors);
printk(" %s: CHS=%d/%d/%d\n", disk->disk_name,
p->cylinders, p->heads, p->sectors);
}
/* xd_maxsectors depends on controller - so set after detection */
- blk_queue_max_sectors(&xd_queue, xd_maxsectors);
+ blk_queue_max_sectors(xd_queue, xd_maxsectors);
for (i = 0; i < xd_drives; i++)
add_disk(xd_gendisk[i]);
release_region(xd_iobase,4);
out2:
devfs_remove("xd");
- blk_cleanup_queue(&xd_queue);
+ blk_cleanup_queue(xd_queue);
+out1a:
unregister_blkdev(XT_DISK_MAJOR, "xd");
out1:
if (xd_dma_buffer)
del_gendisk(xd_gendisk[i]);
put_disk(xd_gendisk[i]);
}
- blk_cleanup_queue(&xd_queue);
+ blk_cleanup_queue(xd_queue);
release_region(xd_iobase,4);
devfs_remove("xd");
if (xd_drives) {
return get_disk(z2ram_gendisk);
}
-static struct request_queue z2_queue;
+static struct request_queue *z2_queue;
int __init
z2_init(void)
{
+ int ret;
if (!MACH_IS_AMIGA)
return -ENXIO;
+ ret = -EBUSY;
if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
- return -EBUSY;
+ goto err;
+ ret = -ENOMEM;
z2ram_gendisk = alloc_disk(1);
- if (!z2ram_gendisk) {
- unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
- return -ENOMEM;
- }
+ if (!z2ram_gendisk)
+ goto out_disk;
+
+ z2_queue = blk_init_queue(do_z2_request, &z2ram_lock);
+ if (!z2_queue)
+ goto out_queue;
+
z2ram_gendisk->major = Z2RAM_MAJOR;
z2ram_gendisk->first_minor = 0;
z2ram_gendisk->fops = &z2_fops;
sprintf(z2ram_gendisk->disk_name, "z2ram");
- blk_init_queue(&z2_queue, do_z2_request, &z2ram_lock);
- z2ram_gendisk->queue = &z2_queue;
+ z2ram_gendisk->queue = z2_queue;
add_disk(z2ram_gendisk);
blk_register_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT, THIS_MODULE,
z2_find, NULL, NULL);
return 0;
+
+out_queue:
+ put_disk(z2ram_gendisk);
+out_disk:
+ unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
+err:
+ return ret;
}
#if defined(MODULE)
del_gendisk(z2ram_gendisk);
put_disk(z2ram_gendisk);
- blk_cleanup_queue(&z2_queue);
+ blk_cleanup_queue(z2_queue);
if ( current_device != -1 )
{
*/
#define MAJOR_NR AZTECH_CDROM_MAJOR
-#define QUEUE (&azt_queue)
-#define CURRENT elv_next_request(&azt_queue)
+#define QUEUE (azt_queue)
+#define CURRENT elv_next_request(azt_queue)
#define SET_TIMER(func, jifs) delay_timer.expires = jiffies + (jifs); \
delay_timer.function = (void *) (func); \
add_timer(&delay_timer);
#define AZT_DEBUG_MULTISESSION
#endif
-static struct request_queue azt_queue;
+static struct request_queue *azt_queue;
static int current_valid(void)
{
goto err_out2;
}
- blk_init_queue(&azt_queue, do_aztcd_request, &aztSpin);
- blk_queue_hardsect_size(&azt_queue, 2048);
+ azt_queue = blk_init_queue(do_aztcd_request, &aztSpin);
+ if (!azt_queue) {
+ ret = -ENOMEM;
+ goto err_out3;
+ }
+
+ blk_queue_hardsect_size(azt_queue, 2048);
azt_disk->major = MAJOR_NR;
azt_disk->first_minor = 0;
azt_disk->fops = &azt_fops;
sprintf(azt_disk->disk_name, "aztcd");
sprintf(azt_disk->devfs_name, "aztcd");
- azt_disk->queue = &azt_queue;
+ azt_disk->queue = azt_queue;
add_disk(azt_disk);
azt_invalidate_buffers();
aztPresent = 1;
aztCloseDoor();
return 0;
+err_out3:
+ unregister_blkdev(MAJOR_NR, "aztcd");
err_out2:
put_disk(azt_disk);
err_out:
printk("What's that: can't unregister aztcd\n");
return;
}
- blk_cleanup_queue(&azt_queue);
+ blk_cleanup_queue(azt_queue);
if ((azt_port == 0x1f0) || (azt_port == 0x170)) {
SWITCH_IDE_MASTER;
release_region(azt_port, 8); /*IDE-interface */
static volatile unsigned short sony_cd_read_reg;
static volatile unsigned short sony_cd_fifost_reg;
-static struct request_queue cdu31a_queue;
+static struct request_queue *cdu31a_queue;
static spinlock_t cdu31a_lock = SPIN_LOCK_UNLOCKED; /* queue lock */
static int sony_spun_up = 0; /* Has the drive been spun up? */
is_a_cdu31a =
strcmp("CD-ROM CDU31A", drive_config.product_id) == 0;
- blk_init_queue(&cdu31a_queue, do_cdu31a_request, &cdu31a_lock);
+ cdu31a_queue = blk_init_queue(do_cdu31a_request, &cdu31a_lock);
+ if (!cdu31a_queue)
+ goto errout0;
init_timer(&cdu31a_abort_timer);
cdu31a_abort_timer.function = handle_abort_timeout;
scd_info.mask = deficiency;
scd_gendisk = disk;
if (register_cdrom(&scd_info))
- goto errout0;
- disk->queue = &cdu31a_queue;
+ goto err;
+ disk->queue = cdu31a_queue;
add_disk(disk);
disk_changed = 1;
return (0);
+err:
+ blk_cleanup_queue(cdu31a_queue);
errout0:
+ if (cdu31a_irq)
+ free_irq(cdu31a_irq, NULL);
printk("Unable to register CDU-31a with Uniform cdrom driver\n");
- blk_cleanup_queue(&cdu31a_queue);
put_disk(disk);
errout1:
if (unregister_blkdev(MAJOR_NR, "cdu31a")) {
return;
}
- blk_cleanup_queue(&cdu31a_queue);
+ blk_cleanup_queue(cdu31a_queue);
if (cdu31a_irq > 0)
free_irq(cdu31a_irq, NULL);
#define PLAY_TO cd->toc[0] /* toc[0] records end-time in play */
static struct cm206_struct *cd; /* the main memory structure */
-static struct request_queue cm206_queue;
+static struct request_queue *cm206_queue;
static spinlock_t cm206_lock = SPIN_LOCK_UNLOCKED;
/* First, we define some polling functions. These are actually
printk(KERN_INFO "Cannot register for cdrom %d!\n", MAJOR_NR);
goto out_cdrom;
}
- blk_init_queue(&cm206_queue, do_cm206_request, &cm206_lock);
- blk_queue_hardsect_size(&cm206_queue, 2048);
- disk->queue = &cm206_queue;
+ cm206_queue = blk_init_queue(do_cm206_request, &cm206_lock);
+ if (!cm206_queue)
+ goto out_queue;
+
+ blk_queue_hardsect_size(cm206_queue, 2048);
+ disk->queue = cm206_queue;
add_disk(disk);
memset(cd, 0, sizeof(*cd)); /* give'm some reasonable value */
size);
return 0;
+out_queue:
+ unregister_cdrom(&cm206_info);
out_cdrom:
put_disk(disk);
out_disk:
printk("Can't unregister major cm206\n");
return;
}
- blk_cleanup_queue(&cm206_queue);
+ blk_cleanup_queue(cm206_queue);
free_irq(cm206_irq, NULL);
kfree(cd);
release_region(cm206_base, 16);
static struct timer_list gscd_timer = TIMER_INITIALIZER(NULL, 0, 0);
static spinlock_t gscd_lock = SPIN_LOCK_UNLOCKED;
-struct request_queue gscd_queue;
+static struct request_queue *gscd_queue;
static struct block_device_operations gscd_fops = {
.owner = THIS_MODULE,
unsigned int nsect;
repeat:
- req = elv_next_request(&gscd_queue);
+ req = elv_next_request(gscd_queue);
if (!req)
return;
printk("What's that: can't unregister GoldStar-module\n");
return;
}
- blk_cleanup_queue(&gscd_queue);
+ blk_cleanup_queue(gscd_queue);
release_region(gscd_port, GSCD_IO_EXTENT);
printk(KERN_INFO "GoldStar-module released.\n");
}
goto err_out2;
}
- blk_init_queue(&gscd_queue, do_gscd_request, &gscd_lock);
+ gscd_queue = blk_init_queue(do_gscd_request, &gscd_lock);
+ if (!gscd_queue) {
+ ret = -ENOMEM;
+ goto err_out3;
+ }
disk_state = 0;
gscdPresent = 1;
- gscd_disk->queue = &gscd_queue;
+ gscd_disk->queue = gscd_queue;
add_disk(gscd_disk);
printk(KERN_INFO "GSCD: GoldStar CD-ROM Drive found.\n");
return 0;
+err_out3:
+ unregister_blkdev(MAJOR_NR, "gscd");
err_out2:
put_disk(gscd_disk);
err_out1:
/* Is the drive connected properly and responding?? */
static int mcdPresent;
-static struct request_queue mcd_queue;
+static struct request_queue *mcd_queue;
#define MAJOR_NR MITSUMI_CDROM_MAJOR
-#define QUEUE (&mcd_queue)
-#define CURRENT elv_next_request(&mcd_queue)
+#define QUEUE (mcd_queue)
+#define CURRENT elv_next_request(mcd_queue)
#define QUICK_LOOP_DELAY udelay(45) /* use udelay */
#define QUICK_LOOP_COUNT 20
goto out_region;
}
- blk_init_queue(&mcd_queue, do_mcd_request, &mcd_spinlock);
+ mcd_queue = blk_init_queue(do_mcd_request, &mcd_spinlock);
+ if (!mcd_queue)
+ goto out_queue;
/* check for card */
printk(KERN_ERR "mcd: Unable to register Mitsumi CD-ROM.\n");
goto out_cdrom;
}
- disk->queue = &mcd_queue;
+ disk->queue = mcd_queue;
add_disk(disk);
printk(msg);
return 0;
out_cdrom:
free_irq(mcd_irq, NULL);
-out_probe:
+out_queue:
release_region(mcd_port, 4);
+out_probe:
+ blk_cleanup_queue(mcd_queue);
out_region:
unregister_blkdev(MAJOR_NR, "mcd");
- blk_cleanup_queue(&mcd_queue);
put_disk(disk);
return -EIO;
}
printk(KERN_WARNING "Can't unregister major mcd\n");
return;
}
- blk_cleanup_queue(&mcd_queue);
+ blk_cleanup_queue(mcd_queue);
del_timer_sync(&mcd_timer);
}
0, 0, 0, 0, 0, 0, 0, 0
};
static spinlock_t mcdx_lock = SPIN_LOCK_UNLOCKED;
-static struct request_queue mcdx_queue;
+static struct request_queue *mcdx_queue;
MODULE_PARM(mcdx, "1-4i");
static struct cdrom_device_ops mcdx_dops = {
if (unregister_blkdev(MAJOR_NR, "mcdx") != 0) {
xwarn("cleanup() unregister_blkdev() failed\n");
}
- blk_cleanup_queue(&mcdx_queue);
+ blk_cleanup_queue(mcdx_queue);
#if !MCDX_QUIET
else
xinfo("cleanup() succeeded\n");
return 1;
}
- blk_init_queue(&mcdx_queue, do_mcdx_request, &mcdx_lock);
+ mcdx_queue = blk_init_queue(do_mcdx_request, &mcdx_lock);
+ if (!mcdx_queue) {
+ unregister_blkdev(MAJOR_NR, "mcdx");
+ release_region((unsigned long) stuffp->wreg_data,
+ MCDX_IO_SIZE);
+ kfree(stuffp);
+ put_disk(disk);
+ return 1;
+ }
xtrace(INIT, "init() subscribe irq and i/o\n");
mcdx_irq_map[stuffp->irq] = stuffp;
xwarn("%s=0x%3p,%d: Init failed. Can't get irq (%d).\n",
MCDX, stuffp->wreg_data, stuffp->irq, stuffp->irq);
stuffp->irq = 0;
- blk_cleanup_queue(&mcdx_queue);
+ blk_cleanup_queue(mcdx_queue);
kfree(stuffp);
put_disk(disk);
return 0;
put_disk(disk);
if (unregister_blkdev(MAJOR_NR, "mcdx") != 0)
xwarn("cleanup() unregister_blkdev() failed\n");
- blk_cleanup_queue(&mcdx_queue);
+ blk_cleanup_queue(mcdx_queue);
return 2;
}
disk->private_data = stuffp;
- disk->queue = &mcdx_queue;
+ disk->queue = mcdx_queue;
add_disk(disk);
printk(msg);
return 0;
#include <asm/uaccess.h>
#define MAJOR_NR OPTICS_CDROM_MAJOR
-#define QUEUE (&opt_queue)
-#define CURRENT elv_next_request(&opt_queue)
+#define QUEUE (opt_queue)
+#define CURRENT elv_next_request(opt_queue)
\f
/* Debug support */
static void sleep_timer(unsigned long data);
static struct timer_list delay_timer = TIMER_INITIALIZER(sleep_timer, 0, 0);
static spinlock_t optcd_lock = SPIN_LOCK_UNLOCKED;
-static struct request_queue opt_queue;
+static struct request_queue *opt_queue;
/* Timer routine: wake up when desired flag goes low,
or when timeout expires. */
}
- blk_init_queue(&opt_queue, do_optcd_request, &optcd_lock);
- blk_queue_hardsect_size(&opt_queue, 2048);
- optcd_disk->queue = &opt_queue;
+ opt_queue = blk_init_queue(do_optcd_request, &optcd_lock);
+ if (!opt_queue) {
+ unregister_blkdev(MAJOR_NR, "optcd");
+ release_region(optcd_port, 4);
+ put_disk(optcd_disk);
+ return -ENOMEM;
+ }
+
+ blk_queue_hardsect_size(opt_queue, 2048);
+ optcd_disk->queue = opt_queue;
add_disk(optcd_disk);
printk(KERN_INFO "optcd: DOLPHIN 8000 AT CDROM at 0x%x\n", optcd_port);
printk(KERN_ERR "optcd: what's that: can't unregister\n");
return;
}
- blk_cleanup_queue(&opt_queue);
+ blk_cleanup_queue(opt_queue);
release_region(optcd_port, 4);
printk(KERN_INFO "optcd: module released.\n");
}
* Protects access to global structures etc.
*/
static spinlock_t sbpcd_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
-static struct request_queue sbpcd_queue;
+static struct request_queue *sbpcd_queue;
MODULE_PARM(sbpcd, "2i");
MODULE_PARM(max_drives, "i");
#endif /* MODULE */
}
- blk_init_queue(&sbpcd_queue, do_sbpcd_request, &sbpcd_lock);
+ /*
+ * init error handling is broken beyond belief in this driver...
+ */
+ sbpcd_queue = blk_init_queue(do_sbpcd_request, &sbpcd_lock);
+ if (!sbpcd_queue) {
+ release_region(CDo_command,4);
+ unregister_blkdev(MAJOR_NR, major_name);
+ return -ENOMEM;
+ }
devfs_mk_dir("sbp");
printk("Can't unregister %s\n", major_name);
}
release_region(CDo_command,4);
- blk_cleanup_queue(&sbpcd_queue);
+ blk_cleanup_queue(sbpcd_queue);
return -EIO;
}
#ifdef MODULE
if (sbpcd_infop == NULL)
{
release_region(CDo_command,4);
- blk_cleanup_queue(&sbpcd_queue);
+ blk_cleanup_queue(sbpcd_queue);
return -ENOMEM;
}
memset(sbpcd_infop, 0, sizeof(struct cdrom_device_info));
printk(" sbpcd: Unable to register with Uniform CD-ROm driver\n");
}
disk->private_data = p;
- disk->queue = &sbpcd_queue;
+ disk->queue = sbpcd_queue;
add_disk(disk);
}
- blk_queue_hardsect_size(&sbpcd_queue, CD_FRAMESIZE);
+ blk_queue_hardsect_size(sbpcd_queue, CD_FRAMESIZE);
#ifndef MODULE
init_done:
return;
}
release_region(CDo_command,4);
- blk_cleanup_queue(&sbpcd_queue);
+ blk_cleanup_queue(sbpcd_queue);
for (j=0;j<NR_SBPCD;j++)
{
if (D_S[j].drv_id==-1) continue;
#include "sjcd.h"
static int sjcd_present = 0;
-static struct request_queue sjcd_queue;
+static struct request_queue *sjcd_queue;
#define MAJOR_NR SANYO_CDROM_MAJOR
-#define QUEUE (&sjcd_queue)
-#define CURRENT elv_next_request(&sjcd_queue)
+#define QUEUE (sjcd_queue)
+#define CURRENT elv_next_request(sjcd_queue)
#define SJCD_BUF_SIZ 32 /* cdr-h94a has internal 64K buffer */
if (register_blkdev(MAJOR_NR, "sjcd"))
return -EIO;
- blk_init_queue(&sjcd_queue, do_sjcd_request, &sjcd_lock);
- blk_queue_hardsect_size(&sjcd_queue, 2048);
+ sjcd_queue = blk_init_queue(do_sjcd_request, &sjcd_lock);
+ if (!sjcd_queue)
+ goto out0;
+
+ blk_queue_hardsect_size(sjcd_queue, 2048);
sjcd_disk = alloc_disk(1);
if (!sjcd_disk) {
}
printk(KERN_INFO "SJCD: Status: port=0x%x.\n", sjcd_base);
- sjcd_disk->queue = &sjcd_queue;
+ sjcd_disk->queue = sjcd_queue;
add_disk(sjcd_disk);
sjcd_present++;
return (0);
out3:
release_region(sjcd_base, 4);
- blk_cleanup_queue(&sjcd_queue);
out2:
put_disk(sjcd_disk);
out1:
+ blk_cleanup_queue(sjcd_queue);
+out0:
if ((unregister_blkdev(MAJOR_NR, "sjcd") == -EINVAL))
printk("SJCD: cannot unregister device.\n");
return (-EIO);
del_gendisk(sjcd_disk);
put_disk(sjcd_disk);
release_region(sjcd_base, 4);
- blk_cleanup_queue(&sjcd_queue);
+ blk_cleanup_queue(sjcd_queue);
if ((unregister_blkdev(MAJOR_NR, "sjcd") == -EINVAL))
printk("SJCD: cannot unregister device.\n");
printk(KERN_INFO "SJCD: module: removed.\n");
static unsigned short data_reg;
static spinlock_t sonycd535_lock = SPIN_LOCK_UNLOCKED; /* queue lock */
-static struct request_queue sonycd535_queue;
+static struct request_queue *sonycd535_queue;
static int initialized; /* Has the drive been initialized? */
static int sony_disc_changed = 1; /* Has the disk been changed
err = -EIO;
goto out1;
}
- blk_init_queue(&sonycd535_queue, do_cdu535_request, &sonycd535_lock);
- blk_queue_hardsect_size(&sonycd535_queue, CDU535_BLOCK_SIZE);
+ sonycd535_queue = blk_init_queue(do_cdu535_request, &sonycd535_lock);
+ if (!sonycd535_queue) {
+ err = -ENOMEM;
+ goto out1a;
+ }
+
+ blk_queue_hardsect_size(sonycd535_queue, CDU535_BLOCK_SIZE);
sony_toc = kmalloc(sizeof(struct s535_sony_toc), GFP_KERNEL);
err = -ENOMEM;
if (!sony_toc)
sony535_cd_base_io);
goto out7;
}
- cdu_disk->queue = &sonycd535_queue;
+ cdu_disk->queue = sonycd535_queue;
add_disk(cdu_disk);
return 0;
out3:
kfree(sony_toc);
out2:
- blk_cleanup_queue(&sonycd535_queue);
+ blk_cleanup_queue(sonycd535_queue);
+out1a:
unregister_blkdev(MAJOR_NR, CDU535_HANDLE);
out1:
if (sony535_irq_used)
kfree(sony_toc);
del_gendisk(cdu_disk);
put_disk(cdu_disk);
- blk_cleanup_queue(&sonycd535_queue);
+ blk_cleanup_queue(sonycd535_queue);
if (unregister_blkdev(MAJOR_NR, CDU535_HANDLE) == -EINVAL)
printk("Uh oh, couldn't unregister " CDU535_HANDLE "\n");
else
if (cdrom_read_from_buffer(drive))
return ide_stopped;
- blk_attempt_remerge(&drive->queue, rq);
+ blk_attempt_remerge(drive->queue, rq);
/* Clear the local sector buffer. */
info->nsectors_buffered = 0;
* remerge requests, often the plugging will not have had time
* to do this properly
*/
- blk_attempt_remerge(&drive->queue, rq);
+ blk_attempt_remerge(drive->queue, rq);
info->nsectors_buffered = 0;
* default to read-only always and fix latter at the bottom
*/
set_disk_ro(drive->disk, 1);
- blk_queue_hardsect_size(&drive->queue, CD_FRAMESIZE);
+ blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
- blk_queue_prep_rq(&drive->queue, ide_cdrom_prep_fn);
- blk_queue_dma_alignment(&drive->queue, 3);
+ blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn);
+ blk_queue_dma_alignment(drive->queue, 3);
drive->special.all = 0;
drive->ready_stat = 0;
printk("%s: ide_cdrom_cleanup failed to unregister device from the cdrom driver.\n", drive->name);
kfree(info);
drive->driver_data = NULL;
- blk_queue_prep_rq(&drive->queue, NULL);
+ blk_queue_prep_rq(drive->queue, NULL);
del_gendisk(g);
g->fops = ide_fops;
return 0;
spin_lock_irqsave(&ide_lock, flags);
if (ata_pending_commands(drive) < drive->queue_depth)
- ret = blk_queue_start_tag(&drive->queue, rq);
+ ret = blk_queue_start_tag(drive->queue, rq);
spin_unlock_irqrestore(&ide_lock, flags);
return ret;
if (max_s > hwif->rqsize)
max_s = hwif->rqsize;
- blk_queue_max_sectors(&drive->queue, max_s);
+ blk_queue_max_sectors(drive->queue, max_s);
}
- printk("%s: max request size: %dKiB\n", drive->name, drive->queue.max_sectors / 2);
+ printk("%s: max request size: %dKiB\n", drive->name, drive->queue->max_sectors / 2);
/* Extract geometry if we did not already have one for the drive */
if (!drive->cyl || !drive->head || !drive->sect) {
if (hwif->sg_dma_active)
BUG();
- nents = blk_rq_map_sg(&drive->queue, rq, hwif->sg_table);
+ nents = blk_rq_map_sg(drive->queue, rq, hwif->sg_table);
if (rq_data_dir(rq) == READ)
hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
set_bit(IDEFLOPPY_ZIP_DRIVE, &floppy->flags);
/* This value will be visible in the /proc/ide/hdx/settings */
floppy->ticks = IDEFLOPPY_TICKS_DELAY;
- blk_queue_max_sectors(&drive->queue, 64);
+ blk_queue_max_sectors(drive->queue, 64);
}
/*
* it, so please don't remove this.
*/
if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) {
- blk_queue_max_sectors(&drive->queue, 64);
+ blk_queue_max_sectors(drive->queue, 64);
set_bit(IDEFLOPPY_CLIK_DRIVE, &floppy->flags);
}
if (!blk_rq_tagged(rq))
blkdev_dequeue_request(rq);
else
- blk_queue_end_tag(&drive->queue, rq);
+ blk_queue_end_tag(drive->queue, rq);
HWGROUP(drive)->rq = NULL;
end_that_request_last(rq);
ret = 0;
#endif
spin_lock_irqsave(&ide_lock, flags);
if (blk_pm_suspend_request(rq)) {
- blk_stop_queue(&drive->queue);
+ blk_stop_queue(drive->queue);
} else {
drive->blocked = 0;
- blk_start_queue(&drive->queue);
+ blk_start_queue(drive->queue);
}
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
drive = hwgroup->drive;
do {
if ((!drive->sleep || time_after_eq(jiffies, drive->sleep))
- && !elv_queue_empty(&drive->queue)) {
+ && !elv_queue_empty(drive->queue)) {
if (!best
|| (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
|| (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
{
- if (!blk_queue_plugged(&drive->queue))
+ if (!blk_queue_plugged(drive->queue))
best = drive;
}
}
break;
}
- if (blk_queue_plugged(&drive->queue)) {
+ if (blk_queue_plugged(drive->queue)) {
if (drive->using_tcq)
break;
* we know that the queue isn't empty, but this can happen
* if the q->prep_rq_fn() decides to kill a request
*/
- rq = elv_next_request(&drive->queue);
+ rq = elv_next_request(drive->queue);
if (!rq) {
hwgroup->busy = !!ata_pending_commands(drive);
break;
insert_end = 0;
rq->flags |= REQ_PREEMPT;
}
- __elv_add_request(&drive->queue, rq, insert_end, 0);
+ __elv_add_request(drive->queue, rq, insert_end, 0);
ide_do_request(hwgroup, IDE_NO_IRQ);
spin_unlock_irqrestore(&ide_lock, flags);
addr = HWIF(drive)->pci_dev->dma_mask;
}
- blk_queue_bounce_limit(&drive->queue, addr);
+ if (drive->queue)
+ blk_queue_bounce_limit(drive->queue, addr);
}
EXPORT_SYMBOL(ide_toggle_bounce);
/*
* init request queue
*/
-static void ide_init_queue(ide_drive_t *drive)
+static int ide_init_queue(ide_drive_t *drive)
{
- request_queue_t *q = &drive->queue;
+ request_queue_t *q;
ide_hwif_t *hwif = HWIF(drive);
int max_sectors = 256;
* do not.
*/
- blk_init_queue(q, do_ide_request, &ide_lock);
+ q = blk_init_queue(do_ide_request, &ide_lock);
+ if (!q)
+ return 1;
+
q->queuedata = HWGROUP(drive);
blk_queue_segment_boundary(q, 0xffff);
/* This is a driver limit and could be eliminated. */
blk_queue_max_phys_segments(q, PRD_ENTRIES);
+
+ /* assign drive and gendisk queue */
+ drive->queue = q;
+ if (drive->disk)
+ drive->disk->queue = drive->queue;
+
+ return 0;
}
/*
ide_drive_t *drive = &hwif->drives[index];
if (!drive->present)
continue;
- ide_init_queue(drive);
+ if (ide_init_queue(drive)) {
+ printk(KERN_ERR "ide: failed to init %s\n",drive->name);
+ continue;
+ }
spin_lock_irq(&ide_lock);
if (!hwgroup->drive) {
/* first drive for hwgroup. */
sprintf(disk->disk_name,"hd%c",'a'+hwif->index*MAX_DRIVES+unit);
disk->fops = ide_fops;
disk->private_data = drive;
- disk->queue = &drive->queue;
drive->disk = disk;
}
return 0;
EXPORT_SYMBOL(hwif_init);
-void export_ide_init_queue (ide_drive_t *drive)
+int export_ide_init_queue (ide_drive_t *drive)
{
- ide_init_queue(drive);
+ if (ide_init_queue(drive))
+ return 1;
+
ide_init_drive(drive);
+ return 0;
}
EXPORT_SYMBOL(export_ide_init_queue);
static void ide_tcq_invalidate_queue(ide_drive_t *drive)
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
- request_queue_t *q = &drive->queue;
+ request_queue_t *q = drive->queue;
struct request *rq;
unsigned long flags;
spin_lock_irqsave(&ide_lock, flags);
- if ((rq = blk_queue_find_tag(&drive->queue, tag))) {
+ if ((rq = blk_queue_find_tag(drive->queue, tag))) {
HWGROUP(drive)->rq = rq;
/*
if (itb->max_sectors > HWIF(drive)->rqsize)
itb->max_sectors = HWIF(drive)->rqsize;
- blk_queue_max_sectors(&drive->queue, itb->max_sectors);
+ blk_queue_max_sectors(drive->queue, itb->max_sectors);
}
/*
* enable block tagging
*/
- if (!blk_queue_tagged(&drive->queue))
- blk_queue_init_tags(&drive->queue, IDE_MAX_TAG);
+ if (!blk_queue_tagged(drive->queue))
+ blk_queue_init_tags(drive->queue, IDE_MAX_TAG);
/*
* check auto-poll support
drive->id = NULL;
}
drive->present = 0;
- blk_cleanup_queue(&drive->queue);
+ blk_cleanup_queue(drive->queue);
+ drive->queue = NULL;
}
if (hwif->next == hwif) {
BUG_ON(hwgroup->hwif != hwif);
#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
static spinlock_t hd_lock = SPIN_LOCK_UNLOCKED;
-static struct request_queue hd_queue;
+static struct request_queue *hd_queue;
#define MAJOR_NR HD_MAJOR
-#define QUEUE (&hd_queue)
-#define CURRENT elv_next_request(&hd_queue)
+#define QUEUE (hd_queue)
+#define CURRENT elv_next_request(hd_queue)
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
if (register_blkdev(MAJOR_NR,"hd"))
return -1;
- blk_init_queue(&hd_queue, do_hd_request, &hd_lock);
- blk_queue_max_sectors(&hd_queue, 255);
+ hd_queue = blk_init_queue(do_hd_request, &hd_lock);
+ if (!hd_queue) {
+ unregister_blkdev(MAJOR_NR,"hd");
+ return -ENOMEM;
+ }
+
+ blk_queue_max_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
- blk_queue_hardsect_size(&hd_queue, 512);
+ blk_queue_hardsect_size(hd_queue, 512);
#ifdef __i386__
if (!NR_HD) {
sprintf(disk->disk_name, "hd%c", 'a'+drive);
disk->private_data = p;
set_capacity(disk, p->head * p->sect * p->cyl);
- disk->queue = &hd_queue;
+ disk->queue = hd_queue;
p->unit = drive;
hd_gendisk[drive] = disk;
printk ("%s: %luMB, CHS=%d/%d/%d\n",
out:
del_timer(&device_timer);
unregister_blkdev(MAJOR_NR,"hd");
- blk_cleanup_queue(&hd_queue);
+ blk_cleanup_queue(hd_queue);
return -1;
Enomem:
while (drive--)
#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
static spinlock_t hd_lock = SPIN_LOCK_UNLOCKED;
-static struct request_queue hd_queue;
+static struct request_queue *hd_queue;
-#define CURRENT elv_next_request(&hd_queue)
+#define CURRENT elv_next_request(hd_queue)
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
printk("hd: unable to get major %d for hard disk\n",HD_MAJOR);
return -1;
}
- blk_init_queue(&hd_queue, do_hd_request, &hd_lock);
- blk_queue_max_sectors(&hd_queue, 255);
+ hd_queue = blk_init_queue(do_hd_request, &hd_lock);
+ if (!hd_queue) {
+ unregister_blkdev(HD_MAJOR,"hd");
+ return -1;
+ }
+ blk_queue_max_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
- blk_queue_hardsect_size(&hd_queue, 512);
+ blk_queue_hardsect_size(hd_queue, 512);
#ifdef __i386__
if (!NR_HD) {
sprintf(disk->disk_name, "hd%c", 'a'+drive);
disk->private_data = p;
set_capacity(disk, p->head * p->sect * p->cyl);
- disk->queue = &hd_queue;
+ disk->queue = hd_queue;
p->unit = drive;
hd_gendisk[drive] = disk;
printk ("%s: %luMB, CHS=%d/%d/%d\n",
out:
del_timer(&device_timer);
unregister_blkdev(HD_MAJOR,"hd");
- blk_cleanup_queue(&hd_queue);
+ blk_cleanup_queue(hd_queue);
return -1;
Enomem:
while (drive--)
unsigned long flags;
- request_queue_t queue;
+ request_queue_t *queue;
struct gendisk *disk;
/*
init_rwsem(&md->lock);
atomic_set(&md->holders, 1);
- md->queue.queuedata = md;
- blk_queue_make_request(&md->queue, dm_request);
+ md->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!md->queue) {
+ kfree(md);
+ return NULL;
+ }
+
+ md->queue->queuedata = md;
+ blk_queue_make_request(md->queue, dm_request);
md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
mempool_free_slab, _io_cache);
if (!md->io_pool) {
free_minor(minor);
+ blk_put_queue(md->queue);
kfree(md);
return NULL;
}
if (!md->disk) {
mempool_destroy(md->io_pool);
free_minor(minor);
+ blk_put_queue(md->queue);
kfree(md);
return NULL;
}
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->fops = &dm_blk_dops;
- md->disk->queue = &md->queue;
+ md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
add_disk(md->disk);
mempool_destroy(md->io_pool);
del_gendisk(md->disk);
put_disk(md->disk);
+ blk_put_queue(md->queue);
kfree(md);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
- request_queue_t *q = &md->queue;
+ request_queue_t *q = md->queue;
sector_t size;
md->map = t;
if (table-conf->hash_table != nb_zone)
BUG();
- blk_queue_merge_bvec(&mddev->queue, linear_mergeable_bvec);
+ blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
return 0;
out:
if (!mddev->raid_disks && list_empty(&mddev->disks)) {
list_del(&mddev->all_mddevs);
mddev_map[mdidx(mddev)] = NULL;
+ blk_put_queue(mddev->queue);
kfree(mddev);
MOD_DEC_USE_COUNT;
}
INIT_LIST_HEAD(&new->all_mddevs);
init_timer(&new->safemode_timer);
atomic_set(&new->active, 1);
- blk_queue_make_request(&new->queue, md_fail_request);
+
+ new->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!new->queue) {
+ kfree(new);
+ return NULL;
+ }
+
+ blk_queue_make_request(new->queue, md_fail_request);
goto retry;
}
sprintf(disk->disk_name, "md%d", mdidx(mddev));
disk->fops = &md_fops;
disk->private_data = mddev;
- disk->queue = &mddev->queue;
+ disk->queue = mddev->queue;
add_disk(disk);
disks[mdidx(mddev)] = disk;
up(&disks_sem);
mddev->pers = pers[pnum];
spin_unlock(&pers_lock);
- blk_queue_make_request(&mddev->queue, mddev->pers->make_request);
+ blk_queue_make_request(mddev->queue, mddev->pers->make_request);
printk("%s: setting max_sectors to %d, segment boundary to %d\n",
disk->disk_name,
chunk_size >> 9,
(chunk_size>>1)-1);
- blk_queue_max_sectors(&mddev->queue, chunk_size >> 9);
- blk_queue_segment_boundary(&mddev->queue, (chunk_size>>1) - 1);
- mddev->queue.queuedata = mddev;
+ blk_queue_max_sectors(mddev->queue, chunk_size >> 9);
+ blk_queue_segment_boundary(mddev->queue, (chunk_size>>1) - 1);
+ mddev->queue->queuedata = mddev;
err = mddev->pers->run(mddev);
if (err) {
conf->hash_spacing++;
}
- blk_queue_max_sectors(&mddev->queue, mddev->chunk_size >> 9);
- blk_queue_merge_bvec(&mddev->queue, raid0_mergeable_bvec);
+ blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
+ blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
return 0;
out_free_conf:
static inline void raid5_plug_device(raid5_conf_t *conf)
{
spin_lock_irq(&conf->device_lock);
- blk_plug_device(&conf->mddev->queue);
+ blk_plug_device(conf->mddev->queue);
spin_unlock_irq(&conf->device_lock);
}
if (list_empty(&conf->handle_list) &&
atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
- !blk_queue_plugged(&mddev->queue) &&
+ !blk_queue_plugged(mddev->queue) &&
!list_empty(&conf->delayed_list))
raid5_activate_delayed(conf);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
- mddev->queue.unplug_fn = raid5_unplug_device;
+ mddev->queue->unplug_fn = raid5_unplug_device;
PRINTK("raid5: run(md%d) called.\n", mdidx(mddev));
atomic_t queue_depth;
struct i2ob_request request_queue[MAX_I2OB_DEPTH];
struct i2ob_request *i2ob_qhead;
- request_queue_t req_queue;
+ request_queue_t *req_queue;
spinlock_t lock;
};
static struct i2ob_iop_queue *i2ob_queues[MAX_I2O_CONTROLLERS];
* code so that we can directly get the queue ptr from the
* device instead of having to go the IOP data structure.
*/
- dev->req_queue = &i2ob_queues[c->unit]->req_queue;
+ dev->req_queue = i2ob_queues[c->unit]->req_queue;
/* Register a size before we register for events - otherwise we
might miss and overwrite an event */
i2ob_queues[unit]->i2ob_qhead = &i2ob_queues[unit]->request_queue[0];
atomic_set(&i2ob_queues[unit]->queue_depth, 0);
- blk_init_queue(&i2ob_queues[unit]->req_queue, i2ob_request, &i2ob_queues[unit]->lock);
- i2ob_queues[unit]->req_queue.queuedata = &i2ob_queues[unit];
+ i2ob_queues[unit]->req_queue = blk_init_queue(i2ob_request, &i2ob_queues[unit]->lock);
+ if (!i2ob_queues[unit]->req_queue) {
+ kfree(i2ob_queues[unit]);
+ return -1;
+ }
+
+ i2ob_queues[unit]->req_queue->queuedata = &i2ob_queues[unit];
return 0;
}
struct completion thread_dead;
int exiting;
wait_queue_head_t thread_wq;
- struct request_queue rq;
+ struct request_queue *rq;
spinlock_t queue_lock;
};
static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_ops *tr = arg;
- struct request_queue *rq = &tr->blkcore_priv->rq;
+ struct request_queue *rq = tr->blkcore_priv->rq;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
set_capacity(gd, new->size);
gd->private_data = new;
new->blkcore_priv = gd;
- gd->queue = &tr->blkcore_priv->rq;
+ gd->queue = tr->blkcore_priv->rq;
if (new->readonly)
set_disk_ro(gd, 1);
init_completion(&tr->blkcore_priv->thread_dead);
init_waitqueue_head(&tr->blkcore_priv->thread_wq);
- blk_init_queue(&tr->blkcore_priv->rq, mtd_blktrans_request,
- &tr->blkcore_priv->queue_lock);
- tr->blkcore_priv->rq.queuedata = tr;
+ tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
+ if (!tr->blkcore_priv->rq) {
+ unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ up(&mtd_table_mutex);
+ return -ENOMEM;
+ }
+
+ tr->blkcore_priv->rq->queuedata = tr;
ret = kernel_thread(mtd_blktrans_thread, tr,
CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
if (ret < 0) {
- blk_cleanup_queue(&tr->blkcore_priv->rq);
+ blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
up(&mtd_table_mutex);
}
devfs_remove(tr->name);
- blk_cleanup_queue(&tr->blkcore_priv->rq);
+ blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
up(&mtd_table_mutex);
{
int max, rc;
- device->request_queue = kmalloc(sizeof (request_queue_t), GFP_KERNEL);
+ device->request_queue = blk_init_queue(do_dasd_request, &device->request_queue_lock);
if (device->request_queue == NULL)
return -ENOMEM;
- memset(device->request_queue, 0, sizeof(request_queue_t));
+
device->request_queue->queuedata = device;
- rc = blk_init_queue(device->request_queue, do_dasd_request,
- &device->request_queue_lock);
- if (rc)
- return rc;
#if 0
elevator_exit(device->request_queue);
rc = elevator_init(device->request_queue, &elevator_noop);
{
if (device->request_queue) {
blk_cleanup_queue(device->request_queue);
- kfree(device->request_queue);
device->request_queue = NULL;
}
}
struct tape_blk_data
{
/* Block device request queue. */
- request_queue_t request_queue;
+ request_queue_t *request_queue;
spinlock_t request_queue_lock;
/* Block frontend tasklet */
struct tasklet_struct tasklet;
tapeblock_setup_device(struct tape_device * device)
{
struct tape_blk_data *d = &device->blk_data;
- request_queue_t *q = &d->request_queue;
+ request_queue_t *q;
struct gendisk *disk = alloc_disk(1);
int rc;
tasklet_init(&d->tasklet, tapeblock_tasklet, (unsigned long)device);
spin_lock_init(&d->request_queue_lock);
- rc = blk_init_queue(q, tapeblock_request_fn, &d->request_queue_lock);
- if (rc)
+ q = blk_init_queue(tapeblock_request_fn, &d->request_queue_lock);
+ if (!q) {
+ rc = -ENOMEM;
goto put_disk;
+ }
+ d->request_queue = q;
elevator_exit(q);
rc = elevator_init(q, &elevator_noop);
if (rc)
del_gendisk(d->disk);
put_disk(d->disk);
- blk_cleanup_queue(&d->request_queue);
+ blk_cleanup_queue(d->request_queue);
tasklet_kill(&d->tasklet);
}
return 0;
}
-static struct request_queue jsf_queue;
+static struct request_queue *jsf_queue;
static int jsfd_init(void)
{
goto out;
}
- blk_init_queue(&jsf_queue, jsfd_do_request, &lock);
+ jsf_queue = blk_init_queue(jsfd_do_request, &lock);
+ if (!jsf_queue) {
+ err = -ENOMEM;
+ unregister_blkdev(JSFD_MAJOR, "jsfd");
+ goto out;
+ }
+
for (i = 0; i < JSF_MAX; i++) {
struct gendisk *disk = jsfd_disk[i];
if ((i & JSF_PART_MASK) >= JSF_NPART) continue;
disk->fops = &jsfd_fops;
set_capacity(disk, jdp->dsize >> 9);
disk->private_data = jdp;
- disk->queue = &jsf_queue;
+ disk->queue = jsf_queue;
add_disk(disk);
set_disk_ro(disk, 1);
}
misc_deregister(&jsf_dev);
if (unregister_blkdev(JSFD_MAJOR, "jsfd") != 0)
printk("jsfd: cleanup_module failed\n");
- blk_cleanup_queue(&jsf_queue);
+ blk_cleanup_queue(jsf_queue);
}
module_init(jsflash_init_module);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
- struct request_queue *q = kmalloc(sizeof(*q), GFP_ATOMIC);
+ struct request_queue *q;
+ q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
if (!q)
return NULL;
- memset(q, 0, sizeof(*q));
- blk_init_queue(q, scsi_request_fn, &sdev->sdev_lock);
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_max_hw_segments(q, shost->sg_tablesize);
void scsi_free_queue(struct request_queue *q)
{
blk_cleanup_queue(q);
- kfree(q);
}
/*
struct blk_queue_tag *queue_tags;
+ atomic_t refcnt;
+
/*
* sg stuff
*/
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_READFULL 3 /* write queue has been filled */
#define QUEUE_FLAG_WRITEFULL 4 /* read queue has been filled */
+#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define blk_queue_plugged(q) !list_empty(&(q)->plug_list)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
elv_remove_request(req->q, req);
}
-/*
- * get ready for proper ref counting
- */
-#define blk_put_queue(q) do { } while (0)
-
/*
* Access functions for manipulating queue properties
*/
-extern int blk_init_queue(request_queue_t *, request_fn_proc *, spinlock_t *);
+extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void generic_unplug_device(void *);
extern long nr_blockdev_pages(void);
+int blk_get_queue(request_queue_t *);
+request_queue_t *blk_alloc_queue(int);
+#define blk_put_queue(q) blk_cleanup_queue((q))
+
/*
* tag stuff
*/
char name[4]; /* drive name, such as "hda" */
char driver_req[10]; /* requests specific driver */
- request_queue_t queue; /* request queue */
+ request_queue_t *queue; /* request queue */
struct request *rq; /* current request */
struct ide_drive_s *next; /* circular list of hwgroup drives */
extern void ide_hwif_release_regions(ide_hwif_t* hwif);
extern void ide_unregister (unsigned int index);
-extern void export_ide_init_queue(ide_drive_t *);
+extern int export_ide_init_queue(ide_drive_t *);
extern u8 export_probe_for_drive(ide_drive_t *);
extern int probe_hwif_init(ide_hwif_t *);
static inline int ata_pending_commands(ide_drive_t *drive)
{
if (drive->using_tcq)
- return blk_queue_tag_depth(&drive->queue);
+ return blk_queue_tag_depth(drive->queue);
return 0;
}
static inline int ata_can_queue(ide_drive_t *drive)
{
if (drive->using_tcq)
- return blk_queue_tag_queue(&drive->queue);
+ return blk_queue_tag_queue(drive->queue);
return 1;
}
struct semaphore lo_bh_mutex;
atomic_t lo_pending;
- request_queue_t lo_queue;
+ request_queue_t *lo_queue;
};
#endif /* __KERNEL__ */
unsigned int safemode_delay;
struct timer_list safemode_timer;
atomic_t writes_pending;
- request_queue_t queue; /* for plugging ... */
+ request_queue_t *queue; /* for plugging ... */
struct list_head all_mddevs;
};