static int md_make_request (request_queue_t *q, struct bio *bio)
{
- mddev_t *mddev = kdev_to_mddev(to_kdev_t(bio->bi_bdev->bd_dev));
+ mddev_t *mddev = q->queuedata;
if (mddev && mddev->pers)
 return mddev->pers->make_request(mddev, bio_rw(bio), bio);
else {
 bio_io_error(bio);
 return 0;
}
}
+static int md_fail_request (request_queue_t *q, struct bio *bio)
+{
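+ /* any bio reaching an unconfigured array is failed immediately */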
+ bio_io_error(bio);
+ return 0;
+}
+
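To make the dispatch above concrete, here is a minimal userspace model of it (the struct and function names below are simplified stand-ins, not the kernel's types): each queue carries its make_request_fn plus a queuedata back-pointer to the owning mddev, so md_make_request no longer needs to map a device number back to its array, and an unbound queue simply fails every bio.

#include <stdio.h>

/* Simplified stand-ins for request_queue_t and struct bio. */
struct bio { int error; };
struct queue {
	int (*make_request_fn)(struct queue *q, struct bio *bio);
	void *queuedata;                 /* the owning "mddev", if any */
};

/* Models md_fail_request: no personality is bound, fail the bio. */
static int fail_request(struct queue *q, struct bio *bio)
{
	(void)q;
	bio->error = -5;                 /* -EIO, as bio_io_error() reports */
	return 0;
}

/* Models md_make_request: the queue already knows its mddev. */
static int array_request(struct queue *q, struct bio *bio)
{
	(void)bio;
	printf("dispatching via queuedata=%p\n", q->queuedata);
	return 0;
}

int main(void)
{
	int fake_mddev;
	struct queue unbound = { .make_request_fn = fail_request };
	struct queue bound   = { .make_request_fn = array_request,
				 .queuedata = &fake_mddev };
	struct bio bio = { 0 };

	unbound.make_request_fn(&unbound, &bio);   /* fails: error = -5 */
	bound.make_request_fn(&bound, &bio);       /* reaches the array */
	printf("error after unbound submit: %d\n", bio.error);
	return 0;
}

Run it and the unbound submit reports -5 (EIO) while the bound one reaches the array's handler, which is exactly the split the two hunks above set up.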
static mddev_t * alloc_mddev(kdev_t dev)
{
mddev_t *mddev;
}
mddev->pers = pers[pnum];
+ blk_queue_make_request(&mddev->queue, md_make_request);
+ mddev->queue.queuedata = mddev;
+
err = mddev->pers->run(mddev);
if (err) {
printk(KERN_ERR "md: pers->run() failed ...\n");
}
+request_queue_t * md_queue_proc(kdev_t dev)
+{
+ mddev_t *mddev = kdev_to_mddev(dev);
+ if (mddev == NULL)
+ return BLK_DEFAULT_QUEUE(MAJOR_NR);
+ else
+ return &mddev->queue;
+}
+
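md_queue_proc slots into the block layer's per-major queue hook: in kernels of this era, generic_make_request obtains the queue via blk_get_queue(), which calls blk_dev[major].queue if that function pointer is set and falls back to the default queue otherwise. A rough sketch of that selection follows; blk_dev_entry, get_queue and per_array_q are illustrative stand-ins, not kernel names.

#include <stdio.h>

typedef int kdev_t;
struct queue { int id; };

struct blk_dev_entry {
	struct queue *(*queue)(kdev_t dev);  /* like blk_dev[major].queue */
	struct queue *default_queue;         /* like BLK_DEFAULT_QUEUE()  */
};

static struct queue per_array_q = { 1 };

/* Models md_queue_proc for a device that has been configured. */
static struct queue *queue_proc(kdev_t dev)
{
	(void)dev;
	return &per_array_q;
}

static struct queue *get_queue(struct blk_dev_entry *bd, kdev_t dev)
{
	if (bd->queue)                  /* md installs md_queue_proc here */
		return bd->queue(dev);
	return bd->default_queue;       /* majors without a queue hook */
}

int main(void)
{
	struct blk_dev_entry md = { .queue = queue_proc };
	printf("selected queue id = %d\n", get_queue(&md, 0)->id);
	return 0;
}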
int __init md_init(void)
{
static char * name = "mdrecoveryd";
S_IFBLK | S_IRUSR | S_IWUSR, &md_fops, NULL);
}
- /* forward all md request to md_make_request */
- blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_make_request);
+ /* all requests on an uninitialised device get failed... */
+ blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_fail_request);
+ blk_dev[MAJOR_NR].queue = md_queue_proc;
add_gendisk(&md_gendisk);
}
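Taken together, the two registrations cooperate: until an array is assembled, bios for it land on the default queue, whose make_request function now fails them immediately; once do_md_run has set up the per-array queue, md_queue_proc steers new bios there instead.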
static void raid5_unplug_device(void *data)
{
- raid5_conf_t *conf = (raid5_conf_t *)data;
+ request_queue_t *q = data;
+ mddev_t *mddev = q->queuedata;
+ raid5_conf_t *conf = mddev_to_conf(mddev);
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
- raid5_activate_delayed(conf);
-
- conf->plugged = 0;
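+ /* blk_remove_plug() returns nonzero only if the queue was still
+  * plugged, so the delayed stripes are released exactly once */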
+ if (blk_remove_plug(q))
+ raid5_activate_delayed(conf);
md_wakeup_thread(conf->thread);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void raid5_plug_device(raid5_conf_t *conf)
{
spin_lock_irq(&conf->device_lock);
- if (list_empty(&conf->delayed_list))
- if (!conf->plugged) {
- conf->plugged = 1;
- queue_task(&conf->plug_tq, &tq_disk);
- }
+ blk_plug_device(&conf->mddev->queue);
spin_unlock_irq(&conf->device_lock);
}
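The old plugged flag and tq_struct are gone from the raid5 side entirely: blk_plug_device() records the plugged state in the array's own queue, and the block layer decides when to call back into raid5_unplug_device.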
static int make_request (mddev_t *mddev, int rw, struct bio * bi)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev_to_conf(mddev);
const unsigned int raid_disks = conf->raid_disks;
const unsigned int data_disks = raid_disks - 1;
unsigned int dd_idx, pd_idx;
if (list_empty(&conf->handle_list) &&
atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
- !conf->plugged &&
+ !blk_queue_plugged(&mddev->queue) &&
!list_empty(&conf->delayed_list))
raid5_activate_delayed(conf);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
- conf->plugged = 0;
- conf->plug_tq.sync = 0;
- conf->plug_tq.routine = &raid5_unplug_device;
- conf->plug_tq.data = conf;
+ mddev->queue.unplug_fn = raid5_unplug_device;
PRINTK("raid5: run(md%d) called.\n", mdidx(mddev));
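With unplug_fn set here in run(), the paths that used to kick the tq_disk task queue now reach raid5 through blk_run_queues(), which invokes the unplug function of each plugged queue.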
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
+ request_queue_t queue; /* for plugging ... */
+
struct list_head all_mddevs;
};
* is put on a "delayed" queue until there are no stripes currently
* in a pre-read phase. Further, if the "delayed" queue is empty when
* a stripe is put on it then we "plug" the queue and do not process it
- * until an unplg call is made. (the tq_disk list is run).
+ * until an unplug call is made. (blk_run_queues() is called).
*
* When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
* it to the count of prereading stripes.
* waiting for 25% to be free
*/
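Since the comment above carries the core of the new scheme, a small userspace model of the handshake may help (one queue, no locking; the real code holds conf->device_lock, and plugged queues sit on a global list that blk_run_queues() walks). It shows why raid5_unplug_device keys off blk_remove_plug's return value: only the call that actually removes the plug activates the delayed stripes.

#include <stdio.h>

/* Userspace model of queue plugging; plug() and remove_plug() stand in
 * for blk_plug_device() and blk_remove_plug(). */
struct queue {
	int plugged;
	void (*unplug_fn)(void *data);
};

static void plug(struct queue *q)        /* models blk_plug_device()  */
{
	q->plugged = 1;
}

static int remove_plug(struct queue *q)  /* models blk_remove_plug()  */
{
	int was_plugged = q->plugged;
	q->plugged = 0;
	return was_plugged;
}

static void raid5_style_unplug(void *data)
{
	struct queue *q = data;
	if (remove_plug(q))              /* first unplug wins...        */
		printf("activating delayed stripes\n");
	else                             /* ...later ones do nothing    */
		printf("already unplugged, nothing to do\n");
}

int main(void)
{
	struct queue q = { .unplug_fn = raid5_style_unplug };

	plug(&q);            /* raid5_plug_device: hold back new work  */
	q.unplug_fn(&q);     /* blk_run_queues: release it once        */
	q.unplug_fn(&q);     /* a second unplug is harmless            */
	return 0;
}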
spinlock_t device_lock;
-
- int plugged;
- struct tq_struct plug_tq;
};
typedef struct raid5_private_data raid5_conf_t;