]> git.neil.brown.name Git - history.git/commitdiff
[PATCH] md 2 of 22 - Make device plugging work for md/raid5
authorNeil Brown <neilb@cse.unsw.edu.au>
Tue, 18 Jun 2002 11:15:33 +0000 (04:15 -0700)
committerLinus Torvalds <torvalds@home.transmeta.com>
Tue, 18 Jun 2002 11:15:33 +0000 (04:15 -0700)
We embed a request_queue_t in the mddev structure and so
have a separate one for each mddev.
This is used for plugging (in raid5).

Given this embedded request_queue_t, md_make_request no longer
needs to map from device number to mddev, but can map from
the queue to the mddev instead.

drivers/md/md.c
drivers/md/raid5.c
include/linux/raid/md_k.h
include/linux/raid/raid5.h

index c37d56fd15738aae41d70561dcd9ed614bbde5fb..0cf3192212af721e7b57c5435a52e37a44713ebb 100644 (file)
@@ -172,7 +172,7 @@ void del_mddev_mapping(mddev_t * mddev, kdev_t dev)
 
 static int md_make_request (request_queue_t *q, struct bio *bio)
 {
-       mddev_t *mddev = kdev_to_mddev(to_kdev_t(bio->bi_bdev->bd_dev));
+       mddev_t *mddev = q->queuedata;
 
        if (mddev && mddev->pers)
                return mddev->pers->make_request(mddev, bio_rw(bio), bio);
@@ -182,6 +182,12 @@ static int md_make_request (request_queue_t *q, struct bio *bio)
        }
 }
 
+static int md_fail_request (request_queue_t *q, struct bio *bio)
+{
+       bio_io_error(bio);
+       return 0;
+}
+
 static mddev_t * alloc_mddev(kdev_t dev)
 {
        mddev_t *mddev;
@@ -1711,6 +1717,9 @@ static int do_md_run(mddev_t * mddev)
        }
        mddev->pers = pers[pnum];
 
+       blk_queue_make_request(&mddev->queue, md_make_request);
+       mddev->queue.queuedata = mddev;
+
        err = mddev->pers->run(mddev);
        if (err) {
                printk(KERN_ERR "md: pers->run() failed ...\n");
@@ -3616,6 +3625,15 @@ static void md_geninit(void)
 #endif
 }
 
+request_queue_t * md_queue_proc(kdev_t dev)
+{
+       mddev_t *mddev = kdev_to_mddev(dev);
+       if (mddev == NULL)
+               return BLK_DEFAULT_QUEUE(MAJOR_NR);
+       else
+               return &mddev->queue;
+}
+
 int __init md_init(void)
 {
        static char * name = "mdrecoveryd";
@@ -3640,8 +3658,9 @@ int __init md_init(void)
                        S_IFBLK | S_IRUSR | S_IWUSR, &md_fops, NULL);
        }
 
-       /* forward all md request to md_make_request */
-       blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_make_request);
+       /* all requests on an uninitialised device get failed... */
+       blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_fail_request);
+       blk_dev[MAJOR_NR].queue = md_queue_proc;
 
        add_gendisk(&md_gendisk);
 
index e7743fcad2b974710862ec516df0a13e7cb8c727..83afd1a8aac3e91621433d397dee0fd94440a7f4 100644 (file)
@@ -1225,14 +1225,15 @@ static inline void raid5_activate_delayed(raid5_conf_t *conf)
 }
 static void raid5_unplug_device(void *data)
 {
-       raid5_conf_t *conf = (raid5_conf_t *)data;
+       request_queue_t *q = data;
+       mddev_t *mddev = q->queuedata;
+       raid5_conf_t *conf = mddev_to_conf(mddev);
        unsigned long flags;
 
        spin_lock_irqsave(&conf->device_lock, flags);
 
-       raid5_activate_delayed(conf);
-       
-       conf->plugged = 0;
+       if (blk_remove_plug(q))
+               raid5_activate_delayed(conf);
        md_wakeup_thread(conf->thread);
 
        spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -1241,17 +1242,13 @@ static void raid5_unplug_device(void *data)
 static inline void raid5_plug_device(raid5_conf_t *conf)
 {
        spin_lock_irq(&conf->device_lock);
-       if (list_empty(&conf->delayed_list))
-               if (!conf->plugged) {
-                       conf->plugged = 1;
-                       queue_task(&conf->plug_tq, &tq_disk);
-               }
+       blk_plug_device(&conf->mddev->queue);
        spin_unlock_irq(&conf->device_lock);
 }
 
 static int make_request (mddev_t *mddev, int rw, struct bio * bi)
 {
-       raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+       raid5_conf_t *conf = mddev_to_conf(mddev);
        const unsigned int raid_disks = conf->raid_disks;
        const unsigned int data_disks = raid_disks - 1;
        unsigned int dd_idx, pd_idx;
@@ -1352,7 +1349,7 @@ static void raid5d (void *data)
 
                if (list_empty(&conf->handle_list) &&
                    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
-                   !conf->plugged &&
+                   !blk_queue_plugged(&mddev->queue) &&
                    !list_empty(&conf->delayed_list))
                        raid5_activate_delayed(conf);
 
@@ -1443,10 +1440,7 @@ static int run (mddev_t *mddev)
        atomic_set(&conf->active_stripes, 0);
        atomic_set(&conf->preread_active_stripes, 0);
 
-       conf->plugged = 0;
-       conf->plug_tq.sync = 0;
-       conf->plug_tq.routine = &raid5_unplug_device;
-       conf->plug_tq.data = conf;
+       mddev->queue.unplug_fn = raid5_unplug_device;
 
        PRINTK("raid5: run(md%d) called.\n", mdidx(mddev));
 
index 7b270a50487ee7b9fb3fb1f11870d48a12e6df5f..94ed760bd22180ccfd300483e8ab9e50fc76b175 100644 (file)
@@ -214,6 +214,8 @@ struct mddev_s
        atomic_t                        recovery_active; /* blocks scheduled, but not written */
        wait_queue_head_t               recovery_wait;
 
+       request_queue_t                 queue;  /* for plugging ... */
+
        struct list_head                all_mddevs;
 };
 
index 5c25120581a79efd7fd428fd8a3fc5e8f54fe3c8..661783287da51211e8e0f9ca3e1c42ec6fd82ba3 100644 (file)
@@ -176,7 +176,7 @@ struct stripe_head {
  * is put on a "delayed" queue until there are no stripes currently
  * in a pre-read phase.  Further, if the "delayed" queue is empty when
  * a stripe is put on it then we "plug" the queue and do not process it
- * until an unplg call is made. (the tq_disk list is run).
+ * until an unplug call is made. (blk_run_queues is run).
  *
  * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
  * it to the count of prereading stripes.
@@ -228,9 +228,6 @@ struct raid5_private_data {
                                                         * waiting for 25% to be free
                                                         */        
        spinlock_t              device_lock;
-
-       int                     plugged;
-       struct tq_struct        plug_tq;
 };
 
 typedef struct raid5_private_data raid5_conf_t;