/* MULTIPATH personality descriptor and module-global retry state. */
static mdk_personality_t multipath_personality;
/* Patch: md's private md_spinlock_t wrapper replaced by the generic
 * kernel spinlock API; locking semantics are unchanged. */
-static md_spinlock_t retry_list_lock = MD_SPIN_LOCK_UNLOCKED;
+static spinlock_t retry_list_lock = SPIN_LOCK_UNLOCKED;
/* Singly-linked FIFO of failed requests awaiting resubmission by the
 * multipathd thread; tail pointer allows O(1) append under retry_list_lock. */
struct multipath_bh *multipath_retry_list = NULL, **multipath_retry_tail;
static int multipath_diskop(mddev_t *mddev, mdp_disk_t **d, int state);
struct multipath_bh *mp_bh = NULL;
do {
- md_spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(&conf->device_lock);
if (!conf->freer1_blocked && conf->freer1) {
mp_bh = conf->freer1;
conf->freer1 = mp_bh->next_mp;
conf->freer1_cnt--;
mp_bh->next_mp = NULL;
mp_bh->state = (1 << MPBH_PreAlloc);
- mp_bh->bh_req.b_state = 0;
}
- md_spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
if (mp_bh)
return mp_bh;
mp_bh = (struct multipath_bh *) kmalloc(sizeof(struct multipath_bh),
if (test_bit(MPBH_PreAlloc, &mp_bh->state)) {
unsigned long flags;
+ mp_bh->bio = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
mp_bh->next_mp = conf->freer1;
conf->freer1 = mp_bh;
/* Drain and kfree() every entry of the pre-allocated multipath_bh pool
 * (conf->freer1), keeping freer1_cnt consistent.  Runs under device_lock;
 * callers must ensure no I/O is still using pooled entries.
 * Patch: md_spin_* wrappers -> generic spin_lock_irq/spin_unlock_irq. */
static void multipath_shrink_mpbh(multipath_conf_t *conf)
{
- md_spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(&conf->device_lock);
while (conf->freer1) {
struct multipath_bh *mp_bh = conf->freer1;
conf->freer1 = mp_bh->next_mp;
conf->freer1_cnt--;
kfree(mp_bh);
}
- md_spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
}
-static int multipath_map (mddev_t *mddev, kdev_t *rdev)
+static int multipath_map (mddev_t *mddev, kdev_t *dev)
{
multipath_conf_t *conf = mddev_to_conf(mddev);
int i, disks = MD_SB_DISKS;
for (i = 0; i < disks; i++) {
if (conf->multipaths[i].operational) {
- *rdev = conf->multipaths[i].dev;
+ *dev = conf->multipaths[i].dev;
return (0);
}
}
mddev_t *mddev = mp_bh->mddev;
multipath_conf_t *conf = mddev_to_conf(mddev);
- md_spin_lock_irqsave(&retry_list_lock, flags);
+ spin_lock_irqsave(&retry_list_lock, flags);
if (multipath_retry_list == NULL)
multipath_retry_tail = &multipath_retry_list;
*multipath_retry_tail = mp_bh;
multipath_retry_tail = &mp_bh->next_mp;
mp_bh->next_mp = NULL;
- md_spin_unlock_irqrestore(&retry_list_lock, flags);
+ spin_unlock_irqrestore(&retry_list_lock, flags);
md_wakeup_thread(conf->thread);
}
*/
/* Complete the original (master) request once one path's I/O has finished:
 * signal completion to the upper layer, drop our reference on the cloned
 * per-path bio, then recycle the multipath_bh via the pool/free path.
 * Patch: master buffer_head + b_end_io callback -> master bio + bio_endio(),
 * with an explicit bio_put() since the clone was taken in make_request. */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int uptodate)
{
- struct buffer_head *bh = mp_bh->master_bh;
+ struct bio *bio = mp_bh->master_bio;
- bh->b_end_io(bh, uptodate);
+ bio_endio(bio, uptodate);
/* release the cloned bio allocated by bio_clone() in make_request */
+ bio_put(mp_bh->bio);
multipath_free_mpbh(mp_bh);
}
-void multipath_end_request (struct buffer_head *bh, int uptodate)
+void multipath_end_request(struct bio *bio)
{
- struct multipath_bh * mp_bh = (struct multipath_bh *)(bh->b_private);
+ int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
/*
* this branch is our 'one multipath IO has finished' event handler:
*/
if (!uptodate)
- md_error (mp_bh->mddev, bh->b_dev);
+ md_error (mp_bh->mddev, bio->bi_dev);
else
/*
* Set MPBH_Uptodate in our master buffer_head, so that
/*
* oops, IO error:
*/
- printk(KERN_ERR "multipath: %s: rescheduling block %lu\n",
- partition_name(bh->b_dev), bh->b_blocknr);
+ printk(KERN_ERR "multipath: %s: rescheduling sector %lu\n",
+ partition_name(bio->bi_dev), bio->bi_sector);
multipath_reschedule_retry(mp_bh);
return;
}
return 0;
}
-static int multipath_make_request (mddev_t *mddev, int rw,
- struct buffer_head * bh)
+static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio)
{
multipath_conf_t *conf = mddev_to_conf(mddev);
- struct buffer_head *bh_req;
+ struct bio *real_bio;
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (!buffer_locked(bh))
- BUG();
-
/*
* make_request() can abort the operation when READA is being
* used and no empty request is available.
mp_bh = multipath_alloc_mpbh (conf);
- mp_bh->master_bh = bh;
+ mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
mp_bh->cmd = rw;
*/
multipath = conf->multipaths + multipath_read_balance(conf);
- bh_req = &mp_bh->bh_req;
- memcpy(bh_req, bh, sizeof(*bh));
- bh_req->b_blocknr = bh->b_rsector;
- bh_req->b_dev = multipath->dev;
- /* FIXME - later we will need bdev here */
- bh_req->b_rdev = multipath->dev;
-/* bh_req->b_rsector = bh->n_rsector; */
- bh_req->b_end_io = multipath_end_request;
- bh_req->b_private = mp_bh;
- generic_make_request (rw, bh_req);
+ real_bio = bio_clone(bio, GFP_NOIO);
+ real_bio->bi_dev = multipath->dev;
+ real_bio->bi_rw = rw;
+ real_bio->bi_end_io = multipath_end_request;
+ real_bio->bi_private = mp_bh;
+ mp_bh->bio = real_bio;
+ generic_make_request(real_bio);
return 0;
}
mdp_super_t *sb = mddev->sb;
mdp_disk_t *failed_desc, *spare_desc, *added_desc;
mdk_rdev_t *spare_rdev, *failed_rdev;
+ struct block_device *bdev;
print_multipath_conf(conf);
- md_spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(&conf->device_lock);
/*
* find the disk ...
*/
*d = failed_desc;
- if (kdev_none(sdisk->dev))
+ if (!sdisk->bdev)
sdisk->used_slot = 0;
/*
* this really activates the spare.
err = 1;
goto abort;
}
+ bdev = rdisk->bdev;
rdisk->dev = NODEV;
+ rdisk->bdev = NULL;
rdisk->used_slot = 0;
conf->nr_disks--;
+ bdput(bdev);
break;
case DISKOP_HOT_ADD_DISK:
adisk->number = added_desc->number;
adisk->raid_disk = added_desc->raid_disk;
adisk->dev = mk_kdev(added_desc->major,added_desc->minor);
+ /* it will be held open by rdev */
+ adisk->bdev = bdget(kdev_t_to_nr(adisk->dev));
adisk->operational = 0;
adisk->spare = 1;
break;
default:
- MD_BUG();
+ MD_BUG();
err = 1;
goto abort;
}
abort:
- md_spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
print_multipath_conf(conf);
return err;
/* md retry daemon for the MULTIPATH personality: pops failed requests off
 * the global retry FIFO and either redirects them to a surviving path or
 * fails them upward when no operational path remains.
 * Patch: retry unit changes from buffer_head to the cloned bio stored in
 * mp_bh->bio; the b_rdev/b_rsector re-aiming dance is no longer needed
 * because multipath_map() rewrites bio->bi_dev in place.
 * Locking note: the `break` below exits the loop with retry_list_lock
 * held; the unlock after the loop releases it (historical md pattern). */
static void multipathd (void *data)
{
struct multipath_bh *mp_bh;
- struct buffer_head *bh;
+ struct bio *bio;
unsigned long flags;
mddev_t *mddev;
kdev_t dev;
-
for (;;) {
- md_spin_lock_irqsave(&retry_list_lock, flags);
+ spin_lock_irqsave(&retry_list_lock, flags);
mp_bh = multipath_retry_list;
if (!mp_bh)
/* list empty: leave loop still holding the lock (see unlock below) */
break;
multipath_retry_list = mp_bh->next_mp;
- md_spin_unlock_irqrestore(&retry_list_lock, flags);
+ spin_unlock_irqrestore(&retry_list_lock, flags);
mddev = mp_bh->mddev;
/* flush superblock state changed by the path failure before retrying */
if (mddev->sb_dirty) {
mddev->sb_dirty = 0;
md_update_sb(mddev);
}
- bh = &mp_bh->bh_req;
- dev = bh->b_dev;
+ bio = mp_bh->bio;
+ dev = bio->bi_dev;
/* remember the failed device; multipath_map() re-aims bi_dev at the
 * first operational path, or leaves it unchanged if none is left */
- multipath_map (mddev, &bh->b_dev);
- if (kdev_same(bh->b_dev, dev)) {
- printk (IO_ERROR, partition_name(bh->b_dev), bh->b_blocknr);
+ multipath_map (mddev, &bio->bi_dev);
+ if (kdev_same(bio->bi_dev, dev)) {
/* device unchanged => no surviving path: fail the master request */
+ printk(IO_ERROR,
+ partition_name(bio->bi_dev), bio->bi_sector);
multipath_end_bh_io(mp_bh, 0);
} else {
- printk (REDIRECT_SECTOR,
- partition_name(bh->b_dev), bh->b_blocknr);
- bh->b_rdev = bh->b_dev;
- bh->b_rsector = bh->b_blocknr;
- generic_make_request (mp_bh->cmd, bh);
+ printk(REDIRECT_SECTOR,
+ partition_name(bio->bi_dev), bio->bi_sector);
/* resubmit the same cloned bio down the newly chosen path */
+ generic_make_request(bio);
}
}
- md_spin_unlock_irqrestore(&retry_list_lock, flags);
+ spin_unlock_irqrestore(&retry_list_lock, flags);
}
#undef IO_ERROR
#undef REDIRECT_SECTOR
multipath_conf_t *conf = mddev_to_conf(mddev);
int disks = MD_SB_DISKS;
kdev_t dev;
+ struct block_device *bdev;
struct buffer_head *bh = NULL;
int i, rc = 0;
char *buffer = NULL;
continue;
printk("(checking disk %d)\n",i);
dev = conf->multipaths[i].dev;
+ bdev = conf->multipaths[i].bdev;
set_blocksize(dev, 4096);
- if ((bh = bread(dev, row / 4, 4096)) == NULL)
+ if ((bh = __bread(bdev, row / 4, 4096)) == NULL)
break;
if (!buffer) {
buffer = (char *) __get_free_page(GFP_KERNEL);
break;
}
bforget(bh);
- fsync_dev(dev);
- invalidate_buffers(dev);
+ fsync_bdev(bdev);
+ invalidate_bdev(bdev, 0);
bh = NULL;
}
if (buffer)
free_page((unsigned long) buffer);
if (bh) {
- dev = bh->b_dev;
+ bdev = bh->b_bdev;
bforget(bh);
- fsync_dev(dev);
- invalidate_buffers(dev);
+ fsync_bdev(bdev);
+ invalidate_bdev(bdev, 0);
}
return rc;
}
mdp_super_t *sb = mddev->sb;
mdp_disk_t *desc, *desc2;
mdk_rdev_t *rdev, *def_rdev = NULL;
- struct md_list_head *tmp;
+ struct list_head *tmp;
int num_rdevs = 0;
MOD_INC_USE_COUNT;
disk->number = desc->number;
disk->raid_disk = desc->raid_disk;
disk->dev = rdev->dev;
+ disk->bdev = rdev->bdev;
+ atomic_inc(&rdev->bdev->bd_count);
disk->operational = 0;
disk->spare = 1;
disk->used_slot = 1;
sb->spare_disks = num_rdevs - 1;
mddev->sb_dirty = 1;
conf->mddev = mddev;
- conf->device_lock = MD_SPIN_LOCK_UNLOCKED;
+ conf->device_lock = SPIN_LOCK_UNLOCKED;
init_waitqueue_head(&conf->wait_buffer);
out_free_conf:
multipath_shrink_mpbh(conf);
+ for (i = 0; i < MD_SB_DISKS; i++)
+ if (conf->multipaths[i].bdev)
+ bdput(conf->multipaths[i].bdev);
kfree(conf);
mddev->private = NULL;
out:
static int multipath_stop (mddev_t *mddev)
{
multipath_conf_t *conf = mddev_to_conf(mddev);
+ int i;
md_unregister_thread(conf->thread);
multipath_shrink_mpbh(conf);
+ for (i = 0; i < MD_SB_DISKS; i++)
+ if (conf->multipaths[i].bdev)
+ bdput(conf->multipaths[i].bdev);
kfree(conf);
mddev->private = NULL;
MOD_DEC_USE_COUNT;
diskop: multipath_diskop,
};
/* Module init: register the MULTIPATH personality with the md core.
 * Patch: md's private md__init alias -> the standard __init section marker. */
-static int md__init multipath_init (void)
+static int __init multipath_init (void)
{
return register_md_personality (MULTIPATH, &multipath_personality);
}
/* Module exit: unregister the personality.  Patch adds the __exit marker
 * so the function can be discarded when the driver is built in. */
-static void multipath_exit (void)
+static void __exit multipath_exit (void)
{
unregister_md_personality (MULTIPATH);
}