BufferHeader->bi_end_io(BufferHeader);
}
+/*
+ * Return the partition number targeted by Command's buffer head, by
+ * translating the bio's block_device back to a kdev_t for the legacy
+ * DAC960_PartitionNumber() helper.  Used below so the error printers
+ * no longer touch the removed bio->bi_dev field directly.
+ */
+static inline int DAC960_PartitionByCommand(DAC960_Command_T *Command)
+{
+ return DAC960_PartitionNumber(to_kdev_t(Command->BufferHeader->bi_bdev->bd_dev));
+}
/*
DAC960_V1_ReadWriteError prints an appropriate error message for Command
Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber, Command->BlockNumber,
Command->BlockNumber + Command->BlockCount - 1);
- if (DAC960_PartitionNumber(Command->BufferHeader->bi_dev) > 0)
+ if (DAC960_PartitionByCommand(Command) > 0)
DAC960_Error(" /dev/rd/c%dd%dp%d: relative blocks %u..%u\n",
Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber,
- DAC960_PartitionNumber(Command->BufferHeader->bi_dev),
+ DAC960_PartitionByCommand(Command),
Command->BufferHeader->bi_sector,
Command->BufferHeader->bi_sector + Command->BlockCount - 1);
}
Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber, Command->BlockNumber,
Command->BlockNumber + Command->BlockCount - 1);
- if (DAC960_PartitionNumber(Command->BufferHeader->bi_dev) > 0)
+ if (DAC960_PartitionByCommand(Command) > 0)
DAC960_Error(" /dev/rd/c%dd%dp%d: relative blocks %u..%u\n",
Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber,
- DAC960_PartitionNumber(Command->BufferHeader->bi_dev),
+ DAC960_PartitionByCommand(Command),
Command->BufferHeader->bi_sector,
Command->BufferHeader->bi_sector + Command->BlockCount - 1);
}
/*
* same device and no special stuff set, merge is ok
*/
- if (kdev_same(rq->rq_dev, bio->bi_dev) && !rq->waiting && !rq->special)
+ if (kdev_same(rq->rq_dev, to_kdev_t(bio->bi_bdev->bd_dev)) &&
+ !rq->waiting && !rq->special)
return 1;
return 0;
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
- req->rq_dev = bio->bi_dev;
+ req->rq_dev = to_kdev_t(bio->bi_bdev->bd_dev);
add_request(q, req, insert_here);
out:
if (freereq)
*/
static inline void blk_partition_remap(struct bio *bio)
{
-	int major, minor, drive, minor0;
+ struct block_device *bdev = bio->bi_bdev;
	struct gendisk *g;
-	kdev_t dev0;
-
-	major = major(bio->bi_dev);
-	if ((g = get_gendisk(bio->bi_dev))) {
-		minor = minor(bio->bi_dev);
-		drive = (minor >> g->minor_shift);
-		minor0 = (drive << g->minor_shift); /* whole disk device */
-		/* that is, minor0 = (minor & ~((1<<g->minor_shift)-1)); */
-		dev0 = mk_kdev(major, minor0);
-		if (!kdev_same(dev0, bio->bi_dev)) {
-			bio->bi_dev = dev0;
-			bio->bi_sector += g->part[minor].start_sect;
-		}
-		/* lots of checks are possible */
-	}
+
+ /* A whole-disk bdev is its own container (bd_contains == bdev):
+  * nothing to remap in that case. */
+ if (bdev == bdev->bd_contains)
+ return;
+
+ /* NOTE(review): an opened partition bdev is assumed to always have a
+  * registered gendisk — confirm no caller can get here before
+  * get_gendisk() can resolve the device. */
+ g = get_gendisk(to_kdev_t(bdev->bd_dev));
+ if (!g)
+ BUG();
+
+ /* Rebase the bio from partition-relative sectors to whole-disk
+  * sectors, then retarget it at the containing (whole-disk) bdev. */
+ bio->bi_sector += g->part[minor(to_kdev_t((bdev->bd_dev)))].start_sect;
+ bio->bi_bdev = bdev->bd_contains;
+ /* lots of checks are possible */
}
/**
int ret, nr_sectors = bio_sectors(bio);
/* Test device or partition size, when known. */
- maxsector = (blkdev_size_in_bytes(bio->bi_dev) >> 9);
+ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
if (maxsector) {
sector_t sector = bio->bi_sector;
printk(KERN_INFO
"attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%ld, want=%ld, limit=%Lu\n",
- kdevname(bio->bi_dev), bio->bi_rw,
+ kdevname(to_kdev_t(bio->bi_bdev->bd_dev)),
+ bio->bi_rw,
sector + nr_sectors,
(long long) maxsector);
* Stacking drivers are expected to know what they are doing.
*/
do {
- q = blk_get_queue(bio->bi_dev);
+ q = blk_get_queue(to_kdev_t(bio->bi_bdev->bd_dev));
if (!q) {
printk(KERN_ERR
"generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
- kdevname(bio->bi_dev), (long long) bio->bi_sector);
+ kdevname(to_kdev_t(bio->bi_bdev->bd_dev)),
+ (long long) bio->bi_sector);
end_io:
bio->bi_end_io(bio);
break;
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio->bi_dev = bh->b_dev;
+ bio->bi_bdev = bh->b_bdev;
bio->bi_io_vec[0].bv_page = bh->b_page;
bio->bi_io_vec[0].bv_len = bh->b_size;
bio->bi_io_vec[0].bv_offset = bh_offset(bh);
static void loop_end_io_transfer(struct bio *bio)
{
struct bio *rbh = bio->bi_private;
- struct loop_device *lo = &loop_dev[minor(rbh->bi_dev)];
+ struct loop_device *lo = &loop_dev[minor(to_kdev_t(rbh->bi_bdev->bd_dev))];
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
if (!uptodate || bio_rw(bio) == WRITE) {
bio->bi_sector = rbh->bi_sector + (lo->lo_offset >> 9);
bio->bi_rw = rbh->bi_rw;
spin_lock_irq(&lo->lo_lock);
- bio->bi_dev = to_kdev_t(lo->lo_device->bd_dev);
+ bio->bi_bdev = lo->lo_device;
spin_unlock_irq(&lo->lo_lock);
return bio;
struct loop_device *lo;
unsigned long IV;
int rw = bio_rw(rbh);
+ int unit = minor(to_kdev_t(rbh->bi_bdev->bd_dev));
- if (minor(rbh->bi_dev) >= max_loop)
+ if (unit >= max_loop)
goto out;
- lo = &loop_dev[minor(rbh->bi_dev)];
+ lo = &loop_dev[unit];
spin_lock_irq(&lo->lo_lock);
if (lo->lo_state != Lo_bound)
goto inactive;
unsigned long offset, len;
int rw = sbh->bi_rw;
- minor = minor(sbh->bi_dev);
+ minor = minor(to_kdev_t(sbh->bi_bdev->bd_dev));
if (minor >= NUM_RAMDISKS)
goto fail;
bio_io_error(bio);
return 0;
}
- bio->bi_dev = tmp_dev->dev;
+ bio->bi_bdev = tmp_dev->bdev;
bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1);
return 1;
+#error Broken until maintainers will sanitize kdev_t handling
/*
* kernel/lvm.c
*
static int md_make_request (request_queue_t *q, struct bio *bio)
{
- mddev_t *mddev = kdev_to_mddev(bio->bi_dev);
+ mddev_t *mddev = kdev_to_mddev(to_kdev_t(bio->bi_bdev->bd_dev));
if (mddev && mddev->pers)
return mddev->pers->make_request(mddev, bio_rw(bio), bio);
}
-static int multipath_map (mddev_t *mddev, kdev_t *dev)
+static int multipath_map (mddev_t *mddev, struct block_device **bdev)
{
multipath_conf_t *conf = mddev_to_conf(mddev);
int i, disks = MD_SB_DISKS;
for (i = 0; i < disks; i++) {
if (conf->multipaths[i].operational) {
- *dev = conf->multipaths[i].dev;
+ *bdev = conf->multipaths[i].bdev;
return (0);
}
}
* this branch is our 'one multipath IO has finished' event handler:
*/
if (!uptodate)
- md_error (mp_bh->mddev, bio->bi_dev);
+ md_error (mp_bh->mddev, to_kdev_t(bio->bi_bdev->bd_dev));
else
/*
* Set MPBH_Uptodate in our master buffer_head, so that
* oops, IO error:
*/
printk(KERN_ERR "multipath: %s: rescheduling sector %lu\n",
- partition_name(bio->bi_dev), bio->bi_sector);
+ bdev_partition_name(bio->bi_bdev), bio->bi_sector);
multipath_reschedule_retry(mp_bh);
return;
}
multipath = conf->multipaths + multipath_read_balance(conf);
real_bio = bio_clone(bio, GFP_NOIO);
- real_bio->bi_dev = multipath->dev;
+ real_bio->bi_bdev = multipath->bdev;
real_bio->bi_rw = rw;
real_bio->bi_end_io = multipath_end_request;
real_bio->bi_private = mp_bh;
struct bio *bio;
unsigned long flags;
mddev_t *mddev;
- kdev_t dev;
+ struct block_device *bdev;
for (;;) {
spin_lock_irqsave(&retry_list_lock, flags);
md_update_sb(mddev);
}
bio = mp_bh->bio;
- dev = bio->bi_dev;
+ bdev = bio->bi_bdev;
- multipath_map (mddev, &bio->bi_dev);
- if (kdev_same(bio->bi_dev, dev)) {
+ multipath_map (mddev, &bio->bi_bdev);
+ if (bio->bi_bdev == bdev) {
printk(IO_ERROR,
- partition_name(bio->bi_dev), bio->bi_sector);
+ bdev_partition_name(bio->bi_bdev), bio->bi_sector);
multipath_end_bh_io(mp_bh, 0);
} else {
printk(REDIRECT_SECTOR,
- partition_name(bio->bi_dev), bio->bi_sector);
+ bdev_partition_name(bio->bi_bdev), bio->bi_sector);
generic_make_request(bio);
}
}
* The new BH_Lock semantics in ll_rw_blk.c guarantee that this
* is the only IO operation happening on this bh.
*/
- bio->bi_dev = tmp_dev->dev;
+ bio->bi_bdev = tmp_dev->bdev;
bio->bi_sector = rsect;
/*
mempool_free(r1_bio, conf->r1buf_pool);
}
-static int map(mddev_t *mddev, kdev_t *rdev)
+static int map(mddev_t *mddev, struct block_device **bdev)
{
conf_t *conf = mddev_to_conf(mddev);
int i, disks = MD_SB_DISKS;
for (i = 0; i < disks; i++) {
if (conf->mirrors[i].operational) {
- *rdev = conf->mirrors[i].dev;
+ *bdev = conf->mirrors[i].bdev;
return 0;
}
}
* this branch is our 'one mirror IO has finished' event handler:
*/
if (!uptodate)
- md_error(r1_bio->mddev, bio->bi_dev);
+ md_error(r1_bio->mddev, to_kdev_t(bio->bi_bdev->bd_dev));
else
/*
* Set R1BIO_Uptodate in our master bio, so that
* oops, read error:
*/
printk(KERN_ERR "raid1: %s: rescheduling sector %lu\n",
- partition_name(bio->bi_dev), r1_bio->sector);
+ bdev_partition_name(bio->bi_bdev), r1_bio->sector);
reschedule_retry(r1_bio);
return;
}
r1_bio->read_bio = read_bio;
read_bio->bi_sector = r1_bio->sector;
- read_bio->bi_dev = mirror->dev;
+ read_bio->bi_bdev = mirror->bdev;
read_bio->bi_end_io = end_request;
read_bio->bi_rw = rw;
read_bio->bi_private = r1_bio;
r1_bio->write_bios[i] = mbio;
mbio->bi_sector = r1_bio->sector;
- mbio->bi_dev = conf->mirrors[i].dev;
+ mbio->bi_bdev = conf->mirrors[i].bdev;
mbio->bi_end_io = end_request;
mbio->bi_rw = rw;
mbio->bi_private = r1_bio;
* We don't do much here, just schedule handling by raid1d
*/
if (!uptodate)
- md_error (r1_bio->mddev, bio->bi_dev);
+ md_error (r1_bio->mddev, to_kdev_t(bio->bi_bdev->bd_dev));
else
set_bit(R1BIO_Uptodate, &r1_bio->state);
reschedule_retry(r1_bio);
int i;
if (!uptodate)
- md_error(mddev, bio->bi_dev);
+ md_error(mddev, to_kdev_t(bio->bi_bdev->bd_dev));
for (i = 0; i < MD_SB_DISKS; i++)
if (r1_bio->write_bios[i] == bio) {
* There is no point trying a read-for-reconstruct as
* reconstruct is about to be aborted
*/
- printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector);
+ printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
resume_device(conf);
put_buf(r1_bio);
if (r1_bio->write_bios[i])
BUG();
r1_bio->write_bios[i] = mbio;
- mbio->bi_dev = conf->mirrors[i].dev;
+ mbio->bi_bdev = conf->mirrors[i].bdev;
mbio->bi_sector = r1_bio->sector;
mbio->bi_end_io = end_sync_write;
mbio->bi_rw = WRITE;
* Nowhere to write this to... I guess we
* must be done
*/
- printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector);
+ printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
resume_device(conf);
put_buf(r1_bio);
if (!mbio)
continue;
- md_sync_acct(mbio->bi_dev, mbio->bi_size >> 9);
+ md_sync_acct(to_kdev_t(mbio->bi_bdev->bd_dev), mbio->bi_size >> 9);
generic_make_request(mbio);
atomic_inc(&conf->mirrors[i].nr_pending);
}
unsigned long flags;
mddev_t *mddev;
conf_t *conf;
- kdev_t dev;
+ struct block_device *bdev;
for (;;) {
break;
case READ:
case READA:
- dev = bio->bi_dev;
- map(mddev, &bio->bi_dev);
- if (kdev_same(bio->bi_dev, dev)) {
- printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector);
+ bdev = bio->bi_bdev;
+ map(mddev, &bio->bi_bdev);
+ if (bio->bi_bdev == bdev) {
+ printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
raid_end_bio_io(r1_bio, 0);
break;
}
printk(REDIRECT_SECTOR,
- partition_name(bio->bi_dev), r1_bio->sector);
+ bdev_partition_name(bio->bi_bdev), r1_bio->sector);
bio->bi_sector = r1_bio->sector;
bio->bi_rw = r1_bio->cmd;
read_bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
read_bio->bi_sector = sector_nr;
- read_bio->bi_dev = mirror->dev;
+ read_bio->bi_bdev = mirror->bdev;
read_bio->bi_end_io = end_sync_read;
read_bio->bi_rw = READ;
read_bio->bi_private = r1_bio;
BUG();
r1_bio->read_bio = read_bio;
- md_sync_acct(read_bio->bi_dev, nr_sectors);
+ md_sync_acct(to_kdev_t(read_bio->bi_bdev->bd_dev), nr_sectors);
generic_make_request(read_bio);
atomic_inc(&conf->mirrors[conf->last_used].nr_pending);
bio->bi_io_vec = bio_src->bi_io_vec;
bio->bi_sector = bio_src->bi_sector;
- bio->bi_dev = bio_src->bi_dev;
+ bio->bi_bdev = bio_src->bi_bdev;
bio->bi_flags |= 1 << BIO_CLONED;
bio->bi_rw = bio_src->bi_rw;
}
b->bi_sector = bio->bi_sector;
- b->bi_dev = bio->bi_dev;
+ b->bi_bdev = bio->bi_bdev;
b->bi_rw = bio->bi_rw;
b->bi_vcnt = bio->bi_vcnt;
}
bio->bi_sector = sector;
- bio->bi_dev = dev;
+ bio->bi_bdev = bdev;
bio->bi_idx = 0;
bio->bi_end_io = bio_end_io_kio;
bio->bi_private = kio;
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio->bi_dev = to_kdev_t(log->bdev->bd_dev);
+ bio->bi_bdev = log->bdev;
bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
bio->bi_io_vec[0].bv_len = LOGPSIZE;
bio->bi_io_vec[0].bv_offset = 0;
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio->bi_dev = to_kdev_t(log->bdev->bd_dev);
+ bio->bi_bdev = log->bdev;
bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
bio->bi_io_vec[0].bv_len = LOGPSIZE;
bio->bi_io_vec[0].bv_offset = 0;
struct bio {
sector_t bi_sector;
struct bio *bi_next; /* request queue link */
- kdev_t bi_dev; /* will be block device */
+ struct block_device *bi_bdev;
unsigned long bi_flags; /* status, command, etc */
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data);
extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev);
extern char * partition_name (kdev_t dev);
+/*
+ * Convenience wrapper so md code holding a struct block_device can call
+ * the kdev_t-based partition_name() without open-coding the
+ * to_kdev_t(bdev->bd_dev) translation at every call site.
+ * NOTE(review): lifetime/ownership of the returned string follows
+ * partition_name() — confirm it remains valid after the call.
+ */
+extern inline char * bdev_partition_name (struct block_device *bdev)
+{
+ return partition_name(to_kdev_t(bdev->bd_dev));
+}
extern int register_md_personality (int p_num, mdk_personality_t *p);
extern int unregister_md_personality (int p_num);
extern mdk_thread_t * md_register_thread (void (*run) (void *data),
}
}
- bio->bi_dev = (*bio_orig)->bi_dev;
+ bio->bi_bdev = (*bio_orig)->bi_bdev;
bio->bi_sector = (*bio_orig)->bi_sector;
bio->bi_rw = (*bio_orig)->bi_rw;