return -EIO;
}
- bdev = fs->devs[dev].sb->s_bdev;
+ bdev = fs->devs[dev].bdev;
return lafs_sync_page_io(bdev, sect, 0,
blocks << fs->blocksize_bits,
p, 0) ? 0 : -EIO;
if (ac->state == 4)
return -EIO;
- bdev = fs->devs[dev].sb->s_bdev;
+ bdev = fs->devs[dev].bdev;
ac->state = 2; /* loading */
ac->fs = fs;
async_page_io(bdev, sect, 0,
struct bio *bio = bio_alloc(GFP_NOIO, 1);
int rw = WRITE | (1 << BIO_RW_UNPLUG);
- bio->bi_bdev = fs->devs[dev].sb->s_bdev;
+ bio->bi_bdev = fs->devs[dev].bdev;
bio->bi_sector = addr;
bio_add_page(bio, virt_to_page(buf), size, offset_in_page(buf));
bio->bi_private = fs;
return -EIO;
}
- bdev = fs->devs[dev].sb->s_bdev;
+ bdev = fs->devs[dev].bdev;
if (!bio) {
bio = bio_alloc(GFP_NOIO, 1);
bio = wc->bio;
if (bio && virt == wc->bio_virt &&
- bio->bi_bdev == fs->devs[dev].sb->s_bdev &&
+ bio->bi_bdev == fs->devs[dev].bdev &&
which == wc->bio_which &&
bio_add_page(bio, p, fs->blocksize, offset) > 0) {
/* Added the current bio - too easy */
wc->bio_virt = virt + 1;
wc->bio_head = head;
wc->bio_which = which;
- bio->bi_bdev = fs->devs[dev].sb->s_bdev;
+ bio->bi_bdev = fs->devs[dev].bdev;
bio->bi_sector = sect;
bio_add_page(bio, p, fs->blocksize, offset);
* dev= - add another device
* new= - the device is being added.
*
- * We have a separate 'struct super_block' for each device, but they
- * share the s_fs_info, which lists them all.
*/
struct options {
const char *dev;
int is_new;
int is_name;
- struct super_block *sb;
+ struct block_device *bdev;
struct lafs_dev *devblock;
struct lafs_state *stateblock;
int devchoice, statechoice;
}
static int
-lafs_fill_super(struct super_block *sb, void *opv, int silent)
+lafs_load_super(struct block_device *bdev, void *opv, int silent)
{
/* Find the devblock and the stateblock for this device
BUG_ON(dv->devblock);
BUG_ON(dv->stateblock);
- n = queue_logical_block_size(sb->s_bdev->bd_disk->queue);
+ n = queue_logical_block_size(bdev->bd_disk->queue);
if (n < LAFS_DEVBLK_SIZE)
n = LAFS_DEVBLK_SIZE;
BUG_ON(n > PAGE_SIZE);
sect = 0;
for (i = 0; i < 4; i++) {
/* try to read block at 'sect' */
- int ok = lafs_sync_page_io(sb->s_bdev, sect, 0, n, pg, READ);
+ int ok = lafs_sync_page_io(bdev, sect, 0, n, pg, READ);
if (ok && valid_devblock(page_address(pg), sect)) {
if (!have_dev) {
if (i != 1)
sect += (n>>9);
else {
- sect = sb->s_bdev->bd_inode->i_size & ~(sector_t)(n-1);
+ sect = bdev->bd_inode->i_size & ~(sector_t)(n-1);
sect >>= 9;
sect -= (n>>9)*2;
}
*/
n = le32_to_cpu(1<<dv->devblock->statebits);
if ((n & (n-1)) ||
- n < queue_logical_block_size(sb->s_bdev->bd_disk->queue) ||
+ n < queue_logical_block_size(bdev->bd_disk->queue) ||
n > 128*1024) {
printk(KERN_ERR "LaFS: statesize of %u not acceptable.\n", n);
err = -EINVAL;
for (i = 0; i < 4; i++) {
int ok;
sect = le64_to_cpu(dv->devblock->stateaddr[i])>>9;
- ok = lafs_sync_page_io(sb->s_bdev, sect, 0, n, pg, READ);
+ ok = lafs_sync_page_io(bdev, sect, 0, n, pg, READ);
if (ok && valid_stateblock(page_address(pg), dv->devblock)) {
if (!have_state) {
have_state = 1;
}
}
- /* We allow 29 bits for nanosecs, so they must be even. */
- sb->s_time_gran = 2;
-
if (have_state) {
err = 0;
dv->devchoice = dev_addr;
return newstate;
}
-static struct fs *
-lafs_load(struct options *op, int newest)
+static int
+lafs_load(struct fs *fs, struct options *op, int newest)
{
/* We seem to have a full set of devices for the filesystem.
* Time to create our fs_info structure and fill it out.
* This only includes information from the dev and state blocks.
* Finding the root-inode comes a bit later.
*/
- struct fs *fs;
struct lafs_state *st;
int i;
int err;
- fs = kzalloc(sizeof(*fs), GFP_KERNEL);
- if (!fs)
- return fs;
st = fs->state = op->devlist[newest].stateblock;
op->devlist[newest].stateblock = NULL;
#ifdef DUMP
fs->prime_sb->s_export_op = &lafs_export_ops;
fs->prime_sb->s_root = NULL;
+ /* We allow 29 bits for nanosecs, so they must be even. */
+ fs->prime_sb->s_time_gran = 2;
+
for (i = 0; i < fs->devices; i++) {
struct fs_dev *dv = &fs->devs[i];
struct devent *de = &op->devlist[i];
int j;
- dv->sb = de->sb;
- de->sb = NULL;
- dv->sb->s_fs_info = fs;
- dv->sb->s_blocksize = 1 << op->blockbits;
- dv->sb->s_blocksize_bits = op->blockbits;
- up_write(&dv->sb->s_umount);
+ dv->bdev = de->bdev;
+ de->bdev = NULL;
dv->devblk = de->devblock;
de->devblock = NULL;
dv->devaddr[j] = le64_to_cpu(dv->devblk->devaddr[j]);
for (j = 0; j < 4; j++)
dv->stateaddr[j] = le64_to_cpu(dv->devblk->stateaddr[j]);
-
- dv->sb->s_op = &lafs_sops;
- dv->sb->s_export_op = &lafs_export_ops;
- dv->sb->s_root = NULL;
}
- return fs;
+ return 0;
abort:
kfree(fs->scan.free_usages);
kfree(fs->devs);
kfree(fs->ss);
kfree(fs);
- return NULL;
+ return -ENOMEM;
}
static int show_orphans(struct fs *fs)
for (i = 0; i < fs->devices; i++) {
struct fs_dev *dv = &fs->devs[i];
kfree(dv->devblk);
- if (dv->sb)
- kill_block_super(dv->sb);
- /* FIXME should I kfree dv->sb or something here?
- * maybe used put_super(dv->sb)*/
- dv->sb = NULL;
+ close_bdev_exclusive(dv->bdev, FMODE_READ|FMODE_WRITE);
}
/* Final checkpoint will have cleared out the leafs lists,
}
static int
-get_lafs_sb_dev(struct options *op, int flags)
+lafs_get_devs(struct fs *fs, struct options *op, int flags)
{
int err;
int i;
- struct vfsmount mnt;
for (i = 0; i < op->devcnt; i++) {
+ struct block_device *bdev;
op->curr_dev = i;
- err = get_sb_bdev(&lafs_fs_type, flags,
- op->devlist[i].dev, op,
- lafs_fill_super, &mnt);
- if (err < 0)
+
+ bdev = open_bdev_exclusive(op->devlist[i].dev,
+ FMODE_READ|FMODE_WRITE, fs);
+ err = PTR_ERR(bdev);
+ if (IS_ERR(bdev))
goto out;
- dput(mnt.mnt_root);
- if (i &&
- op->devlist[i-1].sb->s_fs_info !=
- mnt.mnt_sb->s_fs_info) {
- deactivate_super(mnt.mnt_sb);
- err = -EBUSY;
+ err = lafs_load_super(bdev, op, flags & MS_SILENT ? 1 : 0);
+ if (err < 0)
goto out;
- }
- op->devlist[i].sb = mnt.mnt_sb;
+ op->devlist[i].bdev = bdev;
}
return 0;
* If the later, we return the primary.
* If the former, we init the filesystem copying static data
* to all supers.
- * First we 'open_bdev_excl' each device, exclusive to lafs
+ * First we 'open_bdev_exclusive' each device, exclusive to lafs
* Then we 'sget' a superblock that knows any/all the devices.
* This may be pre-existing, or may be new
* If new, it will be created knowing all devices.
struct options op;
int err;
int newest;
- struct fs *fs = NULL;
+ struct fs *fs = kzalloc(sizeof(*fs), GFP_KERNEL);
char *cdata = data;
if (cdata == NULL)
cdata = "";
+ err = -ENOMEM;
+ if (!fs)
+ goto out;
err = parse_opts(&op, dev_name, cdata);
if (err)
goto out;
- /* We now have as list of device names. We call get_sb_bdev
- * on each to collect some superblocks. These must all have
- * the same s_fs_info. If non-null, we simply return the
- * primary super
+ * We now have a list of device names.  We call open_bdev_exclusive
+ * on each to collect the block devices.
*/
- err = get_lafs_sb_dev(&op, flags);
+ err = lafs_get_devs(fs, &op, flags);
if (err)
goto out;
- fs = op.devlist[0].sb->s_fs_info;
- if (fs) {
- /* Maybe check read-only status FIXME */
- op.devlist[0].sb = NULL;
- /* FIXME do I protect any others? */
- goto out;
- }
/* Each device has a valid dev and state block. Hopefully they
* are all for the same filesystem. If they don't have the
/* So they seem to be the same - better create our
* s_fs_info structure and fill it in
*/
- err = -ENOMEM;
- fs = lafs_load(&op, newest);
- if (!fs)
+ err = lafs_load(fs, &op, newest);
+ if (err)
goto out;
/* Well, all the devices check out. Now we need to find the
for (i = 0; i < op.devcnt; i++) {
kfree(op.devlist[i].devblock);
kfree(op.devlist[i].stateblock);
- if (op.devlist[i].sb) {
- up_write(&op.devlist[i].sb->s_umount);
- kill_block_super(op.devlist[i].sb);
- }
+ if (op.devlist[i].bdev)
+ close_bdev_exclusive(op.devlist[i].bdev,
+ FMODE_READ|FMODE_WRITE);
}
kfree(op.devlist);
}