From: NeilBrown Date: Fri, 4 Mar 2011 01:47:26 +0000 (+1100) Subject: roll: handle cluster headers larger than one page. X-Git-Url: http://git.neil.brown.name/?a=commitdiff_plain;h=64ad960d5ccf4bb38d310c74f463e3e356921311;p=LaFS.git roll: handle cluster headers larger than one page. If roll-forward finds cluster headers larger than one page, it now tries to allocate that much space and if the allocation succeeds the roll-forward will now succeed, rather than always failing. Signed-off-by: NeilBrown --- diff --git a/io.c b/io.c index d4380f7..d59238b 100644 --- a/io.c +++ b/io.c @@ -91,6 +91,28 @@ lafs_load_page(struct fs *fs, struct page *p, u64 vaddr, int blocks) p, 0) ? 0 : -EIO; } +int +lafs_load_pages(struct fs *fs, struct page *p, u64 vaddr, int blocks) +{ + /* load 1 or more pages which are consecutive in memory + * from 'p' + * FIXME make this async - then wait. + */ + int blocks_per_page = (PAGE_SIZE >> fs->blocksize_bits); + int rv = 0; + + while(blocks && rv == 0) { + int b = blocks; + if (b > blocks_per_page) + b = blocks_per_page; + rv = lafs_load_page(fs, p, vaddr, b); + blocks -= b; + vaddr += blocks_per_page; + p++; + } + return rv; +} + static void bi_async_complete(struct bio *bio, int error) { diff --git a/lafs.h b/lafs.h index 7af975f..f7f0b7c 100644 --- a/lafs.h +++ b/lafs.h @@ -97,6 +97,7 @@ typedef u64 vaddr_t; int lafs_sync_page_io(struct block_device *bdev, sector_t sector, int offset, int size, struct page *page, int rw); int lafs_load_page(struct fs *fs, struct page *p, u64 vaddr, int blocks); +int lafs_load_pages(struct fs *fs, struct page *p, u64 vaddr, int blocks); int lafs_load_page_async(struct fs *fs, struct page *p, u64 vaddr, int blocks, struct async_complete *ac); int __must_check lafs_load_block(struct block *b, struct bio *bio); diff --git a/roll.c b/roll.c index 2c561db..e558552 100644 --- a/roll.c +++ b/roll.c @@ -518,7 +518,7 @@ roll_one(struct fs *fs, u64 *addrp, struct page *p, struct page *pg, int header_blocks; /* we 
"know" buf is big enough */ - err = lafs_load_page(fs, p, addr, max/blocksize); + err = lafs_load_pages(fs, p, addr, max/blocksize); if (err) return err; @@ -628,6 +628,7 @@ static int roll_forward(struct fs *fs) int dev; u32 seg; u32 offset; + int order = 0; fs->phase = 1; fs->qphase = 0; @@ -635,7 +636,7 @@ static int roll_forward(struct fs *fs) clear_bit(DelayYouth, &fs->fsstate); first = fs->checkpointcluster; - p = alloc_page(GFP_KERNEL); + p = alloc_pages(GFP_KERNEL, order); if (!p) return -ENOMEM; @@ -643,22 +644,27 @@ static int roll_forward(struct fs *fs) max = ((max + blocksize - 1) / blocksize) * blocksize; - if (!err && max > PAGE_SIZE) - err = -EFBIG; + if (!err && max > PAGE_SIZE) { + __free_pages(p, order); + order = get_order(max); + p = alloc_pages(GFP_KERNEL, order); + if (!p) + err = -EFBIG; + } if (err) { - put_page(p); + __free_pages(p, order); return err; } pg = alloc_page(GFP_KERNEL); if (!pg) { - put_page(p); + __free_pages(p, order); return -ENOMEM; } err = lafs_cluster_init(fs, 0, next, last, seq); if (err) { - put_page(p); put_page(pg); + __free_pages(p, order); put_page(pg); return err; } lafs_cluster_init(fs, 1, 0, 0, 0); @@ -692,7 +698,7 @@ static int roll_forward(struct fs *fs) lafs_update_youth(fs, dev, seg); } } - put_page(p); + __free_pages(p, order); put_page(pg); lafs_add_active(fs, next);