From 957390c78aea4845d8dafcbd53121e7450e7eb08 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 25 Jun 2010 21:01:58 +1000 Subject: [PATCH] Complete TODO list Also remove some white-space badness. Signed-off-by: NeilBrown --- README | 105 +++++++++++++++++++++++++++++++++++++++++++++++++-- block.c | 7 +--- checkpoint.c | 13 ++----- clean.c | 11 +++--- cluster.c | 51 ++++++++++++++----------- dir-avl.c | 9 ++--- dir.c | 64 +++++++++++++++---------------- file.c | 7 ++-- index.c | 50 ++++++++++++------------ inode.c | 58 ++++++++++++++-------------- io.c | 4 +- lafs.h | 5 +++ modify.c | 47 +++++++++++------------ orphan.c | 25 ++++++------ roll.c | 16 ++++---- segments.c | 40 ++++++++++---------- snapshot.c | 6 +-- state.h | 1 - summary.c | 1 - super.c | 52 ++++++++++++------------- 20 files changed, 329 insertions(+), 243 deletions(-) diff --git a/README b/README index 50ab3d0..5b06a17 100644 --- a/README +++ b/README @@ -4785,12 +4785,12 @@ Maybe I want two separate 'free_blocks' counters. DONE 0/ start TODO list DONE 1/ document new bugs DONE 2/ Tidy up all recent changes as individual commits. - 3/ clean up the various 'scratch' patches discarding any tracing that +DONE 3/ clean up the various 'scratch' patches discarding any tracing that I don't think I need, and making the rest 'dprintk' etc. - 4/ check in this README file - 5/ Write rest of the TODO list +DONE 4/ check in this README file +DONE 5/ Write rest of the TODO list - 6/ soft locking in unlink call. + 6/ soft lockup in unlink call. EIP is at lafs_hash_name+0xa5/0x10f [lafs] [] hash_piece+0x18/0x65 [lafs] [] lafs_dir_del_ent+0x4e/0x404 [lafs] @@ -4856,3 +4856,100 @@ Why have I no credits? 14/ Review writepage and flush and make sure we flush often enough but not too often. + +15/ The inode map file lost some credits. I think it losts a PinPending because + it isn't locked properly. Don't clear PinPending if someone else might + have set it. + +15a/ Find all FIXMEs and add them here. 
+

## Items from 6 jul 2007.

16/ Update locking.doc

17/ cluster_flush calls lafs_cluster_allocate calls lafs_add_block_address
    calls lafs_iolock_written. How do we know that won't block on cluster_flush?

18/ See if per-fs shrinker is available yet and consider it for index blocks.

19/ Review WritePhase and make sure it is used properly.

20/ Review places where we update blocks and be sure they are not in writeout
    or in a different phase.

21/ Review and document all lru uses (locking.doc) and make sure they are
    all locked properly.

22/ Check possible failures:
     - thread allocation
     - memory allocation
     - reading critical metadata
    ...

23/ Rebase on 2.6.latest

24/ load/dirty block0 before dirtying any other block in depth=0 file,
    else we might lose block0

25/ use kmem_cache for
      datablock
      indexblock
      skippoint (mempool?)
      others?

26/ Review seg addressing code for 2-D geometries.

27/ Allow ranges of holes in pending_addr so partial truncate can be more efficient.

28/ Make sure youth blocks are always referenced properly.

29/ Make sure new segments are referenced properly. I think there might be
    some double referencing.

30/ Decide when to use VerifyNULL or VerifyNext2

31/ Implement non-logged files

32/ Store access time in non-logged file

33/ Support quota : group / user / tree

34/ handle subordinate filesystems:
     ss[]->rootdir needs to be array or list
     lafs_iget_fs needs to understand this

35/ review snapshots:
     - peer lists and cleaning
     - how to create
     - failure modes
     - how to destroy

36/ review roll-forward
     make sure files with nlink == 0 are handled well
     sanity check before trusting clusters

37/ Configure index block hash_table at run time based on mem size??

38/ striped layout
     review everything needed for safe RAID5

39/ How to handle all different IO errors

40/ Guard against data corruption at every level. 
+ +41/ Add checksums on index blocks and dir blocks and Inodes and ??? + +42/ Store duplicates of some blocks. At least index and inode. + +43/ Handle writepage on mem-mapped page, adding new credits or unmapping. + Make sure ->page_mkwrite sets up credits properly + +44/ Examine created filesystem and make sure everything looks good. + +45/ mkfs.lafs + +46/ fsck.lafs + +47/ Write good documentation + +48/ Review all code, improve all comments, remove all bugs. diff --git a/block.c b/block.c index 19509cd..7ea8f3a 100644 --- a/block.c +++ b/block.c @@ -112,7 +112,6 @@ lafs_get_block(struct inode *ino, unsigned long index, struct page *p, } if (!PagePrivate(p)) { - unsigned long ind = p->index << bits; int i; /* New page, need to set up attribute blocks */ @@ -505,11 +504,10 @@ lafs_dirty_dblock(struct datablock *b) if (!test_and_set_bit(B_UnincCredit, &b->b.flags)) if (!test_and_clear_bit(B_ICredit, &b->b.flags)) if (!test_and_clear_bit(B_NICredit, &b->b.flags)) - LAFS_BUG(1, &b->b); // ICredit should be set before we dirty - // a block. + LAFS_BUG(1, &b->b); /* ICredit should be set before we dirty + * a block. */ // FIXME Do I need to do something with PinPending?? - } void @@ -608,7 +606,6 @@ lafs_erase_dblock(struct datablock *b) void lafs_dirty_iblock(struct indexblock *b) { - /* FIXME is this all I have to do here? * Do I need to put it on a list, or lock or something? 
diff --git a/checkpoint.c b/checkpoint.c index 9dff93c..15a50bb 100644 --- a/checkpoint.c +++ b/checkpoint.c @@ -223,13 +223,11 @@ int lafs_print_tree(struct block *b, int depth) } j++; } - { list_for_each_entry(b2, &freelist.lru, lru) if (b2 == b) { printk(" on free "); break; } - } for (i = 0 ; i < 2; i++) { int j, k; j = 0; @@ -282,7 +280,7 @@ int lafs_print_tree(struct block *b, int depth) LAFS_BUG(LAFSI(dblk(b)->my_inode)->iblock->b.parent != b->parent, b); credits += lafs_print_tree(&LAFSI(dblk(b)->my_inode)->iblock->b, - depth+1); + depth+1); } return credits; } @@ -328,7 +326,7 @@ struct block *lafs_get_flushable(struct fs *fs, int phase) { struct block *b = NULL; - retry: +retry: if (b) { lafs_iounlock_block(b); putref(b, MKREF(leaf)); @@ -341,7 +339,6 @@ struct block *lafs_get_flushable(struct fs *fs, int phase) else b = NULL; - if (b) /* the list counted a reference. Now we hold it */ list_del_init(&b->lru); @@ -373,7 +370,7 @@ static void do_checkpoint(void *data) dprintk("Start Checkpoint\n"); if (lafs_trace) lafs_dump_tree(); - again: +again: while ((b = lafs_get_flushable(fs, oldphase)) != NULL) { int unlock = 1; dprintk("Checkpoint Block %s\n", strblk(b)); @@ -420,14 +417,13 @@ static void do_checkpoint(void *data) if (test_bit(B_Pinned, &LAFSI(fs->ss[0].root)->iblock->b.flags) && !!test_bit(B_Phase1, &LAFSI(fs->ss[0].root)->iblock->b.flags) != fs->phase) { - if (loops == 20) { printk("Cannot escape PHASE=%d\n", oldphase); lafs_print_tree(&LAFSI(fs->ss[0].root)->dblock->b, 0); lafs_print_tree(&LAFSI(fs->ss[0].root)->iblock->b, 0); lafs_trace = 1; } - + lafs_cluster_flush(fs, 0); lafs_cluster_wait_all(fs); lafs_clusters_done(fs); @@ -540,7 +536,6 @@ void lafs_checkpoint_unlock(struct fs *fs) spin_unlock(&fs->lock); if (l == 0) wake_up(&fs->phase_wait); - } void lafs_checkpoint_unlock_wait(struct fs *fs) diff --git a/clean.c b/clean.c index e5ed195..80d9d3c 100644 --- a/clean.c +++ b/clean.c @@ -77,7 +77,6 @@ static int cleaner(void *data) 
set_bit(CleanerNeeded, &fs->fsstate); while (!kthread_should_stop()) { - /* We need to wait INTERRUPTIBLE so that * we don't add to the load-average. * That means we need to be sure no signals are @@ -332,7 +331,7 @@ static int try_clean(struct fs *fs, struct toclean *tc) * this might be fixed, but need to be certain * of all possibilities. */ tc->gh = (struct group_head *)(((char *)tc->gh) + - le16_to_cpu(tc->gh->group_size_words)*4); + le16_to_cpu(tc->gh->group_size_words)*4); tc->desc = tc->gh->u.desc; continue; } @@ -363,8 +362,8 @@ static int try_clean(struct fs *fs, struct toclean *tc) fsnum = le32_to_cpu(tc->gh->fsnum); dprintk("Cleaner looking at %d/%d %d+%d (%d)\n", - (int)fsnum, (int)inum, (int)bnum, (int)bcnt, - (int)le16_to_cpu(tc->desc->block_bytes)); + (int)fsnum, (int)inum, (int)bnum, (int)bcnt, + (int)le16_to_cpu(tc->desc->block_bytes)); if (fsnum == 0 && inum == 0 && bnum == 0) goto skip; @@ -583,7 +582,7 @@ static unsigned long do_clean(struct fs *fs) if ((F < C || C * F >= T * (F - C)) && !test_bit(CleanerBlocks, &fs->fsstate)) { dprintk("CLEANER: enough cleaning with %d segments\n", - i); + i); break; } @@ -594,7 +593,7 @@ static unsigned long do_clean(struct fs *fs) tc->ss = lafs_get_cleanable(fs, &tc->dev, &tc->seg); if (!tc->ss) { dprintk("CLEANER: Nothing found to clean at %d :-(\n", - i); + i); break; } printk("CLEANER: clean %d/%d\n", tc->dev, tc->seg); diff --git a/cluster.c b/cluster.c index f1685ca..7128745 100644 --- a/cluster.c +++ b/cluster.c @@ -136,8 +136,8 @@ static struct block *skip_find(struct skippoint *head, } if (head->b == NULL) { if (list_empty(list) || - cmp_blk(list_entry(list->next, struct block, lru), - target) > 0) + cmp_blk(list_entry(list->next, struct block, lru), + target) > 0) /* This goes at the start */ return NULL; b = list_entry(list->next, struct block, lru); @@ -201,8 +201,8 @@ static int cluster_insert(struct skippoint *head, cmpafter = -3; else cmpafter = cmp_blk(target, list_entry(target->lru.next, - 
struct block, - lru)); + struct block, + lru)); if (cmpbefore == -1) { /* adjacent with previous. Possibly move skippoint */ if (pos.next[0]->b == b) { @@ -476,7 +476,7 @@ static int flush_data_to_inode(struct block *b) LAFS_BUG(!test_bit(B_Valid, &lai->iblock->b.flags), &lai->iblock->b); if (!test_and_set_bit(B_Realloc, &lai->iblock->b.flags)) - credits--; + credits--; if (!test_and_set_bit(B_UnincCredit, &lai->iblock->b.flags)) if (!test_and_clear_bit(B_ICredit, &lai->iblock->b.flags)) @@ -521,7 +521,7 @@ static int flush_data_to_inode(struct block *b) lafs_refile(b, 0); return 1; - give_up: +give_up: lafs_iounlock_block(&lai->iblock->b); return 0; } @@ -616,7 +616,7 @@ unsigned long long lafs_cluster_allocate(struct block *b, int cnum) LAFS_BUG(b->parent != b2->parent, b); set_bit(B_PinPending, &b2->flags); set_phase(b2, test_bit(B_Phase1, - &b->flags)); + &b->flags)); lafs_refile(b2, 0); } @@ -795,7 +795,7 @@ static inline void cluster_addhead(struct wc *wc, struct inode *ino, struct group_head **headstart) { struct group_head *gh = (struct group_head *)((char *)wc->chead + - wc->chead_size); + wc->chead_size); u16 tnf; dprintk("CLUSTER addhead %d\n", wc->chead_size); *headstart = gh; @@ -827,7 +827,7 @@ static inline void cluster_addmini(struct wc *wc, u32 addr, int offset, * (size-size2) is at 'data' and the rest is at 'data2' */ struct miniblock *mb = ((struct miniblock *) - ((char *)wc->chead + wc->chead_size)); + ((char *)wc->chead + wc->chead_size)); dprintk("CLUSTER addmini %d %d\n", wc->chead_size, size); @@ -846,7 +846,7 @@ static inline void cluster_adddesc(struct wc *wc, struct block *blk, struct descriptor **desc_start) { struct descriptor *dh = (struct descriptor *)((char *)wc->chead + - wc->chead_size); + wc->chead_size); *desc_start = dh; dprintk("CLUSTER add_desc %d\n", wc->chead_size); dh->block_num = cpu_to_le32(blk->fileaddr); @@ -1064,6 +1064,7 @@ void lafs_clusters_done(struct fs *fs) for (i = 0 ; i < WC_NUM; i++) cluster_done(fs, 
&fs->wc[i]); } + void lafs_done_work(struct work_struct *ws) { struct fs *fs = container_of(ws, struct fs, done_work); @@ -1193,21 +1194,21 @@ static void cluster_flush(struct fs *fs, int cnum) */ which = (wc->pending_next+3)%4; dprintk("AA which=%d vt=%d pc=%d\n", which, wc->pending_vfy_type[which], - atomic_read(&wc->pending_cnt[which])); + atomic_read(&wc->pending_cnt[which])); if (wc->pending_vfy_type[which] == VerifyNext) wait_event(wc->pending_wait, atomic_read(&wc->pending_cnt[which]) == 1); which = (which+3) % 4; dprintk("AB which=%d vt=%d pc=%d\n", which, wc->pending_vfy_type[which], - atomic_read(&wc->pending_cnt[which])); + atomic_read(&wc->pending_cnt[which])); if (wc->pending_vfy_type[which] == VerifyNext2) wait_event(wc->pending_wait, atomic_read(&wc->pending_cnt[which]) == 1); lafs_clusters_done(fs); dprintk("cluster_flush pre-bug pending_next=%d cnt=%d\n", - wc->pending_next, atomic_read(&wc->pending_cnt - [wc->pending_next])); + wc->pending_next, atomic_read(&wc->pending_cnt + [wc->pending_next])); BUG_ON(atomic_read(&wc->pending_cnt[wc->pending_next]) != 0); BUG_ON(!list_empty(&wc->pending_blocks[wc->pending_next])); @@ -1278,19 +1279,19 @@ static void cluster_flush(struct fs *fs, int cnum) which = wc->pending_next; wake = 0; dprintk("A which=%d vt=%d pc=%d\n", which, wc->pending_vfy_type[which], - atomic_read(&wc->pending_cnt[which])); + atomic_read(&wc->pending_cnt[which])); if (wc->pending_vfy_type[which] == VerifyNull) if (atomic_dec_and_test(&wc->pending_cnt[which])) wake = 1; which = (which+3) % 4; dprintk("B which=%d vt=%d pc=%d\n", which, wc->pending_vfy_type[which], - atomic_read(&wc->pending_cnt[which])); + atomic_read(&wc->pending_cnt[which])); if (wc->pending_vfy_type[which] == VerifyNext) if (atomic_dec_and_test(&wc->pending_cnt[which])) wake = 1; which = (which+3) % 4; dprintk("C which=%d vt=%d pc=%d\n", which, wc->pending_vfy_type[which], - atomic_read(&wc->pending_cnt[which])); + atomic_read(&wc->pending_cnt[which])); if 
(wc->pending_vfy_type[which] == VerifyNext2) if (atomic_dec_and_test(&wc->pending_cnt[which])) wake = 1; @@ -1312,9 +1313,8 @@ static void cluster_flush(struct fs *fs, int cnum) wait_event(wc->pending_wait, atomic_read(&wc->pending_cnt[wc->pending_next]) == 0); dprintk("cluster_flush end pending_next=%d cnt=%d\n", - wc->pending_next, atomic_read(&wc->pending_cnt - [wc->pending_next])); - + wc->pending_next, atomic_read(&wc->pending_cnt + [wc->pending_next])); } void lafs_cluster_flush(struct fs *fs, int cnum) @@ -1355,7 +1355,7 @@ static void cluster_end_io(struct bio *bio, int err, int done = 0; dprintk("end_io err=%d which=%d header=%d\n", - err, which, header); + err, which, header); if (atomic_dec_and_test(&wc->pending_cnt[which])) done++; @@ -1385,18 +1385,22 @@ static void cluster_end_io(struct bio *bio, int err, } else if (wake) wake_up(&wc->pending_wait); } + static void cluster_endio_data_0(struct bio *bio, int err) { cluster_end_io(bio, err, 0, 0); } + static void cluster_endio_data_1(struct bio *bio, int err) { cluster_end_io(bio, err, 1, 0); } + static void cluster_endio_data_2(struct bio *bio, int err) { cluster_end_io(bio, err, 2, 0); } + static void cluster_endio_data_3(struct bio *bio, int err) { cluster_end_io(bio, err, 3, 0); @@ -1404,16 +1408,19 @@ static void cluster_endio_data_3(struct bio *bio, int err) static void cluster_endio_header_0(struct bio *bio, int err) { - cluster_end_io(bio, err, 0, 1); + cluster_end_io(bio, err, 0, 1); } + static void cluster_endio_header_1(struct bio *bio, int err) { cluster_end_io(bio, err, 1, 1); } + static void cluster_endio_header_2(struct bio *bio, int err) { cluster_end_io(bio, err, 2, 1); } + static void cluster_endio_header_3(struct bio *bio, int err) { cluster_end_io(bio, err, 3, 1); diff --git a/dir-avl.c b/dir-avl.c index 95c409d..4495227 100644 --- a/dir-avl.c +++ b/dir-avl.c @@ -375,7 +375,7 @@ int lafs_dir_add_ent(char *block, int psz, const char *name, int len, dir_check_balance(block, psz); return 
1; - replace_deleted: +replace_deleted: /* If there is room at the end, add an entry, * else fail. */ @@ -974,10 +974,6 @@ static void dir_print_block(char *block, int psz, int sort) 255 - dh->lastpiece); printk(" Free Pieces: %d (%d deleted)\n", dh->freepieces, dh->freepieces - (255 - dh->lastpiece)); -/* - if (dh->reserved) - printk( "!!!!!! Reserved is non-zero : %d\n", dh->reserved); -*/ if (sort == 1) dir_print_piece(block, psz, dh->root, 0, 1); else if (sort == 2) { @@ -1009,6 +1005,7 @@ static void dir_print_block(char *block, int psz, int sort) } } } + void lafs_dir_print(char *buf, int psz) { dir_print_block(buf, psz, 1); @@ -1030,7 +1027,7 @@ int main(int argc, char **argv) while (arg < argc-1) { if (argv[arg][0] != '-') switch (lafs_dir_add_ent(block, psz, argv[arg], 0, - 42+arg, 4, 0)) { + 42+arg, 4, 0)) { case 0: printf("%s didn't fit!\n", argv[arg]); break; diff --git a/dir.c b/dir.c index 39f4d91..4728b2f 100644 --- a/dir.c +++ b/dir.c @@ -330,7 +330,7 @@ dir_create_commit(struct dirop_handle *doh, putdref(doh->new, MKREF(dir_new)); } else lafs_dir_add_ent(buf, bits, name, nlen, target, - type, seed, doh->hash, doh->chainoffset); + type, seed, doh->hash, doh->chainoffset); lafs_dirty_dblock(doh->dirent_block); if (dir->i_size <= blocksize) { /* Make dir fit in inode if possible */ @@ -392,7 +392,7 @@ dir_delete_prepare(struct fs *fs, struct inode *dir, printk("Weird: %s\n", strblk(&dirblk->b)); lafs_trace = 0; } - + if (IS_ERR(dirblk)) return PTR_ERR(dirblk); @@ -496,6 +496,7 @@ dir_update_prepare(struct fs *fs, struct inode *dir, return PTR_ERR(dirblk); return 0; } + static void dir_update_commit(struct fs *fs, u32 target, int type, struct dirop_handle *doh) @@ -518,6 +519,7 @@ dir_update_pin(struct dirop_handle *doh) { return lafs_pin_dblock(doh->dirent_block, ReleaseSpace); } + static void dir_update_abort(struct dirop_handle *doh) { @@ -605,7 +607,7 @@ lafs_create(struct inode *dir, struct dentry *de, int mode, if (err) goto abort; - retry: 
+retry: dprintk("lc: dirblk = %p\n", doh.dirent_block); lafs_checkpoint_lock(fs); @@ -633,9 +635,9 @@ lafs_create(struct inode *dir, struct dentry *de, int mode, putdref(db, MKREF(inode_new)); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); - abort: +abort: lafs_cluster_update_abort(&uh); dir_create_abort(&doh); iput(ino); @@ -667,7 +669,7 @@ lafs_link(struct dentry *from, struct inode *dir, struct dentry *to) err = PTR_ERR(inodb); if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = dir_create_pin(&doh); @@ -694,10 +696,10 @@ lafs_link(struct dentry *from, struct inode *dir, struct dentry *to) lafs_checkpoint_unlock(fs); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); clear_bit(B_PinPending, &inodb->b.flags); - abort: +abort: if (!IS_ERR(inodb)) putdref(inodb, MKREF(inode_update)); dir_create_abort(&doh); @@ -729,7 +731,7 @@ lafs_unlink(struct inode *dir, struct dentry *de) err = lafs_make_orphan(fs, inodb); if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = dir_delete_pin(&doh); @@ -751,10 +753,10 @@ lafs_unlink(struct inode *dir, struct dentry *de) clear_bit(B_PinPending, &inodb->b.flags); putdref(inodb, MKREF(inode_update)); return 0; - abort_unlock: +abort_unlock: clear_bit(B_PinPending, &inodb->b.flags); lafs_checkpoint_unlock(fs); - abort: +abort: if (!IS_ERR(inodb)) putdref(inodb, MKREF(inode_update)); lafs_cluster_update_abort(&uh); @@ -811,7 +813,7 @@ lafs_rmdir(struct inode *dir, struct dentry *de) err = lafs_make_orphan(fs, inodb); if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = dir_delete_pin(&doh); @@ -836,10 +838,10 @@ lafs_rmdir(struct inode *dir, struct dentry *de) lafs_inode_checkpin(dir); lafs_checkpoint_unlock(fs); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); clear_bit(B_PinPending, &inodb->b.flags); - abort: +abort: if (!IS_ERR(inodb)) putdref(inodb, MKREF(inode_update)); lafs_cluster_update_abort(&uh); @@ -880,7 +882,7 @@ 
lafs_symlink(struct inode *dir, struct dentry *de, err = dir_log_prepare(&uh, fs, &de->d_name) ?: err; if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = dir_create_pin(&doh); @@ -916,9 +918,9 @@ lafs_symlink(struct inode *dir, struct dentry *de, d_instantiate(de, ino); putdref(inodb, MKREF(inode_new)); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); - abort: +abort: putdref(inodb, MKREF(inode_new)); clear_bit(B_PinPending, &b->b.flags); putdref(b, MKREF(symlink)); @@ -951,7 +953,7 @@ lafs_mkdir(struct inode *dir, struct dentry *de, int mode) err = dir_log_prepare(&uh, fs, &de->d_name) ?: err; if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = dir_create_pin(&doh); @@ -979,12 +981,12 @@ lafs_mkdir(struct inode *dir, struct dentry *de, int mode) putdref(inodb, MKREF(inode_new)); lafs_checkpoint_unlock(fs); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); - abort: +abort: dir_create_abort(&doh); lafs_cluster_update_abort(&uh); - iput(ino); + iput(ino); clear_bit(B_PinPending, &inodb->b.flags); putdref(inodb, MKREF(inode_new)); return err; @@ -1028,7 +1030,7 @@ lafs_mknod(struct inode *dir, struct dentry *de, int mode, err = dir_log_prepare(&uh, fs, &de->d_name) ?: err; if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = dir_create_pin(&doh); @@ -1055,9 +1057,9 @@ lafs_mknod(struct inode *dir, struct dentry *de, int mode, clear_bit(B_PinPending, &inodb->b.flags); putdref(inodb, MKREF(inode_new)); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); - abort: +abort: dir_create_abort(&doh); lafs_cluster_update_abort(&uh); iput(ino); @@ -1100,7 +1102,7 @@ lafs_rename(struct inode *old_dir, struct dentry *old_dentry, } } dprintk("rename %s %s\n", old_dentry->d_name.name, - new_dentry->d_name.name); + new_dentry->d_name.name); /* old entry gets deleted, new entry gets created or updated. 
*/ err = dir_delete_prepare(fs, old_dir, @@ -1136,7 +1138,7 @@ lafs_rename(struct inode *old_dir, struct dentry *old_dentry, if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = dir_delete_pin(&old_doh); @@ -1200,12 +1202,12 @@ lafs_rename(struct inode *old_dir, struct dentry *old_dentry, lafs_checkpoint_unlock(fs); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); clear_bit(B_PinPending, &olddb->b.flags); if (new_inode) clear_bit(B_PinPending, &newdb->b.flags); - abort: +abort: dir_delete_abort(&old_doh); lafs_cluster_update_abort(&old_uh); lafs_cluster_update_abort(&new_uh); @@ -1218,7 +1220,6 @@ lafs_rename(struct inode *old_dir, struct dentry *old_dentry, } else dir_create_abort(&new_doh); return err; - } /*-------------------------------------------------------------------- @@ -1261,7 +1262,6 @@ void lafs_dir_handle_orphan(struct datablock *b) LAFS_BUG(!test_bit(B_Valid, &b->b.flags), &b->b); dprintk("HANDLE ORPHAN h=%x %s\n", (unsigned)hash, strblk(&b->b)); - lafs_checkpoint_lock(fs); /* First test: Does a chain of deleted entries extend beyond @@ -1393,9 +1393,9 @@ void lafs_dir_handle_orphan(struct datablock *b) lafs_orphan_release(fs, b); return; - abortb2: +abortb2: putdref(b2, MKREF(dir_orphan)); - abort: +abort: lafs_checkpoint_unlock(fs); } diff --git a/file.c b/file.c index 2c178be..83ded7a 100644 --- a/file.c +++ b/file.c @@ -118,7 +118,7 @@ lafs_prepare_write(struct file *file, struct page *page, err = lafs_wait_block(&fb->b); if (err) goto fail; - retry: +retry: lafs_checkpoint_lock(fs); for (i = first ; err == 0 && i <= last ; i++) { /* FIXME need PinPending or something to make sure @@ -135,7 +135,7 @@ lafs_prepare_write(struct file *file, struct page *page, goto fail; return 0; - fail: +fail: lafs_checkpoint_unlock(fs); while (--i >= first) putdref(&fb[i-first], MKREF(write)); @@ -272,7 +272,7 @@ lafs_writepage(struct page *page, struct writeback_control *wbc) putdref(b, MKREF(writepageflush)); } // FIXME need 
to make sure a cluster_flush happens some time!!! if (wbc->sync_mode != WB_SYNC_NONE) - lafs_cluster_flush(fs, 0); + lafs_cluster_flush(fs, 0); dprintk("WRITEPAGE flush\n"); return 0; } @@ -331,7 +331,6 @@ const struct file_operations lafs_file_file_operations = { /* .fsync = lafs_sync_file, */ .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, - }; struct inode_operations lafs_file_ino_operations = { diff --git a/index.c b/index.c index 38aca36..1244ce6 100644 --- a/index.c +++ b/index.c @@ -44,7 +44,6 @@ static struct hlist_head hash_table[1<b.flags), &ib->b); if (!hlist_unhashed(&ib->hash)) @@ -199,6 +198,7 @@ int lafs_ihash_init(void) return 0; } + void lafs_ihash_free(void) { unregister_shrinker(&hash_shrink); @@ -359,7 +359,7 @@ block_adopt(struct block *blk, struct indexblock *parent) LAFS_BUG(!test_bit(B_Index, &parent->b.flags), &parent->b); LAFS_BUG(test_bit(B_InoIdx, &parent->b.flags), &parent->b); LAFS_BUG(parent->b.fileaddr != parent->b.parent->b.fileaddr, &parent->b); - + clear_bit(B_EmptyIndex, &parent->b.flags); } LAFS_BUG(parent == iblk(blk), blk); @@ -429,10 +429,10 @@ static void pin_all_children(struct indexblock *ib) struct fs *fs = fs_from_inode(ib->b.inode); int ph = fs->phase; - recurse: +recurse: p = ib; ib = list_entry(&p->children, struct indexblock, b.siblings); - loop: +loop: list_for_each_entry_continue(ib, &p->children, b.siblings) { if (test_bit(B_Index, &ib->b.flags)) { /* recurse down */ @@ -449,7 +449,6 @@ static void pin_all_children(struct indexblock *ib) p = ib->b.parent; goto loop; } - } void lafs_phase_flip(struct fs *fs, struct indexblock *ib) @@ -613,7 +612,7 @@ void lafs_phase_flip(struct fs *fs, struct indexblock *ib) b2->chain = NULL; clear_bit(B_Uninc, &b2->flags); while (lafs_add_block_address(fs, b2) == 0) - /* FIXME do I need more like I do in + /* FIXME do I need more like I do in * lafs_allocated_block */ ; putref(b2, MKREF(uninc)); @@ -688,7 +687,7 @@ int lafs_is_leaf(struct block 
*b, int ph) if (test_bit(B_Index, &b->flags)) { if (ph >= 0) return atomic_read(&iblk(b)->pincnt[ph]) == 0; - return (atomic_read(&iblk(b)->pincnt[0]) + + return (atomic_read(&iblk(b)->pincnt[0]) + atomic_read(&iblk(b)->pincnt[1])) == 0; } @@ -747,7 +746,7 @@ void lafs_refile(struct block *b, int dec) ->iblock->b.flags))) { int pp = !!test_bit(B_Phase1, &LAFSI(dblk(cb)->my_inode) - ->iblock->b.flags); + ->iblock->b.flags); c[pp]++; } } @@ -926,7 +925,7 @@ void lafs_refile(struct block *b, int dec) spin_unlock(&b->inode->i_data.private_lock); if (test_bit(B_Index, &b->flags)) list_add(&b->siblings, - &LAFSI(b->inode)->free_index); + &LAFSI(b->inode)->free_index); if (test_and_clear_bit(B_Prealloc, &b->flags) && b->physaddr == 0) @@ -999,7 +998,7 @@ void lafs_refile(struct block *b, int dec) spin_lock(&b->inode->i_data .private_lock); LAFS_BUG(LAFSI(b->inode)->iblock - != iblk(b), b); + != iblk(b), b); LAFS_BUG(next, next); next = &LAFSI(b->inode)->dblock->b; del_ref(next, MKREF(iblock), @@ -1017,7 +1016,7 @@ void lafs_refile(struct block *b, int dec) (in = dblk(b)->my_inode) != NULL && (!PagePrivate(dblk(b)->page) || test_bit(I_Destroyed, &LAFSI(in)->iflags))) { - dblk(b)->my_inode = NULL; + dblk(b)->my_inode = NULL; LAFSI(in)->dblock = NULL; spin_unlock(&lafs_hash_lock); if (test_bit(I_Destroyed, &LAFSI(in)->iflags)) @@ -1072,7 +1071,7 @@ lafs_make_iblock(struct inode *ino, int adopt, int async, REFARG) BUG_ON(lai->dblock == NULL); BUG_ON(atomic_read(&lai->dblock->b.refcnt) == 0); - retry: +retry: spin_lock(&lai->vfs_inode.i_data.private_lock); if (lai->iblock) ib = getiref_locked(lai->iblock, REF); @@ -1404,7 +1403,7 @@ lafs_leaf_find(struct inode *inode, u32 addr, int adopt, u32 *next, * that no IO will be required to get back to where * we were. 
*/ - restart: +restart: if (next) *next = 0xFFFFFFFF; @@ -1418,7 +1417,7 @@ lafs_leaf_find(struct inode *inode, u32 addr, int adopt, u32 *next, if (!async) lafs_iolock_block(&ib->b); else if (!lafs_iolock_block_async(&ib->b)) - goto err_ib; + goto err_ib; while (ib->depth > 1) { /* internal index block */ @@ -1450,7 +1449,7 @@ lafs_leaf_find(struct inode *inode, u32 addr, int adopt, u32 *next, better = find_better(inode, ib2, addr, next, REF); putiref(ib2, REF); ib2 = better; - + if (!test_bit(B_Valid, &ib2->b.flags)) { lafs_iounlock_block(&ib->b); err = lafs_load_block(&ib2->b); @@ -1475,11 +1474,11 @@ lafs_leaf_find(struct inode *inode, u32 addr, int adopt, u32 *next, * much less likely. If it did, then it must * have had a parent, so the ref we hold means * it still has a parent. - * So if ib->b.parent, then we might need to + * So if ib->b.parent, then we might need to * retry from the top, and holding a ref on ib * means that we don't risk live-locking if * memory for index blocks is very tight. - * If there is no parent, then there is + * If there is no parent, then there is * no risk that ib changed. */ if (ib->b.parent) { @@ -1527,9 +1526,9 @@ lafs_leaf_find(struct inode *inode, u32 addr, int adopt, u32 *next, return ib; - err_ib2: +err_ib2: putiref(ib2, REF); - err_ib: +err_ib: putiref(ib, REF); err: if (hold) @@ -1682,8 +1681,8 @@ int lafs_find_next(struct inode *ino, loff_t *bnump) putdref(b, MKREF(find_next)); return rv; } - hole = (b->b.physaddr == 0 || - !test_bit(B_PhysValid, &b->b.flags)) && + hole = (b->b.physaddr == 0 || + !test_bit(B_PhysValid, &b->b.flags)) && !test_bit(B_Valid, &b->b.flags); if (LAFSI(ino)->depth == 0 && b->b.fileaddr == 0) @@ -1823,7 +1822,7 @@ find_block(struct datablock *b, int adopt, int async) * look through. 
Find the leaf first */ ib = lafs_leaf_find(b->b.inode, b->b.fileaddr, adopt, NULL, async, - MKREF(findblock)); + MKREF(findblock)); putdref(db, MKREF(findblock)); if (IS_ERR(ib)) { @@ -1864,7 +1863,7 @@ find_block(struct datablock *b, int adopt, int async) if (adopt) block_adopt(&b->b, ib); lafs_iounlock_block(&ib->b); - out: +out: if (IS_ERR(ib)) return PTR_ERR(ib); if (ib) @@ -2092,7 +2091,7 @@ int lafs_allocated_block(struct fs *fs, struct block *blk, u64 phys) return 0; } - new_parent: +new_parent: if (test_bit(B_Dirty, &blk->flags) || phys == 0) { if (!test_bit(B_Dirty, &p->b.flags) && !test_bit(B_Credit, &p->b.flags)) { @@ -2132,7 +2131,6 @@ int lafs_allocated_block(struct fs *fs, struct block *blk, u64 phys) BUG_ON(!test_bit(B_Pinned, &blk->parent->b.flags)); if (!lafs_add_block_address(fs, blk)) { - p = blk->parent; LAFS_BUG(!p, blk); diff --git a/inode.c b/inode.c index 7d7224d..0877079 100644 --- a/inode.c +++ b/inode.c @@ -70,7 +70,7 @@ lafs_iget(struct super_block *sb, ino_t inum, int async) putdref(b, MKREF(iget)); unlock_new_inode(ino); return ino; - err: +err: ino->i_nlink = 0; unlock_new_inode(ino); putdref(b, MKREF(iget)); @@ -227,7 +227,7 @@ lafs_import_inode(struct inode *ino, struct datablock *b) } dprintk(" mode = 0%o uid %d size %lld\n", - ino->i_mode, ino->i_uid, ino->i_size); + ino->i_mode, ino->i_uid, ino->i_size); switch (li->type) { case TypeFile: ino->i_op = &lafs_file_ino_operations; @@ -242,9 +242,9 @@ lafs_import_inode(struct inode *ino, struct datablock *b) { u32 *b = (u32 *)lai; dprintk("Hmm. 
%d %d %d\n", - (int)b[24], - (int)b[25], - (int)b[26]); + (int)b[24], + (int)b[25], + (int)b[26]); } break; case TypeSymlink: @@ -273,7 +273,7 @@ lafs_import_inode(struct inode *ino, struct datablock *b) li->dblock = b; b->my_inode = ino; - out: +out: if (err && li->type) printk("inode %lu type is %d\n", (unsigned long)ino->i_ino, li->type); @@ -385,7 +385,7 @@ void lafs_inode_init(struct datablock *b, int type, int mode, struct inode *dir) /* FIXME real name?? */ strlcpy(l->name, "Secondary", sizeof(l->name)); break; -} + } case TypeInodeMap: { struct inodemap_metadata *l = &lai->metadata[0].inodemap; @@ -559,7 +559,7 @@ static int prune_some(void *data, u32 addr, u64 paddr, int len) a = &ib->uninc_table.pending_addr [ib->uninc_table.pending_cnt - 1]; if (ib->uninc_table.pending_cnt < - ARRAY_SIZE(ib->uninc_table.pending_addr)) { + ARRAY_SIZE(ib->uninc_table.pending_addr)) { a++; a->fileaddr = addr + i; a->physaddr = 0; @@ -585,7 +585,7 @@ void lafs_inode_handle_orphan(struct datablock *b) int do_restart; int loop_cnt = 20; - restart: +restart: do_restart = 1; if (!test_bit(I_Trunc, &LAFSI(ino)->iflags)) { @@ -660,7 +660,7 @@ void lafs_inode_handle_orphan(struct datablock *b) ib2 = ib; lastaddr = (i_size_read(ino) + ino->i_sb->s_blocksize - 1) - >> ino->i_sb->s_blocksize_bits; + >> ino->i_sb->s_blocksize_bits; /* Find a Pinned descendent of ib which has no * Pinned descendents and no PrimaryRef dependent * (so take the last). @@ -698,7 +698,7 @@ void lafs_inode_handle_orphan(struct datablock *b) lafs_print_tree(&ib2->b, 3); LAFS_BUG(1, &ib2->b); } - + /* lafs_iolock_written(&ib2->b); - without blocking */ if (!lafs_iolock_block_async(&ib2->b)) { putiref(ib2, MKREF(inode_handle_orphan2)); @@ -763,7 +763,7 @@ void lafs_inode_handle_orphan(struct datablock *b) clear_bit(I_Trunc, &LAFSI(ino)->iflags); if (test_bit(B_Pinned, &ib->b.flags)) /* Need to move the dirtiness which keeps this - * pinned to the data block. + * pinned to the data block. 
*/ lafs_cluster_allocate(&ib->b, 0); else @@ -790,7 +790,7 @@ void lafs_inode_handle_orphan(struct datablock *b) } lafs_pin_block(&ib->b); - + /* It might be that this can happen, in which case * we simply update trunc_next and loop. But I'd like * to be sure before I implement that @@ -844,9 +844,9 @@ void lafs_inode_handle_orphan(struct datablock *b) lafs_print_tree(&ib->b, 2); LAFS_BUG(!list_empty(&ib->children), &ib->b); - out: +out: lafs_iounlock_block(&ib->b); - out_unlocked: +out_unlocked: lafs_checkpoint_unlock(fs); putiref(ib, MKREF(inode_handle_orphan3)); @@ -958,8 +958,8 @@ int lafs_write_inode(struct inode *ino, int wait) lafs_space_return(fs, 1); LAFSI(ino)->update_cluster = lafs_cluster_update_commit - (&uh, b, LAFS_INODE_LOG_START, - LAFS_INODE_LOG_SIZE); + (&uh, b, LAFS_INODE_LOG_START, + LAFS_INODE_LOG_SIZE); } else lafs_cluster_update_abort(&uh); lafs_checkpoint_unlock(fs); @@ -1128,7 +1128,7 @@ choose_free_inum(struct fs *fs, struct inode *filesys, u32 *inump, int bit; mutex_lock_nested(&im->i_mutex, I_MUTEX_QUOTA); - retry: +retry: if (*bp) { putdref(*bp, MKREF(cfi_map)); *bp = NULL; @@ -1219,9 +1219,9 @@ choose_free_inum(struct fs *fs, struct inode *filesys, u32 *inump, iput(im); return 0; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); - abort: +abort: clear_bit(B_PinPending, &b->b.flags); putdref(b, MKREF(cfi_map)); *bp = NULL; @@ -1244,7 +1244,7 @@ inode_map_new_prepare(struct fs *fs, int inum, struct inode *filesys, struct datablock *b; imni->ib = imni->mb = NULL; - retry: +retry: if (inum == 0) /* choose a possibly-free inode number */ err = choose_free_inum(fs, filesys, &choice, @@ -1342,7 +1342,7 @@ lafs_new_inode(struct fs *fs, struct inode *dir, int type, int inum, int mode, err = lafs_make_orphan(fs, imni.ib); if (err) goto abort; - retry: +retry: lafs_checkpoint_lock(fs); err = inode_map_new_pin(&imni); @@ -1358,8 +1358,8 @@ lafs_new_inode(struct fs *fs, struct inode *dir, int type, int inum, int mode, b = getdref(imni.ib, 
MKREF(inode_new)); lafs_iolock_block(&b->b); /* make sure we don't race with the cleaner - * and zero this inode while trying to load it - */ + * and zero this inode while trying to load it + */ lafs_inode_init(b, type, mode, dir); lafs_iounlock_block(&b->b); @@ -1379,10 +1379,10 @@ lafs_new_inode(struct fs *fs, struct inode *dir, int type, int inum, int mode, *inodbp = b; return ino; - abort_unlock: +abort_unlock: lafs_checkpoint_unlock(fs); err = -ENOSPC; - abort: +abort: inode_map_new_abort(&imni); lafs_cluster_update_abort(&ui); dprintk("After abort %d: %s\n", err, strblk(&imni.ib->b)); @@ -1411,7 +1411,7 @@ static int inode_map_free(struct fs *fs, struct inode *filesys, u32 inum) putdref(b, MKREF(inode_map_free)); return err; } - retry: +retry: lafs_checkpoint_lock(fs); err = lafs_pin_dblock(b, ReleaseSpace); if (err == -EAGAIN) { @@ -1446,7 +1446,7 @@ int lafs_setattr(struct dentry *dentry, struct iattr *attr) /* FIXME quota stuff */ - again: +again: lafs_checkpoint_lock(fs); err = lafs_pin_dblock(db, ReleaseSpace); if (err == -EAGAIN) { @@ -1488,7 +1488,7 @@ void lafs_truncate(struct inode *ino) set_bit(I_Trunc, &LAFSI(ino)->iflags); LAFSI(ino)->trunc_next = (i_size_read(ino) + ino->i_sb->s_blocksize - 1) - >> ino->i_sb->s_blocksize_bits; + >> ino->i_sb->s_blocksize_bits; putdref(db, MKREF(trunc)); } diff --git a/io.c b/io.c index b1615a4..0bbeb21 100644 --- a/io.c +++ b/io.c @@ -190,7 +190,8 @@ static DECLARE_WAIT_QUEUE_HEAD(block_wait); /* need more of these later FIXME */ void lafs_io_wake(struct block *b) { wake_up(&block_wait); -} +} + void _lafs_iolock_block(struct block *b) { if (test_and_set_bit(B_IOLock, &b->flags)) { @@ -228,7 +229,6 @@ lafs_iounlock_block(struct block *b) if (test_bit(B_Async, &b->flags) || test_bit(B_Orphan, &b->flags)) lafs_wake_cleaner(fs_from_inode(b->inode)); - } void lafs_writeback_done(struct block *b) diff --git a/lafs.h b/lafs.h index a439fb9..75dcfef 100644 --- a/lafs.h +++ b/lafs.h @@ -125,6 +125,7 @@ void 
_lafs_iolock_written(struct block *b); static inline int _lafs_iolock_block_async(struct block *b) { return !test_and_set_bit(B_IOLock, &b->flags); } + void lafs_iounlock_block(struct block *b); void lafs_iocheck_block(struct datablock *db, int unlock); void lafs_iocheck_writeback(struct datablock *db, int unlock); @@ -373,6 +374,7 @@ static inline struct block *__getref(struct block *b) atomic_inc(&b->refcnt); return b; } + static inline struct block *_getref(struct block *b) { LAFS_BUG(b && atomic_read(&b->refcnt) == 0, b); @@ -424,6 +426,7 @@ static inline int _putref_norefile(struct block *b) BUG_ON(atomic_read(&b->refcnt)==0); return atomic_dec_and_test(&b->refcnt); } + static inline void _putref(struct block *b) { BUG_ON(atomic_read(&b->refcnt)==0); @@ -579,6 +582,7 @@ static inline void lafs_cluster_wait(struct fs *fs, unsigned long long seq) wait_event(fs->wc[0].pending_wait, fs->wc[0].cluster_seq > seq); } + void lafs_cluster_wait_all(struct fs *fs); int lafs_cluster_empty(struct fs *fs, int cnum); @@ -649,6 +653,7 @@ static inline void lafs_pin_block(struct block *b) { lafs_pin_block_ph(b, fs_from_inode(b->inode)->phase); } + int lafs_add_block_address(struct fs *fs, struct block *blk); void lafs_phase_flip(struct fs *fs, struct indexblock *ib); struct indexblock * __must_check diff --git a/modify.c b/modify.c index ab03a51..c0b231e 100644 --- a/modify.c +++ b/modify.c @@ -134,7 +134,7 @@ static int incorporate_indirect(struct uninc *ui, char *buf, u32 addr, int len) if ( ui->pending_addr[ui->pending_cnt-1].fileaddr +ui->pending_addr[ui->pending_cnt-1].cnt - > addr + (len/6)) + > addr + (len/6)) return 0; for (i = 0; i < ui->pending_cnt; i++) { @@ -157,7 +157,7 @@ static int incorporate_extent(struct uninc *ui, char *buf, int size) /* See if the common case of new data being added to the file * applies. i.e. is there enough space in the buf to add the * extents listed in ui. If so, add them. */ - /* Find the last valid extent. 
*/ + /* Find the last valid extent. */ int e = (size)/12; int i; u32 last = 0; @@ -183,10 +183,10 @@ static int incorporate_extent(struct uninc *ui, char *buf, int size) buf += e*12; for (i = 0; i < ui->pending_cnt; i++) if (ui->pending_addr[i].physaddr) { - dprintk("AddedX %llu %u %lu\n", - (unsigned long long)ui->pending_addr[i].physaddr, - (unsigned) ui->pending_addr[i].cnt, - (unsigned long)ui->pending_addr[i].fileaddr); + dprintk("AddedX %llu %u %lu\n", + (unsigned long long)ui->pending_addr[i].physaddr, + (unsigned) ui->pending_addr[i].cnt, + (unsigned long)ui->pending_addr[i].fileaddr); credits += ui->pending_addr[i].cnt; encode48(buf, ui->pending_addr[i].physaddr); encode16(buf, ui->pending_addr[i].cnt); @@ -424,7 +424,7 @@ static void grow_index_tree(struct indexblock *ib, struct indexblock *new) ib->b.parent = new; getiref(new, MKREF(child)); - clear_bit(B_PhysValid, &ib->b.flags); + clear_bit(B_PhysValid, &ib->b.flags); ib->b.physaddr = 0; ibuf = map_iblock(new); @@ -434,7 +434,7 @@ static void grow_index_tree(struct indexblock *ib, struct indexblock *new) new->depth = ib->depth + 1; LAFSI(new->b.inode)->depth++; ((struct la_inode*)(ibuf))->depth = LAFSI(new->b.inode)->depth; - + /* There is no need to set PrimaryRef as the first child in an * index block is implicitly incorporated - at least enough to be * found. So the ->parent link holds our place in the tree @@ -523,7 +523,6 @@ struct layoutinfo { u64 lastphys; u32 lastaddr; int esize; - }; static int add_index(void *data, u32 addr, u64 phys) @@ -859,7 +858,6 @@ static u32 walk_extent(u32 addr, char **bufp, int len, struct uninc *ui, handle(data, addr, 0, len); /* initialise */ while (uinum < ui->pending_cnt || ! found_end) { - if (elen == 0 && !found_end) { if (len >= 12) { ephys = decode48(buf); @@ -894,11 +892,11 @@ static u32 walk_extent(u32 addr, char **bufp, int len, struct uninc *ui, * add the overlapped portion. 
*/ dprintk("(%llu %lu %u) (%llu %lu %u) + %d = %d (%d)\n", - ephys, (unsigned long)eaddr, elen, - ui->pending_addr[uinum].physaddr, - (unsigned long)ui->pending_addr[uinum].fileaddr, - ui->pending_addr[uinum].cnt, - uioffset, hlen, uinum); + ephys, (unsigned long)eaddr, elen, + ui->pending_addr[uinum].physaddr, + (unsigned long)ui->pending_addr[uinum].fileaddr, + ui->pending_addr[uinum].cnt, + uioffset, hlen, uinum); if (elen && ui->pending_addr[uinum].fileaddr + uioffset + hlen > eaddr + elen) @@ -906,8 +904,8 @@ static u32 walk_extent(u32 addr, char **bufp, int len, struct uninc *ui, (ui->pending_addr[uinum].fileaddr + uioffset); if (ui->pending_addr[uinum].physaddr) handled = handle(data, - ui->pending_addr[uinum].fileaddr + uioffset, - ui->pending_addr[uinum].physaddr + uioffset, + ui->pending_addr[uinum].fileaddr + uioffset, + ui->pending_addr[uinum].physaddr + uioffset, hlen); else handled = hlen; @@ -1011,7 +1009,6 @@ void lafs_walk_leaf_index(struct indexblock *ib, } if (test_bit(B_InoIdx, &ib->b.flags)) kunmap(LAFSI(ib->b.inode)->dblock->page); - } static void share_list(struct block **ibp, struct block **newp, u32 next) @@ -1682,7 +1679,7 @@ void lafs_incorporate(struct fs *fs, struct indexblock *ib) !test_bit(B_Realloc, &ib->b.flags)) printk("bad %s\n", strblk(&ib->b)); LAFS_BUG(!test_bit(B_Dirty, &ib->b.flags) && - !test_bit(B_Realloc, &ib->b.flags), &ib->b); + !test_bit(B_Realloc, &ib->b.flags), &ib->b); /* OK, we genuinely have work to do. */ /* find size and type of current block */ @@ -1730,7 +1727,7 @@ void lafs_incorporate(struct fs *fs, struct indexblock *ib) filter_empties(&uninc); LAFS_BUG(!test_bit(B_Dirty, &ib->b.flags) && - !test_bit(B_Realloc, &ib->b.flags), &ib->b); + !test_bit(B_Realloc, &ib->b.flags), &ib->b); /* OK, we genuinely have work to do. 
*/ /* find size and type of current block */ @@ -1749,7 +1746,7 @@ void lafs_incorporate(struct fs *fs, struct indexblock *ib) print_index(buf, ib->b.fileaddr, blocksize - offset); printk("uninc list:\n"); for (b = uninc; b ; b=b->chain) - printk(" %lu: %llu\n", + printk(" %lu: %llu\n", (unsigned long) b->fileaddr, (unsigned long long) b->physaddr); } @@ -1768,7 +1765,7 @@ void lafs_incorporate(struct fs *fs, struct indexblock *ib) /* Ok, those were the easy options. Now we need to allocate a * new index block. */ - retry: +retry: new = lafs_iblock_alloc(fs, GFP_NOFS, 1, MKREF(inc)); /* FIXME need to preallocate something for a fall-back?? */ @@ -1800,7 +1797,7 @@ void lafs_incorporate(struct fs *fs, struct indexblock *ib) lafs_iounlock_block(&new->b); lafs_iblock_free(new); LAFS_BUG(!test_bit(B_Dirty, &ib->b.flags) && - !test_bit(B_Realloc, &ib->b.flags), &ib->b); + !test_bit(B_Realloc, &ib->b.flags), &ib->b); /* Don't need to dirty, it is already dirty */ break; @@ -1842,7 +1839,7 @@ void lafs_incorporate(struct fs *fs, struct indexblock *ib) goto retry; } - out: +out: if (uit.credits > 0 && !test_and_set_bit(B_UnincCredit, &ib->b.flags)) uit.credits--; if (uit.credits > 0 && !test_and_set_bit(B_ICredit, &ib->b.flags)) @@ -1920,7 +1917,7 @@ int __must_check lafs_prealloc(struct block *blk, int why) && !test_phase_locked(fs) && !fs->checkpointing, blk); - retry: +retry: need = 0; b = blk; diff --git a/orphan.c b/orphan.c index 6de97ee..d429955 100644 --- a/orphan.c +++ b/orphan.c @@ -69,7 +69,7 @@ void lafs_dump_orphans(void) bnum = slot >> (dfs->prime_sb->s_blocksize_bits-4); ob = lafs_get_block(dfs->orphans, bnum, NULL, GFP_KERNEL, - MKREF(dumpo)); + MKREF(dumpo)); or = map_dblock(ob); ent = slot - (bnum << (dfs->prime_sb->s_blocksize_bits-4)); printk("%3d: %d %u %u %u\n", slot, @@ -114,7 +114,7 @@ static int orphan_prepare(struct fs *fs, int async) bnum = (om->nextfree + om->reserved) >> (fs->prime_sb->s_blocksize_bits-4); - b = lafs_get_block(fs->orphans, 
bnum, NULL, GFP_KERNEL, + b = lafs_get_block(fs->orphans, bnum, NULL, GFP_KERNEL, MKREF(orphan_reserve)); if (b) { if (async) @@ -178,7 +178,7 @@ static void orphan_commit(struct fs *fs, struct datablock *b, struct datablock * putdref(ob, MKREF(orphan_reserve)); or = map_dblock(ob); - ent = b->orphan_slot - (ob->b.fileaddr + ent = b->orphan_slot - (ob->b.fileaddr << (fs->prime_sb->s_blocksize_bits-4)); or[ent].type = cpu_to_le32(1); or[ent].filesys = cpu_to_le32(LAFSI(b->b.inode)->filesys->i_ino); @@ -246,7 +246,7 @@ int lafs_make_orphan(struct fs *fs, struct datablock *db) err = orphan_prepare(fs, 0); if (err) return err; - retry: +retry: lafs_checkpoint_lock(fs); mutex_lock_nested(&fs->orphans->i_mutex, I_MUTEX_QUOTA); ob = orphan_pin(fs, db); @@ -300,7 +300,7 @@ int lafs_make_orphan_nb(struct fs *fs, struct datablock *db) * we need to delete the entry from the orphan file. * If this isn't the last entry in the file we swap the last entry * in thus keeping the file dense. - * We don't have the blocks in the orphan file reserved. If we + * We don't have the blocks in the orphan file reserved. If we * cannot get a reservation we fail to release the orphan status * so we can try again later. 
* All IO requests here must be async as we run from the cleaner @@ -317,7 +317,6 @@ void lafs_orphan_release(struct fs *fs, struct datablock *b) if (!test_bit(B_Orphan, &b->b.flags)) return; - ob1 = lafs_get_block(fs->orphans, b->orphan_slot >> shift, NULL, GFP_KERNEL, MKREF(orphan_release)); /* We already own an 'orphan' reference on this block, so we @@ -390,11 +389,11 @@ void lafs_orphan_release(struct fs *fs, struct datablock *b) putdref(bi, MKREF(orphan_ino)); dprintk("O bfs=%p(%p) bi=%p bbl=%p lastent=%d " "fs=%d in=%d a=%d\n", - bfs, bfs->my_inode, bi, bbl, lastent, - le32_to_cpu(last.filesys), - le32_to_cpu(last.inum), - le32_to_cpu(last.addr) - ); + bfs, bfs->my_inode, bi, bbl, lastent, + le32_to_cpu(last.filesys), + le32_to_cpu(last.inum), + le32_to_cpu(last.addr) + ); if (!bbl) { putdref(ob2, MKREF(orphan_move)); printk("OUCH 3\n"); @@ -437,9 +436,9 @@ void lafs_orphan_release(struct fs *fs, struct datablock *b) /* Now drop the reservation we just synthesised */ orphan_abort(fs); - out_unlock: +out_unlock: mutex_unlock(&fs->orphans->i_mutex); - out: +out: lafs_checkpoint_unlock(fs); } diff --git a/roll.c b/roll.c index 5ed46ed..ab29e7a 100644 --- a/roll.c +++ b/roll.c @@ -176,8 +176,8 @@ roll_mini(struct fs *fs, int fsnum, int inum, int trunc, int flg, void *buf; dprintk("Roll Mini %d/%d/%d/%lu/%d,%d\n", - fsnum, inum, flg, (unsigned long) bnum, - offset, len); + fsnum, inum, flg, (unsigned long) bnum, + offset, len); /* The handling of miniblock updates is quite different for * different objects. @@ -253,8 +253,8 @@ roll_block(struct fs *fs, int fsnum, int inum, int trunc, int flg, return 0; /* FIXME should I punch a hole here? 
*/ dprintk("Roll Block %d/%d/%d/%lu/%llu\n", - fsnum, inum, flg, (unsigned long) bnum, - (unsigned long long)baddr); + fsnum, inum, flg, (unsigned long) bnum, + (unsigned long long)baddr); /* find/load the inode */ inode = lafs_iget_fs(fs, fsnum, inum, SYNC); @@ -267,7 +267,7 @@ roll_block(struct fs *fs, int fsnum, int inum, int trunc, int flg, li = LAFSI(inode); dprintk("Got the inode, type %d %p size %llu\n", li->type, - inode, inode->i_size); + inode, inode->i_size); switch (li->type) { struct la_inode *lai; @@ -351,7 +351,7 @@ roll_block(struct fs *fs, int fsnum, int inum, int trunc, int flg, /* FIXME if the table becomes full, we have a problem... */ LAFS_BUG(1, &blk->b); dprintk("Allocated block %lu to %llu\n", - (unsigned long)bnum, baddr); + (unsigned long)bnum, baddr); /* FIXME maybe clear Writeback instead */ lafs_iounlock_block(&blk->b); @@ -446,7 +446,7 @@ roll_one(struct fs *fs, u64 *addrp, struct page *p, struct page *pg, int len = le16_to_cpu(mb->length) - DescMiniOffset; err = roll_mini(fs, fsnum, inum, trunc, flg, - bnum, offset, len, (char *)(mb+1)); + bnum, offset, len, (char *)(mb+1)); mb++; mb = (struct miniblock *)(((char*)mb) @@ -601,7 +601,7 @@ lafs_mount(struct fs *fs) } return err; - err: +err: putdref(b, MKREF(mount)); err2: return err; diff --git a/segments.c b/segments.c index 14827cb..d698cc8 100644 --- a/segments.c +++ b/segments.c @@ -103,6 +103,7 @@ static inline void ss_get(struct segsum *ss) BUG_ON(atomic_read(&ss->refcnt) == 0); atomic_inc(&ss->refcnt); } + static void ss_put(struct segsum *ss, struct fs *fs) { if (atomic_dec_and_lock(&ss->refcnt, &fs->stable_lock)) { @@ -129,7 +130,7 @@ static struct segsum *segsum_find(struct fs *fs, u32 segnum, u32 addr; int err; - retry: +retry: spin_lock(&fs->stable_lock); hlist_for_each_entry(ss, n, head, hash) if (ss->segnum == segnum && @@ -367,9 +368,9 @@ void lafs_seg_move(struct fs *fs, u64 oldaddr, u64 newaddr, { struct segsum *ss; - dprintk("SEGMOVE %llu %llu\n", - (unsigned long 
long) oldaddr, - (unsigned long long) newaddr); + dprintk("SEGMOVE %llu %llu\n", + (unsigned long long) oldaddr, + (unsigned long long) newaddr); if (newaddr) { ss = segsum_byaddr(fs, newaddr, ssnum); @@ -461,7 +462,6 @@ void lafs_seg_apply_all(struct fs *fs) */ int lafs_seg_dup(struct fs *fs, int newtable) { - /* for each device, * get blocks for each table and memcpy */ @@ -500,7 +500,7 @@ int lafs_seg_dup(struct fs *fs, int newtable) putdref(nbl, MKREF(ss)); } } - out: +out: return err; } @@ -543,8 +543,8 @@ static int count_credits(struct block *b) credits += count_credits(&LAFSI(dblk(b)->my_inode)->iblock->b); } return credits; - } + int temp_credits; static void check_credits(struct fs *fs) { @@ -888,12 +888,12 @@ static void segsort(struct segtracker *st, struct slist *l) */ while (h[0] != 0xFFFF || h[1] != 0xFFFF) { if (h[next] == 0xFFFF || - (h[1-next] != 0xFFFF && - !((prev <= segfollow(st, h[1-next])->score) - ^ (segfollow(st, h[1-next])->score - <= segfollow(st, h[next])->score) - ^ (segfollow(st, h[next])->score <= prev)) - )) + (h[1-next] != 0xFFFF && + !((prev <= segfollow(st, h[1-next])->score) + ^ (segfollow(st, h[1-next])->score + <= segfollow(st, h[next])->score) + ^ (segfollow(st, h[next])->score <= prev)) + )) next = 1 - next; if (segfollow(st, h[next])->score < prev) @@ -1056,7 +1056,7 @@ void lafs_free_get(struct fs *fs, unsigned int *dev, u32 *seg, BUG_ON(nonlogged); // FIXME should handle this case, not ignore it - again: +again: spin_lock(&fs->lock); wait_event_lock(fs->phase_wait, @@ -1172,7 +1172,6 @@ int lafs_clean_count(struct fs *fs) return rv; } - static int add_free(struct fs *fs, unsigned int dev, u32 seg, u16 *youthp) { /* This dev/seg is known to be free. 
add it to the list */ @@ -1422,7 +1421,7 @@ int lafs_get_cleanable(struct fs *fs, u16 *dev, u32 *seg) lafs_check_seg_cnt(st); - retry: +retry: spin_lock(&fs->lock); if (st->cleanable.first == 0xFFFF) { spin_unlock(&fs->lock); @@ -1656,8 +1655,8 @@ unsigned long lafs_scan_seg(struct fs *fs) */ while (fs->scan.free_block > - (fs->devs[fs->scan.free_dev].segment_count - >> (fs->prime_sb->s_blocksize_bits - 1))) { + (fs->devs[fs->scan.free_dev].segment_count + >> (fs->prime_sb->s_blocksize_bits - 1))) { fs->scan.free_dev++; fs->scan.free_block = 0; if (fs->scan.free_dev >= fs->devices) { @@ -1674,7 +1673,7 @@ unsigned long lafs_scan_seg(struct fs *fs) if (fs->scan.youth_db == NULL) fs->scan.youth_db = lafs_get_block(fs->devs[fs->scan.free_dev] - .segsum, + .segsum, fs->scan.free_block, NULL, GFP_KERNEL, MKREF(youth)); if (!fs->scan.youth_db) { @@ -1793,7 +1792,7 @@ unsigned long lafs_scan_seg(struct fs *fs) fs->scan.free_block + fs->devs[fs->scan.free_dev].tablesize * LAFSI(fs->ss[fs->scan.free_stage-1].root) - ->md.fs.usagetable, + ->md.fs.usagetable, NULL, GFP_KERNEL, MKREF(usage_ss)); if (!db) { printk("EEEEKKK get_block for subsequent usage failed\n"); @@ -1852,7 +1851,6 @@ unsigned long lafs_scan_seg(struct fs *fs) (void)getdref(fs->scan.usage0_db, MKREF(intable)); } - unmap_dblock(fs->scan.youth_db, yp); putdref(fs->scan.youth_db, MKREF(youth)); fs->scan.youth_db = NULL; diff --git a/snapshot.c b/snapshot.c index 413c1f8..81435cf 100644 --- a/snapshot.c +++ b/snapshot.c @@ -22,6 +22,7 @@ static int match_one(struct super_block *sb, void *target) { return sb == target; } + static int discard_one(struct super_block *sb, void *data) { return -ENOENT; @@ -108,7 +109,7 @@ lafs_snap_get_sb(struct file_system_type *fstype, struct lafs_inode *li = LAFSI(fs->ss[s].root); if (strcmp(li->md.fs.name, op.snapshot) == 0) { /* Already mounted... what to do? 
FIXME */ - return -EBUSY; + return -EBUSY; } } err = lafs_load_page(fs, p, fs->ss[s].root_addr, 1); @@ -165,7 +166,7 @@ lafs_snap_get_sb(struct file_system_type *fstype, } err = -ENOENT; - fail: +fail: put_page(p); path_put(&nd.path); return err; @@ -179,7 +180,6 @@ static void lafs_snap_kill_sb(struct super_block *sb) generic_shutdown_super(sb); printk("Generic_shutdown_super called\n"); deactivate_super(fs->prime_sb); - } struct file_system_type lafs_snap_fs_type = { diff --git a/state.h b/state.h index 6dd3351..ad80ff9 100644 --- a/state.h +++ b/state.h @@ -287,7 +287,6 @@ struct fs { struct hlist_head stable[SHASHSIZE]; spinlock_t stable_lock; - }; static inline int test_phase_locked(struct fs *fs) diff --git a/summary.c b/summary.c index 6db14db..c2fe6b4 100644 --- a/summary.c +++ b/summary.c @@ -118,5 +118,4 @@ int lafs_summary_allocate(struct fs *fs, struct inode *ino, int diff) spin_unlock(&ino->i_lock); return 0; - } diff --git a/super.c b/super.c index e392e12..5bc8cee 100644 --- a/super.c +++ b/super.c @@ -329,17 +329,17 @@ lafs_fill_super(struct super_block *sb, void *opv, int silent) dev_addr = sect; } else switch (compare_dev(dv->devblock, page_address(pg))) { - case 0: /* older, do nothing */ - break; - case 1: /* newer, overwrite */ - memcpy(dv->devblock, page_address(pg), n); - dev_addr = sect; - break; - default: /* inconsistent --- HELP */ - printk(KERN_ERR "LaFS: inconsistent device-blocks found.\n"); - err = -EINVAL; - goto out; - } + case 0: /* older, do nothing */ + break; + case 1: /* newer, overwrite */ + memcpy(dv->devblock, page_address(pg), n); + dev_addr = sect; + break; + default: /* inconsistent --- HELP */ + printk(KERN_ERR "LaFS: inconsistent device-blocks found.\n"); + err = -EINVAL; + goto out; + } } if (i != 1) @@ -403,7 +403,7 @@ lafs_fill_super(struct super_block *sb, void *opv, int silent) if (!silent) printk(KERN_ERR "LaFS: no valid stateblock found.\n"); } - out: +out: page_cache_release(pg); return err; } @@ -485,7 +485,7 
@@ lafs_load(struct options *op, int newest) st = fs->state = op->devlist[newest].stateblock; op->devlist[newest].stateblock = NULL; #ifdef DUMP - dfs = fs; + dfs = fs; #endif fs->seq = le32_to_cpu(st->seq); @@ -520,7 +520,7 @@ lafs_load(struct options *op, int newest) fs->ss[i].root_addr = le64_to_cpu(st->root_inodes[i]); dprintk("root inode %d are %llu\n", - i, fs->ss[i].root_addr); + i, fs->ss[i].root_addr); } INIT_LIST_HEAD(&fs->pending_orphans); INIT_LIST_HEAD(&fs->inode_index); @@ -585,8 +585,8 @@ lafs_load(struct options *op, int newest) dv->start = le64_to_cpu(dv->devblk->start); dv->size = le64_to_cpu(dv->devblk->size); dprintk("Dev %d seems to range %llu + %llu\n", - i, (unsigned long long)dv->start, - (unsigned long long)dv->size); + i, (unsigned long long)dv->start, + (unsigned long long)dv->size); dv->width = le16_to_cpu(dv->devblk->width); dv->stride = le32_to_cpu(dv->devblk->stride); @@ -651,7 +651,7 @@ lafs_release(struct fs *fs) retry: list_for_each_entry(b, &fs->phase_leafs[i], lru) { /* FIXME this only OK for readonly mounts. - */ + */ getref(b, MKREF(release)); lafs_refile(b, 0); if (test_bit(B_Pinned, &b->flags)) { @@ -774,7 +774,7 @@ get_lafs_sb_dev(struct options *op, int flags) } return 0; - out: +out: return err; } @@ -855,7 +855,7 @@ lafs_get_sb(struct file_system_type *fs_type, * metadata files initialised, all pigs fed, and ready to fly!!! */ - out: +out: /* Now we clean up 'options'. 
Anything that is wanted has * been moved into 'fs', so we just discard anything we find */ @@ -884,7 +884,6 @@ static void lafs_kill_sb(struct super_block *sb) struct fs *fs = sb->s_fs_info; lafs_release(fs); - } struct file_system_type lafs_fs_type = { @@ -914,7 +913,7 @@ static int __init lafs_init(void) } return 0; - out: +out: lafs_ihash_free(); return err; } @@ -1011,13 +1010,13 @@ static int lafs_statfs(struct dentry *de, struct kstatfs *buf) */ buf->f_bavail = fs->free_blocks - fs->allocated_blocks; buf->f_bfree = buf->f_blocks - (root->md.fs.cblocks_used + - root->md.fs.pblocks_used + - root->md.fs.ablocks_used); + root->md.fs.pblocks_used + + root->md.fs.ablocks_used); dprintk("df: tot=%ld free=%ld avail=%ld(%ld-%ld) cb=%ld pb=%ld ab=%ld\n", - (long)buf->f_blocks, (long)buf->f_bfree, (long)buf->f_bavail, + (long)buf->f_blocks, (long)buf->f_bfree, (long)buf->f_bavail, (long)fs->free_blocks, (long)fs->allocated_blocks, - (long)root->md.fs.cblocks_used, (long)root->md.fs.pblocks_used, - (long)root->md.fs.ablocks_used); + (long)root->md.fs.cblocks_used, (long)root->md.fs.pblocks_used, + (long)root->md.fs.ablocks_used); buf->f_files = 0; buf->f_ffree = 0; @@ -1078,6 +1077,7 @@ static int do_dump(const char *val, struct kernel_param *kp) lafs_dump_usage(); return 0; } + static int get_dump(char *buffer, struct kernel_param *kp) { strcpy(buffer, "orphans,tree,cleanable,usage"); -- 2.39.5