- don't hold a 'leaf' reference. NO
- clean up *ref calls - differentiate those that can be called when zero DONE
- use enum for B_* DONE
- - support truncate to non-zero offset
+ - support truncate to non-zero offset DONE
- "looping on" found an 'OnFree' block!
- clean out lot of debugging
lost and bad things happen.
I might be able to fix it up, but it is probably better to try the
checkpoint_lock approach if I can only remember what that is.
+
+Locking:
+ Available locks:
+
+ Spin:
+
+ lafs_hash_lock
+ Used in:
+ lafs_shrinker
+ lafs_refile ???
+ Protects:
+ ib->hash
+ ->lru when on freelist
+
+ i_data.private_lock
+ Used in:
+ lafs_shrinker
+ Protects:
+ ->iblock / refcnt
+ ->dblock / my_inode
+ ->children / ->parent within an inode
+ setting ->private
+
+ fs->alloc_lock
+ fs->allocate_blocks
+
+ fs->stable_lock
+ segsum hash table
+ segsummary counters (in blocks)
+
+ fs->lock
+ _leafs lru
+ ->pending_blocks lru - should this be wc->lock ??.. not in 'bh'
+ Pinned consistent with lru
+ ->checkpointing / ->phase_locked
+ fs->pending_orphans
+ ->uninc and ->chain ?? Should use parent->B_IOLock ??
+ uninc_table - should use B_IOLock
+ free list / clean list segtrack
+
+ Mutex:
+
+ fs->wc->lock
+ wc[0] .. something in prepare_checkpoint
+ ->remaining etc
+ cluster_flush
+ mini blocks
+
+ i_mutex
+ inode_map
+ orphans
+
+ Other:
+
+ B_IOLock
+ erase_block
+ incorporate
+ cluster_allocate
+ allocated_block
+ IO
+ Phase flip
+ Initialising new inode
+ B_IOLockLock
+ IOLock across a page
+
+
+--------------------
+This is a list from 18 months ago, with updates
+
+ - Understand how superblock 'version' should be used.
+
+ - Review and fix up all locking/refcounts. See locking.doc
+ Also lock inode when copying in block 0 and probably
+ when calling lafs_inode_fillblock (??)
+ - lafs_incorporate must take a copy of the table under a lock so
+ more allocations can come in at any time.
+
+ - We don't want _allocated to block during cluster flush. So have
+ a no-block version and queue blocks on ->uninc if we cannot
+ allocate quickly. Find some way to process those ->uninc blocks.
+
+ - Use above for phase_flip so that we don't need to _allocated there.
+
+ - Utilise WritePhase bit, to be cleared when write completes.
+ In particular, find when to wait for Alloc to be cleared if
+ WritePhase doesn't match Phase.
+ - when about to perform an incorporation.
+ - make sure we don't re-cluster_allocate until old-phase address has
+ been recorded for incorporation.
+
+ - allocate multiple WAIT_QUEUE_HEADS for 'block_wait'
+
+ - Can inode data block be on leafs while index isn't, what happens if we
+ try to write it out...
+
+ - If InoIdx doesn't exist, then write_inode must write the data block.
+
+ - document and review all guards against dirtying a block from a previous phase
+ that is not yet safe on storage.
+ See lafs_dirty_dblock.
+ - check for proper handling of error conditions
+ b/ if lafs_seg_dup fails in do_checkpoint, we need to abort the snapshot.
+ - review checkpoint loop.
+ Should anything be explicit, or will refile do whatever is needed?
+ - Waiting.
+ What should checkpoint_unlock_wait wait for?
+ When do we need to wait for blocks that change state. And how?
+
+ - load/dirty block0 before dirtying any other block in depth=0 file
+
+ - use kmem_cache for 'struct datablock'
+ - indexblock allocation.
+ use kmem_cache
+ allocate the 'data' buffer late for InoIdx block.
+ trigger flushing when space is tight
+ Understand exactly when make_iblock should be called, and make it so.
+ - use a mempool for skippoints in cluster.c
+ - Review seg addressing code in cluster.c and make sure comments are good.
+ - consider ranges of holes in pending_addr.
+
+ - review correct placement of state block given issues with stripes.
+
+ - review segment usage /youth handling and make a todo list.
+ a/ Understand ref counting on segments and get it right.
+ - Choose when to use VerifyNull and when to use VerifyNext2.
+ - implement non-logged files
+ - Store accesstime in separate (non-logged) file.
+ - quotas.
+ make sure files are released on unmount.
+
+ - cleaner.
+ Support 'peer' lists and peer_find. etc
+ - subordinate filesystems:
+ a/ ss[]->rootdir needs to be an array or list.
+ b/ lafs_iget_fs need to understand these.
+ - review snapshots.
+ How to create
+ how they can fail / how to abort
+ How to destroy
+ - review unmount
+ - need to clean up checkpoint thread cleanly - be sure it has fully exited.
+ - review roll-forward
+ - make sure files with nlink=0 are handled well.
+ - sanity check various values before trusting clusters.
+
+ - Configure index block hash_table at run time based on memory size??
+ - striped layout.
+ Review everything that needs to handle laying out at cluster
+ aligned for striping.
+
+ - consider how to handle IO errors in detail, and implement it.
+ - consider how to handle data corruption in indexing and directories and
+ other metadata and guard against problems (lot of -EIO I suspect).
+
+ - check all uninc_table accesses are locked if needed.
+
+ - If a datablock is memory mapped writeable, then when we write it out,
+ we need to fill up its credits again, or unmap it.
+ - Need to handle orphans asynchronously.
+
+ - support 'remount'
+ - implement 'write_super' ??
+
+ - pin_all_children has horrible gotos - remove them.
+
+ - perform consistency check on all metadata blocks read from disk
+ e.g. don't assume index blocks are type 1 or 2.
/*
* Handle data blocks for LaFS
* fs/lafs/block.c
- * Copyright (C) 2004-2006
+ * Copyright (C) 2004-2009
* NeilBrown <neilb@suse.de>
* Released under the GPL, version 2
*/
int i;
int z = -1;
spin_lock(&refl);
- for (i=0; i<16; i++) {
+ for (i = 0; i < 16; i++) {
if (b->holders[i].cnt) {
if (strcmp(b->holders[i].name, ref) == 0) {
b->holders[i].cnt++;
}
if (z < 0) {
spin_unlock(&refl);
- printk("add_ref all holders are in use at %s:%d\nblock: %s\n",
+ printk(KERN_ERR "LaFS: add_ref all holders are in use"
+ " at %s:%d\nblock: %s\n",
file, line, strblk(b));
BUG();
}
{
int i;
spin_lock(&refl);
- for (i=0; i<16; i++) {
+ for (i = 0; i < 16; i++) {
if (b->holders[i].cnt &&
strcmp(b->holders[i].name, ref) == 0) {
b->holders[i].cnt--;
}
spin_unlock(&refl);
- printk("holder %s not found at %s:%d\nblk: %s\n",
+ printk(KERN_ERR "LaFS: holder %s not found at %s:%d\nblk: %s\n",
ref, file, line, strblk(b));
BUG();
}
#endif
-
/* Based on grow_dev_page */
struct datablock *
lafs_get_block(struct inode *ino, unsigned long index, struct page *p,
/* New page, need to set up attribute blocks */
/* FIXME use kmem_cache */
dprintk("setting up %p for %lu\n", p, index);
- BUG_ON(index == 0x5a5a5a5a);
b = kzalloc(sizeof(struct datablock)<<bits, gfp);
if (!b) {
if (unlock)
return NULL;
}
- for (i=0; i< (1<<bits); i++) {
+ for (i = 0; i < (1<<bits); i++) {
b[i].page = p;
atomic_set(&b[i].b.refcnt, 0);
b[i].b.flags = 0;
INIT_LIST_HEAD(&b[i].cleaning);
b[i].b.chain = NULL;
- b[i].my_inode = NULL; /* FIXME does this belong here? */
+ b[i].my_inode = NULL;
if (fs_from_inode(ino)->orphans == ino)
atomic_set(&b[i].pincnt, 0);
kfree(b);
}
- b = (struct datablock*) p->private;
+ b = (struct datablock *)p->private;
b += index & ((1<<bits)-1);
getdref_locked(b, REF);
- if (unlock) unlock_page(p);
+ if (unlock)
+ unlock_page(p);
BUG_ON(b->b.inode != ino);
return b;
}
* erase any blocks beyond end-of-file
* wait for any pending IO to complete (so page can be freed)
*/
- for (i=0; i<(1<<bits); i++) {
+ for (i = 0; i < (1<<bits); i++) {
if (start >= size)
/* Remove block from mapping and file */
lafs_erase_dblock(&b[i]);
}
b_start += sb->s_blocksize;
start += sb->s_blocksize;
- if(offset == 0 && test_bit(B_IOLock, &b[i].b.flags))
- printk("OOps %d: %s\n",i, strblk(&b[i].b));
- BUG_ON(offset == 0 && test_bit(B_IOLock, &b[i].b.flags));
+ BUG_ON(offset == 0 &&
+ test_bit(B_IOLock, &b[i].b.flags));
}
}
if (offset == 0) {
* - free the data structures
*/
b = (struct datablock *)page->private;
- for (i=0; i<(1<<bits); i++) {
+ for (i = 0; i < (1<<bits); i++) {
if (test_bit(B_WriteError, &b[i].b.flags))
set_bit(AS_EIO, &mapping->flags);
if (test_bit(B_Dirty, &b[i].b.flags) ||
* wait for a checkpoint here.??
*/
|| test_bit(B_Uninc, &b[i].b.flags)
- /* || atomic_read(&b[i].b.refcnt) */
) {
- printk("Cannot release %s\n", strblk(&b[i].b));
+ printk(KERN_ERR "Cannot release %s\n", strblk(&b[i].b));
if (!list_empty(&b[i].b.lru))
- printk("lru NOT empty\n");
+ printk(KERN_ERR "lru NOT empty\n");
spin_unlock(&mapping->private_lock);
BUG();
- /* This not really a bug, but bugs can lead here, and this is
- * an unusual situation (currently) so we BUG here to be
- * safe. When we find a situation that does fail
- * a release_page with good reason, we should remove
- * this BUG().
+ /* This not really a bug, but bugs can lead
+ * here, and this is an unusual situation
+ * (currently) so we BUG here to be safe.
+ * When we find a situation that does fail a
+ * release_page with good reason, we should
+ * remove this BUG().
*/
return 0;
}
}
/* OK, we are good to go. */
- for (i=0; i<(1<<bits); i++) {
+ for (i = 0; i < (1<<bits); i++) {
parents[i] = b[i].b.parent;
b[i].b.parent = NULL;
- list_del_init(&b[i].b.siblings); // FIXME do I still want this here??
+ list_del_init(&b[i].b.siblings);
list_del_init(&b[i].b.lru);
list_del_init(&b[i].b.peers);
(void)getdref_locked(&b[i], MKREF(lafs_release));
spin_unlock(&mapping->private_lock);
- for (i=0; i<(1<<bits); i++) {
+ for (i = 0; i < (1<<bits); i++) {
putdref(&b[i], MKREF(lafs_release));
putiref(parents[i], MKREF(child));
}
else
err = lafs_setparent(dblk(b));
- if (err) printk("EA err=%d\n", err);
if (test_bit(B_Realloc, &b->flags))
alloc_type = CleanSpace;
if (alloc_type == NewSpace && b->physaddr)
}
err = err ?: lafs_prealloc(b, alloc_type);
- if (err) dprintk("EB err=%d %s\n", err, strblk(b));
if (err == 0 && b->physaddr == 0 &&
!test_bit(B_Index, &b->flags) &&
!test_and_set_bit(B_Prealloc, &b->flags)) {
err = lafs_summary_allocate(fs, b->inode, 1);
- if (err) printk("EC err=%d\n", err);
if (err)
clear_bit(B_Prealloc, &b->flags);
}
if (err)
return err;
- if ( ! test_and_set_bit(B_SegRef, &b->flags) && b->physaddr) {
+ if (!test_and_set_bit(B_SegRef, &b->flags) && b->physaddr) {
lafs_seg_ref(fs, b->physaddr, 0);
/* FIXME can that fail?? */
/* It shouldn't fail, but might block */
if (!test_and_clear_bit(B_Realloc, &b->b.flags))
if (!test_and_clear_bit(B_Credit, &b->b.flags))
if (!test_and_clear_bit(B_NCredit, &b->b.flags))
- BUG(); // Credit should have been set.
+ BUG(); // Credit should have been set.
if (!test_and_set_bit(B_UnincCredit, &b->b.flags))
if (!test_and_clear_bit(B_ICredit, &b->b.flags))
if (!test_and_clear_bit(B_NICredit, &b->b.flags))
- BUG(); // ICredit should be set before we dirty a block.
+ BUG(); // ICredit should be set before we dirty
+ // a block.
if (test_and_clear_bit(B_Realloc, &b->b.flags))
lafs_space_return(fs_from_inode(b->b.inode), 1);
BUG_ON(list_empty(&b->b.lru));
list_del_init(&b->b.lru);
if (!test_bit(B_Root, &b->b.flags))
- atomic_dec(&b->b.parent->pincnt[!!test_bit(B_Phase1, &b->b.flags)]);
+ atomic_dec(&b->b.parent->pincnt
+ [!!test_bit(B_Phase1, &b->b.flags)]);
clear_bit(B_Pinned, &b->b.flags);
spin_unlock(&fs->lock);
if (!test_bit(B_Root, &b->b.flags))
onlist = 1;
}
if (!test_bit(B_Root, &b->b.flags))
- atomic_dec(&b->b.parent->pincnt[!!test_bit(B_Phase1, &b->b.flags)]);
+ atomic_dec(&b->b.parent->pincnt
+ [!!test_bit(B_Phase1, &b->b.flags)]);
clear_bit(B_Pinned, &b->b.flags);
spin_unlock(&fs->lock);
if (!test_bit(B_Root, &b->b.flags))
/* FIXME is it completely safe to just clear Realloc here??? */
if (!test_and_clear_bit(B_Realloc, &b->b.flags))
if (!test_and_clear_bit(B_Credit, &b->b.flags)) {
- printk("Why have I no credits? %s\n", strblk(&b->b));
+ printk(KERN_ERR "Why have I no credits? %s\n",
+ strblk(&b->b));
BUG(); // Credit should have been set.
}
}
BUG(); // ICredit should be set before we dirty a block.
if (test_and_clear_bit(B_Realloc, &b->b.flags))
lafs_space_return(fs_from_inode(b->b.inode), 1);
-
- // FIXME Do I need to do something with PinPending??
-
}
/*
* routines to create a checkpoint for LaFS
* fs/lafs/checkpoint.c
- * Copyright (C) 2006
+ * Copyright (C) 2006-2009
* NeilBrown <neilb@suse.de>
* Released under the GPL, version 2
*/
#include <linux/kthread.h>
#ifdef DUMP
-extern struct fs *dfs;
-
static char *strflags(struct block *b)
{
static char ans[200];
ans[0] = 0;
- if (test_bit(B_Index, &b->flags)) sprintf(ans, "Index(%d),", iblk(b)->depth);
+ if (test_bit(B_Index, &b->flags))
+ sprintf(ans, "Index(%d),", iblk(b)->depth);
if (test_bit(B_Pinned, &b->flags)) {
strcat(ans, "Pinned,");
if (test_bit(B_Phase1, &b->flags))
- strcat(ans, "Phase1,"); else strcat(ans, "Phase0,");
- if (test_bit(B_IOLock, &b->flags) && test_bit(B_Valid, &b->flags)) {
+ strcat(ans, "Phase1,");
+ else
+ strcat(ans, "Phase0,");
+ if (test_bit(B_IOLock, &b->flags) &&
+ test_bit(B_Valid, &b->flags)) {
if (test_bit(B_WritePhase1, &b->flags))
- strcat(ans, "WPhase1,"); else strcat(ans, "WPhase0,");
+ strcat(ans, "WPhase1,");
+ else
+ strcat(ans, "WPhase0,");
}
}
- if (test_bit(B_PinPending, &b->flags)) strcat(ans, "PinPending,");
- if (test_bit(B_InoIdx, &b->flags)) strcat(ans, "InoIdx,");
- if (test_bit(B_Valid, &b->flags)) strcat(ans, "Valid,");
- if (test_bit(B_Dirty, &b->flags)) strcat(ans, "Dirty,");
- if (test_bit(B_Linked, &b->flags)) strcat(ans, "Linked,");
- if (test_bit(B_Realloc, &b->flags)) strcat(ans, "Realloc,");
- if (test_bit(B_Async, &b->flags)) strcat(ans, "Async,");
- if (test_bit(B_Root, &b->flags)) strcat(ans, "Root,");
- if (test_bit(B_SegRef, &b->flags)) strcat(ans, "SegRef,");
- if (test_bit(B_Credit, &b->flags)) strcat(ans, "C,");
- if (test_bit(B_ICredit, &b->flags)) strcat(ans, "CI,");
- if (test_bit(B_NCredit, &b->flags)) strcat(ans, "CN,");
- if (test_bit(B_NICredit, &b->flags)) strcat(ans, "CNI,");
- if (test_bit(B_UnincCredit, &b->flags)) strcat(ans, "UninCredit,");
- if (test_bit(B_IOLock, &b->flags)) strcat(ans, "IOLock,");
- if (test_bit(B_IOLockLock, &b->flags)) strcat(ans, "IOLockLock,");
- if (test_bit(B_IOPending, &b->flags)) strcat(ans, "IOPending,");
- if (test_bit(B_WriteError, &b->flags)) strcat(ans, "WriteError,");
- if (test_bit(B_Claimed, &b->flags)) strcat(ans, "Claimed,");
- if (test_bit(B_OnFree, &b->flags)) strcat(ans, "OnFree,");
- if (test_bit(B_PhysValid, &b->flags)) strcat(ans, "PhysValid,");
-
- if (test_bit(B_Uninc, &b->flags)) strcat(ans, "Uninc,");
- if (test_bit(B_Orphan, &b->flags)) strcat(ans, "Orphan,");
- if (test_bit(B_Prealloc, &b->flags)) strcat(ans, "Prealloc,");
+ if (test_bit(B_PinPending, &b->flags))
+ strcat(ans, "PinPending,");
+ if (test_bit(B_InoIdx, &b->flags))
+ strcat(ans, "InoIdx,");
+ if (test_bit(B_Valid, &b->flags))
+ strcat(ans, "Valid,");
+ if (test_bit(B_Dirty, &b->flags))
+ strcat(ans, "Dirty,");
+ if (test_bit(B_Linked, &b->flags))
+ strcat(ans, "Linked,");
+ if (test_bit(B_Realloc, &b->flags))
+ strcat(ans, "Realloc,");
+ if (test_bit(B_Async, &b->flags))
+ strcat(ans, "Async,");
+ if (test_bit(B_Root, &b->flags))
+ strcat(ans, "Root,");
+ if (test_bit(B_SegRef, &b->flags))
+ strcat(ans, "SegRef,");
+ if (test_bit(B_Credit, &b->flags))
+ strcat(ans, "C,");
+ if (test_bit(B_ICredit, &b->flags))
+ strcat(ans, "CI,");
+ if (test_bit(B_NCredit, &b->flags))
+ strcat(ans, "CN,");
+ if (test_bit(B_NICredit, &b->flags))
+ strcat(ans, "CNI,");
+ if (test_bit(B_UnincCredit, &b->flags))
+ strcat(ans, "UninCredit,");
+ if (test_bit(B_IOLock, &b->flags))
+ strcat(ans, "IOLock,");
+ if (test_bit(B_IOLockLock, &b->flags))
+ strcat(ans, "IOLockLock,");
+ if (test_bit(B_IOPending, &b->flags))
+ strcat(ans, "IOPending,");
+ if (test_bit(B_WriteError, &b->flags))
+ strcat(ans, "WriteError,");
+ if (test_bit(B_Claimed, &b->flags))
+ strcat(ans, "Claimed,");
+ if (test_bit(B_OnFree, &b->flags))
+ strcat(ans, "OnFree,");
+ if (test_bit(B_PhysValid, &b->flags))
+ strcat(ans, "PhysValid,");
+
+ if (test_bit(B_Uninc, &b->flags))
+ strcat(ans, "Uninc,");
+ if (test_bit(B_Orphan, &b->flags))
+ strcat(ans, "Orphan,");
+ if (test_bit(B_Prealloc, &b->flags))
+ strcat(ans, "Prealloc,");
if (ans[0])
ans[strlen(ans)-1] = 0;
else
static char ans[400];
unsigned long ino = 0;
- if (!b) return "(NULL block)";
+ if (!b)
+ return "(NULL block)";
if (b->inode)
ino = b->inode->i_ino;
if (test_bit(B_PhysValid, &b->flags))
- sprintf(ans, "[%p]%lu/%lu(%llu)r%d%c:%s", b, ino, b->fileaddr,
- b->physaddr, atomic_read(&b->refcnt),
- list_empty(&b->lru) ? 'E':'F',
- strflags(b));
+ sprintf(ans, "[%p]%lu/%lu(%llu)r%d%c:%s",
+ b, ino, b->fileaddr,
+ b->physaddr, atomic_read(&b->refcnt),
+ list_empty(&b->lru) ? 'E' : 'F',
+ strflags(b));
else
- sprintf(ans, "[%p]%lu/%lu(NoPhysAddr)r%d%c:%s", b, ino, b->fileaddr,
- atomic_read(&b->refcnt),
- list_empty(&b->lru) ? 'E':'F',
- strflags(b));
- if (test_bit(B_Pinned, &b->flags)&&
+ sprintf(ans, "[%p]%lu/%lu(NoPhysAddr)r%d%c:%s",
+ b, ino, b->fileaddr,
+ atomic_read(&b->refcnt),
+ list_empty(&b->lru) ? 'E' : 'F',
+ strflags(b));
+ if (test_bit(B_Pinned, &b->flags) &&
test_bit(B_Index, &b->flags))
sprintf(ans+strlen(ans), "{%d,%d}",
atomic_read(&iblk(b)->pincnt[0]),
if (test_bit(B_Index, &b->flags)) {
sprintf(ans+strlen(ans), "[%d%s%s]",
iblk(b)->uninc_table.pending_cnt,
- iblk(b)->uninc ? "*":"",
- iblk(b)->uninc_next ? "+":"");
+ iblk(b)->uninc ? "*" : "",
+ iblk(b)->uninc_next ? "+" : "");
}
if (!b->parent)
sprintf(ans+strlen(ans), " NP");
#if DEBUG_REF
-{
- int i;
- for (i=0; i<16; i++)
- if (b->holders[i].cnt)
- sprintf(ans+strlen(ans), " %s(%d)", b->holders[i].name,
- b->holders[i].cnt);
-}
+ {
+ int i;
+ for (i = 0; i < 16; i++)
+ if (b->holders[i].cnt)
+ sprintf(ans+strlen(ans),
+ " %s(%d)", b->holders[i].name,
+ b->holders[i].cnt);
+ }
#endif
return ans;
}
struct block *b2;
int credits = 0;
- if (depth > 20) { printk("... aborting at %d\n", depth); BUG();return 0;}
+ if (depth > 20) {
+ printk("... aborting at %d\n", depth);
+ BUG();
+ return 0;
+ }
printk("%*s", depth, "");
printk("%s", strblk(b));
- for(i=0; i<2; i++) {
+ for (i = 0; i < 2; i++) {
j = 0;
list_for_each_entry(b2, &dfs->phase_leafs[i], lru) {
if (b2 == b) {
- printk(" Leaf%d(%d) ", i,j);
+ printk(" Leaf%d(%d) ", i, j);
break;
}
j++;
j++;
}
{
-extern struct freelists {
- struct list_head lru;
- unsigned long freecnt;
-} freelist;
list_for_each_entry(b2, &freelist.lru, lru)
if (b2 == b) {
printk(" on free ");
if (test_bit(B_Index, &b->flags) && ib->uninc_table.pending_cnt)
lafs_print_uninc(&ib->uninc_table);
- if (test_bit(B_Credit, &b->flags)) credits++;
- if (test_bit(B_ICredit, &b->flags)) credits++;
- if (test_bit(B_NCredit, &b->flags)) credits++;
- if (test_bit(B_NICredit, &b->flags)) credits++;
- if (test_bit(B_UnincCredit, &b->flags)) credits++;
- if (test_bit(B_Dirty, &b->flags)) credits++;
- if (test_bit(B_Realloc, &b->flags)) credits++;
+ if (test_bit(B_Credit, &b->flags))
+ credits++;
+ if (test_bit(B_ICredit, &b->flags))
+ credits++;
+ if (test_bit(B_NCredit, &b->flags))
+ credits++;
+ if (test_bit(B_NICredit, &b->flags))
+ credits++;
+ if (test_bit(B_UnincCredit, &b->flags))
+ credits++;
+ if (test_bit(B_Dirty, &b->flags))
+ credits++;
+ if (test_bit(B_Realloc, &b->flags))
+ credits++;
if (test_bit(B_Index, &b->flags)) {
list_for_each_entry(b, &ib->children, siblings) {
LAFSI(dblk(b)->my_inode)->iblock->b.parent
) {
BUG_ON(LAFSI(dblk(b)->my_inode)->iblock->b.parent != b->parent);
- credits += print_tree(&LAFSI(dblk(b)->my_inode)->iblock->b, depth+1);
+ credits += print_tree(&LAFSI(dblk(b)->my_inode)->iblock->b,
+ depth+1);
}
return credits;
}
fs->lock);
oldphase = fs->phase;
- fs->phase = ! oldphase;
+ fs->phase = !oldphase;
fs->checkpointing = CH_CheckpointStart | CH_Checkpoint;
spin_unlock(&fs->lock);
static void do_checkpoint(void *data)
{
struct fs *fs = data;
- int oldphase = !fs->phase; /* FIXME could there be a race getting this? */
+ int oldphase = !fs->phase; /*FIXME could there be a race getting this?*/
struct block *b;
- int cnt=0;
+ int cnt = 0;
#ifdef DUMP
dfs = fs;
#endif
dprintk("Start Checkpoint\n");
- if (lafs_trace) lafs_dump_tree();
- while ( (b = lafs_get_flushable(fs, oldphase)) != NULL) {
+ if (lafs_trace)
+ lafs_dump_tree();
+ while ((b = lafs_get_flushable(fs, oldphase)) != NULL) {
int unlock = 1;
-dprintk("Block %d/%d idx=%d\n", (int)b->inode->i_ino, (int)b->fileaddr,
- test_bit(B_Index, &b->flags));
+ dprintk("Checkpoint Block %s\n", strblk(b));
-dprintk("(");
if (!test_bit(B_Pinned, &b->flags))
/* Haven't refiled since we cleared that */ ;
else if (!!test_bit(B_Phase1, &b->flags) == oldphase) {
if (test_bit(B_Index, &b->flags) &&
(iblk(b)->uninc_table.pending_cnt ||
iblk(b)->uninc)) {
-dprintk("Incorp\n");
lafs_incorporate(fs, iblk(b));
} else if ((test_bit(B_Dirty, &b->flags) ||
test_bit(B_Realloc, &b->flags))) {
-dprintk("alloc\n");
lafs_cluster_allocate(b, 0);
unlock = 0;
} else if (test_bit(B_Index, &b->flags)) {
-dprintk("flip\n");
- if (atomic_read(&iblk(b)->pincnt[oldphase]) == 0)
+ if (atomic_read(&iblk(b)->pincnt[oldphase])
+ == 0)
lafs_phase_flip(fs, b);
} else {
/* Datablock that should lose it's Pin
} else {
printk("IMPOSSIBLE(%d) %s\n", oldphase, strblk(b));
if (test_bit(B_InoIdx, &b->flags))
- printk(" data=%s\n", strblk(&LAFSI(b->inode)->dblock->b));
+ printk(" data=%s\n",
+ strblk(&LAFSI(b->inode)->dblock->b));
WARN_ON(1);
/* FIXME is this possible? */
}
if (unlock)
lafs_iounlock_block(b, 0);
-dprintk(")");
putref(b, MKREF(leaf));
-// lafs_dump_tree();
if (list_empty(&fs->phase_leafs[oldphase])) {
- void lafs_cluster_wait_all(struct fs *fs);
lafs_cluster_flush(fs, 0);
lafs_cluster_wait_all(fs);
- lafs_clusters_done(fs);
+ lafs_clusters_done(fs);
}
cnt++;
-#if 0
- if (cnt > 500) {
- printk("Loop 500\n");
- lafs_dump_tree();
- break;
- }
-#endif
-// lafs_trace = 0;
}
lafs_clusters_done(fs);
if (test_bit(B_Pinned, &LAFSI(fs->ss[0].root)->iblock->b.flags) &&
- !!test_bit(B_Phase1, &LAFSI(fs->ss[0].root)->iblock->b.flags) != fs->phase) {
+ !!test_bit(B_Phase1, &LAFSI(fs->ss[0].root)->iblock->b.flags)
+ != fs->phase) {
struct indexblock *ib = LAFSI(fs->ss[0].root)->iblock;
struct block *cb;
printk("ROOT has not changed phase!! \n");
lafs_dump_tree();
cb = &ib->b;
- printk("Root %s Block still old: %s\n",
- test_bit(B_Index, &cb->flags)?"Index":"Data",
+ printk("Root %s Block still old: %s\n",
+ test_bit(B_Index, &cb->flags) ? "Index" : "Data",
strblk(cb));
list_for_each_entry(cb, &ib->children, siblings)
int pp = !!test_bit(B_Phase1, &cb->flags);
if (pp != fs->phase)
printk("%s Block still old: %s\n",
- test_bit(B_Index, &cb->flags)?"Index":"Data",
+ test_bit(B_Index, &cb->flags)
+ ? "Index" : "Data",
strblk(cb));
}
BUG();
return HZ/10; /* FIXME that is gross ... is it needed? */
/* OK, time for some work. */
dprintk("############################ start checkpoint\n");
-// printk("1");
y = prepare_checkpoint(fs);
-// printk("2");
do_checkpoint(fs);
-// printk("3");
finish_checkpoint(fs, y);
-// printk("\n");
dprintk("############################ finish checkpoint\n");
return MAX_SCHEDULE_TIMEOUT;
void lafs_checkpoint_lock(struct fs *fs)
{
spin_lock(&fs->lock);
- fs->phase_locked ++;
+ fs->phase_locked++;
spin_unlock(&fs->lock);
}
{
int l;
spin_lock(&fs->lock);
- l = -- fs->phase_locked;
+ l = --fs->phase_locked;
spin_unlock(&fs->lock);
if (l == 0)
wake_up(&fs->phase_wait);
+/*
+ * fs/lafs/clean.c
+ * Copyright (C) 2005-2009
+ * Neil Brown <neilb@suse.de>
+ * Released under the GPL, version 2
+ */
+
#include "lafs.h"
#include <linux/kthread.h>
long to;
set_bit(CleanerNeeded, &fs->fsstate);
- while (! kthread_should_stop() ) {
+ while (!kthread_should_stop()) {
/* We need to wait INTERRUPTIBLE so that
* we don't add to the load-average.
timeout);
clear_bit(CleanerNeeded, &fs->fsstate);
-//printk("X1");
timeout = MAX_SCHEDULE_TIMEOUT;
to = lafs_do_checkpoint(fs);
if (to < timeout)
timeout = to;
-//printk("X2");
to = lafs_run_orphans(fs);
if (to < timeout)
timeout = to;
-//printk("X3");
to = lafs_scan_seg(fs);
if (to < timeout)
timeout = to;
-//printk("X4");
to = do_clean(fs);
if (to < timeout)
timeout = to;
-//printk("X5\n");
-
lafs_clusters_done(fs);
}
return 0;
return getref(b, REF);
spin_lock(&as->private_lock);
- for (p = b; p && !in_seg(fs, dev, seg, p->physaddr); p = &(p->parent)->b) {
+ for (p = b;
+ p && !in_seg(fs, dev, seg, p->physaddr);
+ p = &(p->parent)->b) {
if (test_bit(B_InoIdx, &p->flags)) {
struct datablock *db = LAFSI(p->inode)->dblock;
{
struct block *b;
dprintk("Start cleaner_flush\n");
- while ( (b = lafs_get_flushable(fs, -1)) != NULL) {
+ while ((b = lafs_get_flushable(fs, -1)) != NULL) {
int unlock = 1;
dprintk("cleaning %s\n", strblk(b));
-//printk("[");
if (test_bit(B_Dirty, &b->flags)) {
/* Ignore this, checkpoint will take it */
if (test_and_clear_bit(B_Realloc, &b->flags))
if (test_and_set_bit(B_Credit, &b->flags))
lafs_space_return(fs, 1);
-//printk("A");
} else if (test_bit(B_Index, &b->flags) &&
(iblk(b)->uninc ||
iblk(b)->uninc_table.pending_cnt)) {
lafs_incorporate(fs, iblk(b));
-//printk("B");
} else {
lafs_cluster_allocate(b, 1);
unlock = 0;
-//printk("C%s",strblk(b));
}
if (unlock)
lafs_iounlock_block(b, 0);
putref(b, MKREF(leaf));
-//printk("]");
+
if (list_empty(&fs->clean_leafs)) {
- void lafs_cluster_wait_all(struct fs *fs);
lafs_cluster_flush(fs, 1);
- // FIXME wait??
+ /* FIXME wait?? */
lafs_cluster_wait_all(fs);
}
-//printk("\n");
}
}
struct datablock *b, *tmp;
dprintk("try_clean: state = %d\n", tc->ac.state);
-//printk("A");
if (tc->ch == NULL) {
/* Need to read in the cluster header */
int err;
fs->prime_sb->s_blocksize),
&tc->ac);
-//printk("b");
if (err == -EAGAIN)
return 0;
if (err == -EIO) {
bad_header:
- //printk("CLEANER got IO error !!\n");
+ //printk("CLEANER got IO error !!\n");
// FIXME adjust youth to so as not to touch this again
return 0;
}
-//printk("c");
BUG_ON(err);
// FIXME check checksum of the cluster_header
tc->ch = page_address(tc->chead);
tc->gh = tc->ch->groups;
tc->desc = tc->gh->u.desc;
if (lafs_calc_cluster_csum(tc->ch) != tc->ch->checksum) {
- //printk("Cluster header checksum is wrong!!\n");
+ //printk("Cluster header checksum is wrong!!\n");
goto bad_header;
}
dprintk("try_clean: got header %d\n", (int)tc->haddr);
-//printk("d");
}
while (tc->ch && again < 16) {
/* Load the index block for each described data or index block.
* in a current cleaning-segment and flagged for reallocation
* if appropriate.
*/
-//printk("e");
- if ((((char*)tc->gh) - (char*)tc->ch)
+ if ((((char *)tc->gh) - (char *)tc->ch)
>= le16_to_cpu(tc->ch->Hlength)) {
/* Finished with that cluster, try another. */
tc->haddr = le64_to_cpu(tc->ch->next_addr);
tc->ac.state = 0;
tc->ss = 0;
}
-//printk("f");
tc->ch = NULL;
lafs_wake_cleaner(fs);
break;
}
-//printk("g");
- if (((((char*)tc->desc) - (char*)tc->gh)+3)/4
+ if (((((char *)tc->desc) - (char *)tc->gh)+3)/4
>= le16_to_cpu(tc->gh->group_size_words)) {
/* Finished with that group, try another */
/* FIXME what if group has padding at end?
* this might be fixed, but need to be certain
* of all possibilities. */
- tc->gh = (struct group_head*)(((char*)tc->gh) +
+ tc->gh = (struct group_head *)(((char *)tc->gh) +
le16_to_cpu(tc->gh->group_size_words)*4);
tc->desc = tc->gh->u.desc;
-//printk("h");
continue;
}
-//printk("i");
if (le16_to_cpu(tc->desc->block_bytes) > DescMiniOffset &&
tc->desc->block_bytes != DescIndex) {
/* This is a miniblock, skip it. */
int len = le16_to_cpu(tc->desc->block_bytes)
- DescMiniOffset;
tc->desc++;
- tc->desc = (struct descriptor*)
+ tc->desc = (struct descriptor *)
(((char *)tc->desc)
+ roundup(len, 4));
-//printk("j");
continue;
}
-//printk("k");
/* Ok, desc seems to be a valid descriptor in this group */
/* Try to load the index info for block_num in inode in filesys.
*/
bcnt = le16_to_cpu(tc->desc->block_cnt);
if (bcnt <= 0) {
tc->desc++;
-//printk("l");
continue;
}
-//printk("m");
inum = le32_to_cpu(tc->gh->inum);
fsnum = le32_to_cpu(tc->gh->fsnum);
(int)fsnum, (int)inum, (int)bnum, (int)bcnt,
(int)le16_to_cpu(tc->desc->block_bytes));
-//printk("n");
if (fsnum == 0 && inum == 0 && bnum == 0)
goto skip;
-//printk("n1(%p)",ino);
-//if (ino)printk("n1a(%p)", LAFSI(ino)->filesys);
+
if (ino == NULL ||
ino->i_ino != inum ||
LAFSI(ino)->filesys->i_ino != fsnum) {
-//printk("n2");
if (ino)
iput(ino);
-//printk("n3");
ino = lafs_iget_fs(fs, fsnum, inum, 1);
}
-//printk("o");
if (IS_ERR(ino)) {
/* FIXME check that this is -EAGAIN
* FIXME should have lafs_iget_fs return the
if (err != -EAGAIN) {
/* inode not found */
tc->desc++;
-//printk("p");
continue;
}
-//printk("q");
break;
} else {
dprintk("got the inode\n");
b = lafs_get_block(ino, bnum, NULL, GFP_NOFS,
MKREF(cleaning));
-//printk("r");
if (b == NULL)
break;
if (list_empty(&b->cleaning))
list_add(&b->cleaning, &tc->cleaning);
else
putdref(b, MKREF(cleaning));
-//printk("s");
}
skip:
/* We modify the descriptor in-place to track where
* we are up to. This is a private copy. The real
* descriptor doesn't change.
*/
-//printk("t");
tc->desc->block_num = cpu_to_le32(bnum+1);
tc->desc->block_cnt = cpu_to_le16(bcnt-1);
}
if (ino)
iput(ino);
-//printk("u");
dprintk("start processing list\n");
list_for_each_entry_safe(b, tmp, &tc->cleaning, cleaning) {
struct block *cb;
int err = lafs_find_block_async(b);
dprintk("find_async %d/%d gives %d\n", (int)b->b.inode->i_ino,
(int)b->b.fileaddr, err);
-//printk("v");
if (err == -EAGAIN)
continue;
-//printk("w");
if (err) {
/* Eeek, what do I do?? */
list_del_init(&b->cleaning);
putdref(b, MKREF(cleaning));
-//printk("x");
continue;
}
-//printk("y");
cb = first_in_seg(&b->b, fs, tc->dev, tc->seg, MKREF(clean2));
if (cb == NULL) {
putdref(b, MKREF(cleaning));
continue;
}
-//printk("z");
err = lafs_load_block(cb, 0);
if (err) {
list_del_init(&b->cleaning);
putref(cb, MKREF(clean2));
continue;
}
-//printk("1");
err = lafs_wait_block_async(cb);
if (err == -EAGAIN)
continue;
list_del_init(&b->cleaning);
-//printk("2");
if (err) {
putref(cb, MKREF(clean2));
putdref(b, MKREF(cleaning));
continue;
}
-//printk("3");
err = mark_cleaning(cb);
dprintk("Want to clean %d/%d (%d)\n",
(int)cb->inode->i_ino,
putdref(b, MKREF(cleaning));
putref(cb, MKREF(clean2));
-//printk("4");
if (err)
return -1;
}
-//printk("5\n");
return tc->ch == NULL && tc->ss == 0 &&
list_empty(&tc->cleaning);
}
* - If all done, process realloc_leafs and allocate to clean cluster.
*
*/
- if ( !fs->cleaner.active ) {
+ if (!fs->cleaner.active) {
/* choose to clean when the fraction of all space that is clean
* is below the faction of free space that is not clean.
* i.e. if T is total space, C is clean space, F is free space,
*
* We need to avoid cleaning too much in one checkpoint as
* the free counts will start to get misleading.
- * Maybe every time we choose to clean a segment, we add the size
- * of the segment to some counter and add that to C in the
+ * Maybe every time we choose to clean a segment, we add the
+ * size of the segment to some counter and add that to C in the
* above calculations.
*
* For now, clean up to 4 segments at a time.
int i;
u64 T = 0;
- for (i=0; i<fs->devices; i++)
+ for (i = 0; i < fs->devices; i++)
T += fs->devs[i].size;
- T -= 4 * fs->max_segment; /* adjust to unusable space FIXME adjust F too? */
+ /* adjust to unusable space FIXME adjust F too? */
+ T -= 4 * fs->max_segment;
for (i = 0; i < 4; i++) {
struct toclean *tc = &fs->cleaner.seg[i];
/* OK, we are good to keep cleaning */
tc->ss = lafs_get_cleanable(fs, &tc->dev, &tc->seg);
- if (! tc->ss) {
+ if (!tc->ss) {
dprintk("CLEANER: Nothing found to clean at %d :-(\n",
i);
break;
int doflush = 1;
for (i = 0; i < 4 ; i++) {
struct toclean *tc = &fs->cleaner.seg[i];
-//printk("i%d",i);
if (tc->ss) {
/* Might be something to do here */
int done = try_clean(fs, tc);
cnt++;
}
}
-//printk("d");
if (doflush)
cleaner_flush(fs);
-//printk("f\n");
if (cnt == 0)
fs->cleaner.active = 0;
}
/*
* write-cluster management routines for LaFS
* fs/lafs/cluster.c
- * Copyright (C) 2006
+ * Copyright (C) 2006-2009
* NeilBrown <neilb@suse.de>
* Released under the GPL, version 2
*/
static void skip_discard(struct skippoint *sp)
{
- while(sp) {
+ while (sp) {
struct skippoint *next = sp->next[0];
kfree(sp);
sp = next;
if (unlikely(list_empty(list))) {
int height;
/* handle this trivial case separately */
- if (avail < 2) return -1;
+ if (avail < 2)
+ return -1;
list_add(&target->lru, list);
head->b = NULL;
- for (height=0; height< SKIP_MAX_HEIGHT; height++)
+ for (height = 0; height < SKIP_MAX_HEIGHT; height++)
head->next[height] = NULL;
return 2;
}
if (!newpoint)
return rv; /* FIXME how do we trigger early flush? */
newpoint->b = target;
- for (level=0; level < height-1; level++) {
+ for (level = 0; level < height-1; level++) {
newpoint->next[level] = pos.next[level];
pos.next[level] = newpoint;
}
struct fs_dev *dv = &fs->devs[seg->dev];
int rows = dv->rows_per_table - seg->st_row;
- rows += dv->rows_per_table * (dv->tables_per_seg - seg->st_table -1);
+ rows += dv->rows_per_table * (dv->tables_per_seg - seg->st_table - 1);
return rows * dv->width;
}
seg->table = seg->nxt_table = seg->st_table;
seg->row = seg->nxt_row = seg->st_row;
seg->col = 0;
-
-#if 0
- printk("set_setpos: %d pos=%d/%d %d/%d %d/%d %d/%d/%d\n",
- (int)addr,
- seg->dev, seg->num,
- seg->st_table, seg->st_row,
- seg->nxt_table, seg->nxt_row,
- seg->table, seg->row, seg->col);
-#endif
}
static u64 seg_next(struct fs *fs, struct segpos *seg)
seg->col++;
if (seg->col >= dv->width) {
seg->col = 0;
- seg->row ++;
+ seg->row++;
if (seg->row >= dv->rows_per_table) {
seg->row = 0;
- seg->table ++;
+ seg->table++;
}
}
return addr;
lafs_free_get(fs, &dev, &seg, 0);
wc->seg.dev = dev;
wc->seg.num = seg;
- seg_setpos(fs, &wc->seg, p=segtovirt(fs, dev, seg));
+ seg_setpos(fs, &wc->seg, (p = segtovirt(fs, dev, seg)));
lafs_seg_ref(fs, p, 0); /* FIXME - never released */
wc->remaining = seg_remainder(fs, &wc->seg);
if (test_and_set_bit(B_Credit, &lai->iblock->b.flags))
lafs_space_return(fs, 1);
lafs_dirty_iblock(lai->iblock);
- }
- else if (test_and_clear_bit(B_Realloc, &b->flags)) {
+ } else if (test_and_clear_bit(B_Realloc, &b->flags)) {
int credits = 1;
BUG_ON(!test_bit(B_Valid, &lai->iblock->b.flags));
if (!test_and_set_bit(B_Realloc, &lai->iblock->b.flags)) {
- if (!test_and_clear_bit(B_Credit, &lai->iblock->b.flags))
+ if (!test_and_clear_bit(B_Credit,
+ &lai->iblock->b.flags))
credits--;
}
if (!test_and_set_bit(B_UnincCredit, &lai->iblock->b.flags))
- if (!test_and_clear_bit(B_ICredit, &lai->iblock->b.flags))
+ if (!test_and_clear_bit(B_ICredit,
+ &lai->iblock->b.flags))
credits--;
lafs_space_return(fs, credits);
} else {
/* It is an awkward time to call lafs_inode_fillblock,
* so do this one little change manually
*/
- ((struct la_inode*)ibuf)->depth = 0;
+ ((struct la_inode *)ibuf)->depth = 0;
memcpy(ibuf + lai->metadata_size,
dbuf, size);
memset(ibuf + lai->metadata_size + size,
BUG_ON(lai->flags & File_nonlogged &&
!test_bit(B_Index, &b->flags));
- if (b->inode->i_ino == 0 && b->fileaddr == 2) {
- printk("cluster allocate %s\n", strblk(b));
- }
size = i_size_read(b->inode);
- if (!test_bit(B_Index, &b->flags) && /* it is a datablock */
+ if (!test_bit(B_Index, &b->flags) && /* it is a datablock */
b->fileaddr == 0 &&
- b->parent == lai->iblock && /* No indexing */
- lai->type >= TypeBase && /* 'size' is meaningful */
+ b->parent == lai->iblock && /* No indexing */
+ lai->type >= TypeBase && /* 'size' is meaningful */
size + lai->metadata_size <= sb->s_blocksize) {
int success = flush_data_to_inode(b);
if (success) {
if (!test_bit(B_Index, &b->flags) &&
lai->type == TypeInodeFile &&
dblk(b)->my_inode != NULL &&
- !test_bit(B_Pinned, &b->flags) /* Once the data block is pinned, we write it */
+ !test_bit(B_Pinned, &b->flags) /* Once the data block is pinned,
+ we write it */
) {
spin_lock(&dblk(b)->my_inode->i_data.private_lock);
- if ((inob = LAFSI(dblk(b)->my_inode)->iblock) != NULL &&
+ inob = LAFSI(dblk(b)->my_inode)->iblock;
+ if (inob != NULL &&
test_bit(B_Pinned, &inob->b.flags) &&
test_bit(B_Phase1, &inob->b.flags) ==
test_bit(B_Phase1, &b->flags)
) {
/* Don't allocate yet, until index block is ready */
-
+
spin_unlock(&dblk(b)->my_inode->i_data.private_lock);
lafs_iounlock_block(b, 0);
return 0;
lai->dblock->b.parent =
getiref(b->parent, MKREF(child));
BUG_ON(b->parent != lai->dblock->b.parent);
- set_phase(&lai->dblock->b, test_bit(B_Phase1, &b->flags));
+ set_phase(&lai->dblock->b, test_bit(B_Phase1,
+ &b->flags));
lafs_refile(&lai->dblock->b, 0);
}
} else {
int credits = 0;
if (test_and_clear_bit(B_Dirty, &b->flags))
- credits ++;
+ credits++;
if (test_and_clear_bit(B_Realloc, &b->flags))
- credits ++;
+ credits++;
if (credits > 1)
lafs_space_return(fs, credits-1);
BUG_ON(credits < 1);
BUG_ON(!test_bit(B_Valid, &b2->flags));
if (test_and_set_bit(B_Credit, &b2->flags))
- lafs_space_return(fs,1);
+ lafs_space_return(fs, 1);
if (test_and_set_bit(B_ICredit, &b2->flags))
- lafs_space_return(fs,1);
-
+ lafs_space_return(fs, 1);
+
	if (cnum == 0)
		lafs_dirty_dblock(dblk(b2));
	else {
		if (!test_and_set_bit(B_UnincCredit, &b2->flags))
			if (!test_and_clear_bit(B_ICredit, &b2->flags))
-				BUG(); // ICredit should be set
+				BUG(); // ICredit should be set
+				// before we dirty a block.
	}
	/* make sure 'dirty' status is registered */
	lafs_refile(b2, 0);
else
cluster_flush(fs, cnum);
}
- // printk("remaining now %d\n", wc->remaining);
/* insert into list ensuring there is enough space
* in cluster head
if (wc->cluster_space < 0) {
/* need a new page */
wc->chead_blocks++;
- wc->remaining --;
+ wc->remaining--;
wc->cluster_space += sb->s_blocksize;
}
- wc->remaining --;
+ wc->remaining--;
if (wc->remaining == 0)
cluster_flush(fs, cnum);
mutex_unlock(&wc->lock);
-// printk("End of allocate, remaining = %d (%d)\n", wc->remaining,
-// seg_remainder(fs, &wc->seg));
return wc->cluster_seq; /* FIXME is this really needed - or right */
}
static inline void cluster_addhead(struct wc *wc, struct inode *ino,
struct group_head **headstart)
{
- struct group_head *gh = (struct group_head*)((char*)wc->chead +
+ struct group_head *gh = (struct group_head *)((char *)wc->chead +
wc->chead_size);
u16 tnf;
dprintk("CLUSTER addhead %d\n", wc->chead_size);
gh->inum = cpu_to_le32(ino->i_ino);
gh->fsnum = cpu_to_le32(LAFSI(ino)->filesys->i_ino);
- tnf = ((ino->i_generation<<8) | (LAFSI(ino)->trunc_gen & 0xff)) & 0x7fff;
+ tnf = ((ino->i_generation<<8) | (LAFSI(ino)->trunc_gen & 0xff))
+ & 0x7fff;
if (wc->cnum)
tnf |= 0x8000;
gh->truncatenum_and_flag = cpu_to_le16(tnf);
wc->chead_size += sizeof(struct group_head);
}
-static inline void cluster_closehead(struct wc *wc, struct group_head *headstart)
+static inline void cluster_closehead(struct wc *wc,
+ struct group_head *headstart)
{
- int size = wc->chead_size - (((char*)headstart) - (char*)wc->chead);
+ int size = wc->chead_size - (((char *)headstart) - (char *)wc->chead);
dprintk("CLUSTER closehead %d %d\n", wc->chead_size, size);
headstart->group_size_words = size / 4;
/* if size2 !=0, then only
* (size-size2) is at 'data' and the rest is at 'data2'
*/
- struct miniblock *mb= (struct miniblock*) ((char*)wc->chead + wc->chead_size);
+ struct miniblock *mb = ((struct miniblock *)
+ ((char *)wc->chead + wc->chead_size));
dprintk("CLUSTER addmini %d %d\n", wc->chead_size, size);
static inline void cluster_adddesc(struct wc *wc, struct block *blk,
struct descriptor **desc_start)
{
- struct descriptor *dh = (struct descriptor*)((char*)wc->chead +
+ struct descriptor *dh = (struct descriptor *)((char *)wc->chead +
wc->chead_size);
*desc_start = dh;
dprintk("CLUSTER add_desc %d\n", wc->chead_size);
} else
desc_start->block_bytes = cpu_to_le32(1<<blkbits);
}
-
}
/*------------------------------------------------------------------------
cluster_flush(fs, 0);
}
- lafs_space_return(fs, uh->reserved); /* FIXME should this be _space_use ?? */
+ /* FIXME should this be _space_use ?? */
+ lafs_space_return(fs, uh->reserved);
uh->reserved = 0;
cluster_addhead(wc, ino, &head_start);
if (b)
unmap_dblock(b, mapping);
cluster_closehead(wc, head_start);
- wc->cluster_space -= sizeof(struct group_head)+sizeof(struct descriptor) +
- ROUND_UP(len);
+ wc->cluster_space -= (sizeof(struct group_head)+
+ sizeof(struct descriptor) +
+ ROUND_UP(len));
seq = wc->cluster_seq;
mutex_unlock(&wc->lock);
return seq;
int lafs_calc_cluster_csum(struct cluster_head *head)
{
- unsigned int oldcsum = head->checksum;
+ unsigned int oldcsum = head->checksum;
unsigned long long newcsum = 0;
unsigned long csum;
int i;
- unsigned int *superc = (unsigned int*) head;
+ unsigned int *superc = (unsigned int *) head;
head->checksum = 0;
- for(i=0; i< le16_to_cpu(head->Hlength)/4; i++)
- newcsum+= le32_to_cpu(superc[i]);
- csum = (newcsum& 0xffffffff) + (newcsum>>32);
+ for (i = 0; i < le16_to_cpu(head->Hlength)/4; i++)
+ newcsum += le32_to_cpu(superc[i]);
+ csum = (newcsum & 0xffffffff) + (newcsum>>32);
head->checksum = oldcsum;
return cpu_to_le32(csum);
}
if (!test_bit(B_Index, &b->flags)) {
struct page *page = dblk(b)->page;
- struct datablock *db = (struct datablock *)page->private;
+ struct datablock *db = ((struct datablock *)
+ page->private);
int blks = 1 << (PAGE_SHIFT -
- page->mapping->host->i_sb->s_blocksize_bits);
+ page->mapping->host->i_sb
+ ->s_blocksize_bits);
if (PageWriteback(page) && db) {
int j;
- for (j=0; j < blks ; j++)
+ for (j = 0; j < blks ; j++)
if (test_bit(B_IOLock, &db[j].b.flags))
break;
if (j == blks)
seg_remainder(fs, &wc->seg) - wc->remaining);
/* find, and step over, address header block(s) */
- for (i=0; i < wc->chead_blocks ; i++)
+ for (i = 0; i < wc->chead_blocks ; i++)
head_addr[i] = seg_next(fs, &wc->seg);
list_for_each_entry(b, &wc->clhead, lru) {
cluster_closehead(wc, head_start);
segend = wc->seg; /* We may write zeros from here */
seg_step(fs, &wc->seg);
-#if 0
- printk("segstep: %d pos=%d/%d %d/%d %d/%d %d/%d/%d\n",
- seg_remainder(fs, &wc->seg),
- wc->seg.dev, wc->seg.num,
- wc->seg.st_table, wc->seg.st_row,
- wc->seg.nxt_table, wc->seg.nxt_row,
- wc->seg.table, wc->seg.row, wc->seg.col);
-#endif
wc->remaining = seg_remainder(fs, &wc->seg);
if (wc->remaining < 2)
new_segment(fs, cnum);
atomic_read(&wc->pending_cnt[which]));
if (wc->pending_vfy_type[which] == VerifyNext)
wait_event(wc->pending_wait,
- atomic_read(&wc->pending_cnt[which])==1);
+ atomic_read(&wc->pending_cnt[which]) == 1);
which = (which+3) % 4;
dprintk("AB which=%d vt=%d pc=%d\n", which, wc->pending_vfy_type[which],
atomic_read(&wc->pending_cnt[which]));
if (wc->pending_vfy_type[which] == VerifyNext2)
wait_event(wc->pending_wait,
- atomic_read(&wc->pending_cnt[which])==1);
+ atomic_read(&wc->pending_cnt[which]) == 1);
lafs_clusters_done(fs);
dprintk("cluster_flush pre-bug pending_next=%d cnt=%d\n",
- wc->pending_next, atomic_read(&wc->pending_cnt[wc->pending_next]));
+ wc->pending_next, atomic_read(&wc->pending_cnt
+ [wc->pending_next]));
BUG_ON(atomic_read(&wc->pending_cnt[wc->pending_next]) != 0);
BUG_ON(!list_empty(&wc->pending_blocks[wc->pending_next]));
* Later we should possibly re-order the writes
* for raid4 stripe-at-a-time
*/
- for (i=0; i < wc->chead_blocks; i++)
- lafs_write_head(fs, page_address(wc->page[wc->pending_next]) + i*sb->s_blocksize,
+ for (i = 0; i < wc->chead_blocks; i++)
+ lafs_write_head(fs,
+ page_address(wc->page[wc->pending_next])
+ + i*sb->s_blocksize,
head_addr[i], segend.dev, wc);
while (!list_empty(&wc->clhead)) {
/* now re-initialise the cluster information */
wc->chead_blocks = 1;
- wc->remaining --;
+ wc->remaining--;
wc->cluster_space = sb->s_blocksize - sizeof(struct cluster_head);
wc->chead_size = sizeof(struct cluster_head);
wc->chead = page_address(wc->page[wc->pending_next]);
wait_event(wc->pending_wait,
- atomic_read(&wc->pending_cnt[wc->pending_next])==0);
+ atomic_read(&wc->pending_cnt[wc->pending_next]) == 0);
dprintk("cluster_flush end pending_next=%d cnt=%d\n",
- wc->pending_next, atomic_read(&wc->pending_cnt[wc->pending_next]));
+ wc->pending_next, atomic_read(&wc->pending_cnt
+ [wc->pending_next]));
-// printk("End of flush: remaining = %d (%d)\n", wc->remaining,
-// seg_remainder(fs, &wc->seg));
}
void lafs_cluster_flush(struct fs *fs, int cnum)
void lafs_cluster_wait_all(struct fs *fs)
{
int i;
- for (i=0; i<WC_NUM; i++) {
+ for (i = 0; i < WC_NUM; i++) {
struct wc *wc = &fs->wc[i];
int j;
- for (j=0; j<4; j++) {
-// printk("===============================================wait: %d,%d = %d\n",i,j,
-// atomic_read(&wc->pending_cnt[j]));
+ for (j = 0; j < 4; j++) {
wait_event(wc->pending_wait,
- atomic_read(&wc->pending_cnt[j])<=1);
+ atomic_read(&wc->pending_cnt[j]) <= 1);
}
}
}
bio_end_io_t *lafs_cluster_endio_choose(int which, int header)
{
if (header)
- if ((which&2)==0)
- if (which==0)
+ if ((which&2) == 0)
+ if (which == 0)
return cluster_endio_header_0;
else
return cluster_endio_header_1;
else
- if (which==2)
+ if (which == 2)
return cluster_endio_header_2;
else
return cluster_endio_header_3;
else
- if ((which&2)==0)
- if (which==0)
+ if ((which&2) == 0)
+ if (which == 0)
return cluster_endio_data_0;
else
return cluster_endio_data_1;
else
- if (which==2)
+ if (which == 2)
return cluster_endio_data_2;
else
return cluster_endio_data_3;
if (cnum)
wc->remaining = 0;
else
- wc->remaining = seg_remainder(fs, &wc->seg) - 1 /* 1 for header */;
+		/* -1 to leave room for the cluster header */
+		wc->remaining = seg_remainder(fs, &wc->seg) - 1;
if (prev)
/* if prev == 0 this is a brand new segment for cleaning */
fs->free_blocks += wc->remaining+1;
/*
+ * fs/lafs/dir-avl.c
+ * Copyright (C) 2005-2009
+ * Neil Brown <neilb@suse.de>
+ * Released under the GPL, version 2
+ *
* A directory block is stored as an AVL tree.
* Entries are added to the end, and merged into
* the AVL tree.
sum += DELTA;
b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
- } while(--n);
+ } while (--n);
buf[0] += b0;
buf[1] += b1;
offset = le32_to_cpu(offset);
break;
}
- if (offsetp) *offsetp = offset;
+ if (offsetp)
+ *offsetp = offset;
return hash + offset;
}
/* dpaddr assumes that 'psz' (piece size) is valid when called */
-#define dpaddr(_block, _piece) ((struct dirpiece*)((_block) + ((_piece)<<psz)))
-#define dlpaddr(_block, _piece) ((struct dirleafpiece*)((_block) + ((_piece)<<psz)))
+#define dpaddr(_block, _piece) ((struct dirpiece *)((_block) + ((_piece)<<psz)))
+#define dlpaddr(_block, _piece) ((struct dirleafpiece *)((_block) \
+ + ((_piece)<<psz)))
static int dir_set_name(struct dirpiece *dp, const char *name, int len,
int chain_offset)
len = strlen(name);
BUG_ON(len > 255);
- dh = (struct dirheader*) block;
+ dh = (struct dirheader *) block;
- pnum = (sizeof(struct dirheader)+ (1<<psz)-1) >> psz;
+ pnum = (sizeof(struct dirheader) + (1<<psz)-1) >> psz;
dh->root = pnum;
dh->pad = 0;
dp->chain_info = 3;
}
- /* NOTE: we want the last piece, not the next free piece, so we don't add
- * (1<<psz) into this sum
+ /* NOTE: we want the last piece, not the next free piece, so
+ * we don't add (1<<psz) into this sum
*/
- dh->lastpiece = pnum + ((offsetof(struct dirpiece, name)+len-1)>> psz);
+ dh->lastpiece = pnum + ((offsetof(struct dirpiece, name)+len-1) >> psz);
dh->freepieces = 255 - dh->lastpiece;
}
static inline int dir_rotate_2(char *block, int psz, u8 *treep, int dir)
{
- unsigned int B,C,D,E;
- struct dirpiece *b,*d;
+ unsigned int B, C, D, E;
+ struct dirpiece *b, *d;
B = *treep; b = dpaddr(block, B);
D = b->next[dir]; d = dpaddr(block, D);
return E;
}
-static inline int dir_rotate_3(char *block, int psz, u8 *treep, int dir, int third)
+static inline int dir_rotate_3(char *block, int psz, u8 *treep,
+ int dir, int third)
{
unsigned int B, F, D, C, E;
struct dirpiece *b, *f, *d;
}
static void dir_check_balance(char *block, int psz);
-int lafs_dir_add_ent(char *block, int psz, const char *name, int len, u32 target,
- int type, u32 seed, u32 hash, int hashoffset)
+int lafs_dir_add_ent(char *block, int psz, const char *name, int len,
+ u32 target, int type, u32 seed, u32 hash, int hashoffset)
{
/* Add this entry to the directory block,
* Return:
* 1 add successful
*
*/
- struct dirheader *dh = (struct dirheader*) block;
+ struct dirheader *dh = (struct dirheader *) block;
struct dirpiece *dp, *dpold;
int piece = dh->root;
u8 *thisp = &dh->root;
int dir;
/* loop detect */
- int last=0, cnt=0, reset=1;
+ int last = 0, cnt = 0, reset = 1;
- if (len==0)
+ if (len == 0)
len = strlen(name);
- if (piece==0) {
+ if (piece == 0) {
/* Block is empty */
if (type != DT_TEST)
lafs_dir_init_block(block, psz, name, len, target,
/* First, find the insertion point */
dp = dpaddr(block, piece);
- while(piece) {
+ while (piece) {
u32 hval = hash_piece(seed, dp, NULL);
if (hval == hash) {
return -1;
}
dir = (hash > hval);
- if (dp->longer != Neither) { depth = 0; topp = thisp;}
- if (dir) set_bit(depth, &path);
- else clear_bit(depth, &path);
+ if (dp->longer != Neither) {
+ depth = 0;
+ topp = thisp;
+ }
+ if (dir)
+ set_bit(depth, &path);
+ else
+ clear_bit(depth, &path);
depth++;
- if (depth >= sizeof(path)*8) return -2;
+ if (depth >= sizeof(path)*8)
+ return -2;
thisp = &dp->next[dir];
piece = *thisp;
dp = dpaddr(block, piece);
/* Special flag to say 'just test if there is room */
return 1;
piece = dh->lastpiece+1;
-/* printf("inserting %s at %d, sharelen %d\n", name, piece, sharelen);*/
+
dp = dpaddr(block, piece);
dh->lastpiece += need;
- BUG_ON(dh->lastpiece==0);
+ BUG_ON(dh->lastpiece == 0);
dh->freepieces -= need;
dp->target = cpu_to_le32(target);
dp->next[0] = 0;
dp = dpaddr(block, piece);
st = 0;
+ first = !!test_bit(0, &path);
+ second = !!test_bit(1, &path);
if (dp->longer == Neither)
;
- else if (dp->longer != (first = !!test_bit(0, &path))) {
+ else if (dp->longer != first) {
/* took the shorter path */
dp->longer = Neither;
piece = dp->next[first];
st = 1;
- } else if (first == (second = !!test_bit(1, &path))) {
+ } else if (first == second) {
/* just a two-point rotation */
piece = dir_rotate_2(block, psz, topp, first);
st = 2;
} else {
- if (depth < 3) third = Neither;
- else third = !!test_bit(2, &path);
+ if (depth < 3)
+ third = Neither;
+ else
+ third = !!test_bit(2, &path);
piece = dir_rotate_3(block, psz, topp, first, third);
st = 3;
}
if (type == DT_TEST)
return 1;
- space = dp->length + (dp->chain_info < 2 ? 0 : dp->chain_info == 2 ? 1 : 4);
+ space = dp->length + (dp->chain_info < 2
+ ? 0 : (dp->chain_info == 2
+ ? 1 : 4));
space += offsetof(struct dirpiece, name);
space = DIV_ROUND_UP(space, 1<<psz);
* so the space can be used immediately.
*/
- struct dirheader *dh = (struct dirheader*)block;
+ struct dirheader *dh = (struct dirheader *)block;
struct dirpiece *dp;
struct dirpiece *second;
int piece = dh->root;
depth = 0;
}
}
- if (dir) set_bit(depth, &path);
- else clear_bit(depth, &path);
- depth ++;
- if (depth >= sizeof(path)*8) return -2;
+ if (dir)
+ set_bit(depth, &path);
+ else
+ clear_bit(depth, &path);
+ depth++;
+ if (depth >= sizeof(path)*8)
+ return -2;
thisp = &dp->next[dir];
piece = *thisp;
}
if (!targetp)
return 0;
- if (dir) set_bit(depth, &path);
- else clear_bit(depth, &path);
+ if (dir)
+ set_bit(depth, &path);
+ else
+ clear_bit(depth, &path);
/* now we must walk down from topp to thisp rebalancing nodes,
* but making sure that targetp points to the link to target...
piece = *topp;
dir = !!test_bit(st, &path);
dp = dpaddr(block, piece);
- if (dp->next[dir] == 0) break;
+ if (dp->next[dir] == 0)
+ break;
if (dp->longer == Neither)
dp->longer = 1-dir;
space = DIV_ROUND_UP(space, 1<<psz);
if (dh->root == 0) {
/* we deleted the only entry, clear the block */
- dh->lastpiece = (sizeof(struct dirheader)+ (1<<psz)-1) >> psz;
+ dh->lastpiece = (sizeof(struct dirheader) + (1<<psz)-1) >> psz;
dh->freepieces = 255 - dh->lastpiece;
} else if (targetn + space > dh->lastpiece) {
/* We are deleting the last entry, so it can be reused */
* each pointer was we step down it, and use
* the ->longer field to say which way we came
*/
- struct dirheader *dh = (struct dirheader*)block;
+ struct dirheader *dh = (struct dirheader *)block;
int piece = dh->root;
struct dirpiece *dp = NULL;
int prev = 0;
while (piece) {
dp = dpaddr(block, piece);
- switch(state) {
+ switch (state) {
case 0: /* stepping down to piece for the first time */
if (dp->next[0]) {
/* step further down */
prev = piece;
if (dp->next[1]) {
/* step down the other way */
- int t= dp->next[1];
+ int t = dp->next[1];
dp->longer = 1;
dp->next[1] = parent;
parent = piece;
parent = dp->next[state];
state++;
break;
- default: BUG();
+ default:
+ BUG();
}
}
/* now 'prev' is the last piece. Walk back along the path setting
* lowest in 'new2'
*
*/
- struct dirheader *dh = (struct dirheader*)orig;
- struct dirheader *dh1 = (struct dirheader*)new1;
- struct dirheader *dh2 = (struct dirheader*)new2;
+ struct dirheader *dh = (struct dirheader *)orig;
+ struct dirheader *dh1 = (struct dirheader *)new1;
+ struct dirheader *dh2 = (struct dirheader *)new2;
struct dirpiece *dp;
int first, last;
- int full1=0, full2=0;
+ int full1 = 0, full2 = 0;
u32 offset, maxhash, minhash, hval;
dir_linearise(orig, psz);
* is allowed to go in either block.
* Choose a block, and an entry, and insert it (if possible)
*/
- if (full2 || (dh1->freepieces >= dh2->freepieces && ! full1)) {
+ if (full2 || (dh1->freepieces >= dh2->freepieces && !full1)) {
/* insert into new1 from first or name */
dp = dpaddr(orig, first);
if (first)
if (!lafs_dir_add_ent(new1, psz, dp->name,
dp->length,
le32_to_cpu(dp->target),
- dp->type, seed, hval, offset))
+ dp->type, seed, hval,
+ offset))
full1 = 1;
else {
maxhash = hval;
}
} else {
/* insert name */
- if (!lafs_dir_add_ent(new1, psz, name, 0, target,
- type, seed, hash, chainoffset))
+ if (!lafs_dir_add_ent(new1, psz, name, 0,
+ target, type, seed,
+ hash, chainoffset))
full1 = 1;
else {
maxhash = hash;
dp = dpaddr(orig, last);
if (first)
hval = hash_piece(seed, dp, &offset);
- if (type == 0 || (first && hval > hash )) {
+ if (type == 0 || (first && hval > hash)) {
/* last is the preferred candidate */
if (!lafs_dir_add_ent(new2, psz,
dp->name, dp->length,
le32_to_cpu(dp->target),
- dp->type, seed, hval, offset))
+ dp->type, seed, hval,
+ offset))
full2 = 1;
else {
minhash = hval;
void lafs_dir_repack(char *block, int psz, char *new, u32 seed, int merge)
{
- struct dirheader *dh = (struct dirheader*)block;
+ struct dirheader *dh = (struct dirheader *)block;
int pnum = (sizeof(struct dirheader) + (1<<psz)-1)>>psz;
int first = !merge;
dp->type, seed, hash, offset);
first = 0;
}
- space = dp->length + (dp->chain_info < 2 ? 0 : dp->chain_info == 2 ? 1 : 4);
+ space = dp->length + (dp->chain_info < 2
+ ? 0 : (dp->chain_info == 2
+ ? 1 : 4));
space += offsetof(struct dirpiece, name);
space = DIV_ROUND_UP(space, 1<<psz);
* set 'pp' to the piece number if found, or the
* next larger piece if not (zero if nothing is larger).
*/
- struct dirheader *dh = (struct dirheader*)block;
+ struct dirheader *dh = (struct dirheader *)block;
int pnum = dh->root;
int cnt = 256;
*pp = pnum;
pnum = dp->next[0];
}
- cnt --;
+ cnt--;
}
return 0;
}
static int dir_check_loop(char *block, int psz, int pnum, int depth)
{
/* walk around the tree, and BUG if we ever get a depth > 255 */
- struct dirheader *dh = (struct dirheader*)block;
+ struct dirheader *dh = (struct dirheader *)block;
if (pnum == -1)
pnum = dh->root;
- if (depth <= 0) return 1;
+ if (depth <= 0)
+ return 1;
if (pnum < 0 || pnum >= 256)
BUG();
int lafs_dir_empty(char *block)
{
- struct dirheader *dh = (struct dirheader*)block;
+ struct dirheader *dh = (struct dirheader *)block;
return dh->root == 0;
}
int lafs_dir_blk_size(char *block, int psz)
{
/* how much of this block do we actually need to store */
- struct dirheader *dh = (struct dirheader*)block;
+ struct dirheader *dh = (struct dirheader *)block;
if (lafs_dir_empty(block))
return 0;
return (dh->lastpiece+1) << psz;
static void xprintk(char *block, int psz, char *s, int a, int b, int c, int d)
{
- printk(s,a,b,c,d);
+ printk(s, a, b, c, d);
dir_print(block, psz);
BUG();
}
static int dir_check_depth(char *block, int psz, int p, int depth)
{
- struct dirpiece *dp = dpaddr(block,p);
- int b,f;
+ struct dirpiece *dp = dpaddr(block, p);
+ int b, f;
if (depth > 10) {
int i;
- for (i=0; i<32; i++)
+ for (i = 0; i < 32; i++)
printk("%02x ", block[i]);
printk("\n");
}
- BUG_ON(depth>10);
- if (p == 0) return 0;
+ BUG_ON(depth > 10);
+ if (p == 0)
+ return 0;
b = dir_check_depth(block, psz, dp->next[0], depth+1);
f = dir_check_depth(block, psz, dp->next[1], depth+1);
if (b == f) {
if (dp->longer != Neither)
- xprintk(block,psz, "... %d - b=%d f=%d lgr=%d\n", p, b, f, dp->longer);
+ xprintk(block, psz, "... %d - b=%d f=%d lgr=%d\n",
+ p, b, f, dp->longer);
return b+1;
}
if (b == f-1) {
if (dp->longer != 1)
- xprintk(block,psz, "... %d - b=%d f=%d lgr=%d\n", p, b, f, dp->longer);
+ xprintk(block, psz, "... %d - b=%d f=%d lgr=%d\n",
+ p, b, f, dp->longer);
return f+1;
}
if (b-1 == f) {
if (dp->longer != 0)
- xprintk(block,psz, "... %d - b=%d f=%d lgr=%d\n", p, b, f, dp->longer);
+ xprintk(block, psz, "... %d - b=%d f=%d lgr=%d\n",
+ p, b, f, dp->longer);
return b+1;
}
- xprintk(block,psz, "... %d - b=%d f=%d lgr=%d\n", p, b, f, dp->longer);
- return (b>f?b:f)+1;
+ xprintk(block, psz, "... %d - b=%d f=%d lgr=%d\n", p, b, f, dp->longer);
+ return (b > f ? b : f) + 1;
}
static void dir_check_balance(char *block, int psz)
{
- struct dirheader *dh = (struct dirheader*) block;
+ struct dirheader *dh = (struct dirheader *) block;
dir_check_depth(block, psz, dh->root, 0);
}
{
struct dirpiece *dp = dpaddr(block, piece);
- if (piece == 0) return;
+ if (piece == 0)
+ return;
dir_print_piece(block, psz, dp->next[0], depth+1, 0);
- printk("%3d - %08lu:%02d %*s%c", piece, (unsigned long) le32_to_cpu(dp->target),
- depth,depth*2, "",
- dir?'\\':'/');
+ printk("%3d - %08lu:%02d %*s%c", piece,
+ (unsigned long) le32_to_cpu(dp->target),
+ depth, depth*2, "",
+ dir ? '\\' : '/');
printk("%.*s\n", dp->length, dp->name);
dir_print_piece(block, psz, dp->next[1], depth+1, 1);
printk("===Directory Block===\n");
printk(" Root: %d\n", dh->root);
- printk(" Last Piece : %d (%d left)\n", dh->lastpiece, 255 - dh->lastpiece);
- printk(" Free Pieces: %d (%d deleted)\n", dh->freepieces, dh->freepieces - (255 - dh->lastpiece));
+ printk(" Last Piece : %d (%d left)\n", dh->lastpiece,
+ 255 - dh->lastpiece);
+ printk(" Free Pieces: %d (%d deleted)\n", dh->freepieces,
+ dh->freepieces - (255 - dh->lastpiece));
/*
if (dh->reserved)
printk( "!!!!!! Reserved is non-zero : %d\n", dh->reserved);
*/
- if (sort==1)
+ if (sort == 1)
dir_print_piece(block, psz, dh->root, 0, 1);
else if (sort == 2) {
/* linearised */
pnum, (unsigned long) le32_to_cpu(dp->target),
dp->next[0], dp->next[1], dp->longer,
dp->type);
- printk(": (%d)%.*s\n", dp->length, dp->length, dp->name);
+ printk(": (%d)%.*s\n", dp->length, dp->length,
+ dp->name);
pnum = dp->next[1];
}
} else {
/* don't interpret the pieces too much */
- int pnum = (sizeof(struct dirheader)+ (1<<psz)-1)>>psz;
+ int pnum = (sizeof(struct dirheader) + (1<<psz)-1)>>psz;
while (pnum <= dh->lastpiece) {
dp = dpaddr(block, pnum);
printk("%3d - %08lu: (b:%-3d, f:%-3d, l:%d, type:%d ",
pnum, (unsigned long)le32_to_cpu(dp->target),
dp->next[0], dp->next[1], dp->longer,
dp->type);
- printk(": (%d)%.*s\n", dp->length, dp->length, dp->name);
- pnum += (offsetof(struct dirpiece, name) + dp->length +(1<<psz)-1)>>psz;
+ printk(": (%d)%.*s\n", dp->length,
+ dp->length, dp->name);
+ pnum += (offsetof(struct dirpiece, name)
+ + dp->length + (1<<psz)-1)>>psz;
}
}
}
#ifdef MAIN
-int noise = 0;
+int noise;
int main(int argc, char **argv)
{
char block[4096];
lafs_dir_init_block(block, psz, argv[1], 0, 42, 3, 0);
while (arg < argc-1) {
if (argv[arg][0] != '-')
- switch(lafs_dir_add_ent(block, psz, argv[arg], 0, 42+arg, 4, 0)) {
- case 0: printf("%s didn't fit!\n", argv[arg]); break;
- case -1: printf("%s already exists\n", argv[arg]); break;
- case 1: printf("%s inserted\n", argv[arg]);
+ switch (lafs_dir_add_ent(block, psz, argv[arg], 0,
+ 42+arg, 4, 0)) {
+ case 0:
+ printf("%s didn't fit!\n", argv[arg]);
+ break;
+ case -1:
+ printf("%s already exists\n", argv[arg]);
+ break;
+ case 1:
+ printf("%s inserted\n", argv[arg]);
}
else
switch (lafs_dir_del_ent(block, psz, argv[arg]+1, 0)) {
- case 0: printf("%s not found!\n", argv[arg]); break;
- case -2:printf("%s not deleted - bad dir\n", argv[arg]); break;
- case 1: printf("%s deleted\n", argv[arg]); break;
+ case 0:
+ printf("%s not found!\n", argv[arg]);
+ break;
+ case -2:
+ printf("%s not deleted - bad dir\n", argv[arg]);
+ break;
+ case 1:
+ printf("%s deleted\n", argv[arg]);
+ break;
}
dir_check_balance(block, psz);
dir_print_block(block, psz, 0);
dir_print_block(block, psz, 1);
- lafs_dir_split(block, psz, block+1024, block+2048, argv[arg], 40, 2, nm);
+ lafs_dir_split(block, psz, block+1024, block+2048, argv[arg],
+ 40, 2, nm);
dir_print_block(block+1024, psz, 1);
dir_print_block(block+2048, psz, 1);
dir_get_prefix(block+1024, block+2048, psz, block+3096);
/*
* fs/lafs/dir.c
- * Copyright (C) 2005-2006
+ * Copyright (C) 2005-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*
hash = lafs_hash_name(seed, nlen, name);
- while(1) {
+ while (1) {
char *buf;
bn = hash+1;
if (lafs_find_next(dir, &bn) == 0)
doh->temp = NULL;
lafs_dirty_dblock(doh->new);
if (((doh->new->b.fileaddr+1) << dir->i_blkbits) > dir->i_size)
- i_size_write(dir, ((doh->new->b.fileaddr+1) << dir->i_blkbits));
+ i_size_write(dir, ((doh->new->b.fileaddr+1)
+ << dir->i_blkbits));
clear_bit(B_PinPending, &doh->new->b.flags);
putdref(doh->new, MKREF(dir_new));
} else
if (IS_ERR(dirblk))
return PTR_ERR(dirblk);
-// printk("Delete finds %.*s at %d (%x)\n", nlen, name, doh->index, doh->hash);
/* FIXME should I check if the orphanage is needed
* before committing this block to it?
*/
de.target = 0;
de.type = 0;
lafs_dir_set_target(buf, bits, &de, doh->index);
-// printk("Set %d to zero\n", doh->index);
/* If 'hash+1' is not in this block, make me an orphan
* (as we cannot check the chain)
if (piece == firstpiece && de.target == 0) {
lafs_orphan_commit(&doh->oi);
- add_orphan(doh->dirent_block->b.inode, doh->dirent_block);
+ add_orphan(doh->dirent_block->b.inode,
+ doh->dirent_block);
} else
lafs_orphan_abort(&doh->oi);
} else {
{
int err;
err = lafs_pin_dblock(doh->dirent_block);
- if (err) printk("E1 err=%d\n", err);
+ if (err)
+ printk("E1 err=%d\n", err);
if (err)
return err;
if (doh->dirent_block)
err = lafs_orphan_pin(&doh->oi, doh->dirent_block, 1);
- if (err) printk("2 err=%d\n", err);
+ if (err)
+ printk("2 err=%d\n", err);
return err;
}
int operation, u32 *handle)
{
char mb[4];
- static u32 hancnt=0;
+ static u32 hancnt;
u32 han = 0;
- switch(operation) {
+ switch (operation) {
case DIROP_LINK:
case DIROP_UNLINK:
han = 0;
break;
case DIROP_REN_SOURCE:
- while (++hancnt == 0);
+ while (++hancnt == 0)
+ ;
han = hancnt;
*handle = han;
break;
BUG();
}
- *(u32*)mb = cpu_to_le32(target);
+ *(u32 *)mb = cpu_to_le32(target);
lafs_cluster_update_commit_buf(uh, fs, dir, han, operation,
4+name->len, mb,
name->len, name->name);
*/
static int
lafs_create(struct inode *dir, struct dentry *de, int mode,
- struct nameidata *nd)
+ struct nameidata *nd)
{
- /* Need to allocate an inode and space in the directory */
+/* Need to allocate an inode and space in the directory */
struct fs *fs = fs_from_inode(dir);
struct datablock *db;
struct inode *ino = lafs_new_inode(fs, dir, TypeFile, 0, mode, &db);
if (IS_ERR(ino))
return PTR_ERR(ino);
- BUG_ON(LAFSI(ino)->dblock->orphan_slot == 0x5a5a5a5a);
err = dir_create_prepare(fs, dir, de->d_name.name, de->d_name.len,
ino->i_ino, DT_REG, &doh);
if (err < 0)
goto abort_unlock;
- inode->i_nlink ++;
+ inode->i_nlink++;
lafs_dirty_inode(inode);
clear_bit(B_PinPending, &inodb->b.flags);
putdref(inodb, MKREF(inode_update));
- dir_log_commit(&uh, fs, dir, &to->d_name, inode->i_ino, DIROP_LINK, NULL);
+ dir_log_commit(&uh, fs, dir, &to->d_name, inode->i_ino,
+ DIROP_LINK, NULL);
dir_create_commit(&doh, fs, dir, to->d_name.name, to->d_name.len,
inode->i_ino, mode_to_dt(inode->i_mode));
/* Don't log the nlink change - that is implied in the name creation */
dprintk("enter unlink: refcnt = %d\n",
atomic_read(&LAFSI(inode)->dblock->b.refcnt));
- err = dir_delete_prepare(fs, dir, de->d_name.name, de->d_name.len, &doh);
+ err = dir_delete_prepare(fs, dir, de->d_name.name, de->d_name.len,
+ &doh);
err = dir_log_prepare(&uh, fs, &de->d_name) ?: err;
if (last)
err = lafs_orphan_prepare(fs, &oi) ?: err;
lafs_checkpoint_lock(fs);
err = dir_delete_pin(&doh);
- if (err) printk("E err=%d\n", err);
+ if (err)
+ printk("E err=%d\n", err);
err = err ?: lafs_cluster_update_pin(&uh);
err = err ?: lafs_pin_dblock(inodb);
if (err == 0 && last)
goto abort_unlock;
inode->i_nlink--;
- dir_log_commit(&uh, fs, dir, &de->d_name, inode->i_ino, DIROP_UNLINK, NULL);
+ dir_log_commit(&uh, fs, dir, &de->d_name, inode->i_ino,
+ DIROP_UNLINK, NULL);
dir_delete_commit(&doh, fs, dir, de->d_name.name, de->d_name.len);
if (last)
lafs_orphan_commit(&oi);
if (inode->i_size || inode->i_nlink > 2)
return -ENOTEMPTY;
- err = dir_delete_prepare(fs, dir, de->d_name.name, de->d_name.len, &doh);
+ err = dir_delete_prepare(fs, dir, de->d_name.name, de->d_name.len,
+ &doh);
err = dir_log_prepare(&uh, fs, &de->d_name) ?: err;
err = lafs_orphan_prepare(fs, &oi) ?: err;
err = lafs_inode_prepare(inode, &inodb, MKREF(inode_update)) ?: err;
err = err ?: lafs_cluster_update_pin(&uh);
err = err ?: lafs_pin_dblock(inodb);
err = err ?: lafs_orphan_pin(&oi, inodb, 1);
- if (err == -EAGAIN){
+ if (err == -EAGAIN) {
lafs_checkpoint_unlock_wait(fs);
goto retry; /* FIXME should I not unlock ?? */
}
if (err < 0)
goto abort_unlock;
- dir->i_nlink --;
+ dir->i_nlink--;
inode->i_nlink -= 2;
- dir_log_commit(&uh, fs, dir, &de->d_name, inode->i_ino, DIROP_UNLINK, NULL);
+ dir_log_commit(&uh, fs, dir, &de->d_name, inode->i_ino,
+ DIROP_UNLINK, NULL);
dir_delete_commit(&doh, fs, dir, de->d_name.name, de->d_name.len);
lafs_orphan_commit(&oi);
lafs_dirty_inode(inode);
if (err < 0)
goto abort_unlock;
- lai= LAFSI(ino);
+ lai = LAFSI(ino);
lai->md.file.parent = dir->i_ino;
dir->i_nlink++;
ino->i_nlink = 2; /* From parent, and from '.' */
/* old entry gets deleted, new entry gets created or updated. */
err = dir_delete_prepare(fs, old_dir,
- old_dentry->d_name.name, old_dentry->d_name.len,
+ old_dentry->d_name.name,
+ old_dentry->d_name.len,
&old_doh);
err = dir_log_prepare(&old_uh, fs, &old_dentry->d_name) ?: err;
err = dir_log_prepare(&new_uh, fs, &new_dentry->d_name) ?: err;
new_dentry->d_name.name,
new_dentry->d_name.len,
&new_doh) ?: err;
- err = lafs_inode_prepare(new_inode, &newdb, MKREF(inode_update)) ?: err;
+ err = lafs_inode_prepare(new_inode, &newdb,
+ MKREF(inode_update)) ?: err;
} else
/* create new link */
err = dir_create_prepare(fs, new_dir,
if (new_inode) {
if (last)
lafs_orphan_commit(&oi);
- dir_update_commit(fs, old_inode->i_ino, mode_to_dt(old_inode->i_mode),
+ dir_update_commit(fs, old_inode->i_ino,
+ mode_to_dt(old_inode->i_mode),
&new_doh);
} else
dir_create_commit(&new_doh, fs, new_dir,
- new_dentry->d_name.name, new_dentry->d_name.len,
- old_inode->i_ino, mode_to_dt(old_inode->i_mode));
+ new_dentry->d_name.name,
+ new_dentry->d_name.len,
+ old_inode->i_ino,
+ mode_to_dt(old_inode->i_mode));
if (S_ISDIR(old_inode->i_mode)) {
old_dir->i_nlink--;
if (!new_inode)
dir_delete_abort(&old_doh);
lafs_cluster_update_abort(&old_uh);
lafs_cluster_update_abort(&new_uh);
- if(olddb)
+ if (olddb)
putdref(olddb, MKREF(inode_update));
if (new_inode) {
if (last)
*/
static int dir_handle_orphan(struct inode *dir,
- struct datablock *b);
+ struct datablock *b);
/* Called with ino->i_sem down */
void lafs_dir_handle_orphans(struct inode *ino)
struct list_head tmp;
int all_failed = 0;
- while (! all_failed) {
+ while (!all_failed) {
all_failed = 1;
list_add(&tmp, &LAFSI(ino)->md.file.dirorphans);
list_del_init(&LAFSI(ino)->md.file.dirorphans);
struct dir_ent de;
int err;
- dprintk("HANDLE ORPHAN i=%d b=%d h=%x\n", (int)dir->i_ino, (int)b->b.fileaddr,
- (unsigned)hash);
+ dprintk("HANDLE ORPHAN i=%d b=%d h=%x\n", (int)dir->i_ino,
+ (int)b->b.fileaddr, (unsigned)hash);
err = lafs_read_block(b);
if (err)
goto abort;
/* FIXME what if it returns an error */
bnum = 0;
- b2 = lafs_get_block(dir, bnum, NULL, GFP_KERNEL, MKREF(dir_orphan));
+ b2 = lafs_get_block(dir, bnum, NULL, GFP_KERNEL,
+ MKREF(dir_orphan));
err = -ENOMEM;
if (!b2)
goto abort;
lafs_dir_extract(buf, bits, &de, firstpiece, &hash)->target == 0 &&
lafs_dir_find(buf, bits, seed, hash+1, &piece) == 0) {
unmap_dblock(b, buf);
- b2 = lafs_get_block(dir, hash, NULL, GFP_KERNEL, MKREF(dir_orphan));
+ b2 = lafs_get_block(dir, hash, NULL, GFP_KERNEL,
+ MKREF(dir_orphan));
err = -ENOMEM;
if (!b2)
goto abort;
if (lafs_dir_find(buf2, bits, seed, (hash-1) & MaxDirHash,
&piece) &&
lafs_dir_extract(buf2, bits, &de, piece, NULL)->target == 0)
- add_orphan(dir, b2); /* FIXME not recorded in orphan file... */
+ /* FIXME not recorded in orphan file... */
+ add_orphan(dir, b2);
unmap_dblock(b2, buf2);
putdref(b2, MKREF(dir_orphan));
buf = map_dblock(b);
if (err)
goto abortb2;
buf2 = map_dblock(b2);
- i_size_write(dir, lafs_dir_blk_size(buf2, bits));
+ i_size_write(dir,
+ lafs_dir_blk_size(buf2, bits));
unmap_dblock(b2, buf2);
putdref(b2, MKREF(dir_orphan));
}
ino = dentry->d_inode->i_ino;
if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
break;
- filp->f_pos ++;
+ filp->f_pos++;
i++;
/* fallthrough */
case 1:
bnum = hash+1;
switch (lafs_find_next(dentry->d_inode, &bnum)) {
- case 1: break;
+ case 1:
+ break;
case 0:
bnum = 0;
break;
default:
return -EIO;
}
-// printk("block number %lld\n", (long long)bnum);
b = lafs_get_block(dentry->d_inode, bnum, NULL,
GFP_KERNEL, MKREF(readdir));
if (!b) {
err = lafs_read_block(b);
if (err)
break;
-// buf = map_dblock(b);
+ /* buf = map_dblock(b); */
buf = kmap(b->page);
buf += dblock_offset(b);
while (1) {
hash = seed;
lafs_dir_extract(buf, bits, &de,
piece, &hash);
-// printk("name=%.*s, %d, %d %08x %d\n",
-// de.nlen, de.name, de.target, de.type, hash,
-// piece);
if (de.target == 0) {
hash++;
else
break;
}
-// unmap_dblock(b, buf);
+ /* unmap_dblock(b, buf); */
kunmap(b->page);
putdref(b, MKREF(readdir));
hash = bnum;
return d_splice_alias(ino, dentry);
}
-struct file_operations lafs_dir_file_operations = {
+const struct file_operations lafs_dir_file_operations = {
.llseek = generic_file_llseek, /* Just set 'pos' */
.read = generic_read_dir, /* return error */
.readdir = lafs_readdir,
/*
* fs/lafs/file.c
- * Copyright (C) 2005
+ * Copyright (C) 2005-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*
set_bit(B_IOPending, &b0->b.flags);
dprintk("read page %p for %d blocks\n", page, n);
- for (i=0; i < n; i++) {
+ for (i = 0; i < n; i++) {
struct datablock *b = lafs_get_block(ino, i, page, GFP_KERNEL,
MKREF(readpage));
if (!b) {
putdref(b, MKREF(readpage));
break;
}
- // dprintk(" %d at %llu\n", i, b->b.physaddr);
err = lafs_load_block(&b->b, 1);
putdref(b, MKREF(readpage));
if (err)
retry:
lafs_checkpoint_lock(fs);
for (i = first ; err == 0 && i <= last ; i++) {
- /* FIXME need PinPending or something to make sure credits don't disappear */
+ /* FIXME need PinPending or something to make sure
+ * credits don't disappear */
err = lafs_reserve_block(&fb[i - first].b, NewSpace);
if (fb[i-first].b.parent == NULL)
BUG();
for (i = 0 ; i < blocks; i++) {
struct datablock *b = lafs_get_block(ino, i, page, GFP_KERNEL,
MKREF(writepage));
- if (!b) continue;
+ if (!b)
+ continue;
if (i == 0) {
set_bit(B_IOPending, &b->b.flags);
b0 = getdref(b, MKREF(writepage0));
putdref(b, MKREF(writepage));
}
set_page_writeback(page);
- unlock_page(page); /* FIXME this must not happen before the writes complete! */
+ unlock_page(page); /* FIXME this must not happen before
+			    * the writes complete! */
if (!b0)
return 0;
lafs_iounlock_block(&b0->b, B_IOPending);
struct address_space *mapping;
struct fs *fs;
int bits;
-// int i;
mapping = page->mapping;
if (!mapping)
bits = PAGE_SHIFT - ino->i_blkbits;
#if 0
- for (i=0; i < (1<<bits); i++) {
+ for (i = 0; i < (1<<bits); i++) {
struct datablock *b = lafs_get_block(ino, i, page,
GFP_KERNEL, MKREF(xx));
if (!b)
#endif
}
-struct file_operations lafs_file_file_operations = {
+const struct file_operations lafs_file_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = generic_file_aio_read,
.aio_write = generic_file_aio_write,
-/* .ioctl = ext3_ioctl,*/
+/* .ioctl	= lafs_ioctl,*/
.mmap = generic_file_mmap,
.open = generic_file_open,
-/* .release = ext3_release_file,*/
-/* .fsync = ext3_sync_file, */
+/* .release	= lafs_release_file,*/
+/* .fsync = lafs_sync_file, */
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
/*
* Handle index block for LaFS.
* fs/lafs/index.c
- * Copyright (C) 2005-2007
+ * Copyright (C) 2005-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*/
static struct hlist_head hash_table[1<<HASHBITS];
spinlock_t lafs_hash_lock;
-/* static */ struct freelists {
- struct list_head lru;
- unsigned long freecnt;
-} freelist;
+
+/* static */ struct freelists freelist;
static int lafs_shrinker(int nr_to_scan, /*gfp_t*/unsigned int gfp_mask)
{
continue;
}
hlist_del(&ib->hash);
- list_del_init(&ib->b.siblings); /* from inode->free_index */
+ /* delete from inode->free_index */
+ list_del_init(&ib->b.siblings);
list_move(&ib->b.lru, &togo);
nr_to_scan--;
freelist.freecnt--;
kfree(ib);
}
-static struct shrinker hash_shrink =
-{
+static struct shrinker hash_shrink = {
.shrink = lafs_shrinker,
.seeks = DEFAULT_SEEKS,
};
int lafs_ihash_init(void)
{
int i;
- for (i = 0 ; i < (1<<HASHBITS); i++ )
+ for (i = 0 ; i < (1<<HASHBITS); i++)
INIT_HLIST_HEAD(hash_table+i);
spin_lock_init(&lafs_hash_lock);
INIT_LIST_HEAD(&freelist.lru);
ok = 1;
BUG_ON(!ok);
-
if (!test_bit(B_InoIdx, &ib->b.flags)) {
getref_locked(&ib->b, REF);
return ib;
spin_lock(&as->private_lock);
if (blk->parent) {
if (blk->parent != parent) {
- extern void lafs_dump_tree(void);
printk("blk = %s\n", strblk(blk));
printk("blk->p = %s\n", strblk(&blk->parent->b));
printk("parent = %s\n", strblk(&parent->b));
BUG_ON(parent == iblk(blk));
blk->parent = parent;
getiref(parent, MKREF(child));
+ /* Remove from the per-inode free list */
if (!test_bit(B_InoIdx, &blk->flags))
list_move(&blk->siblings, &parent->children);
else
- list_del_init(&blk->siblings); /* FIXME this was INIT_LIST_HEAD which is clearly wrong,
- * but the new version probably needs to be verified */
+ list_del_init(&blk->siblings);
}
spin_unlock(&as->private_lock);
}
ino = b->inode;
spin_lock(&ino->i_mapping->private_lock);
- for (p = b ; p && !test_bit(B_Pinned, &p->flags) ; p = &(p->parent)->b) {
+ for (p = b ;
+ p && !test_bit(B_Pinned, &p->flags) ;
+ p = &(p->parent)->b) {
set_phase(p, ph);
if (test_bit(B_InoIdx, &p->flags)) {
struct datablock *db = LAFSI(p->inode)->dblock;
list_for_each_entry_continue(b, &p->children, siblings) {
if (test_bit(B_Index, &b->flags)) {
/* recurse down */
- depth ++;
+ depth++;
goto recurse;
} else
set_phase(b, ph);
atomic_dec(&b->parent->pincnt[oldphase]);
lafs_refile(&b->parent->b, 0);
}
- dprintk("===========Dropping Pinned on flip===========\n");
- } else printk("=====================weird=====================\n");
+ }
return;
}
- if (! test_bit(B_InoIdx, &b->flags)) {
+ if (!test_bit(B_InoIdx, &b->flags)) {
struct inode *ino = b->inode;
spin_lock(&ino->i_mapping->private_lock);
* a phase change can (and should) happen for index blocks
* in the 'other' phase that are
* not Dirty, not Alloc, pincnt[oldphase]==0, uninc-table empty
- *
+ *
*
* Refiling one block may cause a change in another block which requires
* a recursive refile. Naturally we unroll the tail-recursion, but we
/* sanity tests.
* 1/ make sure pincnt is right
*/
-#if 0
- if (dec &&
- b->inode->i_ino == 0 &&
- b->fileaddr == 53 &&
- test_bit(B_Index, &b->flags)
- ) {
- printk("dec 8/0: %d\n", atomic_read(&b->refcnt));
- WARN_ON(1);
- }
-#endif
if (test_bit(B_Index, &b->flags)) {
int c[2];
struct block *cb;
!test_bit(B_Index, &cb->flags) &&
dblk(cb)->my_inode &&
LAFSI(dblk(cb)->my_inode)->iblock &&
- test_bit(B_Pinned, &LAFSI(dblk(cb)->my_inode)->iblock->b.flags)) {
+ test_bit(B_Pinned, &(LAFSI(dblk(cb)->my_inode)
+ ->iblock->b.flags))) {
int pp = !!test_bit(B_Phase1,
- &LAFSI(dblk(cb)->my_inode)->iblock->b.flags);
+ &LAFSI(dblk(cb)->my_inode)
+ ->iblock->b.flags);
c[pp]++;
}
}
if (c[0] != atomic_read(&iblk(b)->pincnt[0]) ||
c[1] != atomic_read(&iblk(b)->pincnt[1])) {
- void lafs_dump_tree(void);
printk("%d %d %s\n", c[0], c[1], strblk(b));
lafs_dump_tree();
BUG();
!test_bit(B_Index, &cb->flags) &&
dblk(cb)->my_inode &&
LAFSI(dblk(cb)->my_inode)->iblock &&
- test_bit(B_Pinned, &LAFSI(dblk(cb)->my_inode)->iblock->b.flags)) {
+ test_bit(B_Pinned, &(LAFSI(dblk(cb)->my_inode)
+ ->iblock->b.flags))) {
int pp = !!test_bit(B_Phase1,
- &LAFSI(dblk(cb)->my_inode)->iblock->b.flags);
+ &LAFSI(dblk(cb)->my_inode)
+ ->iblock->b.flags);
c[pp]++;
}
}
spin_lock(&lafs_hash_lock);
ph = !!test_bit(B_Phase1, &b->flags);
-//printk("4");
/* See if we still need to be pinned */
/* FIXME need some locking here ... */
if (test_bit(B_Pinned, &b->flags) &&
(iblk(b)->uninc_table.pending_cnt == 0 &&
iblk(b)->uninc == NULL &&
iblk(b)->uninc_next == NULL &&
- atomic_read(&b->refcnt) == dec && /* Not sure this is safe..*/
+ atomic_read(&b->refcnt) == dec &&
atomic_read(&iblk(b)->pincnt[0]) == 0 &&
atomic_read(&iblk(b)->pincnt[1]) == 0)) &&
(!test_bit(B_InoIdx, &b->flags) ||
!test_bit(B_PinPending, &LAFSI(b->inode)->dblock->b.flags))
) {
/* Don't need to be Pinned any more */
-//printk("5");
lafs_checkpoint_lock(fs);
if (test_and_clear_bit(B_Pinned, &b->flags)) {
- //int credits = 0;
if (!test_bit(B_Root, &b->flags)) {
atomic_dec(&b->parent->pincnt[ph]);
if (next_parent != &b->parent->b) {
BUG_ON(next_parent);
next_parent = &b->parent->b;
- atomic_inc(&next_parent->refcnt);
+ atomic_inc(&next_parent
+ ->refcnt);
}
}
-#if 0
- /* FIXME this is for phase_flip, not for here !! */
- /* If we cannot get instant re-allocation
- * of credits, we use the 'next' credits
- * and Pin all Dirty datablock children
- */
- if (lafs_prealloc(b, ReleaseSpace) < 0) {
- /* Cannot reserve for next phase,
- * so force everything out this
- * phase
- */
- if (!test_bit(B_Credit, &b->flags) &&
- test_and_clear_bit(B_NCredit, &b->flags))
- set_bit(B_Credit, &b->flags);
-
- if (!test_bit(B_ICredit, &b->flags) &&
- test_and_clear_bit(B_NICredit, &b->flags))
- set_bit(B_ICredit, &b->flags);
- // FIXME Pin all dirty children!!
- }
-#endif
-#if 0
- /* No longer pinned, so don't need those credits */
- if (test_and_clear_bit(B_Credit, &b->flags))
- credits++;
- if (test_and_clear_bit(B_ICredit, &b->flags))
- credits++;
- if (test_and_clear_bit(B_NCredit, &b->flags))
- credits++;
- if (test_and_clear_bit(B_NICredit, &b->flags))
- credits++;
- /* FIXME do I clear UnincCredit? I don't think so - it is like Dirty */
- lafs_space_return(fs, credits);
-#endif
}
lafs_checkpoint_unlock(fs);
}
-//printk("1");
/* Make sure lru is correct */
if ((list_empty(&b->lru) || test_bit(B_OnFree, &b->flags)) &&
test_bit(B_Pinned, &b->flags) &&
* ->dblock, But how to we know it isn't
* already NULL ??
*/
- //spin_lock(&b->inode->i_mapping->private_lock);
if (test_bit(B_Index, &b->flags))
getiref_locked(iblk(b), MKREF(leaf));
else
getref(b, MKREF(leaf));
- //spin_unlock(&b->inode->i_mapping->private_lock);
}
spin_unlock(&fs->lock);
}
-//printk("6");
/* check the ->parent link */
if (atomic_read(&b->refcnt) == dec) {
if (b->parent &&
int credits;
/* Don't need ->parent any more */
if (next_parent == &b->parent->b) {
- if (atomic_dec_and_test(&next_parent->refcnt))
+ if (atomic_dec_and_test(&next_parent
+ ->refcnt))
BUG();
} else {
BUG_ON(next_parent);
next_parent = &b->parent->b;
}
- del_ref(next_parent, MKREF(child), __FILE__,__LINE__);
-//printk("7");
+ del_ref(next_parent, MKREF(child),
+ __FILE__, __LINE__);
b->parent = NULL;
spin_lock(&b->inode->i_mapping->private_lock);
list_del_init(&b->siblings);
spin_unlock(&b->inode->i_mapping->private_lock);
- if (test_bit(B_Index, &b->flags)) {
-//printk("7a");
+ if (test_bit(B_Index, &b->flags))
list_add(&b->siblings,
&LAFSI(b->inode)->free_index);
- }
+
if (test_and_clear_bit(B_Prealloc, &b->flags) &&
b->physaddr == 0)
lafs_summary_allocate(fs, b->inode, -1);
(1<<B_Realloc) |
(1<<B_Dirty)))
) {
-//printk("8");
if (b->parent != NULL)
- /* Could this be uninc - where is refcnt */
+ /* Could this be uninc - where
+ * is refcnt */
printk("Problem: %s\n", strblk(b));
BUG_ON(b->parent != NULL);
/* put it on the lru */
BUG_ON(!list_empty(&b->lru));
list_move(&b->lru, &freelist.lru);
}
- freelist.freecnt ++;
+ freelist.freecnt++;
}
}
struct datablock *db = dblk(b);
if (!test_bit(B_Index, &b->flags) &&
!PagePrivate(db->page)) {
- int bits = PAGE_SHIFT - b->inode->i_blkbits;
+ int bits = (PAGE_SHIFT -
+ b->inode->i_blkbits);
int mask = (1<<bits) - 1;
int bnum = b->fileaddr & mask;
* as lafs_destroy_inode ..
*/
if (db->my_inode) {
- BUG_ON(LAFSI(db->my_inode)->iblock);
- LAFSI(db->my_inode)->dblock = NULL;
+ BUG_ON(LAFSI(db->my_inode)
+ ->iblock);
+ LAFSI(db->my_inode)
+ ->dblock = NULL;
db->my_inode = NULL;
}
BUG_ON(next);
if (bnum) {
next = &db[-bnum].b;
- del_ref(next, "lafs_release_0", __FILE__, __LINE__);
+ del_ref(next, "lafs_release_0",
+ __FILE__, __LINE__);
} else {
free_me = 1;
put_page(db->page);
* deref the dblock
*/
if (test_bit(B_InoIdx, &b->flags)) {
- spin_lock(&b->inode->i_mapping->private_lock);
- BUG_ON(LAFSI(b->inode)->iblock != iblk(b));
+ spin_lock(&b->inode->i_mapping
+ ->private_lock);
+ BUG_ON(LAFSI(b->inode)->iblock
+ != iblk(b));
BUG_ON(next);
next = &LAFSI(b->inode)->dblock->b;
- del_ref(next, MKREF(iblock), __FILE__, __LINE__);
- spin_unlock(&b->inode->i_mapping->private_lock);
+ del_ref(next, MKREF(iblock),
+ __FILE__, __LINE__);
+ spin_unlock(&b->inode->i_mapping
+ ->private_lock);
}
}
}
(in = dblk(b)->my_inode) != NULL &&
test_bit(I_Destroyed, &LAFSI(in)->iflags)) {
if (LAFSI(in)->iblock)
- BUG_ON(atomic_read(&LAFSI(in)->iblock->b.refcnt));
+ BUG_ON(atomic_read(&LAFSI(in)->iblock->
+ b.refcnt));
LAFSI(in)->iblock = NULL;
clear_bit(I_Deleting, &LAFSI(in)->iflags);
lafs_destroy_inode(in);
}
-//printk("9");
if (free_me)
kfree(b);
if (next) {
b = next;
next = NULL;
- dec = 1;
} else if (next_parent) {
b = next_parent;
next_parent = NULL;
- dec = 1;
}
-//printk("D");
+ dec = 1;
}
-//printk("X\n");
}
-
-
+
/*
* create (if it doesn't already exist) the 'iblock' for an inode.
* This is a shadow of the dblock but comes into it's own if/when
u64 p;
unsigned char *cp;
int elen;
- int hi,lo;
+ int hi, lo;
u32 addr, next;
if (nextp)
*nextp = 0xffffffff;
- if (buf[1]) return 0;
+ if (buf[1])
+ return 0;
switch (buf[0]) {
- default: p = 0;
+ default:
+ p = 0;
break;
case IBLK_INDIRECT: /* indirect */
dprintk("indirect lookup for %lu from %lu, len %d\n",
(unsigned long)target, (unsigned long)startaddr, len);
- { int i;
- for (i=0; i<20; i++)
- dprintk(" %02x", buf[i]);
- dprintk("\n");
- }
- //WARN_ON(target == 0xFFFFFFFF);
len -= 2;
buf += 2;
- if ( target < startaddr)
+ if (target < startaddr)
return 0;
next = target;
break;
case IBLK_EXTENT: /* extent */
- /* 12 byte records: 6byte device, 2 byte length, 4 byte fileaddr */
+ /* 12 byte records: 6byte device, 2 byte length, 4
+ * byte fileaddr */
len -= 2;
buf += 2;
p2 = decode48(cp);
len = decode16(cp);
if (len == 0)
- *nextp = ~0UL; /* no more meaningful extents*/
+ *nextp = ~0UL; /* no more meaningful
+					* extents */
else
*nextp = decode32(cp);
}
u64 p;
unsigned char *cp;
- dprintk("IL len=%d\n", len);
-
if (buf[1] || buf[0]) {
- dprintk("WARNING: not an index block %02x %02x\n",buf[0],buf[1]);
+ dprintk("WARNING: not an index block %02x %02x\n",
+ buf[0], buf[1]);
return 0;
}
p = decode48(cp);
addr = decode32(cp);
dprintk("...addr=%lu target=%lu lo=%d mid=%d hi=%d\n",
- (unsigned long)addr, (unsigned long)target, lo, mid, hi);
+ (unsigned long)addr, (unsigned long)target,
+ lo, mid, hi);
if (p && addr <= target)
lo = mid;
- else hi = mid;
+ else
+ hi = mid;
}
cp = buf;
goto err;
}
if (ib2->b.physaddr != iphys &&
- ! test_bit(B_Uninc, &ib2->b.flags)) {
+ !test_bit(B_Uninc, &ib2->b.flags)) {
dprintk("WARN %llu != %llu\n",
ib2->b.physaddr, iphys);
}
if (nxt->fileaddr < ib2->b.fileaddr)
break;
if (nxt->fileaddr > addr &&
- (nxt->physaddr != 0 || test_bit(B_Valid, &nxt->flags))) {
+ (nxt->physaddr != 0 || test_bit(B_Valid,
+ &nxt->flags))) {
if (next && *next > nxt->fileaddr)
*next = nxt->fileaddr;
break;
if (*addrp == 0xFFFFFFFF) {
int j;
printk("at %s\n", strblk(&lai->dblock->b));
- printk("offset = %d depth=%d\n", offset, lai->depth);
- for (j=0; j<16;j++)
+ printk("offset = %d depth=%d\n", offset,
+ lai->depth);
+ for (j = 0; j < 16; j++)
printk("%04x ", buf[offset+j] & 0xffff);
- for (j=0; j<16;j++)
+ for (j = 0; j < 16; j++)
printk("%04x ", buf[1024-16+j] & 0xffff);
printk("\n");
}
putdref(b, MKREF(find_next));
return rv;
}
- hole = (b->b.physaddr == 0 || !test_bit(B_PhysValid, &b->b.flags)) &&
+ hole = (b->b.physaddr == 0 ||
+ !test_bit(B_PhysValid, &b->b.flags)) &&
!test_bit(B_Valid, &b->b.flags);
if (LAFSI(ino)->depth == 0 &&
b->b.fileaddr == 0)
static int table_lookup(struct uninc *tbl, u32 addr, u64 *phys)
{
int i;
- for (i=tbl->pending_cnt; i; ) {
+ for (i = tbl->pending_cnt; i; ) {
struct addr *a = &tbl->pending_addr[--i];
if (a->fileaddr <= addr &&
a->fileaddr + a->cnt > addr) {
if (IS_ERR(db))
return PTR_ERR(db);
-// down_read(&lai->ind_sem);
/* Ok, there is some indexing information we need to
* look through. Find the leaf first
*/
if (test_bit(B_InoIdx, &ib->b.flags))
dprintk("InoIdx!!!\n");
dprintk("offset %d: %02x %02x\n", offset,
- ((char*)buf)[offset], ((char*)buf)[offset+1]);
+ ((char *)buf)[offset], ((char *)buf)[offset+1]);
b->b.physaddr = leaf_lookup(buf+offset,
- b->b.inode->i_sb->s_blocksize - offset,
- ib->b.fileaddr, b->b.fileaddr, NULL);
+ b->b.inode->i_sb->s_blocksize
+ - offset,
+ ib->b.fileaddr, b->b.fileaddr,
+ NULL);
set_bit(B_PhysValid, &b->b.flags);
unmap_iblock(ib, buf);
}
block_adopt(&b->b, ib);
out:
-
-// up_read(&lai->ind_sem);
-
if (IS_ERR(ib))
return PTR_ERR(ib);
if (ib)
/* FIXME if phys is 0, I shouldn't need uninc credit. Should
* I not demand it? See comment in lafs_erase_dblock */
- if(!test_bit(B_UnincCredit, &blk->flags))
+ if (!test_bit(B_UnincCredit, &blk->flags))
printk("no uninc credit %s\n", strblk(blk));
- if(!test_bit(B_Dirty, &blk->flags) &&
- !test_bit(B_Realloc, &blk->flags) &&
- phys != 0)
+ if (!test_bit(B_Dirty, &blk->flags) &&
+ !test_bit(B_Realloc, &blk->flags) &&
+ phys != 0)
printk("something missing %s\n", strblk(blk));
// BUG_ON(!test_bit(B_UnincCredit, &blk->flags));
BUG_ON(!test_bit(B_Dirty, &blk->flags) &&
BUG_ON(test_bit(B_Index, &blk->flags));
- for (i=0; i < fs->maxsnapshot; i++)
+ for (i = 0; i < fs->maxsnapshot; i++)
if (fs->ss[i].root == blk->inode)
break;
BUG_ON(i == fs->maxsnapshot);
- blk->physaddr = phys; /* superblock doesn't get counted in summaries */
+ blk->physaddr = phys; /* superblock doesn't get
+				       * counted in summaries */
set_bit(B_PhysValid, &blk->flags);
fs->ss[i].root_addr = phys;
lai = LAFSI(blk->inode);
}
if (test_bit(B_Dirty, &blk->flags) || phys == 0) {
- if (!test_bit(B_Dirty, &p->b.flags) && !test_bit(B_Credit, &p->b.flags)) {
+ if (!test_bit(B_Dirty, &p->b.flags)
+ && !test_bit(B_Credit, &p->b.flags)) {
printk("Oh dear: %s\n", strblk(blk));
printk(".......: %s\n", strblk(&p->b));
}
}
if (!test_and_set_bit(B_UnincCredit, &p->b.flags))
if (!test_and_clear_bit(B_ICredit, &p->b.flags))
- BUG(); // ICredit should be set before we dirty a block.
+			BUG(); /* ICredit should be set
+				* before we dirty a block. */
}
/* same phase and leaf; add the address to uninc_table */
- while(1) {
+ while (1) {
/* the only place we try to merge is at the end
* of the embedded table, and then only for data
* blocks
a->physaddr+a->cnt == blk->physaddr) {
a->cnt++;
if (test_and_clear_bit(B_UnincCredit, &blk->flags))
- p->uninc_table.credits ++;
+ p->uninc_table.credits++;
spin_unlock(&fs->lock);
break;
}
a->physaddr = blk->physaddr;
a->cnt = 1;
p->uninc_table.pending_cnt++;
- if (test_and_clear_bit(B_UnincCredit, &blk->flags))
- p->uninc_table.credits ++;
+ if (test_and_clear_bit(B_UnincCredit,
+ &blk->flags))
+ p->uninc_table.credits++;
spin_unlock(&fs->lock);
break;
}
/*
* fs/lafs/inode.c
- * Copyright (C) 2005
+ * Copyright (C) 2005-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*
#include <linux/random.h>
#include <linux/delay.h>
-extern spinlock_t lafs_hash_lock;
-
struct inode *
lafs_iget(struct super_block *sb, ino_t inum, int async)
{
li->depth = lai->depth;
dprintk("inode %lu type is %d\n", (unsigned long)ino->i_ino, li->type);
- BUG_ON(li->type == 0x6b6b6b6b); // use after free
ino->i_mapping->a_ops = &lafs_file_aops;
li->trunc_next = 0;
- switch(li->type) {
+ switch (li->type) {
case TypeInodeFile:
{
struct fs_md *i = &li->md.fs;
i->quota_inums[0] = le32_to_cpu(l->quota_inodes[0]);
i->quota_inums[1] = le32_to_cpu(l->quota_inodes[1]);
i->quota_inums[2] = le32_to_cpu(l->quota_inodes[2]);
- i->quota_inodes[0] = i->quota_inodes[1] = i->quota_inodes[2] = NULL;
+ i->quota_inodes[0] = i->quota_inodes[1]
+ = i->quota_inodes[2] = NULL;
memcpy(i->name, l->name, 64);
i->name[64] = 0;
break;
case TypeOrphanList:
{
struct orphan_md *m = &li->md.orphan;
- m->nextfree = 0; /* FIXME should I could size of file or something? */
+		m->nextfree = 0; /* FIXME should I count size of file
+ * or something? */
m->reserved = 0;
break;
}
struct dir_metadata *d = &lai->metadata[0].dir;
struct special_metadata *s = &lai->metadata[0].special;
- if (li->type < TypeBase) goto out;
+ if (li->type < TypeBase)
+ goto out;
i->flags = le16_to_cpu(l->flags);
ino->i_mode = le16_to_cpu(l->mode);
ino->i_uid = le32_to_cpu(l->userid);
decode_time(&ino->i_mtime, le64_to_cpu(l->modifytime));
decode_time(&ino->i_ctime, le64_to_cpu(l->ctime));
decode_time(&i->i_accesstime, le64_to_cpu(l->accesstime));
- ino->i_atime = i->i_accesstime; /* FIXME load from accesstime file */
+ ino->i_atime = i->i_accesstime; /* FIXME load from
+ * accesstime file */
ino->i_size = le64_to_cpu(l->size);
i->parent = le32_to_cpu(l->parent);
ino->i_nlink = le32_to_cpu(l->linkcount);
dprintk(" mode = 0%o uid %d size %lld\n",
ino->i_mode, ino->i_uid, ino->i_size);
- switch(li->type) {
+ switch (li->type) {
case TypeFile:
ino->i_op = &lafs_file_ino_operations;
ino->i_fop = &lafs_file_file_operations;
ino->i_fop = &lafs_dir_file_operations;
ino->i_mode = (ino->i_mode & 07777) | S_IFDIR;
{
- u32 *b = (u32*)lai;
+ u32 *b = (u32 *)lai;
dprintk("Hmm. %d %d %d\n",
(int)b[24],
(int)b[25],
out:
if (err && li->type)
- printk("inode %lu type is %d\n", (unsigned long)ino->i_ino, li->type);
+ printk("inode %lu type is %d\n",
+ (unsigned long)ino->i_ino, li->type);
unmap_dblock(b, lai);
return err;
}
lai->filetype = type;
lai->flags = 0;
- switch(type)
- {
+	switch (type) {
case TypeInodeFile:
{
struct fs_metadata *l = &lai->metadata[0].fs;
l->snapshot_usage_table = 0;
l->pad = 0;
memset(l->name, 0, sizeof(l->name));
- strlcpy(l->name, "Secondary", sizeof(l->name)); /* FIXME real name?? */
+ /* FIXME real name?? */
+ strlcpy(l->name, "Secondary", sizeof(l->name));
break;
}
case TypeInodeMap:
}
size += sizeof(struct la_inode);
lai->metadata_size = cpu_to_le32(size);
- memset(((char*)lai)+size, 0, fs->prime_sb->s_blocksize-size);
- *(u16*)(((char*)lai)+size) = cpu_to_le16(IBLK_EXTENT);
+ memset(((char *)lai)+size, 0, fs->prime_sb->s_blocksize-size);
+ *(u16 *)(((char *)lai)+size) = cpu_to_le16(IBLK_EXTENT);
unmap_dblock(b, lai);
lafs_dirty_dblock(b);
{
struct fs *fs = fs_from_inode(ino);
struct datablock *b;
- dprintk("DELETE INODE %d\n",(int)ino->i_ino);
+ dprintk("DELETE INODE %d\n", (int)ino->i_ino);
if (ino->i_mode == 0) {
/* There never was an inode here,
if (IS_ERR(b))
return;
- while(test_bit(B_Orphan, &b->b.flags) &&
+ while (test_bit(B_Orphan, &b->b.flags) &&
(test_bit(I_Trunc, &LAFSI(ino)->iflags) ||
LAFSI(ino)->type == 0))
switch (lafs_inode_handle_orphan(b)) {
struct indexblock *ib = data;
struct inode *ino = ib->b.inode;
struct fs *fs = fs_from_inode(ino);
- int ph = !! test_bit(B_Phase1, &ib->b.flags);
+ int ph = !!test_bit(B_Phase1, &ib->b.flags);
int i;
if (paddr == 0 || len == 0)
return 0;
struct indexblock *ib = data;
struct inode *ino = ib->b.inode;
struct fs *fs = fs_from_inode(ino);
- int ph = !! test_bit(B_Phase1, &ib->b.flags);
+ int ph = !!test_bit(B_Phase1, &ib->b.flags);
int i;
if (paddr == 0 || len == 0)
return 0;
a->fileaddr = addr + i;
a->physaddr = paddr + i;
a->cnt = 1;
- ib->uninc_table.pending_cnt ++;
+ ib->uninc_table.pending_cnt++;
} else {
spin_unlock(&fs->lock);
break;
wake_up(&fs->trunc_wait);
putiref(ib, MKREF(inode_handle_orphan));
return 1; /* Try again whenever */
- }
+ }
/* ib2 is an index block beyond EOF with no
* Pinned children.
if (!list_empty(&ib2->b.siblings)) {
static int cnt = 10;
printk("looping on %s\n", strblk(&ib2->b));
- cnt --;
- if (cnt < 0) BUG();
+ cnt--;
+ if (cnt < 0)
+ BUG();
}
putiref(ib, MKREF(inode_handle_orphan));
return 1; /* Try again whenever */
lafs_cluster_update_abort(&uh);
else {
lafs_checkpoint_lock(fs);
- if (lafs_cluster_update_pin(&uh)==0) {
+ if (lafs_cluster_update_pin(&uh) == 0) {
if (test_and_clear_bit(B_Dirty, &b->b.flags))
lafs_space_return(fs, 1);
LAFSI(ino)->update_cluster =
}
if (wait) {
-
if (LAFSI(ino)->update_cluster)
lafs_cluster_wait(fs, LAFSI(ino)->update_cluster);
else {
struct dir_metadata *d = &lai->metadata[0].dir;
struct special_metadata *s = &lai->metadata[0].special;
- if (li->type < TypeBase) break;
+ if (li->type < TypeBase)
+ break;
l->flags = cpu_to_le16(i->flags);
l->mode = cpu_to_le16(ino->i_mode);
l->userid = cpu_to_le32(ino->i_uid);
l->parent = cpu_to_le32(i->parent);
l->linkcount = cpu_to_le32(ino->i_nlink);
- switch(li->type) {
+ switch (li->type) {
case TypeFile:
break;
case TypeDir:
}
unmap_dblock(db, lai);
lafs_dirty_dblock(db);
-}
+}
/*-----------------------------------------------------------------------
* Inode allocate map handling.
if (bnum+1 < LAFSI(im)->md.inodemap.size)
bnum++;
- else if (! *restarted) {
+ else if (!*restarted) {
bnum = 0;
*restarted = 1;
} else {
bit = LAFSI(im)->md.inodemap.nextbit;
LAFSI(im)->md.inodemap.thisblock = bnum;
buf = map_dblock(b);
- bit = find_next_bit((unsigned long *)buf, im->i_sb->s_blocksize<<3, bit);
+ bit = find_next_bit((unsigned long *)buf,
+ im->i_sb->s_blocksize<<3, bit);
unmap_dblock(b, buf);
LAFSI(im)->md.inodemap.nextbit = bit+1;
if (bit >= im->i_sb->s_blocksize<<3)
goto retry;
mutex_unlock(&im->i_mutex);
- *inump = bit + ( bnum << (im->i_blkbits + 3));
+ *inump = bit + (bnum << (im->i_blkbits + 3));
return 0;
abort_unlock:
retry:
if (inum == 0)
/* choose a possibly-free inode number */
- err = choose_free_inum(fs, filesys, &choice, &imni->mb, &restarted);
+ err = choose_free_inum(fs, filesys, &choice,
+ &imni->mb, &restarted);
if (err)
return err;
- if (choice == 327) printk("================= allocateing inode 327=====\n");
b = lafs_get_block(filesys, choice, NULL, GFP_KERNEL,
MKREF(cfi_ino));
if (!b)
inode_map_new_pin(struct inode_map_new_info *imni)
{
int err = lafs_pin_dblock(imni->mb);
- if (imni->ib->b.fileaddr == 327) printk("==== pinning 327 %d\n", err);
err = err ?: lafs_pin_dblock(imni->ib);
return err;
}
err = inode_map_new_prepare(fs, inum, filesys, &imni);
err = lafs_orphan_prepare(fs, &oi) ?: err;
- err = lafs_cluster_update_prepare(&ui, fs, sizeof(struct la_inode)) ?: err;
+ err = lafs_cluster_update_prepare(&ui, fs, sizeof(struct la_inode))
+ ?: err;
if (err)
goto abort;
retry:
lafs_cluster_update_abort(&ui);
BUG();
} else
- lafs_cluster_update_commit(&ui, b, 0, LAFSI(ino)->metadata_size);
+ lafs_cluster_update_commit(&ui, b, 0,
+ LAFSI(ino)->metadata_size);
clear_bit(B_PinPending, &b->b.flags);
BUG_ON(LAFSI(ino)->dblock != b);
BUG_ON(b->my_inode != ino);
/*
* IO routines for LaFS
* fs/lafs/io.c
- * Copyright (C) 2006
+ * Copyright (C) 2006-2009
* NeilBrown <neilb@suse.de>
* Released under the GPL, version 2
*/
lafs_dev_find(struct fs *fs, u64 virt)
{
int i;
- for (i=0; i <fs->devices; i++)
+ for (i = 0; i < fs->devices; i++)
if (virt >= fs->devs[i].start &&
virt < fs->devs[i].start + fs->devs[i].size)
return i;
printk("%llu not found:\n", (unsigned long long) virt);
- for (i=0; i<fs->devices; i++)
+ for (i = 0; i < fs->devices; i++)
printk(" %d: %llu+%llu\n", i,
(unsigned long long)fs->devs[i].start,
(unsigned long long)fs->devs[i].size);
static void bi_complete(struct bio *bio, int error)
{
- complete((struct completion*)bio->bi_private);
+ complete((struct completion *)bio->bi_private);
}
int
-lafs_sync_page_io(struct block_device *bdev, sector_t sector, int offset, int size,
+lafs_sync_page_io(struct block_device *bdev, sector_t sector,
+ int offset, int size,
struct page *page, int rw)
{
struct bio *bio = bio_alloc(GFP_NOIO, 1);
lafs_super_wait(struct fs *fs)
{
wait_event(fs->sb_writes_wait,
- atomic_read(&fs->sb_writes_pending)==0
+ atomic_read(&fs->sb_writes_pending) == 0
);
return 0; /* FIXME should be an error flag */
}
{
if (test_and_set_bit(B_IOLock, &b->flags)) {
DEFINE_WAIT(wq);
- for(;;) {
+ for (;;) {
prepare_to_wait(&block_wait, &wq, TASK_UNINTERRUPTIBLE);
if (test_and_set_bit(B_IOLock, &b->flags))
schedule();
- else break;
+ else
+ break;
}
finish_wait(&block_wait, &wq);
}
dprintk("unlock non-data block\n");
return;
}
- blist = (struct datablock*)db->page->private;
+ blist = (struct datablock *)db->page->private;
n = 1<<(PAGE_CACHE_SHIFT - b->inode->i_blkbits);
bit_spin_lock(B_IOLockLock, &blist->b.flags);
clear_bit(bit, &b->flags);
/* keep page locked during lafs_readpage */
locked = -1;
else
- for (i=0 ; i<n; i++) {
+ for (i = 0 ; i < n; i++) {
if (test_bit(B_IOLock, &blist[i].b.flags))
- locked ++;
+ locked++;
/* FIXME what about checking uptodate ?? */
}
bit_spin_unlock(B_IOLockLock, &blist->b.flags);
{
if (test_bit(B_IOLock, &b->flags)) {
DEFINE_WAIT(wq);
- for(;;) {
+ for (;;) {
prepare_to_wait(&block_wait, &wq, TASK_UNINTERRUPTIBLE);
if (test_bit(B_IOLock, &b->flags))
schedule();
- else break;
+ else
+ break;
}
finish_wait(&block_wait, &wq);
}
lafs_wait_block_async(struct block *b)
{
if (test_bit(B_IOLock, &b->flags)) {
- if (! test_and_set_bit(B_Async, &b->flags))
+ if (!test_and_set_bit(B_Async, &b->flags))
getref(b, MKREF(async));
}
if (!test_bit(B_IOLock, &b->flags)) {
dprintk("loaded %d of %d\n", (int)b->fileaddr, (int)b->inode->i_ino);
if (test_bit(BIO_UPTODATE, &bio->bi_flags)) {
- set_bit(B_Valid, &b->flags); /* FIXME should I set an error too? */
+		set_bit(B_Valid, &b->flags); /* FIXME should I set
+					      * an error too? */
} else if (!test_bit(B_Index, &b->flags) && dblk(b)->page) {
ClearPageUptodate(dblk(b)->page);
SetPageError(dblk(b)->page);
- } else dprintk("Block with no page!!\n");
+ } else
+ dprintk("Block with no page!!\n");
/* FIXME signal error for index block */
lafs_iounlock_block(b, B_IOLock);
}
dprintk("loaded %d of %d\n", (int)b->fileaddr, (int)b->inode->i_ino);
if (test_bit(BIO_UPTODATE, &bio->bi_flags)) {
- set_bit(B_Valid, &b->flags); /* FIXME should I set an error too? */
+		set_bit(B_Valid, &b->flags); /* FIXME should I set
+					      * an error too? */
} else if (!test_bit(B_Index, &b->flags) && dblk(b)->page) {
ClearPageUptodate(dblk(b)->page);
SetPageError(dblk(b)->page);
- } else dprintk("Block with no page!!\n");
+ } else
+ dprintk("Block with no page!!\n");
lafs_iounlock_block(b, 0);
}
memset(baddr, 0, (1<<b->inode->i_blkbits));
unmap_dblock(db, baddr);
set_bit(B_Valid, &b->flags);
- lafs_iounlock_block(b, unlock ? B_IOLock:0);
- // dprintk("easy-loaded %d of %d\n", (int)b->fileaddr, (int)b->inode->i_ino);
+ lafs_iounlock_block(b, unlock ? B_IOLock : 0);
return 0;
}
page = db->page;
virttophys(fs, b->physaddr, &dev, §);
if (dev < 0) {
- lafs_iounlock_block(b, unlock?B_IOLock:0);
+ lafs_iounlock_block(b, unlock ? B_IOLock : 0);
return -EIO;
}
u64 virt, int dev, struct wc *wc, int head)
{
struct bio *bio = bio_alloc(GFP_NOIO, 1);
- sector_t sect = 0;/* FIXME this initialisation is only needed with -Werror */
+	sector_t sect = 0; /* FIXME this initialisation is
+			    * only needed with -Werror */
int which = wc->pending_next;
virttophys(fs, virt, &dev, §);
+/*
+ * fs/lafs/lafs.h
+ * Copyright (C) 2005-2009
+ * Neil Brown <neilb@suse.de>
+ * Released under the GPL, version 2
+ */
+
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/highmem.h>
extern int lafs_trace;
#define dprintk(x...) do { if(lafs_trace)printk(x); }while(0)
+#ifdef DUMP
+extern struct fs *dfs;
+extern struct freelists {
+ struct list_head lru;
+ unsigned long freecnt;
+} freelist;
+extern void lafs_dump_tree(void);
+#endif
+
#if DEBUG_REF
#define REFARG char *__refname
#define REF __refname
#endif
#define strblk(a) lafs_strblk(a)
-
static inline int
u32_after(u32 a, u32 b)
{
extern struct inode_operations lafs_dir_ino_operations;
extern struct inode_operations lafs_link_ino_operations;
extern struct inode_operations lafs_special_ino_operations;
-extern struct file_operations lafs_file_file_operations;
-extern struct file_operations lafs_dir_file_operations;
+extern const struct file_operations lafs_file_file_operations;
+extern const struct file_operations lafs_dir_file_operations;
extern struct address_space_operations lafs_file_aops;
extern struct address_space_operations lafs_index_operations;
#define _reflog(c,blk,ref,add) (c(blk))
#else
#define _reflog(c,blk,ref,add) ({ add? add_ref(blk,ref,__FILE__,__LINE__) : \
- del_ref(blk,ref, __FILE__,__LINE__); c(blk); })
+ del_ref(blk,ref, __FILE__,__LINE__); c(blk); })
#endif
#define getref(blk, r) ( ({BUG_ON((blk) && ! atomic_read(&(blk)->refcnt));}), _reflog(_getref, blk,r,1))
#define getref_locked(blk, r) _reflog(_getref_locked, blk,r,1)
#define putdref(blk, r) ({ if (blk) _reflog(_putref, (&(blk)->b),r,0);})
#define putiref(blk, r) ({ if (blk) _reflog(_putref, (&(blk)->b),r,0);})
-
int __must_check lafs_setparent(struct datablock *blk);
/*
wait_event(fs->wc[0].pending_wait,
fs->wc[0].cluster_seq > seq);
}
+void lafs_cluster_wait_all(struct fs *fs);
int lafs_cluster_empty(struct fs *fs, int cnum);
/* super.c */
-
-/* layout of device superblock
+/*
+ * fs/lafs/layout.h
+ * Copyright (C) 2005-2009
+ * Neil Brown <neilb@suse.de>
+ * Released under the GPL, version 2
+ *
+ * layout of device superblock
* and array state block
*/
#include "lafs.h"
/*
+ * fs/lafs/modify.c
+ * Copyright (C) 2005-2009
+ * Neil Brown <neilb@suse.de>
+ * Released under the GPL, version 2
+ *
* Index incorporation.
* We have an index block (indirect, extent, or index), which may contain
* some addresses already.
static int addr_cmp(const void *av, const void *bv)
{
- const struct addr *a=av;
- const struct addr *b=bv;
+ const struct addr *a = av;
+ const struct addr *b = bv;
return a->fileaddr - b->fileaddr;
}
char *ac = a;
char *bc = b;
while (len--) {
- int t= *ac;
+ int t = *ac;
*ac++ = *bc;
*bc++ = t;
}
/* simple quadratic sort for now...
*/
int i, j;
- for (i=0; i<cnt; i++) {
+ for (i = 0; i < cnt; i++) {
/* the first i are sorted */
- for (j=cnt-1; j>i ; j--) {
+ for (j = cnt-1; j > i ; j--) {
/* table[j] is less that j+x */
void *a = table + (j-1) * size;
void *b = table + (j) * size;
- if (cmp(a,b) > 0)
- swap(a,b,size);
+ if (cmp(a, b) > 0)
+ swap(a, b, size);
}
}
// FIXME need to check for overlapping extents and fail
/* sort blocks list in *blkp by ->fileaddr
* use merge sort
*/
- int cnt=0;
+ int cnt = 0;
struct block *bl[2];
bl[0] = *blkp;
/*Ok, they fit. */
buf += e*12;
- for (i=0; i<ui->pending_cnt; i++)
+ for (i = 0; i < ui->pending_cnt; i++)
if (ui->pending_addr[i].physaddr) {
credits += ui->pending_addr[i].cnt;
encode48(buf, ui->pending_addr[i].physaddr);
}
for (blk = uninc ; blk ; blk = blk->chain)
ucnt++;
- i=0;
+ i = 0;
ncnt = 0;
BUG_ON(ucnt == 0);
uaddr = uninc->fileaddr;
buf = map_iblock(ib);
memset(buf+offset, 0, len - offset);
- //printk("clear to depth %d: %s\n", ib->depth, strblk(&ib->b));
if (ib->depth == 1)
- *(u16*)(buf+offset) = cpu_to_le16(IBLK_INDIRECT);
+ *(u16 *)(buf+offset) = cpu_to_le16(IBLK_INDIRECT);
else if (ib->depth > 1)
- *(u16*)(buf+offset) = cpu_to_le16(IBLK_INDEX);
+ *(u16 *)(buf+offset) = cpu_to_le16(IBLK_INDEX);
unmap_iblock(ib, buf);
}
INIT_LIST_HEAD(&new->b.siblings);
new->depth = ib->depth + 1;
- LAFSI(new->b.inode)->depth ++;
+ LAFSI(new->b.inode)->depth++;
BUG_ON(new->depth != LAFSI(new->b.inode)->depth);
lafs_clear_index(new);
static void print_index(char *buf, u32 start, int len)
{
- int type = le16_to_cpu(*(u16*)(buf));
+ int type = le16_to_cpu(*(u16 *)(buf));
char *p = buf+2;
u32 addr;
int size;
u64 phys;
len -= 2;
- switch(type) {
+ switch (type) {
case IBLK_INDIRECT:
printk(" Indirect:\n");
while (len >= 6) {
phys = decode48(p);
- if (phys) printk(" %u:%lu", start, (unsigned long)phys);
+ if (phys)
+ printk(" %u:%lu", start, (unsigned long)phys);
start++;
- len-= 6;
+ len -= 6;
}
printk("\n");
break;
if (size)
printk(" %u-%u: %llu\n", addr, addr+size-1,
(unsigned long long) phys);
- len-= 12;
+ len -= 12;
}
break;
case IBLK_INDEX:
{
int i;
printk(" pending=%d\n", ui->pending_cnt);
- for (i=0; i<ui->pending_cnt; i++)
+ for (i = 0; i < ui->pending_cnt; i++)
printk(" [%d] %u-%u : %lu\n", i,
ui->pending_addr[i].fileaddr,
ui->pending_addr[i].fileaddr
li->data = cp;
return 0;
}
- if (phys == ~0LL) {
+ if (phys == ~0LL)
return 0;
- }
-
+
if (li->size < 10)
return 0;
p = li->data + (addr - li->lastaddr) * 6;
lastaddr = li->lastaddr + (li->size/6);
- for (i=0; i<len && addr < lastaddr ; i++) {
+ for (i = 0; i < len && addr < lastaddr ; i++) {
encode48(p, phys);
- addr ++;
- phys ++;
+ addr++;
+ phys++;
}
return i;
}
if (len) {
/* need a new extent */
li->extents++;
- if ( li->extents * 12 > li->blksize) {
+ if (li->extents * 12 > li->blksize) {
/* But it won't fit */
if (!li->choice)
li->choice = IBLK_INDIRECT;
else
handled = hlen;
/* That might replace some of current extent */
- overlap= (ui->pending_addr[uinum].fileaddr + uioffset +
- handled) - eaddr;
+ overlap = (ui->pending_addr[uinum].fileaddr + uioffset +
+ handled) - eaddr;
if (elen && overlap > 0) {
if (overlap > elen)
overlap = elen;
len = fs->prime_sb->s_blocksize - offset;
handle(data, ib->b.fileaddr, 0, len);
- current_layout = le16_to_cpu(*(u16*)(ibuf+offset));
+ current_layout = le16_to_cpu(*(u16 *)(ibuf+offset));
buf = ibuf + offset + 2;
dprintk("CURRENT=%d\n", current_layout);
- switch(current_layout) {
+ switch (current_layout) {
case IBLK_INDIRECT:
walk_indirect(ib->b.fileaddr,
&buf, len-2, &ui,
u32 len;
int moved = 0;
- for (i = 0; i<from->pending_cnt; i++) {
+ for (i = 0; i < from->pending_cnt; i++) {
ia = &from->pending_addr[i];
if (ia->fileaddr +
ia->cnt <= next)
len = fs->prime_sb->s_blocksize - offset;
check_leaf(&leafinfo, ib->b.fileaddr, 0, len);
- current_layout = le16_to_cpu(*(u16*)(ibuf+offset));
+ current_layout = le16_to_cpu(*(u16 *)(ibuf+offset));
buf = ibuf + offset + 2;
- switch(current_layout) {
+ switch (current_layout) {
case IBLK_INDIRECT:
walk_indirect(ib->b.fileaddr,
&buf, len-2, ui,
case IBLK_INDIRECT:
next = walk_indirect(ib->b.fileaddr,
&buf, len-2, ui,
- choice == IBLK_EXTENT ? add_extent : add_indirect,
+ (choice == IBLK_EXTENT)
+ ? add_extent : add_indirect,
&layout);
break;
case IBLK_EXTENT:
next = walk_extent(ib->b.fileaddr,
&buf, len-2, ui,
- choice == IBLK_EXTENT ? add_extent : add_indirect,
+ (choice == IBLK_EXTENT)
+ ? add_extent : add_indirect,
&layout);
break;
}
if (ui->pending_addr[uinum].fileaddr < next) {
int cnt = ui->pending_addr[uinum].cnt
- (next - ui->pending_addr[uinum].fileaddr);
- ui->pending_addr[uinxt].physaddr = ui->pending_addr[uinum].physaddr
+ ui->pending_addr[uinxt].physaddr =
+ ui->pending_addr[uinum].physaddr
+ (next - ui->pending_addr[uinum].fileaddr);
ui->pending_addr[uinxt].fileaddr = next;
ui->pending_addr[uinxt].cnt = cnt;
} else {
- ui->pending_addr[uinxt].fileaddr = ui->pending_addr[uinum].fileaddr;
- ui->pending_addr[uinxt].physaddr = ui->pending_addr[uinum].physaddr;
- ui->pending_addr[uinxt].cnt = ui->pending_addr[uinum].cnt;
+ ui->pending_addr[uinxt].fileaddr =
+ ui->pending_addr[uinum].fileaddr;
+ ui->pending_addr[uinxt].physaddr =
+ ui->pending_addr[uinum].physaddr;
+ ui->pending_addr[uinxt].cnt =
+ ui->pending_addr[uinum].cnt;
}
uinxt++;
}
next2 = walk_extent(next, &sbuf, slen, ui, add_extent, &layout);
BUG_ON(next2 != 0);
if (slen && layout.data > sbuf) {
- printk("slen=%d ld-sb=%d layout.data=%p sbuf=%p buf=%p ibuf=%p len=%d\n",
- slen, layout.data-sbuf, layout.data, sbuf, buf, ibuf, len);
+ printk("slen=%d ld-sb=%d layout.data=%p sbuf=%p "
+ "buf=%p ibuf=%p len=%d\n",
+ slen, layout.data-sbuf, layout.data, sbuf,
+ buf, ibuf, len);
}
BUG_ON(slen && layout.data > sbuf);
memset(layout.data, 0, layout.size);
offset = 0;
len = fs->prime_sb->s_blocksize - offset;
- current_layout = le16_to_cpu(*(u16*)(ibuf+offset));
+ current_layout = le16_to_cpu(*(u16 *)(ibuf+offset));
buf = ibuf + offset + 2;
if (current_layout != IBLK_INDEX)
BUG();
offset = LAFSI(ib->b.inode)->metadata_size;
buf += offset;
if (LAFSI(ib->b.inode)->depth == 0) {
- /* data has already been copied out.. I hope - FIXME */
+ /* data has already been copied
+ * out.. I hope - FIXME */
memset(buf, 0, blocksize - offset);
- *(u16*)(buf) = cpu_to_le16(IBLK_EXTENT);
+ *(u16 *)(buf) = cpu_to_le16(IBLK_EXTENT);
LAFSI(ib->b.inode)->depth = 1;
ib->depth = 1;
}
offset = 0;
/* Maybe a direct update of an indirect block */
- if (*(u16*)(buf) == cpu_to_le16(IBLK_INDIRECT) &&
+ if (*(u16 *)(buf) == cpu_to_le16(IBLK_INDIRECT) &&
incorporate_indirect(&uit, buf+2,
ib->b.fileaddr,
blocksize-(offset+2))) {
}
/* Maybe a fairly direct update of an extent block */
- if (*(u16*)(buf) == cpu_to_le16(IBLK_EXTENT) &&
+ if (*(u16 *)(buf) == cpu_to_le16(IBLK_EXTENT) &&
incorporate_extent(&uit, buf+2,
blocksize-(offset+2))) {
unmap_iblock(ib, buf-offset);
}
dprintk("Index contains:\n");
- if (lafs_trace) print_index(buf, ib->b.fileaddr, blocksize - offset);
+ if (lafs_trace)
+ print_index(buf, ib->b.fileaddr, blocksize - offset);
dprintk("uninc contains:\n");
if (lafs_trace)
lafs_print_uninc(&uit);
unmap_iblock(ib, buf-offset);
} else {
- int cred;
+ int cred;
uit.credits = 0;
BUG_ON(ib->uninc_table.pending_cnt);
offset = 0;
/* internal index block. Might be able to merge in-place */
- if (*(u16*)(buf) == cpu_to_le16(IBLK_INDEX) &&
+ if (*(u16 *)(buf) == cpu_to_le16(IBLK_INDEX) &&
(cred = incorporate_index(uninc, buf+2,
blocksize-(offset+2))) >= 0) {
unmap_iblock(ib, buf-offset);
}
dprintk("Index contains:\n");
- if (lafs_trace) print_index(buf, ib->b.fileaddr, blocksize - offset);
+ if (lafs_trace)
+ print_index(buf, ib->b.fileaddr, blocksize - offset);
/* FIXME trace the contents of the uninc list */
unmap_iblock(ib, buf-offset);
new = lafs_iblock_alloc(fs, GFP_NOFS, 1, MKREF(inc));
/* FIXME need to preallocate something for a fall-back?? */
- if (ib->depth < 1) printk("small depth %s\n", strblk(&ib->b));
+ if (ib->depth < 1)
+ printk("small depth %s\n", strblk(&ib->b));
BUG_ON(ib->depth < 1);
- if (ib->depth == 1) {
+ if (ib->depth == 1)
rv = do_incorporate_leaf(fs, ib, &uit, new);
- } else
+ else
rv = do_incorporate_internal(fs, ib, uninc, &uit.credits, new);
- switch(rv) {
+ switch (rv) {
case 0:
/* There is nothing in this block any more.
* If it is an inode, clear it, else punch a hole
*/
- dprintk("incorp to empty off=%d %s\n", (int)offset, strblk(&ib->b));
+ dprintk("incorp to empty off=%d %s\n",
+ (int)offset, strblk(&ib->b));
lafs_iblock_free(new);
buf = map_iblock(ib);
memset(buf + offset, 0, blocksize - offset);
- *(u16*)(buf) = cpu_to_le16(IBLK_INDIRECT);
+ *(u16 *)(buf) = cpu_to_le16(IBLK_INDIRECT);
unmap_iblock(ib, buf);
#if 0
/* This is a hang-over that I don't think is needed any more
break;
case 1: /* everything was incorporated - hurray */
-//printk("A\n");
lafs_iblock_free(new);
-//printk("A\n");
/* Don't need to dirty, it is already dirty */
break;
case 2: /* Simple split */
- /* The addresses are now in 'ib', 'new' and possibly new->uninc_table.
- * 'new' has been linked in to the parent.
+ /* The addresses are now in 'ib', 'new' and possibly
+ * new->uninc_table. 'new' has been linked in to the
+ * parent.
*/
uit.credits -= 2;
set_bit(B_Credit, &new->b.flags);
case 3: /* Need to grow */
/* new needs a B_Credit and a B_ICredit.
*/
-
+
uit.credits -= 2;
set_bit(B_Credit, &new->b.flags);
set_bit(B_ICredit, &new->b.flags);
}
out:
- //printk("Done Incorp %s\n", strblk(&ib->b));
if (uit.credits < 0) {
printk("Credits = %d, rv=%d\n", uit.credits, rv);
printk("ib = %s\n", strblk(&ib->b));
int need;
int credits = 0;
- BUG_ON(why != CleanSpace && !test_phase_locked(fs) && !fs->checkpointing);
+ BUG_ON(why != CleanSpace && !test_phase_locked(fs)
+ && !fs->checkpointing);
retry:
need = 0;
while (b) {
if (credits <= 0)
- need += ! test_bit(B_Credit, &b->flags);
- else if (! test_and_set_bit(B_Credit, &b->flags))
+ need += !test_bit(B_Credit, &b->flags);
+ else if (!test_and_set_bit(B_Credit, &b->flags))
credits--;
if (credits <= 0)
- need += ! test_bit(B_ICredit, &b->flags);
- else if (! test_and_set_bit(B_ICredit, &b->flags))
+ need += !test_bit(B_ICredit, &b->flags);
+ else if (!test_and_set_bit(B_ICredit, &b->flags))
credits--;
if (test_bit(B_Index, &b->flags) ||
LAFSI(b->inode)->type == TypeQuota ||
LAFSI(b->inode)->type == TypeSegmentMap) {
if (credits <= 0)
- need += ! test_bit(B_NCredit, &b->flags);
- else if (! test_and_set_bit(B_NCredit, &b->flags))
+ need += !test_bit(B_NCredit, &b->flags);
+ else if (!test_and_set_bit(B_NCredit, &b->flags))
credits--;
if (credits <= 0)
- need += ! test_bit(B_NICredit, &b->flags);
- else if (! test_and_set_bit(B_NICredit, &b->flags))
+ need += !test_bit(B_NICredit, &b->flags);
+ else if (!test_and_set_bit(B_NICredit, &b->flags))
credits--;
}
if (test_bit(B_InoIdx, &b->flags))
struct fs *fs = fs_from_inode(b->inode);
wait_event(fs->phase_wait,
- ! test_bit(B_Pinned, &b->flags) ||
+ !test_bit(B_Pinned, &b->flags) ||
(!!test_bit(B_Phase1, &b->flags)) == fs->phase);
}
/*
* fs/lafs/inode.c
- * Copyright (C) 2006
+ * Copyright (C) 2006-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*
#include "lafs.h"
#ifdef DUMP
-extern struct fs *dfs;
void lafs_dump_orphans(void)
{
struct orphan_md *om;
int slot;
- if (!dfs || ! dfs->orphans) {
+ if (!dfs || !dfs->orphans) {
printk("No orphan FS !!\n");
return;
}
mutex_lock_nested(&dfs->orphans->i_mutex, I_MUTEX_QUOTA);
om = &LAFSI(dfs->orphans)->md.orphan;
- printk("nextfree = %u",(unsigned)om->nextfree);
- printk("reserved = %u",(unsigned)om->reserved);
+ printk("nextfree = %u", (unsigned)om->nextfree);
+ printk("reserved = %u", (unsigned)om->reserved);
for (slot = 0; slot < om->nextfree; slot++) {
struct orphan *or;
oi->ob = NULL;
oi->fs = fs;
mutex_lock_nested(&fs->orphans->i_mutex, I_MUTEX_QUOTA);
- bnum = (om->nextfree + om->reserved) >> (fs->prime_sb->s_blocksize_bits-4);
+ bnum = (om->nextfree + om->reserved) >>
+ (fs->prime_sb->s_blocksize_bits-4);
b = lafs_get_block(fs->orphans, bnum, NULL, GFP_KERNEL, MKREF(orphan));
if (b) {
b->orphan_slot = slot;
set_bit(B_Orphan, &b->b.flags);
dprintk("%p->orphan_slot=%d (%lu,%lu,%lu)\n", b, b->orphan_slot,
- LAFSI(b->b.inode)->filesys->i_ino,b->b.inode->i_ino, b->b.fileaddr);
+ LAFSI(b->b.inode)->filesys->i_ino,
+ b->b.inode->i_ino, b->b.fileaddr);
oi->reserved = 0;
om->nextfree++;
/* If this was the last block in the file,
* we need to punch a hole
*/
- if (((om->nextfree + om->reserved +1) >>
+ if (((om->nextfree + om->reserved + 1) >>
(fs->prime_sb->s_blocksize_bits-4))
!= bnum)
lafs_erase_dblock(b);
ent = b->orphan_slot & ((1<<shift)-1);
- dprintk("os=%d sh=%d ent=%d nf=%d\n", b->orphan_slot, shift, ent, om->nextfree);
+ dprintk("os=%d sh=%d ent=%d nf=%d\n", b->orphan_slot, shift,
+ ent, om->nextfree);
if (b->orphan_slot != om->nextfree-1) {
/* need to swap in the last entry */
lastor[lastent].type = 0;
unmap_dblock_2(ob2, lastor);
- /* FIXME these should not create the block if it doesn't exist */
+ /* FIXME these should not create the block if it
+ * doesn't exist */
bfs = lafs_get_block(fs->ss[0].root, le32_to_cpu(last.filesys),
NULL, GFP_KERNEL, MKREF(orphan));
if (bfs && bfs->my_inode)
bi = lafs_get_block(bfs->my_inode,
le32_to_cpu(last.inum),
NULL, GFP_KERNEL, MKREF(orphan));
- else bi = NULL;
+ else
+ bi = NULL;
putdref(bfs, MKREF(orphan));
if (bi && bi->my_inode)
bbl = lafs_get_block(bi->my_inode,
le32_to_cpu(last.addr),
NULL, GFP_KERNEL, MKREF(orphan));
- else bbl = NULL;
+ else
+ bbl = NULL;
putdref(bi, MKREF(orphan));
- dprintk("O bfs=%p(%p) bi=%p bbl=%p lastent=%d fs=%d in=%d a=%d\n",
+ dprintk("O bfs=%p(%p) bi=%p bbl=%p lastent=%d "
+ "fs=%d in=%d a=%d\n",
bfs, bfs->my_inode, bi, bbl, lastent,
le32_to_cpu(last.filesys),
le32_to_cpu(last.inum),
/* If this was the last block in the file,
* we need to punch a hole
*/
- if (((om->nextfree + om->reserved +1) >> (b->b.inode->i_blkbits-4))
+ if (((om->nextfree + om->reserved + 1) >> (b->b.inode->i_blkbits-4))
!= bnum)
/* FIXME share code with orphan_abort */
lafs_erase_dblock(b);
/*
* fs/lafs/roll.c
- * Copyright (C) 2005
+ * Copyright (C) 2005-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*
#include "lafs.h"
-#if 0
-static int find_super(struct fs *fs, int ssnum, u64 *addr)
-{
- struct page *p, *p2;
- int hlen, glen;
- struct cluster_head *ch;
- struct group_head *gr;
- struct descriptor *dp;
- u64 root_addr;
- int offset;
- int err;
- struct la_inode *lai;
-
- p = alloc_page(GFP_KERNEL);
- p2 = alloc_page(GFP_KERNEL);
- if (!p || !p2) {
- if (p) put_page(p);
- if (p2) put_page(p2);
- return -ENOMEM;
- }
-
- dprintk("Super-cluster for %d is at %llu\n",
- ssnum, (unsigned long long)fs->ss[ssnum].checkpointcluster);
-
- err = lafs_load_page(fs, p, fs->ss[ssnum].checkpointcluster, 1);
- if (err)
- goto out;
-
- ch = (struct cluster_head *)page_address(p);
-
- if (strncmp(ch->idtag, "LaFSHead", 8) != 0) {
- dprintk("No clusterhead found!\n");
- err = -EINVAL;
- goto out;
- }
- dprintk("Yep, found the clusterhead.\n");
- dprintk("First group is for inode %lu, block %lu\n",
- (unsigned long) ch->groups[0].inum,
- (unsigned long) ch->groups[0].u.desc[0].block_num);
-
- /* Check each 0,0,0 block to see if it is for this snapshot */
- hlen = le16_to_cpu(ch->Hlength);
- offset = (hlen + fs->prime_sb->s_blocksize-1)/(fs->prime_sb->s_blocksize);
- gr = ch->groups;
- hlen -= offsetof(struct cluster_head, groups);
- while (hlen > 0) {
- if (gr->inum || gr->fsnum) {
- hlen -= gr->group_size_words * 4;
- gr = (struct group_head*)(((char*)gr)+gr->group_size_words*4);
- continue;
- }
- /* It's the right file, look for block 0 */
- dp = &gr->u.desc[0];
- glen = gr->group_size_words - offsetof(struct group_head, u)/4;
- while (glen) {
- if (dp->block_type != cpu_to_le16(DescIndex) &&
- le16_to_cpu(dp->block_type) > DescMiniOffset) {
- /* must be a miniblock, skip it */
- glen -= (sizeof(struct miniblock) +
- ROUND_UP(le16_to_cpu(((struct miniblock*)dp)->length)-
- DescMiniOffset
- ))/4;
- dp = (struct descriptor*)((char*)dp)+(sizeof(struct miniblock) +
- ROUND_UP(le16_to_cpu(((struct miniblock*)dp)->length)));
- continue;
- }
- /* It is a descriptor */
- if (dp->block_num != 0) {
- offset += le16_to_cpu(dp->block_cnt);
- dp ++;
- glen -= sizeof(dp)/4;
- continue;
- }
- /* We have a root block, for some snapshot.
- * Read it in and check the snapshot_usage_table
- */
- root_addr = fs->ss[ssnum].checkpointcluster + offset;
- dprintk("Possible root inode is at %llu\n", (unsigned long long)root_addr);
-
- err = lafs_load_page(fs, p2, root_addr, 1);
- if (err)
- goto out;
-
- lai = (struct la_inode*)page_address(p2);
- if (__le16_to_cpu(lai->metadata[0].fs.snapshot_usage_table)
- == ssnum+1) {
- dprintk("Found it!!!\n");
- *addr = root_addr;
- err = 0;
- goto out;
- }
- dprintk("Nope, that was for %d\n",
- __le16_to_cpu(lai->metadata[0].fs.snapshot_usage_table)-1);
-
- offset += __le16_to_cpu(dp->block_cnt);
- dp ++;
- glen -= sizeof(dp)/4;
- }
- hlen -= gr->group_size_words * 4;
- gr = (struct group_head*)(((char*)gr)+gr->group_size_words*4);
- }
- err = -ENOENT; /* FIXME */
- out:
- put_page(p);
- put_page(p2);
- return err;
-}
-#endif
-
static int
roll_valid(struct fs *fs, struct cluster_head *ch, unsigned long long addr)
{
int max = 0;
int prevtype, prev2type;
- if (!p) return -ENOMEM;
- ch = (struct cluster_head*)page_address(p);
+ if (!p)
+ return -ENOMEM;
+ ch = (struct cluster_head *)page_address(p);
- this = start; prev=start;
+ this = start; prev = start;
do {
- if (lafs_load_page(fs, p, this, 1)!=0) {
+ if (lafs_load_page(fs, p, this, 1) != 0) {
printk(KERN_ERR "LaFS: Could not read cluster %llu\n",
(unsigned long long) this);
return -EIO;
max = le16_to_cpu(ch->Hlength);
prev = this;
this = le64_to_cpu(ch->next_addr);
- seq ++;
+ seq++;
} while (!(ch->flags & CH_CheckpointEnd));
/* 'seq' is sequence number of 'this' */
prevtype = prev2type = VerifyNull;
while (1) {
-//printk("Checking %llu\n", (unsigned long long) this);
- if (lafs_load_page(fs, p, this, 1)!=0)
+ if (lafs_load_page(fs, p, this, 1) != 0)
break;
-//printk("1\n");
if (!roll_valid(fs, ch, this))
break;
-//printk("2\n");
if (le64_to_cpu(ch->prev_addr) != prev)
break;
-//printk("3\n");
if (le64_to_cpu(ch->seq) != seq)
break;
-//printk("Looks good\n");
/* FIXME check checksum, and possibly VerifySum */
/* this head looks valid, so we can possibly verify previous
* clusters
prev2 = prev; prev2type = prevtype;
prev = this ; prevtype = le16_to_cpu(ch->verify_type);
this = le64_to_cpu(ch->next_addr);
- seq ++;
+ seq++;
}
dprintk("LaFS: Next address to write is %llu\n", start);
inode = lafs_iget_fs(fs, fsnum, inum, 0);
if (IS_ERR(inode))
- return (PTR_ERR(inode));
+ return PTR_ERR(inode);
li = LAFSI(inode);
- switch(li->type) {
+ switch (li->type) {
case TypeInodeFile:
BUG_ON(fsnum); /* FIXME should be more careful */
if (err == -EIO && offset == 0) {
/* creating new inode */
}
- return (PTR_ERR(inode));
+ return PTR_ERR(inode);
}
db = lafs_inode_dblock(inode, 0, MKREF(roll));
buf = map_dblock(db);
/* find/load the inode */
inode = lafs_iget_fs(fs, fsnum, inum, 0);
if (IS_ERR(inode))
- return (PTR_ERR(inode));
+ return PTR_ERR(inode);
/* FIXME do I need to 'lock' the inode in any way? */
dprintk("Got the inode, type %d %p size %llu\n", li->type,
inode, inode->i_size);
- switch(li->type) {
+ switch (li->type) {
struct la_inode *lai;
int mdsize;
dprintk("inode load page err %d\n", err);
if (err)
break;
- lai = (struct la_inode*)page_address(p);
+ lai = (struct la_inode *)page_address(p);
mdsize = le16_to_cpu(lai->metadata_size);
if (lai->filetype >= TypeBase &&
lai->filetype != TypeDir &&
u64 sz = le64_to_cpu(lai->metadata[0].file.size);
if (sz <= fs->prime_sb->s_blocksize - mdsize)
err = roll_mini(fs, inum, bnum, -1, flg, 0, 0,
- (int)sz, page_address(p) + mdsize);
+ (int)sz,
+ page_address(p) + mdsize);
}
break;
case TypeSegmentMap:
case TypeQuota:
/* These only get merged while in a checkpoint. */
- if (! in_checkpoint)
+ if (!in_checkpoint)
break;
/* FALL THROUGH */
case TypeFile:
*/
dprintk("FILE type\n");
err = -ENOMEM;
- blk = lafs_get_block(inode, bnum, NULL, GFP_KERNEL, MKREF(roll));
+ blk = lafs_get_block(inode, bnum, NULL, GFP_KERNEL,
+ MKREF(roll));
if (!blk)
break;
if (err)
break;
-#if 0
- if (blk->b.physaddr == baddr ||
- addr_newer(blk->b.physaddr, baddr))
- /* We already know about this block */
- break;
-#endif
if (li->type >= TypeBase &&
inode->i_size <= (bnum << inode->i_blkbits))
inode->i_size = ((bnum) << inode->i_blkbits) + type;
}
static int __must_check
-roll_one(struct fs *fs, u64 *addrp, struct page *p, struct page *pg, int max, int *in_checkpointp)
+roll_one(struct fs *fs, u64 *addrp, struct page *p, struct page *pg,
+ int max, int *in_checkpointp)
{
u64 addr = *addrp;
struct cluster_head *ch = (struct cluster_head *)page_address(p);
if (le16_to_cpu(ch->Hlength) > max)
return -EIO;
- baddr += (le16_to_cpu(ch->Hlength) + blocksize -1) / blocksize;
+ baddr += (le16_to_cpu(ch->Hlength) + blocksize - 1) / blocksize;
if (!(ch->flags & CH_Checkpoint))
*in_checkpointp = 0;
gh = ch->groups;
i = 0;
- while ( ((char*)gh - (char*)ch) < le16_to_cpu(ch->Hlength)) {
- int j=0;
+ while (((char *)gh - (char *)ch) < le16_to_cpu(ch->Hlength)) {
+ int j = 0;
int inum = le32_to_cpu(gh->inum);
int fsnum = le32_to_cpu(gh->fsnum);
int trunc = le16_to_cpu(gh->truncatenum_and_flag) & 0x7fff;
int flg = le16_to_cpu(gh->truncatenum_and_flag) & 0x8000;
desc = gh->u.desc;
- while (((char*)desc - (char*)gh) < le16_to_cpu(gh->group_size_words)*4) {
-#if 0
- if (le16_to_cpu(desc->block_bytes) == 0)
- desc->block_bytes = cpu_to_le16(0x8000);
- else if (le16_to_cpu(desc->block_bytes) == 0xffff)
- desc->block_bytes = cpu_to_le16(0xffff);
- else if (le16_to_cpu(desc->block_bytes) == 0xfffe)
- desc->block_bytes = cpu_to_le16(0);
- else
- desc->block_bytes = cpu_to_le16(le16_to_cpu(desc->block_bytes) + 0x8000);
-#endif
+ while (((char *)desc - (char *)gh) <
+ le16_to_cpu(gh->group_size_words)*4) {
if (le16_to_cpu(desc->block_bytes) <= DescMiniOffset ||
- le16_to_cpu(desc->block_bytes) == DescIndex ) {
+ le16_to_cpu(desc->block_bytes) == DescIndex) {
u32 bnum = le32_to_cpu(desc->block_num);
int cnt = le16_to_cpu(desc->block_cnt);
if (le16_to_cpu(desc->block_bytes) == DescIndex
&& cnt != 1)
- return -EIO; /* FIXME is this the best response */
+ return -EIO; /* FIXME is this
+ * the best
+ * response */
/* FIXME range check count */
while (!err && cnt--) {
err = roll_block(fs, fsnum, inum, trunc,
struct miniblock *mb = (struct miniblock *)desc;
u32 bnum = le32_to_cpu(mb->block_num);
int offset = le16_to_cpu(mb->block_offset);
- int len = le16_to_cpu(mb->length) - DescMiniOffset;
+ int len = le16_to_cpu(mb->length)
+ - DescMiniOffset;
err = roll_mini(fs, fsnum, inum, trunc, flg,
- bnum, offset, len, (char*)(mb+1));
+ bnum, offset, len, (char *)(mb+1));
mb++;
- mb = (struct miniblock *)(((char*)mb) + ROUND_UP(len));
+				mb = (struct miniblock *)(((char *)mb)
+							  + ROUND_UP(len));
desc = (struct descriptor *)mb;
}
j++;
- if (err) break;
+ if (err)
+ break;
}
- gh = (struct group_head*)desc;
+ gh = (struct group_head *)desc;
i++;
- if (err) break;
+ if (err)
+ break;
}
if (ch->flags & CH_CheckpointEnd)
*in_checkpointp = 0;
first = fs->checkpointcluster;
err = roll_locate(fs, first, &next, &last, &seq, &max);
- if (err) return err;
+ if (err)
+ return err;
if (max > PAGE_SIZE)
return -EFBIG;
max = ((max + blocksize - 1) / blocksize) * blocksize;
dprintk("Max = %d\n", max);
buf = kmalloc(max, GFP_KERNEL);
- if(buf)
+ if (buf)
while (first != next) {
err = roll_one(fs, &first, p, pg, max, &in_checkpoint);
if (err)
fs->ss[0].root = root = iget_locked(fs->prime_sb, 0);
b = lafs_get_block(root, 0, NULL, GFP_KERNEL, MKREF(mount));
-//printk("1\n");
if (!b)
return -ENOMEM;
set_bit(B_Root, &b->b.flags);
b->b.physaddr = fs->ss[0].root_addr;
set_bit(B_PhysValid, &b->b.flags);
err = lafs_load_block(&b->b, 0);
-//printk("2 %d\n", err);
if (err)
goto err;
err = lafs_wait_block(&b->b);
-//printk("3 %d\n", err);
if (err)
goto err;
err = lafs_import_inode(root, b);
-//printk("4 %d\n", err);
if (err)
goto err;
putdref(b, MKREF(mount));
/* FIXME error check */
fs->orphans = lafs_iget(fs->prime_sb, 8, 0);
- for (d=0; d < fs->devices ; d++) {
+ for (d = 0; d < fs->devices ; d++) {
fs->devs[d].segsum = lafs_iget(fs->prime_sb,
fs->devs[d].usage_inum,
0);
lafs_checkpoint_lock(fs);
err = roll_forward(fs);
lafs_checkpoint_unlock(fs);
- for (d=0; d<4; d++)
+ for (d = 0; d < 4; d++)
fs->cleaner.seg[d].chead = alloc_page(GFP_KERNEL);
return err;
/*
* segment tracking routines for LaFS
* fs/lafs/segments.c
- * Copyright (C) 2006
+ * Copyright (C) 2006-2009
* NeilBrown <neilb@suse.de>
* Released under the GPL, version 2
*/
#include <linux/sort.h>
#include <linux/random.h>
-#ifdef DUMP
-extern struct fs *dfs;
-#endif
-
struct segsum {
u32 segnum;
int devnum;
static int shash(u32 segnum, int devnum, int ssnum)
{
unsigned long h = hash_long(segnum, BITS_PER_LONG);
- return hash_long(h ^ (devnum | (ssnum << 16 )), SHASHBITS);
+ return hash_long(h ^ (devnum | (ssnum << 16)), SHASHBITS);
}
static inline void ss_get(struct segsum *ss)
}
}
-static struct segsum *segsum_find(struct fs *fs, u32 segnum, int devnum, int ssnum)
+static struct segsum *segsum_find(struct fs *fs, u32 segnum,
+ int devnum, int ssnum)
{
struct hlist_head *head = &fs->stable[shash(segnum, devnum, ssnum)];
struct segsum *ss, *new = NULL;
atomic_set(&new->delayed, 0);
INIT_HLIST_NODE(&new->hash);
dv = fs->devs + devnum;
-// printk("ssnum=%d devnum=%d\n", ssnum, devnum);
addr = LAFSI(fs->ss[ssnum].root)->md.fs.usagetable * dv->tablesize;
-// printk("addr %ld\n", (long)addr);
addr += segnum >> (fs->prime_sb->s_blocksize_bits-1);
new->ssblk = lafs_get_block(dv->segsum, addr, NULL,
GFP_KERNEL | __GFP_NOFAIL,
MKREF(ss));
- BUG_ON(! new->ssblk || IS_ERR(new->ssblk));
+ BUG_ON(!new->ssblk || IS_ERR(new->ssblk));
err = lafs_read_block(new->ssblk);
if (err) {
ss_put(new, fs);
}
/* lafs_seg_ref:
- * Take a reference to the segsum structure for the segment containing physaddr.
- * This can-and-should block if there is not enough memory to allocate needed data.
- * If needed, it will load the segment summary block for this snapshot
+ * Take a reference to the segsum structure for the segment containing
+ * physaddr. This can-and-should block if there is not enough memory
+ * to allocate needed data. If needed, it will load the segment
+ * summary block for this snapshot
*/
int lafs_seg_ref(struct fs *fs, u64 physaddr, int ssnum)
{
static void seg_inc(struct fs *fs, struct segsum *ss, int diff, int in_phase)
{
- if (! in_phase)
+ if (!in_phase)
atomic_add(diff, &ss->delayed);
else {
u16 *b, *p;
}
}
-void lafs_seg_move(struct fs *fs, u64 oldaddr, u64 newaddr, int ssnum, int phase)
+void lafs_seg_move(struct fs *fs, u64 oldaddr, u64 newaddr,
+ int ssnum, int phase)
{
struct segsum *ss;
void lafs_seg_flush_all(struct fs *fs)
{
int d;
- for (d=0; d < fs->devices ; d++)
+ for (d = 0; d < fs->devices ; d++)
write_inode_now(fs->devs[d].segsum, 0);
- for (d=0; d < fs->devices ; d++)
+ for (d = 0; d < fs->devices ; d++)
write_inode_now(fs->devs[d].segsum, 1);
}
u16 *p;
if (atomic_read(&ss->delayed) == 0)
return;
- printk("Seg apply %d %s\n", (int)atomic_read(&ss->delayed),
+ dprintk("Seg apply %d %s\n", (int)atomic_read(&ss->delayed),
strblk(&ss->ssblk->b));
buf = map_dblock(ss->ssblk);
p = buf;
- p += ss->segnum & (fs->prime_sb->s_blocksize/2 -1);
+ p += ss->segnum & (fs->prime_sb->s_blocksize/2 - 1);
*p = cpu_to_le16(le16_to_cpu(*p) + atomic_read(&ss->delayed));
atomic_set(&ss->delayed, 0);
unmap_dblock(ss->ssblk, buf);
{
int i;
- for (i=0 ; i<SHASHSIZE ; i++) {
+ for (i = 0 ; i < SHASHSIZE ; i++) {
struct hlist_head *head = &fs->stable[i];
struct segsum *ss;
struct hlist_node *n, *pos;
seg_apply(fs, ss);
ss_put(ss, fs);
spin_lock(&fs->stable_lock);
- // FIXME this still isn't safe - 'n' could disappear while
- // unlocked.
+ // FIXME this still isn't safe - 'n' could
+ // disappear while unlocked.
}
spin_unlock(&fs->stable_lock);
}
*/
break;
}
- if (why != ReleaseSpace) /* FIXME currently don't handle EAGAIN, so never return it */
- if (fs->rolled) {
- if (fs->free_blocks - fs->allocated_blocks - credits < watermark)
- credits = 0; /* Sorry, no room */
+ if (why != ReleaseSpace) { /* FIXME currently don't handle
+ * EAGAIN, so never return it
+ */
+ if (fs->rolled) {
+ if (fs->free_blocks - fs->allocated_blocks
+ - credits < watermark)
+ credits = 0; /* Sorry, no room */
+ }
}
fs->allocated_blocks += credits;
spin_unlock(&fs->alloc_lock);
a = st->page[link >> 12];
a += (link & 0xFFF) *
(st->size[link >> 12] * sizeof(u16) + sizeof(struct segstat));
- return (struct segstat*) a;
+ return (struct segstat *) a;
}
int lafs_segtrack_init(struct segtracker *st)
st->clean.first = st->clean.last = 0xffff;
st->unused.cnt = st->free.cnt = st->cleanable.cnt = 0;
- for (h=0; h<SEG_MAX_HEIGHT; h++)
+ for (h = 0; h < SEG_MAX_HEIGHT; h++)
st->head[h] = 0xFFFF;
/* how many entries in each page */
- for (i=0 ; i<4; i++)
- n[i] = PAGE_SIZE / (sizeof(struct segstat) + st->size[i] * sizeof(u16));
+ for (i = 0 ; i < 4; i++)
+ n[i] = PAGE_SIZE / (sizeof(struct segstat) +
+ st->size[i] * sizeof(u16));
do {
char rand;
st->unused.first = sn;
if (st->unused.cnt == 0)
st->unused.last = sn;
- st->unused.cnt ++;
+ st->unused.cnt++;
found = 1;
}
} while (found);
int sn, cnt, prev;
cnt = 0;
- for (prev = sn = st->unused.first ; sn != 0xFFFF; sn = segfollow(st, prev=sn)->next)
+ for (prev = sn = st->unused.first ;
+ sn != 0xFFFF ;
+ sn = segfollow(st, (prev = sn))->next)
cnt++;
- if (cnt != st->unused.cnt) { printk("%d != %d\n", cnt, st->unused.cnt); WARN_ON(1); return 1;}
- if (st->unused.last != prev) { printk("L%d != %d\n", prev, st->unused.last); WARN_ON(1); return 1;}
+ if (cnt != st->unused.cnt) {
+ printk("%d != %d\n", cnt, st->unused.cnt); WARN_ON(1);
+ return 1;
+ }
+ if (st->unused.last != prev) {
+ printk("L%d != %d\n", prev, st->unused.last); WARN_ON(1);
+ return 1;
+ }
cnt = 0;
- for (prev = sn = st->free.first ; sn != 0xFFFF; sn = segfollow(st, prev=sn)->next)
+ for (prev = sn = st->free.first ;
+ sn != 0xFFFF;
+ sn = segfollow(st, (prev = sn))->next)
cnt++;
- if (cnt != st->free.cnt) { printk("%d != %d\n", cnt, st->free.cnt); WARN_ON(1); return 1;}
- if (st->free.last != prev) { printk("L%d != %d\n", prev, st->free.last); WARN_ON(1); return 1;}
+ if (cnt != st->free.cnt) {
+ printk("%d != %d\n", cnt, st->free.cnt); WARN_ON(1);
+ return 1;
+ }
+ if (st->free.last != prev) {
+ printk("L%d != %d\n", prev, st->free.last); WARN_ON(1);
+ return 1;
+ }
cnt = 0;
- for (prev = sn = st->clean.first ; sn != 0xFFFF; sn = segfollow(st, prev=sn)->next)
+ for (prev = sn = st->clean.first ;
+ sn != 0xFFFF;
+ sn = segfollow(st, (prev = sn))->next)
cnt++;
- if (cnt != st->clean.cnt) { printk("%d != %d\n", cnt, st->clean.cnt); WARN_ON(1); return 1;}
- if (st->clean.last != prev) { printk("L%d != %d\n", prev, st->clean.last); WARN_ON(1); return 1;}
+ if (cnt != st->clean.cnt) {
+ printk("%d != %d\n", cnt, st->clean.cnt); WARN_ON(1);
+ return 1;
+ }
+ if (st->clean.last != prev) {
+ printk("L%d != %d\n", prev, st->clean.last); WARN_ON(1);
+ return 1;
+ }
cnt = 0;
- for (prev = sn = st->cleanable.first ; sn != 0xFFFF; sn = segfollow(st, prev=sn)->next)
+ for (prev = sn = st->cleanable.first ;
+ sn != 0xFFFF;
+ sn = segfollow(st, (prev = sn))->next)
cnt++;
- if (cnt != st->cleanable.cnt) { printk("%d != %d\n", cnt, st->cleanable.cnt); WARN_ON(1); return 1;}
- if (st->cleanable.last != prev) { printk("L%d != %d\n", prev, st->cleanable.last); WARN_ON(1); return 1;}
+ if (cnt != st->cleanable.cnt) {
+ printk("%d != %d\n", cnt, st->cleanable.cnt); WARN_ON(1);
+ return 1;
+ }
+ if (st->cleanable.last != prev) {
+ printk("L%d != %d\n", prev, st->cleanable.last); WARN_ON(1);
+ return 1;
+ }
return 0;
}
* prev, flip 'curr' and add smallest.
*/
while (h[0] != 0xFFFF || h[1] != 0xFFFF) {
- if ( h[next] == 0xFFFF ||
+ if (h[next] == 0xFFFF ||
(h[1-next] != 0xFFFF &&
!((prev <= segfollow(st, h[1-next])->score)
- ^ (segfollow(st, h[1-next])->score <= segfollow(st, h[next])->score)
+ ^ (segfollow(st, h[1-next])->score
+ <= segfollow(st, h[next])->score)
^ (segfollow(st, h[next])->score <= prev))
))
next = 1 - next;
return 0;
}
-static struct segstat *segfind(struct segtracker *st, u16 dev, u32 segnum, u16 *where[SEG_MAX_HEIGHT])
+static struct segstat *segfind(struct segtracker *st, u16 dev,
+ u32 segnum, u16 *where[SEG_MAX_HEIGHT])
{
/* Find the segment entry for dev/segnum.
* Return link info in where to allow insertion/deletion.
return h;
}
-static void seginsert(struct segtracker *st, u16 ssn, u16 *where[SEG_MAX_HEIGHT])
+static void seginsert(struct segtracker *st, u16 ssn,
+ u16 *where[SEG_MAX_HEIGHT])
{
- /* We looked for 'ss' but didn't find it. 'where' is the result of looking.
- * Now insert 'ss'
+ /* We looked for 'ss' but didn't find it. 'where' is the
+ * result of looking. Now insert 'ss'
*/
struct segstat *ss = segfollow(st, ssn);
int h = segchoose_height(st, ssn);
*where[h] = ss->skip[h];
ss->next = st->unused.first;
st->unused.first = pos;
- st->unused.cnt ++;
+ st->unused.cnt++;
lafs_check_seg_cnt(st);
} else {
/* advance 'where' to here */
spin_lock(&fs->lock);
wait_event_lock(fs->phase_wait,
- ! fs->scan.first_free_pass ||
+ !fs->scan.first_free_pass ||
fs->segtrack->free.first != 0xFFFF,
fs->lock);
if (db &&
db->b.inode == fs->devs[*dev].segsum &&
- db->b.fileaddr == ((*seg) >> (fs->prime_sb->s_blocksize_bits - 1))) {
+ db->b.fileaddr == ((*seg) >> (fs->prime_sb->s_blocksize_bits
+ - 1))) {
/* HACK youth_next should always be at least 0x8000 so that
* cleanable score differentiates well for new segments.
* old code would sometimes set youth_next very low, so
if (fs->youth_next < 0x8000)
fs->youth_next = 0x8000;
- youthp[(*seg) & ((1 << (fs->prime_sb->s_blocksize_bits - 1)) - 1)]
+ youthp[(*seg) & ((1 << (fs->prime_sb->s_blocksize_bits
+ - 1)) - 1)]
= fs->youth_next;
- fs->youth_next ++;
+ fs->youth_next++;
fs->segtrack->free.first = ss->next;
if (fs->segtrack->free.first == 0xFFFF)
putdref(db, MKREF(youth));
}
db = lafs_get_block(fs->devs[*dev].segsum,
- (*seg) >> (fs->prime_sb->s_blocksize_bits - 1),
+ (*seg) >> (fs->prime_sb->s_blocksize_bits
+ - 1),
NULL, GFP_KERNEL,
MKREF(youth));
- // FIXME we need to hold a ref on all youth blocks for known
- // free segments....
- BUG_ON(! test_bit(B_Valid, &db->b.flags));
+ // FIXME we need to hold a ref on all youth blocks for
+ // known free segments....
+ BUG_ON(!test_bit(B_Valid, &db->b.flags));
youthp = map_dblock(db);
goto again;
}
return rv;
ss = segfollow(st, rv);
st->unused.first = ss->next;
- st->unused.cnt --;
+ st->unused.cnt--;
ss->next = 0xFFFF;
return rv;
}
fs->segtrack->free.first = ssn;
if (fs->segtrack->free.last == 0xFFFF)
fs->segtrack->free.last = ssn;
- fs->segtrack->free.cnt ++;
+ fs->segtrack->free.cnt++;
ss->dev = dev;
ss->segment = seg;
ss->score = 0;
int ssn;
dprintk("ADD CLEAN %d/%d %d #####################################\n",
- dev,seg, fs->segtrack->clean.cnt);
- {void lafs_dump_cleanable(void); lafs_dump_cleanable();}
+ dev, seg, fs->segtrack->clean.cnt);
spin_lock(&fs->lock);
ss = segfind(fs->segtrack, dev, seg, where);
if (ss) {
fs->segtrack->clean.last =
fs->segtrack->clean.first;
fs->segtrack->clean.cnt++;
- printk("a");
}
- printk(" %x b\n", ss->score);
ss->score = 0;
ss->usage = 0;
spin_unlock(&fs->lock);
fs->segtrack->total / 2) {
/* Have enough free/clean entries already */
spin_unlock(&fs->lock);
- printk("c\n");
return;
}
if (fs->segtrack->clean.last == 0xFFFF)
fs->segtrack->clean.last =
fs->segtrack->clean.first;
- fs->segtrack->clean.cnt ++;
+ fs->segtrack->clean.cnt++;
ss->dev = dev;
ss->segment = seg;
ss->score = 0;
ss->usage = 0;
seginsert(fs->segtrack, ssn, where);
lafs_check_seg_cnt(fs->segtrack);
- printk("d");
}
spin_unlock(&fs->lock);
- printk("e\n");
}
static void clean_free(struct fs *fs)
st->max_score = segfollow(st, st->cleanable.last)->score;
}
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
ss = segfollow(st, st->cleanable.first);
st->cleanable.first = ss->next;
- st->cleanable.cnt --;
+ st->cleanable.cnt--;
if (ss->next == 0xffff)
st->cleanable.last = 0xffff;
if (lafs_check_seg_cnt(fs->segtrack)) {
int sj;
- printk("first=%x last=%x cnt=%d\n", st->cleanable.first, st->cleanable.last, st->cleanable.cnt);
- for (sj = st->cleanable.first ; sj != 0xFFFF; sj = segfollow(st, sj)->next)
+ printk("first=%x last=%x cnt=%d\n", st->cleanable.first,
+ st->cleanable.last, st->cleanable.cnt);
+ for (sj = st->cleanable.first ;
+ sj != 0xFFFF;
+ sj = segfollow(st, sj)->next)
printk(" %x\n", sj);
WARN_ON(1);
}
- st->sorted_size --;
+ st->sorted_size--;
*dev = ss->dev;
*seg = ss->segment;
u32 segsize;
u16 *where[SEG_MAX_HEIGHT];
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
if (fs->scan.trace || lafs_trace)
printk("CLEANABLE: %u/%lu y=%d u=%d\n",
dev, (unsigned long)seg, (int)youth, (int)usage);
if (usage == 0) {
add_clean(fs, dev, seg);
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
return;
}
score = youth * usage / segsize;
- // printk("add cleanable %d/%d y=%d u=%d sc=%d\n", dev,seg, youth, usage, score);
- // FIXME I am sorting under a spinlock - not ideal
+
spin_lock(&fs->lock);
ss = segfind(fs->segtrack, dev, seg, where);
ss->score = score;
}
spin_unlock(&fs->lock);
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
return;
}
fs->segtrack->max_score < score) {
/* score to high to bother with */
spin_unlock(&fs->lock);
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
return;
}
fs->segtrack->cleanable.last = ssn;
} else {
fs->segtrack->cleanable.last = ssn;
- BUG_ON(segfollow(fs->segtrack,l)->next != 0xFFFF);
+ BUG_ON(segfollow(fs->segtrack, l)->next != 0xFFFF);
segfollow(fs->segtrack, l)->next = ssn;
}
fs->segtrack->cleanable.cnt++;
seginsert(fs->segtrack, ssn, where);
}
spin_unlock(&fs->lock);
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
}
static void merge_usage(struct fs *fs, u16 *d)
int segperblk = fs->prime_sb->s_blocksize / 2;
int i;
- for (i=0; i<segperblk; i++)
+ for (i = 0; i < segperblk; i++)
if (le16_to_cpu(d[i]) > le16_to_cpu(u[i]))
u[i] = d[i];
}
unsigned long lafs_scan_seg(struct fs *fs)
{
- /* Process one block of youth or segment-usage
- * data.
- * We collect free segments (youth==0) into a table that is kept
- * sorted to ensure against duplicates.
- * It is treated like a ring buffer with a head and a tail to
- * distinguish free space from used space. head/tail point to the
- * free space (which is never empty). As we scan all segments sequentially
- * any free segment we find is placed at the head of the free list
- * and the entry just after the tail might get discarded if we run out
- * of room, or if that entry matches the entry we just inserted.
- * New free segments are allocated from just after the tail. i.e. we
- * increment the tail and return the entry recorded there.
- * If tail+1 == head, there are no segments in the buffer.
+ /* Process one block of youth or segment-usage data. We
+ * collect free segments (youth==0) into a table that is kept
+ * sorted to ensure against duplicates. It is treated like a
+ * ring buffer with a head and a tail to distinguish free
+ * space from used space. head/tail point to the free space
+ * (which is never empty). As we scan all segments
+ * sequentially any free segment we find is placed at the head
+ * of the free list and the entry just after the tail might
+ * get discarded if we run out of room, or if that entry
+ * matches the entry we just inserted. New free segments are
+ * allocated from just after the tail. i.e. we increment the
+ * tail and return the entry recorded there. If tail+1 ==
+ * head, there are no segments in the buffer.
*
* We also collect potentially cleanable segments. These are any
* segments which is not empty and not non-logged (i.e youth>=8).
}
if (fs->scan.youth_db == NULL)
fs->scan.youth_db =
- lafs_get_block(fs->devs[fs->scan.free_dev].segsum,
+ lafs_get_block(fs->devs[fs->scan.free_dev]
+ .segsum,
fs->scan.free_block,
NULL, GFP_KERNEL, MKREF(youth));
if (!fs->scan.youth_db) {
for (i = 0; i < segments ; i++)
if (yp[i] == cpu_to_le16(0)) {
if (fs->scan.first_free_pass)
- fs->free_blocks += fs->devs[fs->scan.free_dev].segment_size;
- add_free(fs, fs->scan.free_dev, firstseg + i, &yp[i]);
- fs->total_free += fs->devs[fs->scan.free_dev].segment_size /*- 1*/;
+ fs->free_blocks +=
+ fs->devs[fs->scan.free_dev]
+ .segment_size;
+ add_free(fs, fs->scan.free_dev, firstseg + i,
+ &yp[i]);
+ fs->total_free +=
+ fs->devs[fs->scan.free_dev]
+ .segment_size /*- 1*/;
}
unmap_dblock(fs->scan.youth_db, yp);
fs->scan.free_stage = 1;
}
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
if (fs->scan.free_stage == 1) {
- /* Find the main usage block and copy the data into our temp block
+ /* Find the main usage block and copy the data into
+ * our temp block
*/
struct datablock *db;
char *d;
unmap_dblock(db, d);
fs->scan.free_stage = 2;
}
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
- while (fs->scan.free_stage > 1 && fs->scan.free_stage < fs->maxsnapshot+1) {
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
+ while (fs->scan.free_stage > 1 &&
+ fs->scan.free_stage < fs->maxsnapshot + 1) {
struct datablock *db;
u16 *d;
db = lafs_get_block(fs->devs[fs->scan.free_dev].segsum,
fs->scan.free_block +
fs->devs[fs->scan.free_dev].tablesize *
- LAFSI(fs->ss[fs->scan.free_stage-1].root)->md.fs.usagetable,
+ LAFSI(fs->ss[fs->scan.free_stage-1].root)
+ ->md.fs.usagetable,
NULL, GFP_KERNEL, MKREF(usage));
if (!db) {
printk("EEEEKKK get_block for subsequent usage failed\n");
d = map_dblock(db);
merge_usage(fs, d);
unmap_dblock(db, d);
- fs->scan.free_stage ++;
+ fs->scan.free_stage++;
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
}
if (fs->scan.free_stage == fs->maxsnapshot + 1) {
/* All usage data has been merged, we can record all these
if (fs->scan.free_block == blks)
segments = segcount % segperblk;
- for (i=0; i < segments; i++)
+ for (i = 0; i < segments; i++)
add_cleanable(fs, fs->scan.free_dev,
i + fs->scan.free_block * segperblk,
le16_to_cpu(yp[i]),
fs->scan.free_block++;
fs->scan.free_stage = 0;
}
- if (lafs_check_seg_cnt(fs->segtrack)) WARN_ON(1);
+ WARN_ON(lafs_check_seg_cnt(fs->segtrack));
if (fs->scan.trace)
return HZ/10;
else
/*
* fs/lafs/super.c
- * Copyright (C) 2005-2006
+ * Copyright (C) 2005-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*/
while ((p = strsep(&data, ",")) != NULL) {
if (!*p)
continue;
- if (strncmp(p, "snapshot=", 9)==0) {
+ if (strncmp(p, "snapshot=", 9) == 0) {
op->snapshot = p+9;
} else {
printk(KERN_ERR
goto fail;
fs = sb->s_fs_info;
- for (s=1; s<fs->maxsnapshot; s++) {
+ for (s = 1; s < fs->maxsnapshot; s++) {
struct datablock *b;
struct inode *rootdir;
if (fs->ss[s].root_addr == 0)
err = lafs_load_page(fs, p, fs->ss[s].root_addr, 1);
if (err)
continue;
- lai = (struct la_inode*)page_address(p);
+ lai = (struct la_inode *)page_address(p);
printk("ss %d is %.64s\n", s, lai->metadata[0].fs.name);
- if (strncmp(lai->metadata[0].fs.name, op.snapshot, 64)!=0)
+ if (strncmp(lai->metadata[0].fs.name, op.snapshot, 64) != 0)
continue;
/* FIXME more checks? */
/* Ok, we have the right snapshot... now we need a superblock */
sb = sget(&lafs_snap_fs_type, NULL, set_anon_super, NULL);
- if(IS_ERR(sb))
+ if (IS_ERR(sb))
return PTR_ERR(sb);
sb->s_flags = flags | MS_RDONLY;
sb->s_fs_info = fs;
BUG_ON(test_bit(B_Valid, &b->b.flags));
printk("ss root at %llu\n", b->b.physaddr);
err = lafs_load_block(&b->b, 0);
- if (!err) err = lafs_wait_block(&b->b);
- if (!err) err = lafs_import_inode(fs->ss[s].root, b);
+ if (!err)
+ err = lafs_wait_block(&b->b);
+ if (!err)
+ err = lafs_import_inode(fs->ss[s].root, b);
putdref(b, MKREF(snap));
if (err) {
/* FIXME what to do with a locked inode? */
/*
+ * fs/lafs/state.h
+ * Copyright (C) 2005-2009
+ * Neil Brown <neilb@suse.de>
+ * Released under the GPL, version 2
+ *
* The stateblock and superblocks are copied into the
* internal 'struct fs' at startup time, and are written
* out based on information there-in.
-
+/*
+ * fs/lafs/summary.c
+ * Copyright (C) 2005-2009
+ * Neil Brown <neilb@suse.de>
+ * Released under the GPL, version 2
+ */
#include "lafs.h"
/*
* per-filesystem
* per-user/group/tree
*/
-void lafs_summary_update(struct fs *fs, struct inode *ino, u64 oldphys, u64 newphys,
- int is_index, int phase)
+void lafs_summary_update(struct fs *fs, struct inode *ino,
+ u64 oldphys, u64 newphys,
+ int is_index, int phase)
{
/* Whether writing a block or truncating, we hold a reference
* to ->iblock, so can access it without locking
int future;
int diff;
if (oldphys && newphys) {
- lafs_seg_move(fs, oldphys, newphys, 0, phase); // FIXME what snapshot should I use?
+ // FIXME what snapshot should I use?
+ lafs_seg_move(fs, oldphys, newphys, 0, phase);
return;
}
if (oldphys == 0 && newphys == 0)
else
lai->cblocks += diff;
}
- if (! is_index) {
+ if (!is_index) {
if (diff > 0)
- lai->ablocks --;
+ lai->ablocks--;
else
ino->i_blocks -=
1 << (fs->prime_sb->s_blocksize_bits - 9);
if (!is_index) {
if (diff > 0)
- lai->md.fs.ablocks_used --;
+ lai->md.fs.ablocks_used--;
}
spin_unlock(&lai->vfs_inode.i_lock);
lafs_qcommit(fs, ino, diff, phase);
- lafs_seg_move(fs, oldphys, newphys, 0, phase); // FIXME what snapshot should I use?
+ // FIXME what snapshot should I use?
+ lafs_seg_move(fs, oldphys, newphys, 0, phase);
}
int lafs_summary_allocate(struct fs *fs, struct inode *ino, int diff)
struct lafs_inode *lai = LAFSI(ino);
lai = LAFSI(lai->filesys);
spin_lock(&lai->vfs_inode.i_lock);
-// printk("LSA: %d -> ", (int)lai->md.fs.ablocks_used);
if (lai->md.fs.blocks_allowed &&
diff > 0 &&
lai->md.fs.cblocks_used +
else
lai->md.fs.ablocks_used += diff;
-// printk(" %d\n", (int)lai->md.fs.ablocks_used);
spin_unlock(&lai->vfs_inode.i_lock);
if (err)
return err;
/*
* fs/lafs/super.c
- * Copyright (C) 2005-2006
+ * Copyright (C) 2005-2009
* Neil Brown <neilb@suse.de>
* Released under the GPL, version 2
*/
* So we don't bother with that just yet.
* The state block needs to be written - twice on each device - whenever
* a checkpoint is completed. All copies are identical and the writes
- * proceed in parallel. There are 4 stateblock location on each device.
+ * proceed in parallel. There are 4 stateblock locations on each device.
* 2 are typically less recent than the other two. We over-write the
* less-recent copies.
* FIXME on a RAID4 we should pad the write to be a full stripe.
+ *
+ * Locking issues: This is called from the checkpoint thread and so
+ * it does not race with anything else exclusive to that thread.
+ * The nonlog information needs to be reviewed once that functionality
+ * is implemented.
*/
int lafs_write_state(struct fs *fs)
st->checksum = 0;
st->checksum = crc32_le(0, (unsigned char *)st, fs->statesize);
- for (d=0; d < fs->devices ; d++)
- for (i = (fs->seq & 1); i < 4 ; i+= 2)
+ for (d = 0; d < fs->devices ; d++)
+ for (i = (fs->seq & 1); i < 4 ; i += 2)
lafs_super_write(fs, d, fs->devs[d].stateaddr[i] >> 9,
- (char*)st, fs->statesize);
+ (char *)st, fs->statesize);
lafs_super_wait(fs);
/* FIXME what about a write error ??? */
return 0;
* it was found at sector 'addr'
*/
u32 crc, crc2;
- if (strncmp(db->idtag, "LaFS-DeviceBlock", 16))
+ if (strncmp(db->idtag, "LaFS-DeviceBlock", 16) != 0)
return 0;
- if (strncmp(db->version, "AlphaDevel ", 16))
+ if (strncmp(db->version, "AlphaDevel ", 16) != 0)
return 0;
/* uuid can be anything */
crc = db->checksum;
crc2 = crc32_le(0, (unsigned char *)db, LAFS_DEVBLK_SIZE);
db->checksum = crc;
if (crc2 != crc) {
- dprintk("%lx != %lx\n", (unsigned long)crc, (unsigned long)crc2);
+ dprintk("%lx != %lx\n", (unsigned long)crc,
+ (unsigned long)crc2);
return 0;
}
(le32_to_cpu(db->segment_size)<<db->blockbits) * 10)
return 0;
- /* FIXME should range check segment_count, but need to know size for that */
+ /* FIXME should range check segment_count, but need to know
+ * size for that */
if (le32_to_cpu(db->level) > 10)
return 0;
* and consistent stateblock
*/
u32 crc;
- if (strncmp(st->idtag, "LaFS-State-Block", 16))
+ if (strncmp(st->idtag, "LaFS-State-Block", 16) != 0)
return 0;
- if (strncmp(st->version, "AlphaDevel ", 16))
+ if (strncmp(st->version, "AlphaDevel ", 16) != 0)
return 0;
crc = st->checksum;
st->checksum = 0;
{
int cnt = 0;
if (*name == '/')
- cnt=1;
+ cnt = 1;
while (data && *data) {
- if (strncmp(data, "dev=", 4)==0)
+ if (strncmp(data, "dev=", 4) == 0)
cnt++;
- if (strncmp(data, "new=", 4)==0)
+ if (strncmp(data, "new=", 4) == 0)
cnt++;
- data = strchr(data,',');
- if (data) data++;
+ data = strchr(data, ',');
+ if (data)
+ data++;
}
return cnt;
}
while ((p = strsep(&data, ",")) != NULL) {
if (!*p)
continue;
- if (strncmp(p, "dev=", 4)==0)
+ if (strncmp(p, "dev=", 4) == 0)
op->devlist[dv++].dev = p+4;
- else if (strncmp(p, "new=", 4)==0) {
+ else if (strncmp(p, "new=", 4) == 0) {
op->devlist[dv].is_new = 1;
op->devlist[dv++].dev = p+4;
} else {
struct options *op = opv;
struct devent *dv;
struct page *pg;
- sector_t sect, dev_addr =0, state_addr =0;
+ sector_t sect, dev_addr = 0, state_addr = 0;
int err = 0;
unsigned int n;
int i;
- int have_dev=0, have_state =0;
+ int have_dev = 0, have_state = 0;
dv = &op->devlist[op->curr_dev];
BUG_ON(dv->devblock);
* uuids, we are confused!
*/
sect = 0;
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
/* try to read block at 'sect' */
int ok = lafs_sync_page_io(sb->s_bdev, sect, 0, n, pg, READ);
if (ok && valid_devblock(page_address(pg), sect)) {
if (!have_dev) {
- have_dev =1;
+ have_dev = 1;
memcpy(dv->devblock, page_address(pg), n);
dev_addr = sect;
- } else switch(compare_dev(dv->devblock, page_address(pg))) {
+ } else switch (compare_dev(dv->devblock,
+ page_address(pg))) {
case 0: /* older, do nothing */
break;
case 1: /* newer, overwrite */
}
}
- if (i!=1)
+ if (i != 1)
sect += (n>>9);
else {
sect = sb->s_bdev->bd_inode->i_size & ~(sector_t)(n-1);
/* FIXME - we've lost the read error, if it was significant */
err = -EINVAL;
if (!have_dev) {
- if (!silent) printk(KERN_ERR "LaFS - no valid devblock found.\n");
+ if (!silent)
+ printk(KERN_ERR "LaFS - no valid devblock found.\n");
goto out;
}
*/
n = le32_to_cpu(1<<dv->devblock->statebits);
if ((n & (n-1)) ||
- n < queue_hardsect_size(sb->s_bdev->bd_disk->queue)) {
- printk(KERN_ERR "LaFS: statesize of %u no acceptable.\n", n);
+ n < queue_hardsect_size(sb->s_bdev->bd_disk->queue) ||
+ n > 128*1024) {
+ printk(KERN_ERR "LaFS: statesize of %u not acceptable.\n", n);
err = -EINVAL;
goto out;
}
err = -ENOMEM;
if (!dv->stateblock)
goto out;
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
int ok;
sect = le64_to_cpu(dv->devblock->stateaddr[i])>>9;
ok = lafs_sync_page_io(sb->s_bdev, sect, 0, n, pg, READ);
have_state = 1;
memcpy(dv->stateblock, page_address(pg), n);
state_addr = i;
- } else if (compare_state(dv->stateblock, page_address(pg))) {
+ } else if (compare_state(dv->stateblock,
+ page_address(pg))) {
memcpy(dv->stateblock, page_address(pg), n);
state_addr = i;
}
dv->statechoice = state_addr;
} else {
err = -EINVAL;
- if (!silent) printk(KERN_ERR "LaFS: no valid stateblock found.\n");
+ if (!silent)
+ printk(KERN_ERR "LaFS: no valid stateblock found.\n");
}
out:
page_cache_release(pg);
int seqlo = le32_to_cpu(op->devlist[0].devblock->seq);
int seqhi = le32_to_cpu(op->devlist[0].devblock->seq);
int newdev = 0;
- int newstate=0;
- int i,j;
+ int newstate = 0;
+ int i, j;
- for (i=1; i<op->devcnt; i++) {
+ for (i = 1; i < op->devcnt; i++) {
if (memcmp(op->devlist[0].stateblock->uuid,
op->devlist[i].stateblock->uuid,
16) != 0)
return -EINVAL;
if (u32_after(le32_to_cpu(op->devlist[i].stateblock->seq),
- le32_to_cpu(op->devlist[newstate].stateblock->seq)))
+ le32_to_cpu(op->devlist[newstate].
+ stateblock->seq)))
newstate = i;
}
- if (le32_to_cpu(op->devlist[newstate].stateblock->devices) != op->devcnt)
+ if (le32_to_cpu(op->devlist[newstate].stateblock->devices)
+ != op->devcnt)
return -EINVAL;
op->statebits = op->devlist[0].devblock->statebits;
/* Now check devices don't overlap in start/size.
* We do a simple quadratic search
*/
- for (i=0; i<op->devcnt; i++)
- for (j=0; j<op->devcnt; j++)
+ for (i = 0; i < op->devcnt; i++)
+ for (j = 0; j < op->devcnt; j++)
if (i != j)
if (le64_to_cpu(op->devlist[i].devblock->start) <
le64_to_cpu(op->devlist[j].devblock->start) &&
st = fs->state = op->devlist[newest].stateblock;
op->devlist[newest].stateblock = NULL;
#ifdef DUMP
- {
- extern struct fs *dfs;
dfs = fs;
- }
#endif
fs->seq = le32_to_cpu(st->seq);
fs->levels = le32_to_cpu(st->levels);
fs->devices = op->devcnt;
fs->devs_loaded = fs->devices; /* FIXME use this or lose this */
- fs->statesize = 1<< op->statebits;
+ fs->statesize = 1 << op->statebits;
fs->nonlog_segment = le32_to_cpu(st->nonlog_segment);
fs->nonlog_dev = le16_to_cpu(st->nonlog_dev);
fs->nonlog_offset = le16_to_cpu(st->nonlog_offset);
fs->youth_next = le16_to_cpu(st->nextyouth);
- if (fs->youth_next < 8) fs->youth_next = 8;
+ if (fs->youth_next < 8)
+ fs->youth_next = 8;
fs->scan.first_free_pass = 1;
fs->maxsnapshot = le32_to_cpu(st->maxsnapshot);
return NULL;
}
fs->checkpointcluster = le64_to_cpu(st->checkpointcluster);
- for (i=0; i<fs->maxsnapshot; i++) {
+ for (i = 0; i < fs->maxsnapshot; i++) {
fs->ss[i].root_addr =
le64_to_cpu(st->root_inodes[i]);
dprintk("root inode %d are %llu\n",
INIT_WORK(&fs->done_work, lafs_done_work);
fs->phase_locked = 0;
- for (i=0; i<WC_NUM; i++) {
+ for (i = 0; i < WC_NUM; i++) {
int j;
mutex_init(&fs->wc[i].lock);
- for (j=0; j<4 ; j++) {
+ for (j = 0; j < 4 ; j++) {
atomic_set(&fs->wc[i].pending_cnt[j], 0);
INIT_LIST_HEAD(&fs->wc[i].pending_blocks[j]);
}
return NULL;
}
- for (i=0; i<fs->devices; i++) {
+ for (i = 0; i < fs->devices; i++) {
struct fs_dev *dv = &fs->devs[i];
struct devent *de = &op->devlist[i];
int j;
fs->max_segment = dv->segment_size;
if (dv->width * dv->stride <= dv->segment_size) {
- dv->tables_per_seg = dv->segment_size / dv->width / dv->stride;
+ dv->tables_per_seg = dv->segment_size /
+ dv->width / dv->stride;
dv->rows_per_table = dv->stride;
dv->tablesize = dv->rows_per_table * dv->width;
dv->segment_stride = dv->segment_size;
dv->segment_stride = dv->rows_per_table;
}
- for (j=0; j<2; j++)
+ for (j = 0; j < 2; j++)
dv->devaddr[j] = le64_to_cpu(dv->devblk->devaddr[j]);
- for (j=0; j<4; j++)
+ for (j = 0; j < 4; j++)
dv->stateaddr[j] = le64_to_cpu(dv->devblk->stateaddr[j]);
dv->sb->s_op = &lafs_sops;
{
/* Release the 'struct fs' */
int i;
-#if 0
- if (fs->prime_sb)
- up_write(&fs->prime_sb->s_umount);
-#endif
+
/* Lets see what is on the 'leaf' list? */
- for (i=0; i<2; i++){
+ for (i = 0; i < 2; i++) {
struct block *b;
dprintk("For phase %d\n", i);
retry:
}
}
- for (i=0; i<fs->devices; i++) {
+ for (i = 0; i < fs->devices; i++) {
struct fs_dev *dv = &fs->devs[i];
kfree(dv->devblk);
if (dv->sb)
struct lafs_inode *li;
/* need to break a circular reference... */
- for (ss=0; ss<fs->maxsnapshot; ss++)
+ for (ss = 0; ss < fs->maxsnapshot; ss++)
if (fs->ss[ss].root &&
fs->ss[ss].root->i_sb == sb)
break;
int i;
struct vfsmount mnt;
- for (i=0; i<op->devcnt; i++) {
+ for (i = 0; i < op->devcnt; i++) {
op->curr_dev = i;
err = get_sb_bdev(&lafs_fs_type, flags,
op->devlist[i].dev, op,
int newest;
struct fs *fs = NULL;
char *cdata = data;
- if (cdata == NULL) cdata = "";
+ if (cdata == NULL)
+ cdata = "";
err = parse_opts(&op, dev_name, cdata);
if (err)
if (!fs)
goto out;
- /* Well, all the devices check out. Now we need to find the filesystem */
+ /* Well, all the devices check out. Now we need to find the
+ * filesystem */
err = lafs_mount(fs);
if (err == 0)
err = lafs_start_cleaner(fs);
*/
if (op.devlist) {
int i;
- for (i=0; i<op.devcnt; i++) {
+ for (i = 0; i < op.devcnt; i++) {
kfree(op.devlist[i].devblock);
kfree(op.devlist[i].stateblock);
if (op.devlist[i].sb) {
if (err)
goto out;
err = register_filesystem(&lafs_fs_type);
- if (err) goto out;
+ if (err)
+ goto out;
err = register_filesystem(&lafs_snap_fs_type);
if (err) {
unregister_filesystem(&lafs_fs_type);
return &li->vfs_inode;
}
-extern spinlock_t lafs_hash_lock;
void lafs_destroy_inode(struct inode *inode)
{
if (test_bit(I_Deleting, &LAFSI(inode)->iflags))
struct lafs_inode *root = LAFSI(fs->ss[0].root);
fsid = 0;
- fsuuid = (u32*)fs->state->uuid;
- for (i=0; i < 16 / 4 ; i ++)
+ fsuuid = (u32 *)fs->state->uuid;
+ for (i = 0; i < 16 / 4 ; i++)
fsid ^= le32_to_cpu(fsuuid[i]);
spin_lock(&root->vfs_inode.i_lock);
buf->f_bfree = buf->f_blocks - (root->md.fs.cblocks_used +
root->md.fs.pblocks_used +
root->md.fs.ablocks_used);
- printk("df: tot=%ld free=%ld avail=%ld(%ld-%ld) cb=%ld pb=%ld ab=%ld\n",
+ dprintk("df: tot=%ld free=%ld avail=%ld(%ld-%ld) cb=%ld pb=%ld ab=%ld\n",
(long)buf->f_blocks, (long)buf->f_bfree, (long)buf->f_bavail,
(long)fs->free_blocks, (long)fs->allocated_blocks,
(long)root->md.fs.cblocks_used, (long)root->md.fs.pblocks_used,
module_param(lafs_trace, int, 0644);
#ifdef DUMP
-struct fs *dfs = NULL;
+struct fs *dfs;
static int do_dump(const char *val, struct kernel_param *kp)
{
extern void lafs_dump_orphans(void);
return strlen(buffer);
}
-module_param_call(dump,do_dump,get_dump,0, 0775);
+module_param_call(dump, do_dump, get_dump, 0, 0775);
#endif