An Index block is 'Valid' if it contains any index.
So an InoIdx block for a depth=0 inode is never Valid
(as there is data rather than indexes there).
When it comes time to cluster_allocate an Index block,
if it is not Valid, we simply allocated_block it to 0.
An index block is Dirty if there are any children that need incorporation,
as well as when incorporate has happened but the block has not yet been
written.
So an Index block can be Dirty but not Valid (unlike data blocks).
lafs_iounlock_block(&b->b, 0);
}
-void
-lafs_erase_iblock(struct indexblock *b)
-{
- struct fs *fs = fs_from_inode(b->b.inode);
-
- LAFS_BUG(!test_bit(B_IOLock, &b->b.flags), &b->b);
- clear_bit(B_Valid, &b->b.flags);
- if (test_and_clear_bit(B_Dirty, &b->b.flags))
- lafs_space_return(fs, 1);
- if (test_and_clear_bit(B_Realloc, &b->b.flags))
- lafs_space_return(fs, 1);
-
- if (b->b.physaddr && !test_bit(B_InoIdx, &b->b.flags))
- lafs_allocated_block(fs, &b->b, 0);
- spin_lock(&fs->lock);
- if (test_bit(B_Pinned, &b->b.flags)) {
- /* We should only be erasing index blocks when
- * they have no children. We shouldn't be on
- * a _leaf list because we hold IOLock
- */
- int onlist = 0;
- LAFS_BUG(atomic_read(&b->pincnt[0]), &b->b);
- LAFS_BUG(atomic_read(&b->pincnt[1]), &b->b);
- if (!list_empty(&b->b.lru)) {
- list_del_init(&b->b.lru);
- onlist = 1;
- }
- if (!test_bit(B_Root, &b->b.flags))
- atomic_dec(&b->b.parent->pincnt
- [!!test_bit(B_Phase1, &b->b.flags)]);
- clear_bit(B_Pinned, &b->b.flags);
- spin_unlock(&fs->lock);
- lafs_inode_checkpin(b->b.inode);
- if (!test_bit(B_Root, &b->b.flags))
- lafs_refile(&b->b.parent->b, 0);
- if (onlist)
- putiref(b, MKREF(leaf));
- } else
- spin_unlock(&fs->lock);
-}
-
void
lafs_dirty_iblock(struct indexblock *b)
{
* in the previous phase.
*/
- LAFS_BUG(!test_bit(B_Valid, &b->b.flags), &b->b);
LAFS_BUG(!test_bit(B_Pinned, &b->b.flags), &b->b);
if (!test_and_set_bit(B_Dirty, &b->b.flags)) {
if (!test_and_clear_bit(B_Credit, &b->b.flags)) {
}
lai->depth = 0;
lai->iblock->depth = 0;
+ clear_bit(B_Valid, &lai->iblock->b.flags);
/* safe to reference ->dblock as b is a dirty child */
db = getdref(lai->dblock, MKREF(flush2db));
lai = LAFSI(b->inode);
if (!test_bit(B_Valid, &b->flags)) {
+ if (test_bit(B_PhysValid, &b->flags) &&
+ !test_bit(B_InoIdx, &b->flags) &&
+ b->physaddr != 0)
+ lafs_allocated_block(fs, b, 0);
+ if (test_and_clear_bit(B_Dirty, &b->flags))
+ lafs_space_return(fs, 1);
+ if (test_and_clear_bit(B_Realloc, &b->flags))
+ lafs_space_return(fs, 1);
lafs_iounlock_block(b, B_IOLock);
return wc->cluster_seq;
}
if (test_bit(B_PhysValid, &lai->dblock->b.flags))
set_bit(B_PhysValid, &new->b.flags);
LAFS_BUG(!test_bit(B_Valid, &lai->dblock->b.flags), &lai->dblock->b);
- set_bit(B_Valid, &new->b.flags);
+ if (lai->depth > 0)
+ set_bit(B_Valid, &new->b.flags);
new->b.inode = ino;
new->depth = lai->depth;
/* Note: this doesn't get hashed until the index
} else {
LAFS_BUG(LAFSI(ino)->type != 0, &b->b);
lafs_orphan_release(fs, b);
- lafs_iolock_block(&ib->b);
- lafs_erase_iblock(ib);
- lafs_iounlock_block(&ib->b, 0);
+ if (test_bit(B_Dirty, &ib->b.flags)) {
+ lafs_iolock_block(&ib->b);
+ lafs_cluster_allocate(&ib->b, 0);
+ }
lafs_erase_dblock(b);
clear_bit(I_Deleting, &LAFSI(ino)->iflags);
}
*/
getiref(ib2, MKREF(inode_handle_orphan2));
lafs_iolock_block(&ib2->b);
- /* call lafs_incorporate at least once to ensure
- * that lafs_erase_iblock gets called
- */
do
lafs_incorporate(fs, ib2);
while (ib2->uninc_table.pending_cnt || ib2->uninc);
* pinning */
LAFSI(ino)->trunc_next = next_trunc;
lafs_iolock_block(&ib->b);
- lafs_erase_iblock(ib);
- lafs_iounlock_block(&ib->b, 0);
+ lafs_cluster_allocate(&ib->b, 0);
goto out;
}
LAFS_BUG(!test_bit(B_PhysValid, &ib->b.flags), &ib->b);
LAFS_BUG(ib->b.physaddr != 0, &ib->b);
}
- lafs_clear_index(ib);
+ clear_bit(B_Valid, &ib->b.flags);
out:
lafs_checkpoint_unlock(fs);
share_list(&ib->uninc_next, &new->uninc_next, next);
new->uninc_table.pending_cnt = 0;
new->uninc_table.credits = 0;
-BUG_ON(!test_bit(B_Dirty, &new->b.flags) &&
- !test_bit(B_Realloc, &new->b.flags));
share_uninic(&ib->uninc_table, &new->uninc_table, next);
spin_unlock(&fs->lock);
LAFS_BUG(!test_bit(B_IOLock, &ib->b.flags), &ib->b);
+ if (!test_bit(B_Valid, &ib->b.flags)) {
+ lafs_clear_index(ib);
+ set_bit(B_Valid, &ib->b.flags);
+ }
+
if (ib->depth <= 1) {
/* take a copy of the uninc_table so we can work while
* more changes can be made
}
lafs_space_return(fs, uit.credits);
- /* If this index block is now empty, we need to be sure it
- * is erased. But if there is anything pending in the next phase
- * we need to wait for that to complete.
+ /* If this index block is now empty, we clear
+ * B_Valid so that it doesn't get written out,
+ * but rather gets allocated as '0'.
*/
if (test_bit(B_Valid, &ib->b.flags) &&
- lafs_leaf_next(ib, ib->b.fileaddr) == 0xFFFFFFFF &&
- atomic_read(&ib->pincnt[0]) == 0 &&
- atomic_read(&ib->pincnt[1]) == 0)
- lafs_erase_iblock(ib);
+ lafs_leaf_next(ib, ib->b.fileaddr) == 0xFFFFFFFF)
+ clear_bit(B_Valid, &ib->b.flags);
}
/***************************************************************