*/
getiref(ib2, MKREF(inode_handle_orphan2));
lafs_iolock_block(&ib2->b);
- lafs_incorporate_loop(fs, ib2);
+ /* call lafs_incorporate at least once to ensure
+ * that lafs_erase_iblock gets called
+ */
+ do
+ lafs_incorporate(fs, ib2);
+ while (ib2->uninc_table.pending_cnt || ib2->uninc);
lafs_iounlock_block(&ib2->b, 0);
putiref(ib2, MKREF(inode_handle_orphan2));
printk(".");
LAFSI(ino)->trunc_next = trunc_next;
lafs_iolock_block(&ib2->b);
- lafs_incorporate_loop(fs, ib2);
+ while (ib2->uninc_table.pending_cnt || ib2->uninc)
+ lafs_incorporate(fs, ib2);
lafs_iounlock_block(&ib2->b, 0);
dprintk("Trunc %d\n", (int)LAFSI(ino)->trunc_next);
lafs_walk_leaf_index(ib2, prune, ib2);
int lafs_summary_allocate(struct fs *fs, struct inode *ino, int diff);
void lafs_qcommit(struct fs *fs, struct inode *ino, int diff, int phase);
void lafs_incorporate(struct fs *fs, struct indexblock *ib);
-void lafs_incorporate_loop(struct fs *fs, struct indexblock *ib);
void lafs_walk_leaf_index(struct indexblock *ib,
int (*handle)(void*, u32, u64, int),
void *data);
int choice;
struct layoutinfo layout;
u32 next = 0;
+ u32 next2;
int uinxt, uinum;
struct block *b, *tmp;
}
layout.data = ibuf;
layout.size = len;
- next = walk_extent(next, &sbuf, slen, ui, add_extent, &layout);
+ next2 = walk_extent(next, &sbuf, slen, ui, add_extent, &layout);
+ BUG_ON(next2 != 0);
if (slen && layout.data > sbuf) {
printk("slen=%d ld-sb=%d layout.data=%p sbuf=%p buf=%p ibuf=%p len=%d\n",
slen, layout.data-sbuf, layout.data, sbuf, buf, ibuf, len);
memset(buf, 0, blocksize - offset);
*(u16*)(buf) = cpu_to_le16(IBLK_EXTENT);
LAFSI(ib->b.inode)->depth = 1;
+ ib->depth = 1;
}
} else
offset = 0;
new = lafs_iblock_alloc(fs, GFP_NOFS, 1, MKREF(inc));
/* FIXME need to preallocate something for a fall-back?? */
+ if (ib->depth < 1) printk("small depth %s\n", strblk(&ib->b));
+ BUG_ON(ib->depth < 1);
if (ib->depth == 1) {
rv = do_incorporate_leaf(fs, ib, &uit, new);
} else
*/
dprintk("incorp to empty off=%d %s\n", (int)offset, strblk(&ib->b));
lafs_iblock_free(new);
+ buf = map_iblock(ib);
+ memset(buf + offset, 0, blocksize - offset);
+ *(u16*)(buf) = cpu_to_le16(IBLK_INDIRECT);
+ unmap_iblock(ib, buf);
if (offset) {
- buf = map_iblock(ib);
- memset(buf + offset, 0, blocksize - offset);
- *(u16*)(buf) = cpu_to_le16(IBLK_INDIRECT);
- unmap_iblock(ib, buf);
LAFSI(ib->b.inode)->depth = 1;
if (LAFSI(ib->b.inode)->type == 0)
/* truncate has finished */
lafs_erase_dblock(LAFSI(ib->b.inode)->dblock);
- } else
- lafs_erase_iblock(ib);
+ }
break;
lafs_space_return(fs, uit.credits);
/* If this index block is now empty, we need to be sure it
- * is erased.
+ * is erased. But if there is anything pending in the next phase
+ * we need to wait for that to complete.
*/
if (test_bit(B_Valid, &ib->b.flags) &&
- lafs_leaf_next(ib, ib->b.fileaddr) == 0xFFFFFFFF)
+ lafs_leaf_next(ib, ib->b.fileaddr) == 0xFFFFFFFF &&
+ atomic_read(&ib->pincnt[0]) == 0 &&
+ atomic_read(&ib->pincnt[1]) == 0)
lafs_erase_iblock(ib);
}
-void lafs_incorporate_loop(struct fs *fs, struct indexblock *ib)
-{
- /* Repeatedly run lafs_incorporate until
- * uninc_table is empty. This is only used during
- * truncation and we know all the incorporated addresses
- * are zero, so no splitting or such is needed.
- */
- while (ib->uninc_table.pending_cnt || ib->uninc)
- lafs_incorporate(fs, ib);
-}
-
/***************************************************************
* Space pre-allocation
* We need to make sure that the block and all parents