lafs_erase_dblock(&b[i]);
else if (b_start >= offset) {
/* Just remove block from mapping */
- lafs_iolock_block(&b[i].b);
+ lafs_iolock_written(&b[i].b);
LAFS_BUG(test_bit(B_Dirty, &b[i].b.flags),
&b[i].b);
LAFS_BUG(test_bit(B_Realloc, &b[i].b.flags),
set_bit(AS_EIO, &mapping->flags);
if (test_bit(B_Dirty, &b[i].b.flags) ||
test_bit(B_Pinned, &b[i].b.flags) ||
- test_bit(B_IOLock, &b[i].b.flags)
+ test_bit(B_IOLock, &b[i].b.flags) ||
+ test_bit(B_Writeback, &b[i].b.flags)
/* NOTE: if we find an Uninc is set when we
* need to invalidate the page, then we
* should be waiting for all pages to be gone
struct fs *fs = fs_from_inode(b->b.inode);
dprintk("Eraseblock for %s\n", strblk(&b->b));
- lafs_iolock_block(&b->b);
+ lafs_iolock_written(&b->b);
if (b->b.physaddr == 0 &&
b->b.fileaddr == 0 &&
LAFSI(b->b.inode)->depth == 0) {
getiref_locked(ib, MKREF("erasedblock"));
spin_unlock(&b->b.inode->i_data.private_lock);
if (ib) {
- lafs_iolock_block(&ib->b);
+ lafs_iolock_written(&ib->b);
if (ib->depth == 0) {
LAFS_BUG(LAFSI(b->b.inode)->depth !=
ib->depth, &b->b);
strcat(ans, "Valid,");
if (test_bit(B_Dirty, &b->flags))
strcat(ans, "Dirty,");
+ if (test_bit(B_Writeback, &b->flags))
+ strcat(ans, "Writeback,");
if (test_bit(B_Linked, &b->flags))
strcat(ans, "Linked,");
if (test_bit(B_Realloc, &b->flags))
/* the list counted a reference. Now we hold it */
list_del_init(&b->lru);
spin_unlock(&fs->lock);
- if (b)
- /* Need an iolock, but if the list gets put on another
- * lru (like a cluster or back on leafs) then we lose
- * interest.
+ if (b) {
+ /* Need an iolock, but if the block is in writeback,
+ * we don't want it.
*/
- if (lafs_iolock_block_empty(b) == 0) {
- /* gave up on the lock */
+ lafs_iolock_block(b);
+ if (test_bit(B_Writeback, &b->flags)) {
+ lafs_iounlock_block(b);
putref(b, MKREF(leaf));
goto retry;
}
-
+ }
return b;
}
cluster_flush(fs, cnum);
}
+ if (test_and_set_bit(B_Writeback, &b->flags))
+ LAFS_BUG(1, b);
+ lafs_iounlock_block(b);
/* insert into list ensuring there is enough space
* in cluster head
*/
list_for_each_entry_safe(b, tmp, &tofree, lru) {
list_del_init(&b->lru);
- lafs_iounlock_block(b);
+ lafs_writeback_done(b);
putref(b, MKREF(cluster));
}
}
b0 = getdref(b, MKREF(writepage0));
if (test_bit(B_Dirty, &b->b.flags)) {
- lafs_iolock_block(&b->b);
+ lafs_iolock_written(&b->b);
lafs_cluster_allocate(&b->b, 0);
}
putdref(b, MKREF(writepage));
if (b0) {
set_page_writeback(page);
set_bit(B_HaveWriteback, &b0->b.flags);
- lafs_iocheck_block(b0, 0);
+ lafs_iocheck_writeback(b0, 0);
}
unlock_page(page); /* FIXME this must not happen before
the writes complete! */
*/
struct datablock *b = lafs_inode_dblock(ino, 0,
MKREF(writepageflush));
- lafs_iolock_block(&b->b);
+ lafs_iolock_written(&b->b);
lafs_cluster_allocate(&b->b, 0);
putdref(b, MKREF(writepageflush));
}
// FIXME this test will flush a little too often.
// We need some better way to know if a block is in
// the current cluster queue...
- if (test_bit(B_IOLock, &b->b.flags) &&
+ if (test_bit(B_Writeback, &b->b.flags) &&
test_bit(B_Dirty, &b->b.flags) &&
!lafs_cluster_empty(fs, 0)
) {
if (!list_empty(&b->lru) &&
!test_bit(B_IOLock, &b->flags) &&
+ !test_bit(B_Writeback, &b->flags) &&
!test_bit(B_OnFree, &b->flags))
/* If B_IOLock, then it might be on the cluster
* list, but not the LRU.
if ((list_empty(&b->lru) || test_bit(B_OnFree, &b->flags)) &&
test_bit(B_Pinned, &b->flags) &&
!test_bit(B_IOLock, &b->flags) &&
+ !test_bit(B_Writeback, &b->flags) &&
(!test_bit(B_Index, &b->flags) ||
atomic_read(&iblk(b)->pincnt[ph]) == 0)) {
if (test_and_clear_bit(B_OnFree, &b->flags)) {
* two index blocks, which would require an alloc.
*/
- lafs_iolock_block(&p->b);
+ lafs_iolock_written(&p->b);
lafs_incorporate(fs, p);
lafs_iounlock_block(&p->b);
return 0;
LAFS_BUG(!test_bit(B_Dirty, &blk->flags) &&
!test_bit(B_Realloc, &blk->flags) &&
phys != 0, blk);
- LAFS_BUG(!test_bit(B_IOLock, &blk->flags), blk);
+ if (phys)
+ LAFS_BUG(!test_bit(B_Writeback, &blk->flags), blk);
+ else
+ LAFS_BUG(!test_bit(B_IOLock, &blk->flags), blk);
if (test_bit(B_Root, &blk->flags)) {
int i;
LAFS_BUG(LAFSI(ino)->type != 0, &b->b);
lafs_orphan_release(fs, b);
if (test_bit(B_Dirty, &ib->b.flags)) {
- lafs_iolock_block(&ib->b);
+ lafs_iolock_written(&ib->b);
lafs_cluster_allocate(&ib->b, 0);
}
lafs_erase_dblock(b);
* Incorporating it should unpin it.
*/
getiref(ib2, MKREF(inode_handle_orphan2));
- lafs_iolock_block(&ib2->b);
+ lafs_iolock_written(&ib2->b);
do
lafs_incorporate(fs, ib2);
while (ib2->uninc_table.pending_cnt || ib2->uninc);
{
wake_up(&block_wait);
}
+/* Acquire the IO lock (B_IOLock) on @b, sleeping on block_wait
+ * until the bit can be claimed with test_and_set_bit().
+ * The former 'checkempty' early-exit (give up if the block
+ * reappeared on an lru) is removed by this patch, so acquisition
+ * can no longer fail and the function now returns void.
+ */
-int
-_lafs_iolock_block(struct block *b, int checkempty)
+void _lafs_iolock_block(struct block *b)
{
- int locked = 1;
if (test_and_set_bit(B_IOLock, &b->flags)) {
DEFINE_WAIT(wq);
#ifdef DEBUG_IOLOCK
#endif
+ /* Bit already held: sleep until the holder drops it
+ * and wakes block_wait, then retry the test-and-set.
+ */
for (;;) {
prepare_to_wait(&block_wait, &wq, TASK_UNINTERRUPTIBLE);
- if (checkempty && !list_empty_careful(&b->lru)) {
- locked = 0;
- break;
- }
+
if (!test_and_set_bit(B_IOLock, &b->flags))
break;
schedule();
}
finish_wait(&block_wait, &wq);
}
- return locked;
}
void
}
+/* lafs_writeback_done - clear the writeback state of @b.
+ *
+ * Index blocks have no containing page bookkeeping, so their
+ * B_Writeback bit is cleared directly.  For a data block,
+ * lafs_iocheck_writeback() clears the bit and, when this was the
+ * last block on the page still under writeback, ends page
+ * writeback too.  Either way, waiters (e.g. wait_writeback())
+ * sleeping on block_wait are then woken via lafs_io_wake().
+ */
+void lafs_writeback_done(struct block *b)
+{
+ /* remove writeback flag on this block.
+ * If it is last on page, release page as well.
+ */
+
+ if (test_bit(B_Index, &b->flags))
+ clear_bit(B_Writeback, &b->flags);
+ else
+ lafs_iocheck_writeback(dblk(b), 1);
+
+ lafs_io_wake(b);
+}
+
void lafs_iocheck_block(struct datablock *db, int unlock)
{
struct page *page = db->page;
struct datablock *blist;
int n, i;
int locked = 0;
- int havelock = 0, havewrite = 0;
+ int havelock = 0;
if (!page)
return;
}
if (!locked && test_and_clear_bit(B_HaveLock, &blist->b.flags))
havelock = 1;
- if (!locked && test_and_clear_bit(B_HaveWriteback, &blist->b.flags))
- havewrite = 1;
bit_spin_unlock(B_IOLockLock, &blist->b.flags);
if (havelock) {
SetPageUptodate(page);
unlock_page(page);
}
+}
+
+/* lafs_iocheck_writeback - per-page writeback accounting for @db.
+ *
+ * If @unlock, clear B_Writeback on @db itself first.  Then scan
+ * every block sharing @db's page (n blocks per page, derived from
+ * the inode's block size) under the B_IOLockLock bit-spinlock.
+ * Only when no block on the page is still in writeback, and the
+ * page's head block carries B_HaveWriteback, is page-level
+ * writeback ended with end_page_writeback().
+ * Mirrors lafs_iocheck_block(), which plays the same role for
+ * B_IOLock/B_HaveLock and the page lock.
+ */
+void lafs_iocheck_writeback(struct datablock *db, int unlock)
+{
+ struct page *page = db->page;
+ struct datablock *blist;
+ int n, i;
+ int locked = 0;
+ int havewrite = 0;
+
+ if (!page)
+ return;
+ blist = (struct datablock *)page->private;
+ if (!blist)
+ return;
+
+ n = 1<<(PAGE_CACHE_SHIFT - blist->b.inode->i_blkbits);
+ bit_spin_lock(B_IOLockLock, &blist->b.flags);
+ if (unlock)
+ clear_bit(B_Writeback, &db->b.flags);
+ for (i = 0 ; i < n; i++) {
+ if (test_bit(B_Writeback, &blist[i].b.flags))
+ locked++;
+ /* FIXME what about checking uptodate ?? */
+ }
+ if (!locked && test_and_clear_bit(B_HaveWriteback, &blist->b.flags))
+ havewrite = 1;
+ bit_spin_unlock(B_IOLockLock, &blist->b.flags);
+
if (havewrite)
end_page_writeback(page);
}
return -EAGAIN;
}
+/* Sleep on block_wait until B_Writeback is clear on @b.
+ * Standard prepare_to_wait()/schedule()/finish_wait() loop: the
+ * flag is re-tested after prepare_to_wait() so a wake-up from
+ * lafs_writeback_done() (via lafs_io_wake) cannot be missed.
+ */
+static void wait_writeback(struct block *b)
+{
+ if (test_bit(B_Writeback, &b->flags)) {
+ DEFINE_WAIT(wq);
+ for (;;) {
+ prepare_to_wait(&block_wait, &wq, TASK_UNINTERRUPTIBLE);
+ if (test_bit(B_Writeback, &b->flags))
+ schedule();
+ else
+ break;
+ }
+ finish_wait(&block_wait, &wq);
+ }
+}
+
+/* Take the IO lock on @b and additionally wait for any
+ * in-flight writeback to finish.  Callers that are about to
+ * modify or erase the block use this instead of plain
+ * lafs_iolock_block(), which no longer excludes writeback.
+ */
+void lafs_iolock_written(struct block *b)
+{
+ lafs_iolock_block(b);
+ wait_writeback(b);
+}
+
static void
block_loaded(struct bio *bio, int error)
{
#else
#define set_iolock_info(b) (0)
#endif
-#define lafs_iolock_block(b) do { _lafs_iolock_block(b, 0); set_iolock_info(b); } while(0)
-#define lafs_iolock_block_empty(b) (_lafs_iolock_block(b, 1) ? ( set_iolock_info(b), 1): 0)
+#define lafs_iolock_block(b) do { _lafs_iolock_block(b); set_iolock_info(b); } while(0)
void lafs_io_wake(struct block *b);
-int _lafs_iolock_block(struct block *b, int checkempty);
+void _lafs_iolock_block(struct block *b);
+void lafs_iolock_written(struct block *b);
void lafs_iounlock_block(struct block *b);
void lafs_iocheck_block(struct datablock *db, int unlock);
+void lafs_iocheck_writeback(struct datablock *db, int unlock);
+void lafs_writeback_done(struct block *b);
void lafs_super_write(struct fs *fs, int dev, u64 addr, char *buf, int size);
int lafs_super_wait(struct fs *fs);
B_UnincCredit, /* Uninc carries a credit */
B_OnFree, /* index block on the free list */
B_IOLock, /* Block is undergoing IO */
+ B_Writeback, /* Block is in a cluster */
B_IOLockLock, /* lock while testing IOLock on a page */
B_HaveLock, /* We own the page lock and when all blocks are unlocked,