b = list_entry(fs->phase_leafs[phase].next, struct block, lru);
else
b = NULL;
- if (b) {
+ if (b)
/* the list counted a reference. Now we hold it */
list_del_init(&b->lru);
-
- if (!test_bit(B_Index, &b->flags)) {
- if (test_and_set_bit(B_IOLock, &b->flags)) {
- /* someone else is invalidating this block,
- * so I can just skip it. Better drop the ref.
- */
- spin_unlock(&fs->lock);
- putref(b, MKREF(leaf));
- goto retry;
- } else
- set_iolock_info(b);
- }
- }
spin_unlock(&fs->lock);
- if (b && test_bit(B_Index, &b->flags))
- /* we couldn't iolock in the spinlock because there might
- * be an intermediate incorporation happening that
- * would block us. So we get the lock now, waiting if
- * needed.
+ if (b)
+ /* Need an iolock, but if the list gets put on another
+ * lru (like a cluster or back on leafs) then we lose
+ * interest.
*/
- lafs_iolock_block(b);
+ if (lafs_iolock_block_empty(b) == 0) {
+ /* gave up on the lock */
+ putref(b, MKREF(leaf));
+ goto retry;
+ }
return b;
}
/* won't fit */
// printk("Wont fit - used = %d\n", used);
cluster_flush(fs, cnum);
- }
+ } else
+ lafs_io_wake(b);
} while (used < 0);
if (used > 0)
getref(b, MKREF(leaf));
}
spin_unlock(&fs->lock);
+ lafs_io_wake(b);
}
/* check the ->parent link */
if (atomic_read(&b->refcnt) == dec) {
static DECLARE_WAIT_QUEUE_HEAD(block_wait); /* need more of these later FIXME */
-void
-_lafs_iolock_block(struct block *b)
+/* Wake every thread waiting for an IO lock.  block_wait is a single
+ * wait queue shared by all blocks (see the DECLARE_WAIT_QUEUE_HEAD
+ * above), so @b is currently unused; woken waiters re-test their own
+ * block's state and go back to sleep if it is still locked.
+ */
+void lafs_io_wake(struct block *b)
+{
+ wake_up(&block_wait);
+}
+/* Acquire the B_IOLock bit on @b, sleeping uninterruptibly on the
+ * shared block_wait queue until the bit can be taken.
+ * If @checkempty, abandon the wait as soon as b->lru is found to be
+ * non-empty (the block was placed on some lru list) and return 0
+ * without the lock.  Returns 1 when B_IOLock was obtained.
+ */
+int
+_lafs_iolock_block(struct block *b, int checkempty)
{
+ int locked = 1; /* assume we will obtain the lock */
if (test_and_set_bit(B_IOLock, &b->flags)) {
DEFINE_WAIT(wq);
#ifdef DEBUG_IOLOCK
#endif
for (;;) {
prepare_to_wait(&block_wait, &wq, TASK_UNINTERRUPTIBLE);
+ /* With @checkempty, give up rather than keep
+ * waiting once the block appears on an lru list.
+ */
+ if (checkempty && !list_empty_careful(&b->lru)) {
+ locked = 0;
+ break;
+ }
+ if (!test_and_set_bit(B_IOLock, &b->flags))
+ break;
+ schedule();
}
finish_wait(&block_wait, &wq);
}
+ return locked; /* 1 = B_IOLock held, 0 = gave up (checkempty) */
}
void
!db->page || !db->page->private ||
(!PageLocked(db->page) && !PageWriteback(db->page))) {
clear_bit(B_IOLock, &b->flags);
- wake_up(&block_wait);
+ lafs_io_wake(b);
if (test_bit(B_Async, &b->flags))
lafs_wake_cleaner(fs_from_inode(b->inode));
dprintk("unlock non-data block\n");
end_page_writeback(db->page);
}
- wake_up(&block_wait);
+ lafs_io_wake(b);
if (test_bit(B_Async, &b->flags))
lafs_wake_cleaner(fs_from_inode(b->inode));
}
int adopt, u32 *next, int async, REFARG);
u32 lafs_leaf_next(struct indexblock *ib, u32 start);
#ifdef DEBUG_IOLOCK
-#define set_iolock_info(b) do { (b)->iolock_file = __FILE__; (b)->iolock_line = __LINE__; } while (0)
+/* Expression form (comma operator) rather than do{}while(0) so it can
+ * be used inside the ?: in lafs_iolock_block_empty below.
+ */
+#define set_iolock_info(b) ( (b)->iolock_file = __FILE__, (b)->iolock_line = __LINE__)
#else
+/* No-op that is still a valid expression. */
+#define set_iolock_info(b) (0)
-#define set_iolock_info(b) do {} while(0)
#endif
-#define lafs_iolock_block(b) do { _lafs_iolock_block(b); set_iolock_info(b); } while(0)
+/* Unconditionally take the iolock (checkempty == 0). */
+#define lafs_iolock_block(b) do { _lafs_iolock_block(b, 0); set_iolock_info(b); } while(0)
+/* Like lafs_iolock_block, but gives up if @b joins an lru list while
+ * waiting: evaluates to 1 if the lock was taken, 0 if abandoned.
+ * NOTE: @b is evaluated more than once — pass a side-effect-free arg.
+ */
+#define lafs_iolock_block_empty(b) (_lafs_iolock_block(b, 1) ? ( set_iolock_info(b), 1): 0)
-void _lafs_iolock_block(struct block *b);
+void lafs_io_wake(struct block *b); /* wake all iolock waiters */
+int _lafs_iolock_block(struct block *b, int checkempty); /* returns 1 if locked */
void lafs_iounlock_block(struct block *b, int bit);
void lafs_super_write(struct fs *fs, int dev, u64 addr, char *buf, int size);