fb = lafs_get_block(ino, first, page, GFP_KERNEL, MKREF(write));
dprintk("PREPARE %p\n", fb);
- if (!fb)
- return -ENOMEM;
+ if (!fb) {
+ err = -ENOMEM;
+ goto fail;
+ }
/* Further lafs_get_block calls cannot fail as both the page
* and the block structures exist
*/
goto retry;
}
if (err < 0)
- goto fail;
+ goto fail_unlock;
*fsdata = fb;
return 0;
-fail:
+fail_unlock:
lafs_checkpoint_unlock(fs);
- while (--i >= first)
- putdref(&fb[i-first], MKREF(write));
+fail:
+ if (fb)
+ for (i = first; i <= last ; i++)
+ putdref(&fb[i-first], MKREF(write));
+ unlock_page(page);
+ page_cache_release(page);
return err;
}
struct fs *fs;
int bits;
int i;
+ int want_flush = 0;
+
+ if (!PageWriteback(page))
+		/* Presumably the page is locked - nothing
+		 * we can do
+		 */
+ return;
mapping = page->mapping;
if (!mapping)
fs = fs_from_inode(ino);
bits = PAGE_SHIFT - ino->i_blkbits;
- for (i = 0; i < (1<<bits); i++) {
- struct datablock *b = lafs_get_block(ino, i, page,
- GFP_KERNEL, MKREF(sync_page));
- if (!b)
- continue;
- /* If this block is still dirty though the page is in
- * writeback, the block must be in the current cluster
- */
- if (test_bit(B_Dirty, &b->b.flags)) {
- putdref(b, MKREF(sync_page));
- lafs_cluster_flush(fs, 0);
- break;
+ spin_lock(&mapping->private_lock);
+ if (PagePrivate(page)) {
+ struct datablock *bl = (struct datablock *)page->private;
+
+ for (i = 0; i < (1<<bits); i++) {
+			/* If this block is still dirty even though the page
+			 * is in writeback, the block must be in the current
+			 * cluster
+			 */
+ if (test_bit(B_Dirty, &bl[i].b.flags)) {
+ want_flush = 1;
+ break;
+ }
}
- putdref(b, MKREF(sync_page));
}
+ spin_unlock(&mapping->private_lock);
+ if (want_flush)
+ lafs_cluster_flush(fs, 0);
}
static int