		return ib;
	offset = li->metadata_size;
-	lafs_iolock_block(&ib->b);
+	if (!async)
+		lafs_iolock_block(&ib->b);
+	else if (test_and_set_bit(B_IOLock, &ib->b.flags)) {
+		err = -EAGAIN;
+		goto err;
+	} else
+		set_iolock_info(&ib->b);
+
	while (ib->depth > 1) {
		/* internal index block */
		/* ALERT: If the index tree has only just grown, then
		err = lafs_wait_block(&ib2->b);
		if (err)
			goto err_ib2;
-		lafs_iolock_block(&ib->b);
+		if (!async)
+			lafs_iolock_block(&ib->b);
+		else if (test_and_set_bit(B_IOLock, &ib->b.flags)) {
+			err = -EAGAIN;
+			goto err_ib2;
+		} else
+			set_iolock_info(&ib->b);
	}
-	lafs_iolock_block(&ib2->b);
+	if (!async)
+		lafs_iolock_block(&ib2->b);
+	else if (test_and_set_bit(B_IOLock, &ib2->b.flags)) {
+		err = -EAGAIN;
+		goto err_ib2;
+	} else
+		set_iolock_info(&ib2->b);
+
	/* This block might have been split, in which case
	 * we need to consider adjacent siblings.
	 */
	}
	getref_locked(nxt, REF);
	spin_unlock(&inode->i_data.private_lock);
-	lafs_iolock_block(nxt);
+	if (!async)
+		lafs_iolock_block(nxt);
+	else if (test_and_set_bit(B_IOLock, &nxt->flags)) {
+		err = -EAGAIN;
+		putref(nxt, REF);
+		goto err_ib2;
+	} else
+		set_iolock_info(nxt);
+
	lafs_iounlock_block(&ib2->b);
	putiref(ib2, REF);
	ib2 = iblk(nxt);
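
The same try-lock sequence now appears at all four call sites. A small helper could carry it instead; the sketch below is only an illustration, assuming the blocks involved are "struct block" and that B_IOLock, lafs_iolock_block() and set_iolock_info() behave as the hunks above show (the helper name itself is made up):

/* Hypothetical helper, not part of the patch above. */
static int lafs_iolock_block_maybe_async(struct block *b, int async)
{
	if (!async) {
		/* synchronous caller: wait for the iolock as before */
		lafs_iolock_block(b);
		return 0;
	}
	/* async caller: try once, return -EAGAIN so it can retry later */
	if (test_and_set_bit(B_IOLock, &b->flags))
		return -EAGAIN;
	set_iolock_info(b);
	return 0;
}

Each site would then reduce to something like "err = lafs_iolock_block_maybe_async(&ib->b, async); if (err) goto err;", with any per-site cleanup (such as the putref(nxt, REF) at the last site) kept in the error path.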