struct datablock *inodb;
int err;
- /* FIXME run orphan changes */
- if (inode->i_size || inode->i_nlink > 2)
+ if (inode->i_nlink > 2)
return -ENOTEMPTY;
+ if (inode->i_size) {
+		/* Probably not empty, but it could be that we
+		 * just need to wait for orphans to clear.
+ * They cannot clear while we hold i_mutex, so
+ * we have to run it ourselves.
+ */
+ struct datablock *db;
+ DEFINE_WAIT(wq);
+ while ((db = lafs_find_orphan(inode))) {
+ prepare_to_wait(&fs->async_complete, &wq,
+ TASK_UNINTERRUPTIBLE);
+ lafs_dir_handle_orphan(db);
+ if (lafs_drop_orphan(fs, db) == 0)
+ /* still an orphan, need to wait */
+ schedule();
+ }
+ finish_wait(&fs->async_complete, &wq);
+ if (inode->i_size)
+ return -ENOTEMPTY;
+ }
dprintk("rmdir %s\n", de->d_name.name);
void lafs_orphan_release(struct fs *fs, struct datablock *b);
long lafs_run_orphans(struct fs *fs);
int lafs_drop_orphan(struct fs *fs, struct datablock *db);
+struct datablock *lafs_find_orphan(struct inode *ino);
/* Segment.c */
int lafs_prealloc(struct block *b, int type);
return 1;
}
}
+
+/* Find a pending orphan datablock belonging to @ino, or NULL if
+ * there is none.  fs->lock is taken and released internally; the
+ * returned datablock is not referenced or locked by this function,
+ * so the caller must cope with it changing state underneath it.
+ */
+struct datablock *lafs_find_orphan(struct inode *ino)
+{
+	/* I could walk the child tree of the inode, or
+	 * walk the pending_orphan list looking for an
+	 * orphan for this inode.
+	 * The latter seems easier.
+	 * Performance will be quadratic in the size of the
+	 * orphan list, so we could possibly consider
+	 * improvements later.
+	 */
+	struct fs *fs = fs_from_inode(ino);
+	struct datablock *db;
+
+	spin_lock(&fs->lock);
+	list_for_each_entry(db, &fs->pending_orphans, orphans)
+		if (db->b.inode == ino) {
+			spin_unlock(&fs->lock);
+			return db;
+		}
+	/* No match: must not return with fs->lock still held */
+	spin_unlock(&fs->lock);
+	return NULL;
+}