From e7b3664fe5f7b6227610c70d1c57efd8af51dfc5 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Sat, 14 Aug 2010 15:41:59 +1000 Subject: [PATCH] Combine cleaning and orphan list_heads. A datablock is very rarely both an orphan and requiring cleaning, so having two list_heads is a waste. If it is an orphan it will have full parent linkage and addresses already so it will be handled promptly and removed from the cleaning list. So arrange that if a block wants to be both, it is preferentially on the cleaning list, and when removed from the cleaning list it gets added back to the pending_orphan list in case it needs processing. Note that only directory and inode blocks can ever be orphans so some optimisation of spinlocks is possible. Signed-off-by: NeilBrown --- clean.c | 48 ++++++++++++++++++++++++++++++++++++++++-------- orphan.c | 25 ++++++++++--------------- state.h | 16 +++++++++++----- 3 files changed, 61 insertions(+), 28 deletions(-) diff --git a/clean.c b/clean.c index aff7c95..641b6b4 100644 --- a/clean.c +++ b/clean.c @@ -295,8 +295,19 @@ static int try_clean(struct fs *fs, struct toclean *tc) getdref(b, MKREF(cleaning)); igrab(ino); } - if (list_empty(&b->cleaning)) - list_add_tail(&b->cleaning, &tc->cleaning); + if (LAFSI(ino)->type == TypeInodeFile || + LAFSI(ino)->type == TypeDir) { + /* Could become an orphan just now, so need + * to protect b->cleaning + */ + spin_lock(&fs->lock); + list_move_tail(&b->cleaning, &tc->cleaning); + spin_unlock(&fs->lock); + } else { + /* No locking needed */ + if (list_empty(&b->cleaning)) + list_add_tail(&b->cleaning, &tc->cleaning); + } /* FIXME do I need a memory barrier. to ensure truncate * sees the not-list_empty, and we see i_size? 
*/ @@ -364,12 +375,22 @@ static int try_clean(struct fs *fs, struct toclean *tc) * ref now */ done_cleaning: + clear_bit(B_Cleaning, &b->b.flags); + list_del_init(&b->cleaning); - if (test_and_clear_bit(B_Cleaning, &b->b.flags)) { - ino = b->b.inode; - putdref(b, MKREF(cleaning)); - iput(ino); + if (test_bit(B_Orphan, &b->b.flags)) { + spin_lock(&fs->lock); + if (test_bit(B_Orphan, &b->b.flags) && + list_empty(&b->orphans)) { + list_add(&b->orphans, &fs->pending_orphans); + lafs_wake_thread(fs); + } + spin_unlock(&fs->lock); } + + ino = b->b.inode; + putdref(b, MKREF(cleaning)); + iput(ino); putref(cb, MKREF(clean2)); if (rv) goto out; @@ -386,15 +407,26 @@ void lafs_unclean(struct datablock *db) if (!list_empty_careful(&db->cleaning)) { struct fs *fs = fs_from_inode(db->b.inode); mutex_lock(&fs->cleaner.lock); - if (!list_empty(&db->cleaning)) - list_del_init(&db->cleaning); if (test_and_clear_bit(B_Cleaning, &db->b.flags)) { + /* This must be on the cleaner list, so + * it is safe to delete without a spinlock + */ + list_del_init(&db->cleaning); putdref(db, MKREF(cleaning)); iput(db->b.inode); if (test_and_clear_bit(B_Async, &db->b.flags)) { putdref(db, MKREF(async)); lafs_wake_thread(fs); } + if (test_bit(B_Orphan, &db->b.flags)) { + spin_lock(&fs->lock); + if (test_bit(B_Orphan, &db->b.flags) && + list_empty(&db->orphans)) { + list_add(&db->orphans, &fs->pending_orphans); + lafs_wake_thread(fs); + } + spin_unlock(&fs->lock); + } } mutex_unlock(&fs->cleaner.lock); } diff --git a/orphan.c b/orphan.c index 246e6ea..30f18b8 100644 --- a/orphan.c +++ b/orphan.c @@ -303,7 +303,7 @@ int lafs_make_orphan_nb(struct fs *fs, struct datablock *db, struct inode *ino) return err; } -static int lafs_drop_orphan(struct fs *fs, struct datablock *db); +static void lafs_drop_orphan(struct fs *fs, struct datablock *db); /* * When any processing of an orphan makes it not an orphan any more * (e.g. 
link is created for a file, directory block is cleaned) @@ -554,7 +554,7 @@ long lafs_run_orphans(struct fs *fs) return timeout; } -int lafs_drop_orphan(struct fs *fs, struct datablock *db) +static void lafs_drop_orphan(struct fs *fs, struct datablock *db) { /* This block was an orphan but isn't any more. * Remove it from the list. @@ -567,18 +567,13 @@ int lafs_drop_orphan(struct fs *fs, struct datablock *db) orphan_iput(ino); if (test_bit(B_Orphan, &db->b.flags)) - return 0; + return; spin_lock(&fs->lock); - if (test_bit(B_Orphan, &db->b.flags) || - list_empty(&db->orphans)) { - /* Is an orphan again, or it is already removed */ - spin_unlock(&fs->lock); - return 0; - } else { + if (!test_bit(B_Orphan, &db->b.flags) && + !list_empty_careful(&db->orphans) && + !test_bit(B_Cleaning, &db->b.flags)) list_del_init(&db->orphans); - spin_unlock(&fs->lock); - return 1; - } + spin_unlock(&fs->lock); } void lafs_add_orphan(struct fs *fs, struct datablock *db) @@ -591,9 +586,8 @@ void lafs_add_orphan(struct fs *fs, struct datablock *db) */ LAFS_BUG(!test_bit(B_Orphan, &db->b.flags), &db->b); spin_lock(&fs->lock); - if (list_empty(&db->orphans)) + if (list_empty_careful(&db->orphans)) list_add_tail(&db->orphans, &fs->pending_orphans); - spin_unlock(&fs->lock); lafs_wake_thread(fs); } @@ -604,7 +598,8 @@ void lafs_orphan_forget(struct fs *fs, struct datablock *db) * it just now. When we do, lafs_add_orphan will be called */ LAFS_BUG(!test_bit(B_Orphan, &db->b.flags), &db->b); spin_lock(&fs->lock); - if (!list_empty(&db->orphans)) + if (!test_bit(B_Cleaning, &db->b.flags) && + !list_empty_careful(&db->orphans)) list_del_init(&db->orphans); spin_unlock(&fs->lock); } diff --git a/state.h b/state.h index 01b03d3..e155644 100644 --- a/state.h +++ b/state.h @@ -374,12 +374,18 @@ struct datablock { u32 orphan_slot; /* slot in orphan file to record that this * block is an orphan */ - struct list_head orphans; /* linked list of blocks needing orphan - * processing. 
- */ - struct list_head cleaning; /* list of blocks being cleaned. - * Could share with orphans FIXME + union { + /* If a block is both an orphan and undergoing + * cleaning, it lives on the cleaning list until + * the cleaner has checked it. It is then moved + * to the pending_orphans list. + */ + struct list_head orphans; /* linked list of blocks needing orphan + * processing. */ + struct list_head cleaning; /* list of blocks being cleaned. + */ + }; union { struct inode *my_inode; /* only valid for block holding an inode */ }; -- 2.39.5