From: NeilBrown Date: Thu, 1 Jul 2010 07:51:35 +0000 (+1000) Subject: Make sure the segment being written is never cleaned. X-Git-Url: http://git.neil.brown.name/?a=commitdiff_plain;h=edc711d22b6cef422320d136d9014830edb46286;p=LaFS.git Make sure the segment being written is never cleaned. Cleaning the current segment would be a bad idea as its usage count isn't really representative of anything useful. So leave it in the table flagged as 'active' to avoid it becoming cleanable, and remove it when the segment is finished with. Signed-off-by: NeilBrown --- diff --git a/cluster.c b/cluster.c index 3bbe200..5569205 100644 --- a/cluster.c +++ b/cluster.c @@ -402,7 +402,6 @@ static int new_segment(struct fs *fs, int cnum) * clean_reserved */ struct wc *wc = fs->wc + cnum; - u64 p; unsigned int dev; u32 seg; @@ -411,11 +410,7 @@ static int new_segment(struct fs *fs, int cnum) * don't I ??? */ if (wc->seg.dev >= 0) { - p = segtovirt(fs, wc->seg.dev, wc->seg.num); - /* If this is the first segment, p would be 0, - * so add one to ensure we drop the reference. 
- */ - lafs_seg_deref(fs, p + 1, 0); + lafs_seg_forget(fs, wc->seg.dev, wc->seg.num); wc->seg.dev = -1; } diff --git a/lafs.h b/lafs.h index 6adfb36..78cadfd 100644 --- a/lafs.h +++ b/lafs.h @@ -612,6 +612,7 @@ struct datablock *lafs_find_orphan(struct inode *ino); int lafs_prealloc(struct block *b, int type); int lafs_seg_ref_block(struct block *b, int ssnum); void lafs_seg_deref(struct fs *fs, u64 addr, int ssnum); +void lafs_seg_forget(struct fs *fs, int dev, u32 seg); void lafs_seg_flush_all(struct fs *fs); void lafs_seg_apply_all(struct fs *fs); void lafs_seg_put_all(struct fs *fs); diff --git a/segments.c b/segments.c index 8b59a48..674c0fb 100644 --- a/segments.c +++ b/segments.c @@ -659,7 +659,7 @@ int lafs_space_alloc(struct fs *fs, int credits, int why) * Segment usage / youth tracking */ -/* information about free and cleanable segments are stored in a table +/* Information about free and cleanable segments are stored in a table * comprised of a few pages. Indexes into this table are 16bit. 4bits * for page number, 12 bits for index in the page. * Different pages have different sized entries to allow for different @@ -683,7 +683,7 @@ int lafs_space_alloc(struct fs *fs, int credits, int why) * segments can enter at any point. * When we choose a segment to clean, we remove the entry from the cleanable * list. We do this by setting the score to 0xFFFFFFFE and unlinking it. - * After cleaning as completed a scan should find that it is clean and so + * After cleaning has completed a scan should find that it is clean and so * add it to the 'clean' list. * When we record a 'clean' segment as 'free' (after a checkpoint) we * move it from the clean list to the free list, from where it will be @@ -692,6 +692,11 @@ int lafs_space_alloc(struct fs *fs, int credits, int why) * is more than half the size of the table. * Similarly when the cleanable table reaches half the available size * we remove the least-interesting half. 
+ * When we choose a free segment to start filling, we remove it from the + * free list but not from the table. The score is set to 0xFFFFFFFD to + * record that it is unlinked but busy. If it is found to be cleanable, + * it will be ignored. When we finish filling a segment, we find the entry + * again and remove it properly so it can become cleanable later. */ struct segstat { @@ -986,12 +991,12 @@ static void segdelete(struct segtracker *st, struct segstat *ss) int h; int pos; - BUG_ON(ss2 != ss); + BUG_ON(ss2 == NULL); pos = *where[0]; for (h = 0; h < SEG_NUM_HEIGHTS && *where[h] == pos ; h++) - *where[h] = ss->skip[h]; - ss->next = st->unused.first; + *where[h] = ss2->skip[h]; + ss2->next = st->unused.first; st->unused.first = pos; st->unused.cnt++; lafs_check_seg_cnt(st); @@ -1100,8 +1105,8 @@ again: if (fs->segtrack->free.first == 0xFFFF) fs->segtrack->free.last = 0xFFFF; fs->segtrack->free.cnt--; - segdelete(fs->segtrack, ss); - + ss->score = 0xFFFFFFFD; + /* still in table, but unlinked */ spin_unlock(&fs->lock); /* now need to reserve/dirty/reference the youth and @@ -1121,10 +1126,6 @@ again: if (ssnum == 0) (void)lafs_pin_dblock(ssum->youthblk, AccountSpace); lafs_checkpoint_unlock(fs); - /* These aren't in the table any more - the segsum holds - * the necessary reference */ - putdref(ssum->ssblk, MKREF(intable)); - putdref(ssum->youthblk, MKREF(intable)); } lafs_dirty_dblock(db); putdref(db, MKREF(youth)); @@ -1151,6 +1152,33 @@ again: } } +void lafs_seg_forget(struct fs *fs, int dev, u32 seg) +{ + /* this segment was being filled and is now full. + * We need to drop it from the table, and drop + * references to the blocks + */ + struct segstat tmp; + struct segsum *ss; + + spin_lock(&fs->lock); + tmp.dev = dev; + tmp.segment = seg; + segdelete(fs->segtrack, &tmp); + spin_unlock(&fs->lock); + + ss = segsum_find(fs, seg, dev, 0); + BUG_ON(IS_ERR(ss)); + BUG_ON(atomic_read(&ss->refcnt) < 2); + /* Removed from table so ... 
*/ + putdref(ss->ssblk, MKREF(intable)); + putdref(ss->youthblk, MKREF(intable)); + ss_put(ss, fs); + ss_put(ss, fs); + + +} + static u16 segunused(struct segtracker *st) { struct segstat *ss; @@ -1249,9 +1277,10 @@ static int add_clean(struct fs *fs, unsigned int dev, u32 seg) fs->segtrack->clean.last = fs->segtrack->clean.first; fs->segtrack->clean.cnt++; + } else if (ss->score != 0xFFFFFFFD) { + ss->score = 0; + ss->usage = 0; } - ss->score = 0; - ss->usage = 0; spin_unlock(&fs->lock); return 0; } @@ -1474,8 +1503,8 @@ retry: BUG_ON(rv); goto retry; } - spin_unlock(&fs->lock); segdelete(fs->segtrack, ss); + spin_unlock(&fs->lock); if (ss->usage < (fs->devs[ss->dev].segment_size - 4) * 126 / 128) /* weird heuristic ?? */ @@ -1531,6 +1560,8 @@ static int add_cleanable(struct fs *fs, unsigned int dev, u32 seg, /* already in the table. Just update the usage. * It must be on the right list. */ + if (ss->score == 0xFFFFFFFD) + ; /* still in use */ if (ss->usage == 0 && ss->score > 0) ; /* on free list, leave it alone */ else {