{
int err = 0;
struct fs *fs = fs_from_inode(b->inode);
+ int in_emergency;
if (!test_bit(B_PhysValid, &b->flags))
b->physaddr = 0;
*/
alloc_type = ReleaseSpace;
+ /* Important to test EmergencyClean before we
+ * call in to lafs_space_alloc, to avoid races:
+ * space can become available and EmergencyClean
+ * can be set at the same time (strange, but true).
+ */
+ in_emergency = test_bit(EmergencyClean, &fs->fsstate);
/* Allocate space in the filesystem */
err = err ?: lafs_prealloc(b, alloc_type);
if (err == 0)
return 0;
if (alloc_type == NewSpace) {
- if (test_bit(EmergencyClean, &fs->fsstate))
+ if (in_emergency)
return -ENOSPC;
return -EAGAIN;
}
if (!test_bit(FinalCheckpoint, &fs->fsstate))
lafs_seg_apply_all(fs);
+ lafs_clean_free(fs);
+ if (test_bit(EmergencyPending, &fs->fsstate))
+ set_bit(EmergencyClean, &fs->fsstate);
lafs_write_state(fs);
dprintk("State written, all done %d\n", fs->seq);
dprintk("C=%llu F=%llu T=%llu\n", C, F, T);
if ((F < C || C * F >= T * (F - C)) &&
!test_bit(EmergencyClean, &fs->fsstate) &&
+ !test_bit(EmergencyPending, &fs->fsstate) &&
!test_bit(CleanerBlocks, &fs->fsstate)) {
dprintk("CLEANER: enough cleaning with %d segments\n",
i);
if (!tc->have_addr) {
dprintk("CLEANER: Nothing found to clean at %d :-(\n",
i);
+ if (i == 0 && test_bit(EmergencyPending, &fs->fsstate)
+ && !test_bit(EmergencyClean, &fs->fsstate))
+ lafs_checkpoint_start(fs);
break;
}
printk("CLEANER: clean %d/%d\n", tc->dev, tc->seg);
}
}
if (test_bit(CleanerBlocks, &fs->fsstate)) {
- int clean = lafs_clean_count(fs);
+ int any_clean;
+ int clean = lafs_clean_count(fs, &any_clean);
dprintk("clean=%d max_seg=%d need=%d\n", (int)clean,
(int)fs->max_segment, (int)fs->cleaner.need);
- if (clean * fs->max_segment >= fs->cleaner.need)
+ if (any_clean &&
+ clean * fs->max_segment >=
+ fs->allocated_blocks + fs->cleaner.need)
lafs_checkpoint_start(fs);
}
return MAX_SCHEDULE_TIMEOUT;
if (cnum && fs->clean_reserved < fs->max_segment) {
/* we have reached the end of the last cleaner
* segment. The remainder of clean_reserved
- * is of no value (if there even is any
+ * is of no value (if there even is any)
*/
if (fs->clean_reserved) {
spin_lock(&fs->alloc_lock);
int lafs_alloc_cleaner_segs(struct fs *fs, int max);
int lafs_space_alloc(struct fs *fs, int credits, int why);
unsigned long lafs_scan_seg(struct fs *fs);
-int lafs_clean_count(struct fs *fs);
+int lafs_clean_count(struct fs *fs, int *any_clean);
+void lafs_clean_free(struct fs *fs);
/* Cleaner */
unsigned long lafs_do_clean(struct fs *fs);
* This function applies all the delayed updates to the segment usage
* files at the end of a checkpoint.
*/
-static void clean_free(struct fs *fs);
void lafs_seg_apply_all(struct fs *fs)
{
int i;
spin_unlock(&fs->stable_lock);
}
/* Now any clean segments found earlier are free. */
- clean_free(fs);
}
/*
if (credits == 0) {
if (why == NewSpace)
- /* FIXME This is really a bit too early - need to wait
- * at least one checkpoint
- */
- set_bit(EmergencyClean, &fs->fsstate);
+ set_bit(EmergencyPending, &fs->fsstate);
if (!test_bit(CleanerBlocks, &fs->fsstate) ||
fs->cleaner.need > watermark + fs->max_segment) {
fs->cleaner.need = watermark + fs->max_segment;
lafs_wake_thread(fs);
}
} else if (why == NewSpace)
- clear_bit(EmergencyClean, &fs->fsstate);
+ if (test_bit(EmergencyClean, &fs->fsstate) ||
+ test_bit(EmergencyPending, &fs->fsstate)) {
+ clear_bit(EmergencyPending, &fs->fsstate);
+ clear_bit(EmergencyClean, &fs->fsstate);
+ }
fs->allocated_blocks += credits;
BUG_ON(fs->free_blocks + fs->clean_reserved < fs->allocated_blocks);
return rv;
}
-int lafs_clean_count(struct fs *fs)
+int lafs_clean_count(struct fs *fs, int *any_clean)
{
int rv;
spin_lock(&fs->lock);
+ *any_clean = fs->segtrack->clean.cnt != 0;
rv = fs->segtrack->free.cnt + fs->segtrack->clean.cnt;
spin_unlock(&fs->lock);
return rv;
spin_unlock(&fs->lock);
}
-static void clean_free(struct fs *fs)
+void lafs_clean_free(struct fs *fs)
{
/* We are finishing off a checkpoint. Move all from 'clean'
* list to 'free' list, and set the youth for each to 0.
u16 *where[SEG_NUM_HEIGHTS];
WARN_ON(lafs_check_seg_cnt(fs->segtrack));
- if (fs->scan.trace || lafs_trace)
+ if (fs->scan.trace || lafs_trace || 1)
printk("CLEANABLE: %u/%lu y=%d u=%d\n",
dev, (unsigned long)seg, (int)youth, (int)usage);
if (youth < 8)
if (test_bit(EmergencyClean, &fs->fsstate))
score = usage;
else
+ /* Adding 0x10000 ensures this score is always
+ * greater than the score computed above. */
score = youth * usage / segsize + 0x10000;
spin_lock(&fs->lock);
#define SecondFlushNeeded 9 /* Need a second cluster to commit the blocks
* in the previous one
*/
+#define EmergencyPending 10 /* Cleaner isn't quite in emergency mode, but
+ * should be after the next checkpoint unless that
+ * releases lots of space
+ */
struct work_struct done_work; /* used for handling
* refile after write completes */
fs->nonlog_dev = le16_to_cpu(st->nonlog_dev);
fs->nonlog_offset = le16_to_cpu(st->nonlog_offset);
fs->youth_next = le16_to_cpu(st->nextyouth);
+ fs->checkpoint_youth = fs->youth_next;
if (fs->youth_next < 8)
fs->youth_next = 8;
fs->scan.first_free_pass = 1;