if (err == 0)
return 0;
+ /* FIXME maybe CleanSpace should return -EAGAIN if there
+ * is a good chance that the cleaner will help out soon??
+ * I wonder how "soon" can be defined.
+ */
if (alloc_type == CleanSpace || alloc_type == NewSpace)
return -ENOSPC;
if (alloc_type == ReleaseSpace)
unsigned long long cp = fs->wc[0].cluster_seq;
WARN_ON(test_bit(FinalCheckpoint, &fs->fsstate));
set_bit(CheckpointNeeded, &fs->fsstate);
+ /* Whenever we do a checkpoint, get anyone waiting on
+ * space to check again */
+ clear_bit(CleanerBlocks, &fs->fsstate);
fs->prime_sb->s_dirt = 0;
lafs_wake_cleaner(fs);
return cp;
* right??
*/
wait_event(fs->phase_wait,
+ !test_bit(CleanerBlocks, &fs->fsstate) &&
!test_bit(CheckpointNeeded, &fs->fsstate) &&
fs->checkpointing == 0);
}
timeout = to;
lafs_clusters_done(fs);
+ cond_resched();
}
return 0;
}
F = 0;
dprintk("C=%llu F=%llu T=%llu\n", C, F, T);
- if (F < C || C * F >= T * (F - C)) {
+ if ((F < C || C * F >= T * (F - C)) &&
+ !test_bit(CleanerBlocks, &fs->fsstate)) {
dprintk("CLEANER: enough cleaning with %d segments\n",
i);
break;
wake_up(&fs->async_complete);
}
}
+ if (test_bit(CleanerBlocks, &fs->fsstate)) {
+ int clean = lafs_clean_count(fs);
+ dprintk("clean=%d max_seg=%d need=%d\n", (int)clean,
+ (int)fs->max_segment, (int)fs->cleaner.need);
+ if (clean * fs->max_segment >= fs->cleaner.need)
+ lafs_checkpoint_start(fs);
+ }
return MAX_SCHEDULE_TIMEOUT;
}
void lafs_space_return(struct fs *fs, int credits);
int lafs_space_alloc(struct fs *fs, int credits, int why);
unsigned long lafs_scan_seg(struct fs *fs);
+int lafs_clean_count(struct fs *fs);
/* Cleaner */
int lafs_start_cleaner(struct fs *fs);
if (fs->rolled) {
/* We cannot account properly before roll-forward has
* completed. FIXME once it has completed we need to
- * check and invalidate the FS is there was a problem.
+ * check and invalidate the FS if there was a problem.
*/
- if (fs->free_blocks < fs->allocated_blocks
+ if (fs->free_segs < fs->allocated_blocks
+ credits + watermark)
credits = 0; /* Sorry, no room */
}
- if (credits == 0 && why == AccountSpace)
- /* FIXME I should switch to READ-ONLY here,
- * not BUG.
- */
- BUG();
+ if (credits == 0) {
+ if (why == AccountSpace)
+ /* FIXME I should switch to READ-ONLY here,
+ * not BUG.
+ */
+ BUG();
+
+ if (!test_bit(CleanerBlocks, &fs->fsstate) ||
+ fs->cleaner.need > watermark + fs->max_segment) {
+ fs->cleaner.need = watermark + fs->max_segment;
+ set_bit(CleanerBlocks, &fs->fsstate);
+ lafs_wake_cleaner(fs);
+ }
+ }
fs->allocated_blocks += credits;
- BUG_ON(fs->free_blocks < fs->allocated_blocks);
+// BUG_ON(fs->free_blocks < fs->allocated_blocks);
spin_unlock(&fs->alloc_lock);
return credits;
}
fs->segtrack->free.cnt--;
segdelete(fs->segtrack, ss);
+ fs->free_segs -= fs->devs[*dev].segment_size;
spin_unlock(&fs->lock);
/* now need to reserve/dirty/reference the youth and
return rv;
}
+/* Return a snapshot of the number of segments currently on the
+ * segtrack 'free' and 'clean' lists, taken under fs->lock.
+ * Callers use this (times max_segment) to judge whether enough
+ * clean space exists to satisfy cleaner.need.
+ */
+int lafs_clean_count(struct fs *fs)
+{
+	int rv;
+	spin_lock(&fs->lock);
+	rv = fs->segtrack->free.cnt + fs->segtrack->clean.cnt;
+	spin_unlock(&fs->lock);
+	return rv;
+}
+
+
static int add_free(struct fs *fs, unsigned int dev, u32 seg, u16 *youthp)
{
/* This dev/seg is known to be free. add it to the list */
int err;
ss = segfollow(fs->segtrack, ssn);
fs->free_blocks += fs->devs[ss->dev].segment_size; // FIXME locking??
+ fs->free_segs += fs->devs[ss->dev].segment_size; // FIXME locking??
db = lafs_get_block(fs->devs[ss->dev].segsum,
ss->segment >> (fs->prime_sb->s_blocksize_bits-1),
NULL, GFP_KERNEL | __GFP_NOFAIL,
yp = map_dblock(fs->scan.youth_db);
for (i = 0; i < segments ; i++)
if (yp[i] == cpu_to_le16(0)) {
- if (fs->scan.first_free_pass)
+ if (fs->scan.first_free_pass) {
fs->free_blocks +=
fs->devs[fs->scan.free_dev]
.segment_size;
+ fs->free_segs +=
+ fs->devs[fs->scan.free_dev]
+ .segment_size;
+ }
if (add_free(fs, fs->scan.free_dev, firstseg + i,
&yp[i])) {
/* Everything in the table owns a reference
#define CleanerDisabled 4
#define OrphansRunning 5
#define CheckpointFlushing 6 /* We are writing the segusage blocks */
+#define CleanerBlocks 7	/* One or more threads are blocked waiting for the
+ * cleaner to progress - cleaner.need blocks are
+ * needed.
+ */
struct work_struct done_work; /* used for handling
* refile after write completes */
u32 cleaning; /* amount of space that is being cleaned
* this checkpoint
*/
+	u32	need;	/* Amount of space that is needed by
+			 * some thread waiting on the CleanerBlocks
+			 * flag.
+			 */
struct mutex lock; /* protects list mani and refcnt of core
* cleaner.
*/
/* counters for (pre)allocating space. */
spinlock_t alloc_lock;
u64 free_blocks; /* initialised from free segment info */
+ u64 free_segs; /* counts blocks in completely free segments */
u64 allocated_blocks; /* Blocks that have been (pre)allocated */
u64 clean_reserved; /* Blocks reserved for cleaning */
u64 max_segment; /* largest segment size */