wake_up(&resync_wait);
}
- if (sb->active_disks < sb->raid_disks) {
+ if (mddev->degraded) {
mddev->spare = get_spare(mddev);
if (!mddev->spare)
printk(KERN_ERR "md%d: no spare disk to reconstruct array! "
mark_disk_faulty(sb->disks+mirror->number);
mark_disk_nonsync(sb->disks+mirror->number);
mark_disk_inactive(sb->disks+mirror->number);
- if (!mirror->write_only)
+ if (!mirror->write_only) {
sb->active_disks--;
+ mddev->degraded++;
+ }
sb->working_disks--;
sb->failed_disks++;
mddev->sb_dirty = 1;
*/
conf->working_disks++;
+ mddev->degraded--;
abort:
spin_unlock_irq(&conf->device_lock);
goto out_free_conf;
}
+ mddev->degraded = 0;
for (i = 0; i < MD_SB_DISKS; i++) {
descriptor = sb->disks+i;
disk->used_slot = 1;
disk->head_position = 0;
}
+ if (!disk->used_slot && disk_idx < conf->raid_disks)
+ mddev->degraded++;
}
/*
sb->working_disks--;
sb->failed_disks++;
mddev->sb_dirty = 1;
+ mddev->degraded++;
conf->working_disks--;
conf->failed_disks++;
printk (KERN_ALERT
/*
* 0 for a fully functional array, 1 for a degraded array.
*/
- conf->failed_disks = conf->raid_disks - conf->working_disks;
+ mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
conf->mddev = mddev;
conf->chunk_size = sb->chunk_size;
conf->level = sb->level;
printk(KERN_ERR "raid5: unsupported parity algorithm %d for md%d\n", conf->algorithm, mdidx(mddev));
goto abort;
}
- if (conf->failed_disks > 1) {
+ if (mddev->degraded > 1) {
printk(KERN_ERR "raid5: not enough operational devices for md%d (%d/%d failed)\n", mdidx(mddev), conf->failed_disks, conf->raid_disks);
goto abort;
}
- if (conf->failed_disks == 1 &&
+ if (mddev->degraded == 1 &&
!(sb->state & (1<<MD_SB_CLEAN))) {
printk(KERN_ERR "raid5: cannot start dirty degraded array for md%d\n", mdidx(mddev));
goto abort;
* non-operational disk slot in the 'low' area of
* the disk array.
*/
+ mddev->degraded--;
conf->failed_disks--;
conf->working_disks++;
conf->spare = NULL;
atomic_t active;
mdp_disk_t *spare;
+ int degraded; /* whether md should consider
+ * adding a spare
+ */
+
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
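
Note (not part of the patch): the new counter lets common md code test a single field instead of recomputing the shortfall from the superblock in every personality. A minimal sketch of how a recovery path could consult it, assuming the `get_spare()`, `mddev->spare`, and `mdidx()` helpers shown in the hunks above; `start_reconstruction()` is a hypothetical stand-in for the real resync kick-off, and the message text is illustrative only:

	/* Illustrative sketch, not from the patch: act only when the array
	 * is missing members, and fall back to degraded mode if no spare
	 * can be found. */
	static void check_degraded(mddev_t *mddev)
	{
		if (!mddev->degraded)
			return;		/* fully functional array, nothing to do */

		mddev->spare = get_spare(mddev);
		if (!mddev->spare) {
			printk(KERN_ERR "md%d: degraded with no spare disk\n",
			       mdidx(mddev));
			return;
		}

		start_reconstruction(mddev);	/* hypothetical kick-off */
	}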