return 0;
}
+static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+	WARN_ONCE(raid_disks,
+		  "%s does not support generic reshape\n", __func__);
+
+	if (sectors == 0)
+		return mddev->dev_sectors;
+
+	return sectors;
+}
+
static int run(mddev_t *mddev)
{
mdk_rdev_t *rdev;
list_for_each_entry(rdev, &mddev->disks, same_set)
conf->rdev = rdev;
- mddev->array_sectors = mddev->dev_sectors;
+ mddev->array_sectors = faulty_size(mddev, 0, 0);
mddev->private = conf;
reconfig(mddev, mddev->layout, -1);
.stop = stop,
.status = status,
.reconfig = reconfig,
+ .size = faulty_size,
};
static int __init raid_init(void)
return ret;
}
+static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+	linear_conf_t *conf = mddev_to_conf(mddev);
+
+	WARN_ONCE(sectors || raid_disks,
+		  "%s does not support generic reshape\n", __func__);
+
+	return conf->array_sectors;
+}
+
static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{
linear_conf_t *conf;
if (!conf)
return 1;
mddev->private = conf;
- mddev->array_sectors = conf->array_sectors;
+ mddev->array_sectors = linear_size(mddev, 0, 0);
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->unplug_fn = linear_unplug;
newconf->prev = mddev_to_conf(mddev);
mddev->private = newconf;
mddev->raid_disks++;
- mddev->array_sectors = newconf->array_sectors;
+ mddev->array_sectors = linear_size(mddev, 0, 0);
set_capacity(mddev->gendisk, mddev->array_sectors);
return 0;
}
.stop = linear_stop,
.status = linear_status,
.hot_add_disk = linear_add,
+ .size = linear_size,
};
static int __init linear_init (void)
int (*spare_active) (mddev_t *mddev);
sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
int (*resize) (mddev_t *mddev, sector_t sectors);
+ sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
int (*check_reshape) (mddev_t *mddev);
int (*start_reshape) (mddev_t *mddev);
int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
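The new ->size method lets core md code ask a personality how many sectors the array occupies for a given per-device size and disk count, with 0 for either argument meaning "use the current value". As a rough illustration only (not part of this patch; example_update_capacity is an invented name), a generic caller could use the hook like this:

/*
 * Sketch: query the personality for the array size under the current
 * geometry (both arguments 0) and publish the result to the block layer.
 */
static void example_update_capacity(mddev_t *mddev)
{
	mddev->array_sectors = mddev->pers->size(mddev, 0, 0);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	mddev->changed = 1;
}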
spin_unlock_irqrestore(&conf->device_lock, flags);
}
+static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+	WARN_ONCE(sectors || raid_disks,
+		  "%s does not support generic reshape\n", __func__);
+
+	return mddev->dev_sectors;
+}
+
static int multipath_run (mddev_t *mddev)
{
multipath_conf_t *conf;
/*
* Ok, everything is just fine now
*/
- mddev->array_sectors = mddev->dev_sectors;
+ mddev->array_sectors = multipath_size(mddev, 0, 0);
mddev->queue->unplug_fn = multipath_unplug;
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
.error_handler = multipath_error,
.hot_add_disk = multipath_add_disk,
.hot_remove_disk= multipath_remove_disk,
+ .size = multipath_size,
};
static int __init multipath_init (void)
return max;
}
+static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+	sector_t array_sectors = 0;
+	mdk_rdev_t *rdev;
+
+	WARN_ONCE(sectors || raid_disks,
+		  "%s does not support generic reshape\n", __func__);
+
+	list_for_each_entry(rdev, &mddev->disks, same_set)
+		array_sectors += rdev->sectors;
+
+	return array_sectors;
+}
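For instance (hypothetical sizes), members contributing 500, 500 and 1000 GiB worth of sectors yield a 2000 GiB array: raid0_size simply sums rdev->sectors over every device in the set, exactly as the open-coded loop in raid0_run did before.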
+
static int raid0_run (mddev_t *mddev)
{
unsigned cur=0, i=0, nb_zone;
s64 sectors;
raid0_conf_t *conf;
- mdk_rdev_t *rdev;
if (mddev->chunk_size == 0) {
printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
goto out_free_conf;
/* calculate array device size */
- mddev->array_sectors = 0;
- list_for_each_entry(rdev, &mddev->disks, same_set)
- mddev->array_sectors += rdev->sectors;
+ mddev->array_sectors = raid0_size(mddev, 0, 0);
printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
(unsigned long long)mddev->array_sectors);
.run = raid0_run,
.stop = raid0_stop,
.status = raid0_status,
+ .size = raid0_size,
};
static int __init raid0_init (void)
return nr_sectors;
}
+static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+	/* a mirror is only as large as a single component device */
+	if (sectors)
+		return sectors;
+
+	return mddev->dev_sectors;
+}
+
static int run(mddev_t *mddev)
{
conf_t *conf;
/*
* Ok, everything is just fine now
*/
- mddev->array_sectors = mddev->dev_sectors;
+ mddev->array_sectors = raid1_size(mddev, 0, 0);
mddev->queue->unplug_fn = raid1_unplug;
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
* any io in the removed space completes, but it hardly seems
* worth it.
*/
- mddev->array_sectors = sectors;
+ mddev->array_sectors = raid1_size(mddev, sectors, 0);
set_capacity(mddev->gendisk, mddev->array_sectors);
mddev->changed = 1;
if (mddev->array_sectors > mddev->dev_sectors &&
.spare_active = raid1_spare_active,
.sync_request = sync_request,
.resize = raid1_resize,
+ .size = raid1_size,
.check_reshape = raid1_reshape,
.quiesce = raid1_quiesce,
};
goto skipped;
}
+static sector_t
+raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+	sector_t size;
+	conf_t *conf = mddev_to_conf(mddev);
+
+	if (!raid_disks)
+		raid_disks = mddev->raid_disks;
+	if (!sectors)
+		sectors = mddev->dev_sectors;
+
+	size = sectors >> conf->chunk_shift;
+	sector_div(size, conf->far_copies);
+	size = size * raid_disks;
+	sector_div(size, conf->near_copies);
+
+	return size << conf->chunk_shift;
+}
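Worked example (hypothetical geometry): with near_copies = 2, far_copies = 1 and 4 devices of S chunk-aligned sectors each, the function computes ((S >> chunk_shift) / 1 * 4 / 2) << chunk_shift = 2 * S, i.e. half of the 4 * S raw sectors, as expected when every block is stored twice.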
+
static int run(mddev_t *mddev)
{
conf_t *conf;
/*
* Ok, everything is just fine now
*/
- mddev->array_sectors = size << conf->chunk_shift;
- mddev->resync_max_sectors = size << conf->chunk_shift;
+ mddev->array_sectors = raid10_size(mddev, 0, 0);
+ mddev->resync_max_sectors = mddev->array_sectors;
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
.spare_active = raid10_spare_active,
.sync_request = sync_request,
.quiesce = raid10_quiesce,
+ .size = raid10_size,
};
static int __init raid_init(void)
.attrs = raid5_attrs,
};
+static sector_t
+raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+	raid5_conf_t *conf = mddev_to_conf(mddev);
+
+	if (!sectors)
+		sectors = mddev->dev_sectors;
+	if (!raid_disks)
+		raid_disks = conf->previous_raid_disks;
+
+	/* round the per-device size down to a whole number of chunks */
+	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
+	return sectors * (raid_disks - conf->max_degraded);
+}
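Worked example (hypothetical geometry): for a 4-device raid5 (max_degraded = 1) whose dev_sectors is already a multiple of the chunk size, this returns 3 * dev_sectors; a raid6 set of the same size (max_degraded = 2) would export 2 * dev_sectors.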
+
static raid5_conf_t *setup_conf(mddev_t *mddev)
{
raid5_conf_t *conf;
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->array_sectors = mddev->dev_sectors *
- (conf->previous_raid_disks - conf->max_degraded);
+ mddev->array_sectors = raid5_size(mddev, 0, 0);
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
* any io in the removed space completes, but it hardly seems
* worth it.
*/
- raid5_conf_t *conf = mddev_to_conf(mddev);
-
sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
- mddev->array_sectors = sectors * (mddev->raid_disks
- - conf->max_degraded);
+ mddev->array_sectors = raid5_size(mddev, sectors, mddev->raid_disks);
set_capacity(mddev->gendisk, mddev->array_sectors);
mddev->changed = 1;
if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
struct block_device *bdev;
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- conf->mddev->array_sectors = conf->mddev->dev_sectors *
- (conf->raid_disks - conf->max_degraded);
- set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
- conf->mddev->changed = 1;
+ mddev_t *mddev = conf->mddev;
+
+ mddev->array_sectors = raid5_size(mddev, 0, conf->raid_disks);
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ mddev->changed = 1;
+ conf->previous_raid_disks = conf->raid_disks;
bdev = bdget_disk(conf->mddev->gendisk, 0);
if (bdev) {
.spare_active = raid5_spare_active,
.sync_request = sync_request,
.resize = raid5_resize,
+ .size = raid5_size,
#ifdef CONFIG_MD_RAID5_RESHAPE
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,
.spare_active = raid5_spare_active,
.sync_request = sync_request,
.resize = raid5_resize,
+ .size = raid5_size,
#ifdef CONFIG_MD_RAID5_RESHAPE
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,
.spare_active = raid5_spare_active,
.sync_request = sync_request,
.resize = raid5_resize,
+ .size = raid5_size,
#ifdef CONFIG_MD_RAID5_RESHAPE
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,