ext4: grow the s_flex_groups array as needed when resizing
authorTheodore Ts'o <tytso@mit.edu>
Wed, 5 Sep 2012 05:29:50 +0000 (01:29 -0400)
committerTheodore Ts'o <tytso@mit.edu>
Wed, 5 Sep 2012 05:29:50 +0000 (01:29 -0400)
Previously, we allocated the s_flex_groups array to the maximum size
that the file system could be resized.  There were two problems with
this approach.  First, it wasted memory in the common case where the
file system was not resized.  Second, once we start allowing online
resizing using the meta_bg scheme, there is no maximum size that the
file system can be resized.  So instead, we need to grow the
s_flex_groups array at online resize time.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
fs/ext4/ext4.h
fs/ext4/resize.c
fs/ext4/super.c

index 0df5ee1..464cff7 100644 (file)
@@ -1276,6 +1276,7 @@ struct ext4_sb_info {
 
        unsigned int s_log_groups_per_flex;
        struct flex_groups *s_flex_groups;
+       ext4_group_t s_flex_groups_allocated;
 
        /* workqueue for dio unwritten */
        struct workqueue_struct *dio_unwritten_wq;
@@ -2055,6 +2056,8 @@ extern void ext4_superblock_csum_set(struct super_block *sb,
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern void ext4_kvfree(void *ptr);
+extern int ext4_alloc_flex_bg_array(struct super_block *sb,
+                                   ext4_group_t ngroup);
 extern __printf(4, 5)
 void __ext4_error(struct super_block *, const char *, unsigned int,
                  const char *, ...);
index 365d800..3f5c67b 100644 (file)
@@ -1503,6 +1503,10 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
        if (err)
                goto out;
 
+       err = ext4_alloc_flex_bg_array(sb, input->group + 1);
+       if (err)
+               return err;
+
        flex_gd.count = 1;
        flex_gd.groups = input;
        flex_gd.bg_flags = &bg_flags;
@@ -1662,7 +1666,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
        unsigned long n_desc_blocks;
        unsigned long o_desc_blocks;
        unsigned long desc_blocks;
-       int err = 0, flexbg_size = 1;
+       int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
 
        o_blocks_count = ext4_blocks_count(es);
 
@@ -1721,13 +1725,13 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
                        goto out;
        }
 
-       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
-           es->s_log_groups_per_flex)
-               flexbg_size = 1 << es->s_log_groups_per_flex;
-
        if (ext4_blocks_count(es) == n_blocks_count)
                goto out;
 
+       err = ext4_alloc_flex_bg_array(sb, n_group + 1);
+       if (err)
+               return err;
+
        flex_gd = alloc_flex_gd(flexbg_size);
        if (flex_gd == NULL) {
                err = -ENOMEM;
index b875ff5..b8de488 100644 (file)
@@ -1925,15 +1925,45 @@ done:
        return res;
 }
 
+int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct flex_groups *new_groups;
+       int size;
+
+       if (!sbi->s_log_groups_per_flex)
+               return 0;
+
+       size = ext4_flex_group(sbi, ngroup - 1) + 1;
+       if (size <= sbi->s_flex_groups_allocated)
+               return 0;
+
+       size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+       new_groups = ext4_kvzalloc(size, GFP_KERNEL);
+       if (!new_groups) {
+               ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+                        size / (int) sizeof(struct flex_groups));
+               return -ENOMEM;
+       }
+
+       if (sbi->s_flex_groups) {
+               memcpy(new_groups, sbi->s_flex_groups,
+                      (sbi->s_flex_groups_allocated *
+                       sizeof(struct flex_groups)));
+               ext4_kvfree(sbi->s_flex_groups);
+       }
+       sbi->s_flex_groups = new_groups;
+       sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+       return 0;
+}
+
 static int ext4_fill_flex_info(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = NULL;
-       ext4_group_t flex_group_count;
        ext4_group_t flex_group;
        unsigned int groups_per_flex = 0;
-       size_t size;
-       int i;
+       int i, err;
 
        sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
        if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
@@ -1942,17 +1972,9 @@ static int ext4_fill_flex_info(struct super_block *sb)
        }
        groups_per_flex = 1 << sbi->s_log_groups_per_flex;
 
-       /* We allocate both existing and potentially added groups */
-       flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
-                       ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
-                             EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
-       size = flex_group_count * sizeof(struct flex_groups);
-       sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
-       if (sbi->s_flex_groups == NULL) {
-               ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
-                        flex_group_count);
+       err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
+       if (err)
                goto failed;
-       }
 
        for (i = 0; i < sbi->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);