btrfs: fix upper limit for max_inline for page size 64K
author    Anand Jain <anand.jain@oracle.com>
          Tue, 10 Aug 2021 15:23:44 +0000 (23:23 +0800)
committer Dom Cobley <popcornmix@gmail.com>
          Thu, 14 Oct 2021 11:32:53 +0000 (12:32 +0100)
commit 6f93e834fa7c5faa0372e46828b4b2a966ac61d7 upstream.

The mount option max_inline ranges from 0 to the sectorsize (which is
equal to the page size). But we parse the mount options too early,
before the actual sectorsize is read from the superblock, so the upper
limit of max_inline is unaware of the actual sectorsize and is limited
by the temporary sectorsize 4096, even on a system where the default
sectorsize is 64K.

Fix this by reading the sectorsize from the superblock before parsing
the mount options.
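
The ordering matters because the mount option parser clamps max_inline
against the sectorsize cached in fs_info, and at parse time that field
still held the temporary 4096 value. Below is a minimal userspace
sketch of the effect (the helper names and constants are illustrative
only, not the kernel code):

  #include <stdio.h>
  #include <stdint.h>

  #define TEMP_SECTORSIZE  4096ULL   /* placeholder used before the superblock is read */
  #define SUPER_SECTORSIZE 65536ULL  /* actual sectorsize stored in the superblock (64K) */

  static uint64_t min_u64(uint64_t a, uint64_t b)
  {
          return a < b ? a : b;
  }

  /* Mimics the clamp applied while parsing the max_inline mount option. */
  static uint64_t parse_max_inline(uint64_t requested, uint64_t sectorsize)
  {
          return min_u64(requested, sectorsize);
  }

  int main(void)
  {
          uint64_t requested = 65536;  /* user asks for max_inline=64K */

          /* Buggy order: options parsed before the superblock sectorsize is known. */
          uint64_t buggy = parse_max_inline(requested, TEMP_SECTORSIZE);

          /* Fixed order: sectorsize read from the superblock first, then the parse. */
          uint64_t fixed = parse_max_inline(requested, SUPER_SECTORSIZE);

          printf("buggy order: max_inline capped at %llu\n",
                 (unsigned long long)buggy);
          printf("fixed order: max_inline allowed up to %llu\n",
                 (unsigned long long)fixed);
          return 0;
  }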

Reported-by: Alexander Tsvetkov <alexander.tsvetkov@oracle.com>
CC: stable@vger.kernel.org # 5.4+
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e025cd8f3f0713a0f882073bc8c23c09b7045132..ef7df2141f34f3e60b8865a67334686d8714ad4f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3019,6 +3019,29 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
         */
        fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
 
+       /*
+        * Flag our filesystem as having big metadata blocks if they are bigger
+        * than the page size
+        */
+       if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
+               if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+                       btrfs_info(fs_info,
+                               "flagging fs with big metadata feature");
+               features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+       }
+
+       /* Set up fs_info before parsing mount options */
+       nodesize = btrfs_super_nodesize(disk_super);
+       sectorsize = btrfs_super_sectorsize(disk_super);
+       stripesize = sectorsize;
+       fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
+       fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
+
+       /* Cache block sizes */
+       fs_info->nodesize = nodesize;
+       fs_info->sectorsize = sectorsize;
+       fs_info->stripesize = stripesize;
+
        ret = btrfs_parse_options(fs_info, options, sb->s_flags);
        if (ret) {
                err = ret;
@@ -3045,28 +3068,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
        if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
                btrfs_info(fs_info, "has skinny extents");
 
-       /*
-        * flag our filesystem as having big metadata blocks if
-        * they are bigger than the page size
-        */
-       if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
-               if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
-                       btrfs_info(fs_info,
-                               "flagging fs with big metadata feature");
-               features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
-       }
-
-       nodesize = btrfs_super_nodesize(disk_super);
-       sectorsize = btrfs_super_sectorsize(disk_super);
-       stripesize = sectorsize;
-       fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
-       fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
-
-       /* Cache block sizes */
-       fs_info->nodesize = nodesize;
-       fs_info->sectorsize = sectorsize;
-       fs_info->stripesize = stripesize;
-
        /*
         * mixed block groups end up with duplicate but slightly offset
         * extent buffers for the same range.  It leads to corruptions