diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 78259bd..20f67a2 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -745,6 +745,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
        MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
 
        grp = ext4_get_group_info(sb, e4b->bd_group);
+       if (!grp)
+               return 0;
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
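
The two lines added above set the pattern for the whole patch: ext4_get_group_info() can now return NULL (for example, when a corrupted superblock yields an out-of-range group number), so every caller must test the result before dereferencing it. A minimal userspace sketch of the same lookup-may-fail discipline; the names (group_table, lookup_group) are hypothetical, not the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct group_info {
            unsigned long free_clusters;
    };

    #define NGROUPS 16
    static struct group_info *group_table[NGROUPS];

    /* Analogue of ext4_get_group_info(): NULL for a bad index or hole. */
    static struct group_info *lookup_group(unsigned int group)
    {
            return group < NGROUPS ? group_table[group] : NULL;
    }

    int main(void)
    {
            struct group_info *grp = lookup_group(42);

            if (!grp) {                     /* check before any dereference */
                    fprintf(stderr, "group lookup failed\n");
                    return EXIT_FAILURE;
            }
            printf("%lu free clusters\n", grp->free_clusters);
            return EXIT_SUCCESS;
    }
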
@@ -1060,9 +1062,9 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
 
 static noinline_for_stack
 void ext4_mb_generate_buddy(struct super_block *sb,
-                               void *buddy, void *bitmap, ext4_group_t group)
+                           void *buddy, void *bitmap, ext4_group_t group,
+                           struct ext4_group_info *grp)
 {
-       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_grpblk_t i = 0;
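
Note the shape of this hunk: rather than adding yet another NULL check inside ext4_mb_generate_buddy(), the internal lookup is deleted and the pointer the caller has already validated is passed down as a parameter. A compilable sketch of that refactor, with hypothetical names:

    struct group_info;                                     /* opaque here */
    struct group_info *lookup_group(unsigned int group);   /* may return NULL */

    /* Before: the callee repeats the lookup and must handle failure. */
    static void generate_buddy_old(unsigned int group)
    {
            struct group_info *grp = lookup_group(group);

            if (!grp)
                    return;
            /* ... build the buddy bitmap from grp ... */
    }

    /* After: the caller validates once and hands the pointer down. */
    static void generate_buddy_new(struct group_info *grp)
    {
            (void)grp;      /* non-NULL by contract with the caller */
            /* ... build the buddy bitmap from grp ... */
    }
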
@@ -1181,6 +1183,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
                        break;
 
                grinfo = ext4_get_group_info(sb, group);
+               if (!grinfo)
+                       continue;
                /*
                 * If page is uptodate then we came here after online resize
                 * which added some new uninitialized group info structs, so
@@ -1246,6 +1250,10 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
                                group, page->index, i * blocksize);
                        trace_ext4_mb_buddy_bitmap_load(sb, group);
                        grinfo = ext4_get_group_info(sb, group);
+                       if (!grinfo) {
+                               err = -EFSCORRUPTED;
+                               goto out;
+                       }
                        grinfo->bb_fragments = 0;
                        memset(grinfo->bb_counters, 0,
                               sizeof(*grinfo->bb_counters) *
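
These hunks also show that a failed lookup is not handled uniformly: the loop above skips the group and moves on, while the buddy-loading path fails the whole operation with -EFSCORRUPTED because it cannot proceed without the group info. A sketch of the two recovery strategies (EFSCORRUPTED is the kernel's alias for EUCLEAN; the helper names are hypothetical):

    #include <errno.h>

    #ifndef EFSCORRUPTED
    #define EFSCORRUPTED EUCLEAN    /* as defined in fs/ext4/ext4.h */
    #endif

    struct group_info;
    struct group_info *lookup_group(unsigned int group);   /* may return NULL */

    /* Strategy 1: a walk over all groups tolerates a missing entry. */
    static void walk_all_groups(unsigned int ngroups)
    {
            for (unsigned int group = 0; group < ngroups; group++) {
                    struct group_info *grp = lookup_group(group);

                    if (!grp)
                            continue;       /* skip the hole, keep walking */
                    /* ... per-group work ... */
            }
    }

    /* Strategy 2: single-group work cannot continue without the info. */
    static int init_one_group(unsigned int group)
    {
            struct group_info *grp = lookup_group(group);

            if (!grp)
                    return -EFSCORRUPTED;
            /* ... initialize buddy data for grp ... */
            return 0;
    }
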
@@ -1256,7 +1264,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
                        ext4_lock_group(sb, group);
                        /* init the buddy */
                        memset(data, 0xff, blocksize);
-                       ext4_mb_generate_buddy(sb, data, incore, group);
+                       ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
                        ext4_unlock_group(sb, group);
                        incore = NULL;
                } else {
@@ -1370,6 +1378,9 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
        might_sleep();
        mb_debug(sb, "init group %u\n", group);
        this_grp = ext4_get_group_info(sb, group);
+       if (!this_grp)
+               return -EFSCORRUPTED;
+
        /*
         * This ensures that we don't reinit the buddy cache
         * page which map to the group from which we are already
@@ -1444,6 +1455,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 
        blocks_per_page = PAGE_SIZE / sb->s_blocksize;
        grp = ext4_get_group_info(sb, group);
+       if (!grp)
+               return -EFSCORRUPTED;
 
        e4b->bd_blkbits = sb->s_blocksize_bits;
        e4b->bd_info = grp;
@@ -2049,7 +2062,7 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
        if (bex->fe_len < gex->fe_len)
                return;
 
-       if (finish_group)
+       if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
                ext4_mb_use_best_found(ac, e4b);
 }
 
@@ -2061,6 +2074,19 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
  * in the context. Later, the best found extent will be used, if
  * mballoc can't find good enough extent.
  *
+ * The algorithm used is roughly as follows:
+ *
+ * * If free extent found is exactly as big as goal, then
+ *   stop the scan and use it immediately
+ *
+ * * If free extent found is smaller than goal, then keep retrying
+ *   up to a max of sbi->s_mb_max_to_scan times (default 200). After
+ *   that, stop scanning and use whatever we have.
+ *
+ * * If free extent found is bigger than goal, then keep retrying
+ *   up to a max of sbi->s_mb_min_to_scan times (default 10) before
+ *   stopping the scan and using the extent.
+ *
  * FIXME: real allocation policy is to be designed yet!
  */
 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
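
Together with the s_mb_min_to_scan test added to ext4_mb_check_limits() above, this comment pins down the scan-cutoff policy. Below is a simplified, compilable model of that policy using the documented defaults (10 and 200); the structure is illustrative rather than the kernel's exact control flow:

    #include <stdbool.h>

    #define MB_MIN_TO_SCAN  10      /* default sbi->s_mb_min_to_scan */
    #define MB_MAX_TO_SCAN  200     /* default sbi->s_mb_max_to_scan */

    struct scan_state {
            unsigned int goal_len;  /* requested extent length */
            unsigned int best_len;  /* best candidate seen so far */
            unsigned int found;     /* extents examined */
    };

    /* Feed one free extent to the scanner; returns true when scanning
     * should stop and the best candidate be used. */
    static bool measure_extent(struct scan_state *s, unsigned int ex_len)
    {
            s->found++;

            if (ex_len == s->goal_len)
                    return true;                    /* exact fit: use it now */

            if (ex_len > s->best_len)
                    s->best_len = ex_len;           /* remember the best */

            if (s->best_len >= s->goal_len)         /* already big enough */
                    return s->found > MB_MIN_TO_SCAN;

            return s->found >= MB_MAX_TO_SCAN;      /* give up eventually */
    }
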
@@ -2159,6 +2186,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
        struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
        struct ext4_free_extent ex;
 
+       if (!grp)
+               return -EFSCORRUPTED;
        if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
                return 0;
        if (grp->bb_free == 0)
@@ -2385,7 +2414,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
 
        BUG_ON(cr < 0 || cr >= 4);
 
-       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+       if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
                return false;
 
        free = grp->bb_free;
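
One detail worth flagging here: EXT4_MB_GRP_BBITMAP_CORRUPT() expands to a test_bit() on grp->bb_state, so the !grp test must be the left-hand operand; C's || evaluates left to right and short-circuits, which is the only thing keeping the macro from dereferencing a NULL pointer. In miniature:

    #include <stdbool.h>
    #include <stddef.h>

    struct group_info {
            unsigned long bb_state;
    };

    #define GRP_CORRUPT             0x1UL
    #define GRP_IS_CORRUPT(grp)     (((grp)->bb_state & GRP_CORRUPT) != 0)

    static bool group_unusable(const struct group_info *grp)
    {
            /* Safe: if grp is NULL, the right operand is never evaluated.
             * GRP_IS_CORRUPT(grp) || !grp would dereference NULL first. */
            return !grp || GRP_IS_CORRUPT(grp);
    }
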
@@ -2454,6 +2483,8 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
        ext4_grpblk_t free;
        int ret = 0;
 
+       if (!grp)
+               return -EFSCORRUPTED;
        if (sbi->s_mb_stats)
                atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
        if (should_lock) {
@@ -2534,7 +2565,7 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
                 * prefetch once, so we avoid getblk() call, which can
                 * be expensive.
                 */
-               if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+               if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
                    EXT4_MB_GRP_NEED_INIT(grp) &&
                    ext4_free_group_clusters(sb, gdp) > 0 &&
                    !(ext4_has_group_desc_csum(sb) &&
@@ -2578,7 +2609,7 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
                gdp = ext4_get_group_desc(sb, group, NULL);
                grp = ext4_get_group_info(sb, group);
 
-               if (EXT4_MB_GRP_NEED_INIT(grp) &&
+               if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
                    ext4_free_group_clusters(sb, gdp) > 0 &&
                    !(ext4_has_group_desc_csum(sb) &&
                      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
@@ -2837,6 +2868,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
                sizeof(struct ext4_group_info);
 
        grinfo = ext4_get_group_info(sb, group);
+       if (!grinfo)
+               return 0;
        /* Load the group info in memory only if not already loaded. */
        if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
                err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -2847,7 +2880,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
                buddy_loaded = 1;
        }
 
-       memcpy(&sg, ext4_get_group_info(sb, group), i);
+       memcpy(&sg, grinfo, i);
 
        if (buddy_loaded)
                ext4_mb_unload_buddy(&e4b);
@@ -3208,8 +3241,12 @@ static int ext4_mb_init_backend(struct super_block *sb)
 
 err_freebuddy:
        cachep = get_groupinfo_cache(sb->s_blocksize_bits);
-       while (i-- > 0)
-               kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+       while (i-- > 0) {
+               struct ext4_group_info *grp = ext4_get_group_info(sb, i);
+
+               if (grp)
+                       kmem_cache_free(cachep, grp);
+       }
        i = sbi->s_group_info_size;
        rcu_read_lock();
        group_info = rcu_dereference(sbi->s_group_info);
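
The error-unwind path gains the same tolerance: once lookups can fail, the slots walked by err_freebuddy may legitimately be NULL and are skipped rather than freed. A userspace sketch of unwinding a partially populated pointer array:

    #include <stdlib.h>

    struct group_info;

    /* Unwind a partially initialized table: i is the number of slots
     * set up before the failure; holes (NULL) are simply skipped. */
    static void free_group_table(struct group_info **table, size_t i)
    {
            while (i-- > 0) {
                    if (table[i])
                            free(table[i]);
            }
    }

free(NULL) would be a harmless no-op here; the explicit test mirrors the kernel loop, where the object is returned to a slab cache instead.
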
@@ -3522,6 +3559,8 @@ int ext4_mb_release(struct super_block *sb)
                for (i = 0; i < ngroups; i++) {
                        cond_resched();
                        grinfo = ext4_get_group_info(sb, i);
+                       if (!grinfo)
+                               continue;
                        mb_group_bb_bitmap_free(grinfo);
                        ext4_lock_group(sb, i);
                        count = ext4_mb_cleanup_pa(grinfo);
@@ -4606,6 +4645,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
        struct ext4_free_data *entry;
 
        grp = ext4_get_group_info(sb, group);
+       if (!grp)
+               return;
        n = rb_first(&(grp->bb_free_root));
 
        while (n) {
@@ -4633,6 +4674,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
        int preallocated = 0;
        int len;
 
+       if (!grp)
+               return;
+
        /* all form of preallocation discards first load group,
         * so the only competing code is preallocation use.
         * we don't need any locking here
@@ -4869,6 +4913,8 @@ adjust_bex:
 
        ei = EXT4_I(ac->ac_inode);
        grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+       if (!grp)
+               return;
 
        pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
        pa->pa_inode = ac->ac_inode;
@@ -4918,6 +4964,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
        atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
 
        grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+       if (!grp)
+               return;
        lg = ac->ac_lg;
        BUG_ON(lg == NULL);
 
@@ -5013,7 +5061,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
        trace_ext4_mb_release_group_pa(sb, pa);
        BUG_ON(pa->pa_deleted == 0);
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
-       BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+       if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
+               ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
+                            e4b->bd_group, group, pa->pa_pstart);
+               return 0;
+       }
        mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
        atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
        trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
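
This is the one hunk that relaxes a check instead of adding one: an inconsistency between the buddy's group and the preallocation's start block used to take the whole machine down via BUG_ON(), and now produces a warning and an early return, in line with the patch's theme of surviving corrupted metadata. A userspace rendering of the same downgrade (a hypothetical stand-in for ext4_mb_release_group_pa()):

    #include <stdio.h>

    static int release_group_pa(unsigned int bd_group, unsigned int group,
                                unsigned long long pa_pstart, int pa_len)
    {
            if (group != bd_group && pa_len != 0) {
                    /* was a hard assertion; now warn and back out */
                    fprintf(stderr,
                            "bad group: expected %u, group %u, pa_start %llu\n",
                            bd_group, group, pa_pstart);
                    return 0;
            }
            /* ... release pa_len clusters starting at pa_pstart ... */
            return 0;
    }
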
@@ -5043,6 +5095,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
        int err;
        int free = 0;
 
+       if (!grp)
+               return 0;
        mb_debug(sb, "discard preallocation for group %u\n", group);
        if (list_empty(&grp->bb_prealloc_list))
                goto out_dbg;
@@ -5297,6 +5351,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
                struct ext4_prealloc_space *pa;
                ext4_grpblk_t start;
                struct list_head *cur;
+
+               if (!grp)
+                       continue;
                ext4_lock_group(sb, i);
                list_for_each(cur, &grp->bb_prealloc_list) {
                        pa = list_entry(cur, struct ext4_prealloc_space,
@@ -6064,6 +6121,7 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
        struct buffer_head *bitmap_bh = NULL;
        struct super_block *sb = inode->i_sb;
        struct ext4_group_desc *gdp;
+       struct ext4_group_info *grp;
        unsigned int overflow;
        ext4_grpblk_t bit;
        struct buffer_head *gd_bh;
@@ -6089,8 +6147,8 @@ do_more:
        overflow = 0;
        ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
 
-       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
-                       ext4_get_group_info(sb, block_group))))
+       grp = ext4_get_group_info(sb, block_group);
+       if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
                return;
 
        /*
@@ -6692,6 +6750,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 
        for (group = first_group; group <= last_group; group++) {
                grp = ext4_get_group_info(sb, group);
+               if (!grp)
+                       continue;
                /* We only do this if the grp has never been initialized */
                if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
                        ret = ext4_mb_init_group(sb, group, GFP_NOFS);