ext4: Refactor code in ext4_mb_normalize_request() and ext4_mb_use_preallocated()
author    Ojaswin Mujoo <ojaswin@linux.ibm.com>
          Sat, 25 Mar 2023 08:13:36 +0000 (13:43 +0530)
committer Theodore Ts'o <tytso@mit.edu>
          Thu, 6 Apr 2023 05:13:12 +0000 (01:13 -0400)

Rename the loop variable "pa" to "tmp_pa" in
ext4_mb_normalize_request() and ext4_mb_use_preallocated() for
consistency, and cache each PA's logical start and end in local
variables (tmp_pa_start, tmp_pa_end) so the overlap checks are
easier to read.

There are no functional changes in this patch.
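
For reference, a minimal sketch of the pattern applied in both
functions. The stub type and helper below are illustrative only,
not the real struct ext4_prealloc_space:

    /* Simplified stand-in for struct ext4_prealloc_space; pa_len is
     * kept in block units here, while the kernel stores clusters and
     * converts with EXT4_C2B(). */
    struct pa_stub {
            unsigned int pa_lstart;     /* logical start of the PA */
            unsigned int pa_len;        /* length of the PA */
    };

    /* Each loop iteration now caches the PA's logical range once, so
     * every later check reads the named locals instead of recomputing
     * pa_lstart + pa_len at each use site. */
    int pa_overlaps(const struct pa_stub *tmp_pa,
                    unsigned int start, unsigned int end)
    {
            unsigned int tmp_pa_start = tmp_pa->pa_lstart;
            unsigned int tmp_pa_end = tmp_pa->pa_lstart + tmp_pa->pa_len;

            /* skip PAs that the range [start, end) does not overlap */
            return !(tmp_pa_start >= end || tmp_pa_end <= start);
    }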

Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/8edcab489c06cf861b19d87207d9b0ff7ac7f3c1.1679731817.git.ojaswin@linux.ibm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 58f16002cb2cacb9520c6c8eed478e728cc47b93..441b3b6d9387838b94e2252ea3ab0b03f080e743 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3994,7 +3994,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
        loff_t orig_size __maybe_unused;
        ext4_lblk_t start;
        struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
-       struct ext4_prealloc_space *pa;
+       struct ext4_prealloc_space *tmp_pa;
+       ext4_lblk_t tmp_pa_start, tmp_pa_end;
 
        /* do normalize only data requests, metadata requests
           do not need preallocation */
@@ -4097,54 +4098,52 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 
        /* check we don't cross already preallocated blocks */
        rcu_read_lock();
-       list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
-               ext4_lblk_t pa_end;
-
-               if (pa->pa_deleted)
+       list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+               if (tmp_pa->pa_deleted)
                        continue;
-               spin_lock(&pa->pa_lock);
-               if (pa->pa_deleted) {
-                       spin_unlock(&pa->pa_lock);
+               spin_lock(&tmp_pa->pa_lock);
+               if (tmp_pa->pa_deleted) {
+                       spin_unlock(&tmp_pa->pa_lock);
                        continue;
                }
 
-               pa_end = pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
+               tmp_pa_start = tmp_pa->pa_lstart;
+               tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
 
                /* PA must not overlap original request */
-               BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
-                       ac->ac_o_ex.fe_logical < pa->pa_lstart));
+               BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
+                       ac->ac_o_ex.fe_logical < tmp_pa_start));
 
                /* skip PAs this normalized request doesn't overlap with */
-               if (pa->pa_lstart >= end || pa_end <= start) {
-                       spin_unlock(&pa->pa_lock);
+               if (tmp_pa_start >= end || tmp_pa_end <= start) {
+                       spin_unlock(&tmp_pa->pa_lock);
                        continue;
                }
-               BUG_ON(pa->pa_lstart <= start && pa_end >= end);
+               BUG_ON(tmp_pa_start <= start && tmp_pa_end >= end);
 
                /* adjust start or end to be adjacent to this pa */
-               if (pa_end <= ac->ac_o_ex.fe_logical) {
-                       BUG_ON(pa_end < start);
-                       start = pa_end;
-               } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
-                       BUG_ON(pa->pa_lstart > end);
-                       end = pa->pa_lstart;
+               if (tmp_pa_end <= ac->ac_o_ex.fe_logical) {
+                       BUG_ON(tmp_pa_end < start);
+                       start = tmp_pa_end;
+               } else if (tmp_pa_start > ac->ac_o_ex.fe_logical) {
+                       BUG_ON(tmp_pa_start > end);
+                       end = tmp_pa_start;
                }
-               spin_unlock(&pa->pa_lock);
+               spin_unlock(&tmp_pa->pa_lock);
        }
        rcu_read_unlock();
        size = end - start;
 
        /* XXX: extra loop to check we really don't overlap preallocations */
        rcu_read_lock();
-       list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
-               ext4_lblk_t pa_end;
-
-               spin_lock(&pa->pa_lock);
-               if (pa->pa_deleted == 0) {
-                       pa_end = pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
-                       BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
+       list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+               spin_lock(&tmp_pa->pa_lock);
+               if (tmp_pa->pa_deleted == 0) {
+                       tmp_pa_start = tmp_pa->pa_lstart;
+                       tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+                       BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
                }
-               spin_unlock(&pa->pa_lock);
+               spin_unlock(&tmp_pa->pa_lock);
        }
        rcu_read_unlock();
 
@@ -4359,7 +4358,8 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
        int order, i;
        struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
        struct ext4_locality_group *lg;
-       struct ext4_prealloc_space *pa, *cpa = NULL;
+       struct ext4_prealloc_space *tmp_pa, *cpa = NULL;
+       ext4_lblk_t tmp_pa_start, tmp_pa_end;
        ext4_fsblk_t goal_block;
 
        /* only data can be preallocated */
@@ -4368,18 +4368,20 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 
        /* first, try per-file preallocation */
        rcu_read_lock();
-       list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
+       list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
 
                /* all fields in this condition don't change,
                 * so we can skip locking for them */
-               if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
-                   ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
-                                              EXT4_C2B(sbi, pa->pa_len)))
+               tmp_pa_start = tmp_pa->pa_lstart;
+               tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+
+               if (ac->ac_o_ex.fe_logical < tmp_pa_start ||
+                   ac->ac_o_ex.fe_logical >= tmp_pa_end)
                        continue;
 
                /* non-extent files can't have physical blocks past 2^32 */
                if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
-                   (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
+                   (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
                     EXT4_MAX_BLOCK_FILE_PHYS)) {
                        /*
                         * Since PAs don't overlap, we won't find any
@@ -4389,16 +4391,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
                }
 
                /* found preallocated blocks, use them */
-               spin_lock(&pa->pa_lock);
-               if (pa->pa_deleted == 0 && pa->pa_free) {
-                       atomic_inc(&pa->pa_count);
-                       ext4_mb_use_inode_pa(ac, pa);
-                       spin_unlock(&pa->pa_lock);
+               spin_lock(&tmp_pa->pa_lock);
+               if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free) {
+                       atomic_inc(&tmp_pa->pa_count);
+                       ext4_mb_use_inode_pa(ac, tmp_pa);
+                       spin_unlock(&tmp_pa->pa_lock);
                        ac->ac_criteria = 10;
                        rcu_read_unlock();
                        return true;
                }
-               spin_unlock(&pa->pa_lock);
+               spin_unlock(&tmp_pa->pa_lock);
        }
        rcu_read_unlock();
 
@@ -4422,16 +4424,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
         */
        for (i = order; i < PREALLOC_TB_SIZE; i++) {
                rcu_read_lock();
-               list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
+               list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
                                        pa_inode_list) {
-                       spin_lock(&pa->pa_lock);
-                       if (pa->pa_deleted == 0 &&
-                                       pa->pa_free >= ac->ac_o_ex.fe_len) {
+                       spin_lock(&tmp_pa->pa_lock);
+                       if (tmp_pa->pa_deleted == 0 &&
+                                       tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
 
                                cpa = ext4_mb_check_group_pa(goal_block,
-                                                               pa, cpa);
+                                                               tmp_pa, cpa);
                        }
-                       spin_unlock(&pa->pa_lock);
+                       spin_unlock(&tmp_pa->pa_lock);
                }
                rcu_read_unlock();
        }