btrfs: separate out the eb and extent state leak helpers
author    Josef Bacik <josef@toxicpanda.com>
          Fri, 9 Sep 2022 21:53:19 +0000 (17:53 -0400)
committer David Sterba <dsterba@suse.com>
          Mon, 26 Sep 2022 10:28:02 +0000 (12:28 +0200)
Currently the leak debug add/del helpers are generic so that they can be
used for both extent buffers and extent states.  However, we want to
separate this code, so split these helpers into per-object variants in
anticipation of the split.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
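
For context, below is a minimal userspace C sketch (not kernel code) of the
pattern this patch applies: a generic "add to leak list under a lock" helper,
which forced every caller to pass the right lock and list head, is replaced by
typed per-object helpers that pick the lock and list themselves.  The names
demo_eb, demo_state and leak_debug_add_eb/_state, as well as the pthread-based
locking and hand-rolled list, are illustrative stand-ins for the btrfs
structures, spinlocks and list_head API, not real btrfs interfaces.

/*
 * Illustrative sketch only: per-type leak tracking lists, each with
 * its own lock, mirroring fs_info->allocated_ebs and the static
 * `states` list in the patch.
 */
#include <pthread.h>
#include <stdio.h>

struct list_node {
	struct list_node *next;
};

static struct list_node *eb_leak_list;
static pthread_mutex_t eb_leak_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_node *state_leak_list;
static pthread_mutex_t state_leak_lock = PTHREAD_MUTEX_INITIALIZER;

struct demo_eb {            /* stand-in for struct extent_buffer */
	struct list_node leak;
	int id;
};

struct demo_state {         /* stand-in for struct extent_state */
	struct list_node leak;
	int id;
};

/* Typed helper: the caller no longer passes a lock and list head. */
static void leak_debug_add_eb(struct demo_eb *eb)
{
	pthread_mutex_lock(&eb_leak_lock);
	eb->leak.next = eb_leak_list;
	eb_leak_list = &eb->leak;
	pthread_mutex_unlock(&eb_leak_lock);
}

static void leak_debug_add_state(struct demo_state *state)
{
	pthread_mutex_lock(&state_leak_lock);
	state->leak.next = state_leak_list;
	state_leak_list = &state->leak;
	pthread_mutex_unlock(&state_leak_lock);
}

int main(void)
{
	struct demo_eb eb = { .id = 1 };
	struct demo_state st = { .id = 2 };

	leak_debug_add_eb(&eb);
	leak_debug_add_state(&st);
	printf("tracked eb %d and state %d on separate lists\n",
	       eb.id, st.id);
	return 0;
}

The point of the typed helpers is that the lock and list choice is hidden from
callers, which is what later allows the extent state tracking to move into its
own file without touching every call site.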
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8b2c6f3..cf57696 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -43,25 +43,42 @@ static inline bool extent_state_in_tree(const struct extent_state *state)
 static LIST_HEAD(states);
 static DEFINE_SPINLOCK(leak_lock);
 
-static inline void btrfs_leak_debug_add(spinlock_t *lock,
-                                       struct list_head *new,
-                                       struct list_head *head)
+static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
+{
+       struct btrfs_fs_info *fs_info = eb->fs_info;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
+       list_add(&eb->leak_list, &fs_info->allocated_ebs);
+       spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
+}
+
+static inline void btrfs_leak_debug_add_state(struct extent_state *state)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(lock, flags);
-       list_add(new, head);
-       spin_unlock_irqrestore(lock, flags);
+       spin_lock_irqsave(&leak_lock, flags);
+       list_add(&state->leak_list, &states);
+       spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
+{
+       struct btrfs_fs_info *fs_info = eb->fs_info;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
+       list_del(&eb->leak_list);
+       spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
 }
 
-static inline void btrfs_leak_debug_del(spinlock_t *lock,
-                                       struct list_head *entry)
+static inline void btrfs_leak_debug_del_state(struct extent_state *state)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(lock, flags);
-       list_del(entry);
-       spin_unlock_irqrestore(lock, flags);
+       spin_lock_irqsave(&leak_lock, flags);
+       list_del(&state->leak_list);
+       spin_unlock_irqrestore(&leak_lock, flags);
 }
 
 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
@@ -125,9 +142,11 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
        }
 }
 #else
-#define btrfs_leak_debug_add(lock, new, head)  do {} while (0)
-#define btrfs_leak_debug_del(lock, entry)      do {} while (0)
-#define btrfs_extent_state_leak_debug_check()  do {} while (0)
+#define btrfs_leak_debug_add_eb(eb)                    do {} while (0)
+#define btrfs_leak_debug_add_state(state)              do {} while (0)
+#define btrfs_leak_debug_del_eb(eb)                    do {} while (0)
+#define btrfs_leak_debug_del_state(state)              do {} while (0)
+#define btrfs_extent_state_leak_debug_check()          do {} while (0)
 #define btrfs_debug_check_extent_io_range(c, s, e)     do {} while (0)
 #endif
 
@@ -334,7 +353,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
                return state;
        state->state = 0;
        RB_CLEAR_NODE(&state->rb_node);
-       btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
+       btrfs_leak_debug_add_state(state);
        refcount_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        trace_alloc_extent_state(state, mask, _RET_IP_);
@@ -347,7 +366,7 @@ void free_extent_state(struct extent_state *state)
                return;
        if (refcount_dec_and_test(&state->refs)) {
                WARN_ON(extent_state_in_tree(state));
-               btrfs_leak_debug_del(&leak_lock, &state->leak_list);
+               btrfs_leak_debug_del_state(state);
                trace_free_extent_state(state, _RET_IP_);
                kmem_cache_free(extent_state_cache, state);
        }
@@ -5990,7 +6009,7 @@ static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 {
        btrfs_release_extent_buffer_pages(eb);
-       btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
+       btrfs_leak_debug_del_eb(eb);
        __free_extent_buffer(eb);
 }
 
@@ -6007,8 +6026,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
        eb->bflags = 0;
        init_rwsem(&eb->lock);
 
-       btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
-                            &fs_info->allocated_ebs);
+       btrfs_leak_debug_add_eb(eb);
        INIT_LIST_HEAD(&eb->release_list);
 
        spin_lock_init(&eb->refs_lock);
@@ -6476,7 +6494,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
                        spin_unlock(&eb->refs_lock);
                }
 
-               btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
+               btrfs_leak_debug_del_eb(eb);
                /* Should be safe to release our pages at this point */
                btrfs_release_extent_buffer_pages(eb);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS