btrfs: Perform pinned cleanup directly in btrfs_destroy_delayed_refs
author	Nikolay Borisov <nborisov@suse.com>
Mon, 20 Jan 2020 14:09:08 +0000 (16:09 +0200)
committer	David Sterba <dsterba@suse.com>
Mon, 23 Mar 2020 16:01:36 +0000 (17:01 +0100)
Having btrfs_destroy_delayed_refs call btrfs_pin_extent is problematic
for making pinned extent tracking per-transaction, since a
btrfs_trans_handle cannot be passed to btrfs_pin_extent in this context.
Additionally, the delayed ref heads pinned in btrfs_destroy_delayed_refs
are going to be handled again very soon afterwards, in
btrfs_destroy_pinned_extent.
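
For context, at this point in the series btrfs_pin_extent is fs_info
based, as the removed call in the hunk below shows. Its prototype looks
roughly like this (reconstructed from the call site, so treat it as a
sketch):

	int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
			     u64 bytenr, u64 num_bytes, int reserved);

There is no transaction handle anywhere in the signature to hang
per-transaction pinned extent state off of.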

To enable btrfs_pin_extent to take a btrfs_trans_handle, simply open
code it in btrfs_destroy_delayed_refs and call
btrfs_error_unpin_extent_range on the range directly. This lets us do
less work in btrfs_destroy_pinned_extent and leaves btrfs_pin_extent
called only in contexts which have a valid btrfs_trans_handle.
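
For illustration, this is roughly where the series is heading: once
btrfs_pin_extent is only reachable with a live transaction, a follow-up
can swap fs_info for the handle and derive fs_info from it. A minimal
sketch, not part of this patch:

	int btrfs_pin_extent(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int reserved)
	{
		struct btrfs_block_group *cache;

		/* fs_info is now derived from the transaction handle */
		cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
		BUG_ON(!cache);

		pin_down_extent(cache, bytenr, num_bytes, reserved);

		btrfs_put_block_group(cache);
		return 0;
	}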

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/disk-io.c

index 49daf6c..194c98a 100644
@@ -42,6 +42,7 @@
 #include "ref-verify.h"
 #include "block-group.h"
 #include "discard.h"
+#include "space-info.h"
 
 #define BTRFS_SUPER_FLAG_SUPP  (BTRFS_HEADER_FLAG_WRITTEN |\
                                 BTRFS_HEADER_FLAG_RELOC |\
@@ -4308,9 +4309,30 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                spin_unlock(&delayed_refs->lock);
                mutex_unlock(&head->mutex);
 
-               if (pin_bytes)
-                       btrfs_pin_extent(fs_info, head->bytenr,
-                                        head->num_bytes, 1);
+               if (pin_bytes) {
+                       struct btrfs_block_group *cache;
+
+                       cache = btrfs_lookup_block_group(fs_info, head->bytenr);
+                       BUG_ON(!cache);
+
+                       spin_lock(&cache->space_info->lock);
+                       spin_lock(&cache->lock);
+                       cache->pinned += head->num_bytes;
+                       btrfs_space_info_update_bytes_pinned(fs_info,
+                               cache->space_info, head->num_bytes);
+                       cache->reserved -= head->num_bytes;
+                       cache->space_info->bytes_reserved -= head->num_bytes;
+                       spin_unlock(&cache->lock);
+                       spin_unlock(&cache->space_info->lock);
+                       percpu_counter_add_batch(
+                               &cache->space_info->total_bytes_pinned,
+                               head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
+
+                       btrfs_put_block_group(cache);
+
+                       btrfs_error_unpin_extent_range(fs_info, head->bytenr,
+                               head->bytenr + head->num_bytes - 1);
+               }
                btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
                btrfs_put_delayed_ref_head(head);
                cond_resched();
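
For reference, the open coded block above mirrors the accounting half of
pin_down_extent; the sketch below is reconstructed from this era of the
code, so treat the details as approximate. The deliberate difference is
the tail: instead of set_extent_dirty marking the range in the
pinned_extents tree, the cleanup path calls
btrfs_error_unpin_extent_range right away, which is what spares
btrfs_destroy_pinned_extent the extra work later.

	static int pin_down_extent(struct btrfs_block_group *cache,
				   u64 bytenr, u64 num_bytes, int reserved)
	{
		struct btrfs_fs_info *fs_info = cache->fs_info;

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned += num_bytes;
		btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
						     num_bytes);
		if (reserved) {
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);

		percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
					 num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);

		/* The step the abort path above deliberately skips. */
		set_extent_dirty(fs_info->pinned_extents, bytenr,
				 bytenr + num_bytes - 1, GFP_NOFS);
		return 0;
	}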