Btrfs: load free space cache if it exists
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index aab40fb..9a325e4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -421,7 +421,9 @@ err:
        return 0;
 }
 
-static int cache_block_group(struct btrfs_block_group_cache *cache)
+static int cache_block_group(struct btrfs_block_group_cache *cache,
+                            struct btrfs_trans_handle *trans,
+                            int load_cache_only)
 {
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
@@ -432,6 +434,36 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;
 
+       /*
+        * We can't do the read from on-disk cache during a commit since we need
+        * to have the normal tree locking.
+        */
+       if (!trans->transaction->in_commit) {
+               spin_lock(&cache->lock);
+               if (cache->cached != BTRFS_CACHE_NO) {
+                       spin_unlock(&cache->lock);
+                       return 0;
+               }
+               cache->cached = BTRFS_CACHE_STARTED;
+               spin_unlock(&cache->lock);
+
+               ret = load_free_space_cache(fs_info, cache);
+
+               spin_lock(&cache->lock);
+               if (ret == 1) {
+                       cache->cached = BTRFS_CACHE_FINISHED;
+                       cache->last_byte_to_unpin = (u64)-1;
+               } else {
+                       cache->cached = BTRFS_CACHE_NO;
+               }
+               spin_unlock(&cache->lock);
+               if (ret == 1)
+                       return 0;
+       }
+
+       if (load_cache_only)
+               return 0;
+
        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
        BUG_ON(!caching_ctl);
 
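For readers who want the new fast path without the diff context: unless the transaction is committing, cache_block_group() now first tries to populate the in-memory free space cache from the cache written on disk, and only falls back to the slow extent-tree walk when that load fails; with load_cache_only set it stops after the cheap attempt. The sketch below is a minimal user-space model of that control flow, using simplified stand-in types rather than the real btrfs structures:

/* Minimal user-space sketch (not kernel code) of the fast path added to
 * cache_block_group() above. block_group and load_free_space_cache() are
 * simplified stand-ins for the btrfs structures, not the real API.
 */
#include <stdbool.h>
#include <stdio.h>

enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

struct block_group {
	enum cache_state cached;
	unsigned long long last_byte_to_unpin;
};

/* stand-in: returns 1 when a valid cache was read from disk, 0 otherwise */
static int load_free_space_cache(struct block_group *bg)
{
	(void)bg;
	return 1;
}

static int cache_block_group(struct block_group *bg, bool in_commit,
			     bool load_cache_only)
{
	if (bg->cached != CACHE_NO)
		return 0;

	/* Reading the on-disk cache needs normal tree locking, so skip it
	 * while a commit is in progress. */
	if (!in_commit) {
		bg->cached = CACHE_STARTED;
		if (load_free_space_cache(bg) == 1) {
			bg->cached = CACHE_FINISHED;
			bg->last_byte_to_unpin = (unsigned long long)-1;
			return 0;
		}
		bg->cached = CACHE_NO;
	}

	if (load_cache_only)
		return 0;	/* caller only wanted the cheap attempt */

	/* ...otherwise the kernel kicks off the slow caching thread here... */
	bg->cached = CACHE_STARTED;
	return 0;
}

int main(void)
{
	struct block_group bg = { .cached = CACHE_NO };
	cache_block_group(&bg, false, true);
	printf("cached = %d (2 == FINISHED)\n", bg.cached);
	return 0;
}
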
@@ -2847,6 +2879,8 @@ again:
                        continue;
                }
 
+               if (cache->disk_cache_state == BTRFS_DC_SETUP)
+                       cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
                cache->dirty = 0;
                last = cache->key.objectid + cache->key.offset;
 
@@ -2855,6 +2889,52 @@ again:
                btrfs_put_block_group(cache);
        }
 
+       while (1) {
+               /*
+                * This probably isn't needed since we're just marking our
+                * preallocated extent as written, but running the delayed
+                * refs here can't hurt.
+                */
+               if (last == 0) {
+                       err = btrfs_run_delayed_refs(trans, root,
+                                                    (unsigned long)-1);
+                       BUG_ON(err);
+               }
+
+               cache = btrfs_lookup_first_block_group(root->fs_info, last);
+               while (cache) {
+                       /*
+                        * Really this shouldn't happen, but it could if we
+                        * couldn't write the entire preallocated extent and
+                        * splitting the extent resulted in a new block.
+                        */
+                       if (cache->dirty) {
+                               btrfs_put_block_group(cache);
+                               goto again;
+                       }
+                       if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
+                               break;
+                       cache = next_block_group(root, cache);
+               }
+               if (!cache) {
+                       if (last == 0)
+                               break;
+                       last = 0;
+                       continue;
+               }
+
+               btrfs_write_out_cache(root, trans, cache, path);
+
+               /*
+                * If we didn't have an error then the cache state is still
+                * NEED_WRITE, so we can set it to WRITTEN.
+                */
+               if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
+                       cache->disk_cache_state = BTRFS_DC_WRITTEN;
+               last = cache->key.objectid + cache->key.offset;
+               btrfs_put_block_group(cache);
+       }
+
        btrfs_free_path(path);
        return 0;
 }
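
The write-out loop added above drives a small state machine: block groups whose cache was set up during the transaction go BTRFS_DC_SETUP -> BTRFS_DC_NEED_WRITE in the dirty pass, each NEED_WRITE cache is then written out and marked BTRFS_DC_WRITTEN, and the whole sequence restarts if writing a cache dirtied a block group again. A compact user-space model of that progression, with hypothetical stand-ins (group, write_out_cache) rather than the kernel helpers:

/* Minimal user-space model (not kernel code) of the disk_cache_state
 * progression above: SETUP -> NEED_WRITE -> WRITTEN.
 */
#include <stdbool.h>
#include <stdio.h>

enum disk_cache_state { DC_WRITTEN, DC_SETUP, DC_NEED_WRITE };

struct group {
	bool dirty;
	enum disk_cache_state disk_cache_state;
};

static void write_out_cache(struct group *g)
{
	/* the kernel calls btrfs_write_out_cache() at this point */
	(void)g;
}

static void write_dirty_block_groups(struct group *groups, int n)
{
again:
	/* pass 1: flush dirty groups and promote SETUP caches to NEED_WRITE */
	for (int i = 0; i < n; i++) {
		if (groups[i].disk_cache_state == DC_SETUP)
			groups[i].disk_cache_state = DC_NEED_WRITE;
		groups[i].dirty = false;
	}

	/* pass 2: write every cache that still needs it */
	for (int i = 0; i < n; i++) {
		if (groups[i].dirty)
			goto again;	/* writing a cache re-dirtied a group */
		if (groups[i].disk_cache_state != DC_NEED_WRITE)
			continue;
		write_out_cache(&groups[i]);
		/* no error: still NEED_WRITE, so mark it written */
		if (groups[i].disk_cache_state == DC_NEED_WRITE)
			groups[i].disk_cache_state = DC_WRITTEN;
	}
}

int main(void)
{
	struct group g[2] = { { false, DC_SETUP }, { false, DC_WRITTEN } };
	write_dirty_block_groups(g, 2);
	printf("group 0 state = %d (0 == WRITTEN)\n", g[0].disk_cache_state);
	return 0;
}
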
@@ -3936,6 +4016,14 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                        factor = 2;
                else
                        factor = 1;
+               /*
+                * If this block group has free space cache written out, we
+                * need to make sure to load it if we are removing space.  This
+                * is because we need the unpinning stage to actually add the
+                * space back to the block group, otherwise we will leak space.
+                */
+               if (!alloc && cache->cached == BTRFS_CACHE_NO)
+                       cache_block_group(cache, trans, 1);
 
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
@@ -4780,6 +4868,10 @@ have_block_group:
                if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
                        u64 free_percent;
 
+                       ret = cache_block_group(block_group, trans, 1);
+                       if (block_group->cached == BTRFS_CACHE_FINISHED)
+                               goto have_block_group;
+
                        free_percent = btrfs_block_group_used(&block_group->item);
                        free_percent *= 100;
                        free_percent = div64_u64(free_percent,
@@ -4800,7 +4892,7 @@ have_block_group:
                        if (loop > LOOP_CACHING_NOWAIT ||
                            (loop > LOOP_FIND_IDEAL &&
                             atomic_read(&space_info->caching_threads) < 2)) {
-                               ret = cache_block_group(block_group);
+                               ret = cache_block_group(block_group, trans, 0);
                                BUG_ON(ret);
                        }
                        found_uncached_bg = true;
@@ -5357,7 +5449,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
        u64 num_bytes = ins->offset;
 
        block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-       cache_block_group(block_group);
+       cache_block_group(block_group, trans, 0);
        caching_ctl = get_caching_control(block_group);
 
        if (!caching_ctl) {