This moves the block reservation (block_rsv) code out of extent-tree.c and into the new file block-rsv.c. The code is moved as-is, with no functional changes.
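
For reference only (not part of the patch itself), a minimal sketch of how a caller typically uses the helpers collected here; the reservation type and the fs_info/root/num_bytes parameters are placeholders chosen for illustration:

	/*
	 * Illustrative use of the moved API: allocate a temporary rsv,
	 * reserve metadata bytes into it, then free it.  Freeing returns
	 * any unused reservation to the underlying space_info.
	 */
	static int example_temp_reservation(struct btrfs_fs_info *fs_info,
					    struct btrfs_root *root,
					    u64 num_bytes)
	{
		struct btrfs_block_rsv *rsv;
		int ret;

		rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
		if (!rsv)
			return -ENOMEM;

		ret = btrfs_block_rsv_add(root, rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (!ret) {
			/*
			 * ... hand bytes to a consumer here, e.g. via
			 * btrfs_block_rsv_use_bytes(rsv, num_bytes) ...
			 */
		}

		btrfs_free_block_rsv(fs_info, rsv);
		return ret;
	}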
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
- uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o
+ uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
+ block-rsv.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
--- /dev/null
+++ b/fs/btrfs/block-rsv.c
+// SPDX-License-Identifier: GPL-2.0
+
+#include "ctree.h"
+#include "block-rsv.h"
+#include "space-info.h"
+#include "math.h"
+
+static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *block_rsv,
+ struct btrfs_block_rsv *dest, u64 num_bytes,
+ u64 *qgroup_to_release_ret)
+{
+ struct btrfs_space_info *space_info = block_rsv->space_info;
+ u64 qgroup_to_release = 0;
+ u64 ret;
+
+ spin_lock(&block_rsv->lock);
+ if (num_bytes == (u64)-1) {
+ num_bytes = block_rsv->size;
+ qgroup_to_release = block_rsv->qgroup_rsv_size;
+ }
+ block_rsv->size -= num_bytes;
+ if (block_rsv->reserved >= block_rsv->size) {
+ num_bytes = block_rsv->reserved - block_rsv->size;
+ block_rsv->reserved = block_rsv->size;
+ block_rsv->full = 1;
+ } else {
+ num_bytes = 0;
+ }
+ if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
+ qgroup_to_release = block_rsv->qgroup_rsv_reserved -
+ block_rsv->qgroup_rsv_size;
+ block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
+ } else {
+ qgroup_to_release = 0;
+ }
+ spin_unlock(&block_rsv->lock);
+
+ ret = num_bytes;
+ if (num_bytes > 0) {
+ if (dest) {
+ spin_lock(&dest->lock);
+ if (!dest->full) {
+ u64 bytes_to_add;
+
+ bytes_to_add = dest->size - dest->reserved;
+ bytes_to_add = min(num_bytes, bytes_to_add);
+ dest->reserved += bytes_to_add;
+ if (dest->reserved >= dest->size)
+ dest->full = 1;
+ num_bytes -= bytes_to_add;
+ }
+ spin_unlock(&dest->lock);
+ }
+ if (num_bytes)
+ btrfs_space_info_add_old_bytes(fs_info, space_info,
+ num_bytes);
+ }
+ if (qgroup_to_release_ret)
+ *qgroup_to_release_ret = qgroup_to_release;
+ return ret;
+}
+
+int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
+ struct btrfs_block_rsv *dst, u64 num_bytes,
+ bool update_size)
+{
+ int ret;
+
+ ret = btrfs_block_rsv_use_bytes(src, num_bytes);
+ if (ret)
+ return ret;
+
+ btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
+ return 0;
+}
+
+void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
+{
+ memset(rsv, 0, sizeof(*rsv));
+ spin_lock_init(&rsv->lock);
+ rsv->type = type;
+}
+
+void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *rsv,
+ unsigned short type)
+{
+ btrfs_init_block_rsv(rsv, type);
+ rsv->space_info = btrfs_find_space_info(fs_info,
+ BTRFS_BLOCK_GROUP_METADATA);
+}
+
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
+ unsigned short type)
+{
+ struct btrfs_block_rsv *block_rsv;
+
+ block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
+ if (!block_rsv)
+ return NULL;
+
+ btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
+ return block_rsv;
+}
+
+void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *rsv)
+{
+ if (!rsv)
+ return;
+ btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
+ kfree(rsv);
+}
+
+int btrfs_block_rsv_add(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv, u64 num_bytes,
+ enum btrfs_reserve_flush_enum flush)
+{
+ int ret;
+
+ if (num_bytes == 0)
+ return 0;
+
+ ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ if (!ret)
+ btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);
+
+ return ret;
+}
+
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
+{
+ u64 num_bytes = 0;
+ int ret = -ENOSPC;
+
+ if (!block_rsv)
+ return 0;
+
+ spin_lock(&block_rsv->lock);
+ num_bytes = div_factor(block_rsv->size, min_factor);
+ if (block_rsv->reserved >= num_bytes)
+ ret = 0;
+ spin_unlock(&block_rsv->lock);
+
+ return ret;
+}
+
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv, u64 min_reserved,
+ enum btrfs_reserve_flush_enum flush)
+{
+ u64 num_bytes = 0;
+ int ret = -ENOSPC;
+
+ if (!block_rsv)
+ return 0;
+
+ spin_lock(&block_rsv->lock);
+ num_bytes = min_reserved;
+ if (block_rsv->reserved >= num_bytes)
+ ret = 0;
+ else
+ num_bytes -= block_rsv->reserved;
+ spin_unlock(&block_rsv->lock);
+
+ if (!ret)
+ return 0;
+
+ ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ if (!ret) {
+ btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
+ return 0;
+ }
+
+ return ret;
+}
+
+u64 __btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *block_rsv,
+ u64 num_bytes, u64 *qgroup_to_release)
+{
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *target = NULL;
+
+ /*
+ * If we are the delayed_rsv then push to the global rsv, otherwise dump
+ * into the delayed rsv if it is not full.
+ */
+ if (block_rsv == delayed_rsv)
+ target = global_rsv;
+ else if (block_rsv != global_rsv && !delayed_rsv->full)
+ target = delayed_rsv;
+
+ if (target && block_rsv->space_info != target->space_info)
+ target = NULL;
+
+ return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
+ qgroup_to_release);
+}
+
+int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
+{
+ int ret = -ENOSPC;
+
+ spin_lock(&block_rsv->lock);
+ if (block_rsv->reserved >= num_bytes) {
+ block_rsv->reserved -= num_bytes;
+ if (block_rsv->reserved < block_rsv->size)
+ block_rsv->full = 0;
+ ret = 0;
+ }
+ spin_unlock(&block_rsv->lock);
+ return ret;
+}
+
+void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
+ u64 num_bytes, bool update_size)
+{
+ spin_lock(&block_rsv->lock);
+ block_rsv->reserved += num_bytes;
+ if (update_size)
+ block_rsv->size += num_bytes;
+ else if (block_rsv->reserved >= block_rsv->size)
+ block_rsv->full = 1;
+ spin_unlock(&block_rsv->lock);
+}
+
+int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *dest, u64 num_bytes,
+ int min_factor)
+{
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ u64 min_bytes;
+
+ if (global_rsv->space_info != dest->space_info)
+ return -ENOSPC;
+
+ spin_lock(&global_rsv->lock);
+ min_bytes = div_factor(global_rsv->size, min_factor);
+ if (global_rsv->reserved < min_bytes + num_bytes) {
+ spin_unlock(&global_rsv->lock);
+ return -ENOSPC;
+ }
+ global_rsv->reserved -= num_bytes;
+ if (global_rsv->reserved < global_rsv->size)
+ global_rsv->full = 0;
+ spin_unlock(&global_rsv->lock);
+
+ btrfs_block_rsv_add_bytes(dest, num_bytes, true);
+ return 0;
+}
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
return block_rsv;
}
-int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
-{
- int ret = -ENOSPC;
- spin_lock(&block_rsv->lock);
- if (block_rsv->reserved >= num_bytes) {
- block_rsv->reserved -= num_bytes;
- if (block_rsv->reserved < block_rsv->size)
- block_rsv->full = 0;
- ret = 0;
- }
- spin_unlock(&block_rsv->lock);
- return ret;
-}
-
-void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
- u64 num_bytes, bool update_size)
-{
- spin_lock(&block_rsv->lock);
- block_rsv->reserved += num_bytes;
- if (update_size)
- block_rsv->size += num_bytes;
- else if (block_rsv->reserved >= block_rsv->size)
- block_rsv->full = 1;
- spin_unlock(&block_rsv->lock);
-}
-
-int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *dest, u64 num_bytes,
- int min_factor)
-{
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
- u64 min_bytes;
-
- if (global_rsv->space_info != dest->space_info)
- return -ENOSPC;
-
- spin_lock(&global_rsv->lock);
- min_bytes = div_factor(global_rsv->size, min_factor);
- if (global_rsv->reserved < min_bytes + num_bytes) {
- spin_unlock(&global_rsv->lock);
- return -ENOSPC;
- }
- global_rsv->reserved -= num_bytes;
- if (global_rsv->reserved < global_rsv->size)
- global_rsv->full = 0;
- spin_unlock(&global_rsv->lock);
-
- btrfs_block_rsv_add_bytes(dest, num_bytes, true);
- return 0;
-}
-
/**
* btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
* @fs_info - the fs info for our fs.
return 0;
}
-static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *block_rsv,
- struct btrfs_block_rsv *dest, u64 num_bytes,
- u64 *qgroup_to_release_ret)
-{
- struct btrfs_space_info *space_info = block_rsv->space_info;
- u64 qgroup_to_release = 0;
- u64 ret;
-
- spin_lock(&block_rsv->lock);
- if (num_bytes == (u64)-1) {
- num_bytes = block_rsv->size;
- qgroup_to_release = block_rsv->qgroup_rsv_size;
- }
- block_rsv->size -= num_bytes;
- if (block_rsv->reserved >= block_rsv->size) {
- num_bytes = block_rsv->reserved - block_rsv->size;
- block_rsv->reserved = block_rsv->size;
- block_rsv->full = 1;
- } else {
- num_bytes = 0;
- }
- if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
- qgroup_to_release = block_rsv->qgroup_rsv_reserved -
- block_rsv->qgroup_rsv_size;
- block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
- } else {
- qgroup_to_release = 0;
- }
- spin_unlock(&block_rsv->lock);
-
- ret = num_bytes;
- if (num_bytes > 0) {
- if (dest) {
- spin_lock(&dest->lock);
- if (!dest->full) {
- u64 bytes_to_add;
-
- bytes_to_add = dest->size - dest->reserved;
- bytes_to_add = min(num_bytes, bytes_to_add);
- dest->reserved += bytes_to_add;
- if (dest->reserved >= dest->size)
- dest->full = 1;
- num_bytes -= bytes_to_add;
- }
- spin_unlock(&dest->lock);
- }
- if (num_bytes)
- btrfs_space_info_add_old_bytes(fs_info, space_info,
- num_bytes);
- }
- if (qgroup_to_release_ret)
- *qgroup_to_release_ret = qgroup_to_release;
- return ret;
-}
-
-int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
- struct btrfs_block_rsv *dst, u64 num_bytes,
- bool update_size)
-{
- int ret;
-
- ret = btrfs_block_rsv_use_bytes(src, num_bytes);
- if (ret)
- return ret;
-
- btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
- return 0;
-}
-
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
-{
- memset(rsv, 0, sizeof(*rsv));
- spin_lock_init(&rsv->lock);
- rsv->type = type;
-}
-
-void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *rsv,
- unsigned short type)
-{
- btrfs_init_block_rsv(rsv, type);
- rsv->space_info = btrfs_find_space_info(fs_info,
- BTRFS_BLOCK_GROUP_METADATA);
-}
-
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
- unsigned short type)
-{
- struct btrfs_block_rsv *block_rsv;
-
- block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
- if (!block_rsv)
- return NULL;
-
- btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
- return block_rsv;
-}
-
-void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *rsv)
-{
- if (!rsv)
- return;
- btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
- kfree(rsv);
-}
-
-int btrfs_block_rsv_add(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, u64 num_bytes,
- enum btrfs_reserve_flush_enum flush)
-{
- int ret;
-
- if (num_bytes == 0)
- return 0;
-
- ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
- if (!ret)
- btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);
-
- return ret;
-}
-
-int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
-{
- u64 num_bytes = 0;
- int ret = -ENOSPC;
-
- if (!block_rsv)
- return 0;
-
- spin_lock(&block_rsv->lock);
- num_bytes = div_factor(block_rsv->size, min_factor);
- if (block_rsv->reserved >= num_bytes)
- ret = 0;
- spin_unlock(&block_rsv->lock);
-
- return ret;
-}
-
-int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, u64 min_reserved,
- enum btrfs_reserve_flush_enum flush)
-{
- u64 num_bytes = 0;
- int ret = -ENOSPC;
-
- if (!block_rsv)
- return 0;
-
- spin_lock(&block_rsv->lock);
- num_bytes = min_reserved;
- if (block_rsv->reserved >= num_bytes)
- ret = 0;
- else
- num_bytes -= block_rsv->reserved;
- spin_unlock(&block_rsv->lock);
-
- if (!ret)
- return 0;
-
- ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
- if (!ret) {
- btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
- return 0;
- }
-
- return ret;
-}
-
-u64 __btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *block_rsv,
- u64 num_bytes, u64 *qgroup_to_release)
-{
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
- struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
- struct btrfs_block_rsv *target = NULL;
-
- /*
- * If we are the delayed_rsv then push to the global rsv, otherwise dump
- * into the delayed rsv if it is not full.
- */
- if (block_rsv == delayed_rsv)
- target = global_rsv;
- else if (block_rsv != global_rsv && !delayed_rsv->full)
- target = delayed_rsv;
-
- if (target && block_rsv->space_info != target->space_info)
- target = NULL;
-
- return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
- qgroup_to_release);
-}
-
/**
* btrfs_inode_rsv_release - release any excessive reservation.
* @inode - the inode we need to release from.