BTRFS_RESERVE_FLUSH_ALL,
};
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes);
-int __btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
+int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
-void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
-void __btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
+void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
-int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
-int __btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
-void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes);
-void __btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
+int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
+void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
unsigned short type);
num_pages *= 16;
num_pages *= PAGE_CACHE_SIZE;
- ret = __btrfs_check_data_free_space(inode, 0, num_pages);
+ ret = btrfs_check_data_free_space(inode, 0, num_pages);
if (ret)
goto out_put;
&alloc_hint);
if (!ret)
dcs = BTRFS_DC_SETUP;
- __btrfs_free_reserved_data_space(inode, 0, num_pages);
+ btrfs_free_reserved_data_space(inode, 0, num_pages);
out_put:
iput(inode);
}
/*
- * This will check the space that the inode allocates from to make sure we have
- * enough space for bytes.
- */
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
-{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- int ret;
-
- ret = btrfs_alloc_data_chunk_ondemand(inode, bytes);
- if (ret < 0)
- return ret;
- ret = btrfs_qgroup_reserve(root, write_bytes);
- return ret;
-}
-
-/*
 * Check that we have enough free data space for the given range, and
 * reserve it, taking a precise per-range qgroup data reservation as well.
*/
-int __btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
+int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
}
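
With the rename, callers reserve an exact byte range and must free that same
range on failure. A minimal sketch of the new calling convention, mirroring
the file.c hunk further down ('pos', 'len' and 'example_setup()' are
hypothetical stand-ins, not from a real caller):

	/*
	 * Illustrative sketch only: reserve data space for a write of 'len'
	 * bytes at file offset 'pos', undoing it if later setup fails.
	 */
	static int example_data_reserve(struct inode *inode, u64 pos, u64 len)
	{
		int ret;

		ret = btrfs_check_data_free_space(inode, pos, len);
		if (ret < 0)
			return ret;	/* -ENOSPC, or a qgroup limit was hit */

		ret = example_setup(inode);	/* hypothetical caller work */
		if (ret < 0)
			/* Free with the exact range that was reserved. */
			btrfs_free_reserved_data_space(inode, pos, len);
		return ret;
	}
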
/*
- * Called if we need to clear a data reservation for this inode.
- */
-void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
-{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_space_info *data_sinfo;
-
- /* make sure bytes are sectorsize aligned */
- bytes = ALIGN(bytes, root->sectorsize);
-
- data_sinfo = root->fs_info->data_sinfo;
- spin_lock(&data_sinfo->lock);
- WARN_ON(data_sinfo->bytes_may_use < bytes);
- data_sinfo->bytes_may_use -= bytes;
- trace_btrfs_space_reservation(root->fs_info, "space_info",
- data_sinfo->flags, bytes, 0);
- spin_unlock(&data_sinfo->lock);
-}
-
-/*
 * Called if we need to clear a data reservation for this inode,
 * normally in an error case.
 *
 * This one will handle the per-inode data rsv map for the accurate reserved
 * space framework.
*/
-void __btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
+void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_space_info *data_sinfo;
}
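
The removed byte-count body above still shows the space_info side of the
operation; the range-based version additionally has to return the qgroup
reservation for the exact range. A sketch of the expected shape, assuming the
btrfs_qgroup_free_data() helper introduced earlier in this series (not shown
in this excerpt):

	void btrfs_free_reserved_data_space(struct inode *inode, u64 start,
					    u64 len)
	{
		struct btrfs_root *root = BTRFS_I(inode)->root;
		struct btrfs_space_info *data_sinfo;

		/* Align the range to sectorsize, as the reservation was. */
		len = round_up(start + len, root->sectorsize) -
		      round_down(start, root->sectorsize);
		start = round_down(start, root->sectorsize);

		/* Return the per-range qgroup reservation (assumed helper). */
		btrfs_qgroup_free_data(inode, start, len);

		/* Then drop bytes_may_use in the data space_info, as before. */
		data_sinfo = root->fs_info->data_sinfo;
		spin_lock(&data_sinfo->lock);
		WARN_ON(data_sinfo->bytes_may_use < len);
		data_sinfo->bytes_may_use -= len;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      data_sinfo->flags, len, 0);
		spin_unlock(&data_sinfo->lock);
	}
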
/**
- * __btrfs_delalloc_reserve_space - reserve data and metadata space for
+ * btrfs_delalloc_reserve_space - reserve data and metadata space for
* delalloc
 * @inode: inode we're writing to
 * @start: start offset of the range we are writing to
 * @len: length of the range we are writing to
 *
 * Return 0 for success
 * Return <0 for error (-ENOSPC or -EDQUOT)
*/
-int __btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
+int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
{
int ret;
- ret = __btrfs_check_data_free_space(inode, start, len);
+ ret = btrfs_check_data_free_space(inode, start, len);
if (ret < 0)
return ret;
ret = btrfs_delalloc_reserve_metadata(inode, len);
if (ret < 0)
- __btrfs_free_reserved_data_space(inode, start, len);
+ btrfs_free_reserved_data_space(inode, start, len);
return ret;
}
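
The combined helper either takes both reservations or neither, so a caller
only ever pairs it with a single release over the same range. A condensed
sketch of the pattern the mmap hunk below follows ('example_dirty_page()' is
a hypothetical stand-in):

	/*
	 * Illustrative only: reserve data + metadata for one page before
	 * dirtying it, releasing both if the page work fails.
	 */
	static int example_page_reserve(struct inode *inode, struct page *page)
	{
		u64 page_start = page_offset(page);
		int ret;

		ret = btrfs_delalloc_reserve_space(inode, page_start,
						   PAGE_CACHE_SIZE);
		if (ret)
			return ret;	/* data or metadata reservation failed */

		ret = example_dirty_page(page);
		if (ret)
			btrfs_delalloc_release_space(inode, page_start,
						     PAGE_CACHE_SIZE);
		return ret;
	}
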
/**
- * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
- * @inode: inode we're writing to
- * @num_bytes: the number of bytes we want to allocate
- *
- * This will do the following things
- *
- * o reserve space in the data space info for num_bytes
- * o reserve space in the metadata space info based on number of outstanding
- * extents and how much csums will be needed
- * o add to the inodes ->delalloc_bytes
- * o add it to the fs_info's delalloc inodes list.
- *
- * This will return 0 for success and -ENOSPC if there is no space left.
- */
-int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
-{
- int ret;
-
- ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
- if (ret)
- return ret;
-
- ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
- if (ret) {
- btrfs_free_reserved_data_space(inode, num_bytes);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * __btrfs_delalloc_release_space - release data and metadata space for delalloc
+ * btrfs_delalloc_release_space - release data and metadata space for delalloc
* @inode: inode we're releasing space for
* @start: start position of the space already reserved
 * @len: length of the space already reserved
* list if there are no delalloc bytes left.
 * It also handles the qgroup reserved space.
*/
-void __btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
+void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
{
btrfs_delalloc_release_metadata(inode, len);
- __btrfs_free_reserved_data_space(inode, start, len);
-}
-
-/**
- * btrfs_delalloc_release_space - release data and metadata space for delalloc
- * @inode: inode we're releasing space for
- * @num_bytes: the number of bytes we want to free up
- *
- * This must be matched with a call to btrfs_delalloc_reserve_space. This is
- * called in the case that we don't need the metadata AND data reservations
- * anymore. So if there is an error or we insert an inline extent.
- *
- * This function will release the metadata space that was not used and will
- * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
- * list if there are no delalloc bytes left.
- */
-void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
-{
- btrfs_delalloc_release_metadata(inode, num_bytes);
- btrfs_free_reserved_data_space(inode, num_bytes);
+ btrfs_free_reserved_data_space(inode, start, len);
}
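
One subtlety the range-based API keeps from the old one: NOCOW writes
reserve only metadata, so error paths must pick the matching release, as the
file.c hunk below does. Condensed:

	/*
	 * Illustrative error path of a buffered write: 'only_release_metadata'
	 * is set for NOCOW writes, which never took a data reservation.
	 */
	if (only_release_metadata)
		btrfs_delalloc_release_metadata(inode, release_bytes);
	else
		btrfs_delalloc_release_space(inode, pos, release_bytes);
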
static int update_block_group(struct btrfs_trans_handle *trans,
goto reserve_metadata;
}
}
- ret = __btrfs_check_data_free_space(inode, pos, write_bytes);
+ ret = btrfs_check_data_free_space(inode, pos, write_bytes);
if (ret < 0)
break;
ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
if (ret) {
if (!only_release_metadata)
- __btrfs_free_reserved_data_space(inode, pos,
- write_bytes);
+ btrfs_free_reserved_data_space(inode, pos,
+ write_bytes);
else
btrfs_end_write_no_snapshoting(root);
break;
btrfs_delalloc_release_metadata(inode,
release_bytes);
else
- __btrfs_delalloc_release_space(inode, pos,
+ btrfs_delalloc_release_space(inode, pos,
release_bytes);
}
btrfs_end_write_no_snapshoting(root);
btrfs_delalloc_release_metadata(inode, release_bytes);
} else {
- __btrfs_delalloc_release_space(inode, pos,
- release_bytes);
+ btrfs_delalloc_release_space(inode, pos, release_bytes);
}
}
out:
mutex_unlock(&inode->i_mutex);
/* Let go of our reservation. */
- __btrfs_free_reserved_data_space(inode, alloc_start,
- alloc_end - alloc_start);
+ btrfs_free_reserved_data_space(inode, alloc_start,
+ alloc_end - alloc_start);
return ret;
}
/* Just to make sure we have enough space */
prealloc += 8 * PAGE_CACHE_SIZE;
- ret = __btrfs_delalloc_reserve_space(inode, 0, prealloc);
+ ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
if (ret)
goto out_put;
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
prealloc, prealloc, &alloc_hint);
if (ret) {
- __btrfs_delalloc_release_space(inode, 0, prealloc);
+ btrfs_delalloc_release_space(inode, 0, prealloc);
goto out_put;
}
- __btrfs_free_reserved_data_space(inode, 0, prealloc);
+ btrfs_free_reserved_data_space(inode, 0, prealloc);
ret = btrfs_write_out_ino_cache(root, trans, path, inode);
out_put:
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
&& do_list && !(state->state & EXTENT_NORESERVE))
- __btrfs_free_reserved_data_space(inode, state->start,
- len);
+ btrfs_free_reserved_data_space(inode, state->start,
+ len);
__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
root->fs_info->delalloc_batch);
goto again;
}
- ret = __btrfs_delalloc_reserve_space(inode, page_start,
- PAGE_CACHE_SIZE);
+ ret = btrfs_delalloc_reserve_space(inode, page_start,
+ PAGE_CACHE_SIZE);
if (ret) {
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
if ((offset & (blocksize - 1)) == 0 &&
(!len || ((len & (blocksize - 1)) == 0)))
goto out;
- ret = __btrfs_delalloc_reserve_space(inode,
+ ret = btrfs_delalloc_reserve_space(inode,
round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
if (ret)
goto out;
again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
- __btrfs_delalloc_release_space(inode,
+ btrfs_delalloc_release_space(inode,
round_down(from, PAGE_CACHE_SIZE),
PAGE_CACHE_SIZE);
ret = -ENOMEM;
out_unlock:
if (ret)
- __btrfs_delalloc_release_space(inode, page_start,
- PAGE_CACHE_SIZE);
+ btrfs_delalloc_release_space(inode, page_start,
+ PAGE_CACHE_SIZE);
unlock_page(page);
page_cache_release(page);
out:
spin_unlock(&BTRFS_I(inode)->lock);
}
- __btrfs_free_reserved_data_space(inode, start, len);
+ btrfs_free_reserved_data_space(inode, start, len);
WARN_ON(dio_data->reserve < len);
dio_data->reserve -= len;
current->journal_info = dio_data;
mutex_unlock(&inode->i_mutex);
relock = true;
}
- ret = __btrfs_delalloc_reserve_space(inode, offset, count);
+ ret = btrfs_delalloc_reserve_space(inode, offset, count);
if (ret)
goto out;
dio_data.outstanding_extents = div64_u64(count +
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED) {
if (dio_data.reserve)
- __btrfs_delalloc_release_space(inode, offset,
- dio_data.reserve);
+ btrfs_delalloc_release_space(inode, offset,
+ dio_data.reserve);
} else if (ret >= 0 && (size_t)ret < count)
- __btrfs_delalloc_release_space(inode, offset,
- count - (size_t)ret);
+ btrfs_delalloc_release_space(inode, offset,
+ count - (size_t)ret);
}
out:
if (wakeup)
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
- ret = __btrfs_delalloc_reserve_space(inode, page_start,
- PAGE_CACHE_SIZE);
+ ret = btrfs_delalloc_reserve_space(inode, page_start,
+ PAGE_CACHE_SIZE);
if (!ret) {
ret = file_update_time(vma->vm_file);
reserved = 1;
}
unlock_page(page);
out:
- __btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
+ btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
out_noreserve:
sb_end_pagefault(inode->i_sb);
return ret;
page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
- ret = __btrfs_delalloc_reserve_space(inode,
+ ret = btrfs_delalloc_reserve_space(inode,
start_index << PAGE_CACHE_SHIFT,
page_cnt << PAGE_CACHE_SHIFT);
if (ret)
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
- __btrfs_delalloc_release_space(inode,
+ btrfs_delalloc_release_space(inode,
start_index << PAGE_CACHE_SHIFT,
(page_cnt - i_done) << PAGE_CACHE_SHIFT);
}
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
- __btrfs_delalloc_release_space(inode,
+ btrfs_delalloc_release_space(inode,
start_index << PAGE_CACHE_SHIFT,
page_cnt << PAGE_CACHE_SHIFT);
return ret;
return ret;
}
-int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
+static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
spin_unlock(&fs_info->qgroup_lock);
}
+static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes)
+{
+ return btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
+ num_bytes);
+}
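
With btrfs_qgroup_reserve()/btrfs_qgroup_free() made file-local, the static
helpers are just shorthand for the common root->objectid case; code outside
qgroup.c goes through the refroot interface directly, for instance:

	/* Outside qgroup.c, the removed inline wrapper becomes a direct call: */
	btrfs_qgroup_free_refroot(root->fs_info, root->objectid, num_bytes);
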
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
&changeset);
if (ret < 0)
goto cleanup;
- ret = btrfs_qgroup_reserve(root, changeset.bytes_changed);
+ ret = qgroup_reserve(root, changeset.bytes_changed);
if (ret < 0)
goto cleanup;
goto out;
if (free)
- btrfs_qgroup_free(BTRFS_I(inode)->root,
- changeset.bytes_changed);
+ qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
out:
ulist_free(changeset.range_changed);
return ret;
return 0;
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
- ret = btrfs_qgroup_reserve(root, num_bytes);
+ ret = qgroup_reserve(root, num_bytes);
if (ret < 0)
return ret;
atomic_add(num_bytes, &root->qgroup_meta_rsv);
reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
if (reserved == 0)
return;
- btrfs_qgroup_free(root, reserved);
+ qgroup_free(root, reserved);
}
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
atomic_sub(num_bytes, &root->qgroup_meta_rsv);
- btrfs_qgroup_free(root, num_bytes);
+ qgroup_free(root, num_bytes);
}
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
struct btrfs_qgroup_inherit *inherit);
-int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes);
-static inline void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
-{
- return btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
- num_bytes);
-}
-
/*
 * TODO: Add a proper trace point for this; btrfs_qgroup_free_refroot() is
 * called from everywhere, so it can't provide a good trace for the delayed
 * ref case.
{
btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
}
-
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
BUG_ON(cluster->start != cluster->boundary[0]);
mutex_lock(&inode->i_mutex);
- ret = __btrfs_check_data_free_space(inode, cluster->start,
- cluster->end + 1 - cluster->start);
+ ret = btrfs_check_data_free_space(inode, cluster->start,
+ cluster->end + 1 - cluster->start);
if (ret)
goto out;
break;
nr++;
}
- __btrfs_free_reserved_data_space(inode, cluster->start,
- cluster->end + 1 - cluster->start);
+ btrfs_free_reserved_data_space(inode, cluster->start,
+ cluster->end + 1 - cluster->start);
out:
mutex_unlock(&inode->i_mutex);
return ret;