#include <stdio.h>
#include <stdlib.h>
+#include <stdint.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "crc32c.h"
#include "volumes.h"
#include "free-space-cache.h"
-
-#define BLOCK_GROUP_DATA EXTENT_WRITEBACK
-#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
-#define BLOCK_GROUP_SYSTEM EXTENT_NEW
-
-#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
+#include "math.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
+static struct btrfs_block_group_cache *
+btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache
+ *hint, u64 search_start, int data, int owner);
static int remove_sb_from_cache(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
int ret;
struct btrfs_block_group_cache *cache = *cache_ret;
- u64 last;
+ u64 last = *start_ret;
u64 start = 0;
u64 end = 0;
u64 search_start = *start_ret;
int wrapped = 0;
- if (!cache) {
+ if (!cache)
goto out;
- }
again:
ret = cache_block_group(root, cache);
if (ret)
goto out;
last = max(search_start, cache->key.objectid);
- if (cache->ro || !block_group_bits(cache, data)) {
+ if (cache->ro || !block_group_bits(cache, data))
goto new_group;
- }
while(1) {
ret = find_first_extent_bit(&root->fs_info->free_space_cache,
return 0;
}
out:
+ *start_ret = last;
cache = btrfs_lookup_block_group(root->fs_info, search_start);
if (!cache) {
printk("Unable to find block group for %llu\n",
wrapped:
cache = btrfs_lookup_first_block_group(root->fs_info, last);
if (!cache) {
-no_cache:
if (!wrapped) {
wrapped = 1;
last = search_start;
}
goto out;
}
- cache = btrfs_find_block_group(root, cache, last, data, 0);
- cache = btrfs_find_block_group(root, cache, last, data, 0);
- if (!cache)
- goto no_cache;
-
*cache_ret = cache;
goto again;
}
-/* Removed helper: scaled num by factor/10 using integer math
- * (truncating); factor == 10 was the identity fast path. */
-static u64 div_factor(u64 num, int factor)
-{
- if (factor == 10)
- return num;
- num *= factor;
- num /= 10;
- return num;
-}
-
static int block_group_state_bits(u64 flags)
{
int bits = 0;
return bits;
}
-struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache
- *hint, u64 search_start,
- int data, int owner)
+static struct btrfs_block_group_cache *
+btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache
+ *hint, u64 search_start, int data, int owner)
{
struct btrfs_block_group_cache *cache;
struct extent_io_tree *block_group_cache;
break;
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (owner < BTRFS_FIRST_FREE_OBJECTID)
new_size += sizeof(*bi);
return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
key.type = BTRFS_EXTENT_REF_V0_KEY;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset)) {
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
err = 0;
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset))
break;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.offset++;
ret = btrfs_insert_empty_item(trans, root, path, &key,
btrfs_mark_buffer_dirty(leaf);
ret = 0;
fail:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (ret == -ENOENT && parent) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.type = BTRFS_EXTENT_REF_V0_KEY;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
static inline int extent_ref_type(u64 parent, u64 owner)
{
+ /* Refactor: pick the backref key type for an extent ref into a
+  * local and use a single exit point instead of four early returns.
+  * owner below BTRFS_FIRST_FREE_OBJECTID means a tree block;
+  * parent > 0 means the ref is shared (keyed by parent block). */
+ int type;
 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
 if (parent > 0)
- return BTRFS_SHARED_BLOCK_REF_KEY;
+ type = BTRFS_SHARED_BLOCK_REF_KEY;
 else
- return BTRFS_TREE_BLOCK_REF_KEY;
+ type = BTRFS_TREE_BLOCK_REF_KEY;
 } else {
 if (parent > 0)
- return BTRFS_SHARED_DATA_REF_KEY;
+ type = BTRFS_SHARED_DATA_REF_KEY;
 else
- return BTRFS_EXTENT_DATA_REF_KEY;
+ type = BTRFS_EXTENT_DATA_REF_KEY;
 }
+ return type;
}
static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
if (ret) {
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = num_bytes;
+ btrfs_release_path(path);
goto again;
}
}
if (ret != -ENOENT)
return ret;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
*ref_ret = NULL;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
btrfs_set_extent_refs(leaf, item, refs + 1);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root->fs_info->extent_root, path);
+ btrfs_release_path(path);
path->reada = 1;
path->leave_spinning = 1;
}
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
path->reada = 1;
key.objectid = bytenr;
* to make sure.
*/
if (ret > 0 && metadata) {
- if (path->slots) {
+ if (path->slots[0]) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
path->slots[0]);
if (key.objectid == bytenr &&
- key.type == BTRFS_METADATA_ITEM_KEY)
+ key.type == BTRFS_EXTENT_ITEM_KEY &&
+ key.offset == root->leafsize)
ret = 0;
}
if (ret) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = root->leafsize;
metadata = 0;
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA);
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
path->reada = 1;
key.objectid = bytenr;
if (ret > 0 && skinny_metadata) {
skinny_metadata = 0;
- if (path->slots[0]--) {
+ if (path->slots[0]) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
path->slots[0]);
ret = 0;
}
if (ret) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.offset = root->leafsize;
key.type = BTRFS_EXTENT_ITEM_KEY;
goto again;
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
fail:
finish_current_insert(trans, extent_root);
pending_ret = del_pending_extents(trans, extent_root);
return 0;
}
-/* Removed: walked every EXTENT_DIRTY range in fs_info->pinned_extents
- * and mirrored it into 'copy' as dirty extents. Presumably used by an
- * old commit path; no caller remains in view -- hence the deletion. */
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
-{
- u64 last = 0;
- u64 start;
- u64 end;
- struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
- int ret;
-
- while(1) {
- ret = find_first_extent_bit(pinned_extents, last,
- &start, &end, EXTENT_DIRTY);
- if (ret)
- break;
- set_extent_dirty(copy, start, end, GFP_NOFS);
- last = end + 1;
- }
- return 0;
-}
-
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_io_tree *unpin)
u64 end;
u64 priv;
struct btrfs_fs_info *info = extent_root->fs_info;
- struct btrfs_path *path;
struct pending_extent_op *extent_op;
struct btrfs_key key;
int ret;
btrfs_fs_incompat(extent_root->fs_info,
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA);
- path = btrfs_alloc_path();
-
while(1) {
ret = find_first_extent_bit(&info->extent_ins, 0, &start,
&end, EXTENT_LOCKED);
extent_op->flags,
&extent_op->key,
extent_op->level, &key);
+ BUG_ON(ret);
} else {
BUG_ON(1);
}
GFP_NOFS);
kfree(extent_op);
}
- btrfs_free_path(path);
return 0;
}
NULL, refs_to_drop,
is_data);
BUG_ON(ret);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
path->leave_spinning = 1;
key.objectid = bytenr;
if (ret > 0 && skinny_metadata) {
skinny_metadata = 0;
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = num_bytes;
ret = btrfs_search_slot(trans, extent_root,
owner_objectid, 0);
BUG_ON(ret < 0);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
path->leave_spinning = 1;
key.objectid = bytenr;
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
BUG_ON(ret);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
if (is_data) {
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
ret = find_search_start(root, &block_group, &search_start,
total_needed, data);
if (ret)
- goto error;
+ goto new_group;
ins->objectid = search_start;
ins->offset = num_bytes;
ret = update_block_group(trans, root, ins->objectid, root->leafsize,
1, 0);
- return 0;
+ return ret;
}
static int alloc_tree_block(struct btrfs_trans_handle *trans,
return 1;
}
-/*
- * drop the reference count on the tree rooted at 'snap'. This traverses
- * the tree freeing any blocks that have a ref count of zero after being
- * decremented.
- */
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
- *root)
-{
- int ret = 0;
- int wret;
- int level;
- struct btrfs_path *path;
- int i;
- int orig_level;
- struct btrfs_root_item *root_item = &root->root_item;
-
- path = btrfs_alloc_path();
- BUG_ON(!path);
-
- level = btrfs_header_level(root->node);
- orig_level = level;
- /* drop_progress == 0 means a fresh walk starting from the root node;
-  * otherwise resume from the saved key at the saved drop_level. */
- if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
- path->nodes[level] = root->node;
- extent_buffer_get(root->node);
- path->slots[level] = 0;
- } else {
- struct btrfs_key key;
- struct btrfs_disk_key found_key;
- struct extent_buffer *node;
-
- btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
- level = root_item->drop_level;
- path->lowest_level = level;
- wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (wret < 0) {
- ret = wret;
- goto out;
- }
- node = path->nodes[level];
- btrfs_node_key(node, &found_key, path->slots[level]);
- /* sanity: the resumed position must match the recorded progress key */
- WARN_ON(memcmp(&found_key, &root_item->drop_progress,
- sizeof(found_key)));
- }
- /* alternate down/up walks; a non-zero return from either means the
-  * traversal is finished (or failed -- ret carries the error). */
- while(1) {
- wret = walk_down_tree(trans, root, path, &level);
- if (wret < 0)
- ret = wret;
- if (wret != 0)
- break;
-
- wret = walk_up_tree(trans, root, path, &level);
- if (wret < 0)
- ret = wret;
- if (wret != 0)
- break;
- /*
- ret = -EAGAIN;
- break;
- */
- }
- /* release any buffers still referenced by the walk path */
- for (i = 0; i <= orig_level; i++) {
- if (path->nodes[i]) {
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- }
- }
-out:
- btrfs_free_path(path);
- return ret;
-}
-
#endif
int btrfs_free_block_groups(struct btrfs_fs_info *info)
break;
ret = get_state_private(&info->block_group_cache, start, &ptr);
if (!ret) {
- cache = (struct btrfs_block_group_cache *)ptr;
+ cache = (struct btrfs_block_group_cache *)
+ (uintptr_t)ptr;
if (cache->free_space_ctl) {
btrfs_remove_free_space_cache(cache);
kfree(cache->free_space_ctl);
return 0;
}
-int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *key)
+static int find_first_block_group(struct btrfs_root *root,
+ struct btrfs_path *path, struct btrfs_key *key)
{
int ret;
struct btrfs_key found_key;
cache->cached = 0;
cache->pinned = 0;
key.objectid = found_key.objectid + found_key.offset;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
cache->flags = btrfs_block_group_flags(&cache->item);
bit = 0;
if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
BUG_ON(ret);
bit = block_group_state_bits(type);
- set_extent_bits(block_group_cache, chunk_offset,
- chunk_offset + size - 1,
- bit | EXTENT_LOCKED, GFP_NOFS);
+ ret = set_extent_bits(block_group_cache, chunk_offset,
+ chunk_offset + size - 1,
+ bit | EXTENT_LOCKED, GFP_NOFS);
+ BUG_ON(ret);
- set_state_private(block_group_cache, chunk_offset,
- (unsigned long)cache);
+ ret = set_state_private(block_group_cache, chunk_offset,
+ (unsigned long)cache);
+ BUG_ON(ret);
set_avail_alloc_bits(fs_info, type);
return cache;
sizeof(cache->item));
BUG_ON(ret);
- finish_current_insert(trans, extent_root);
+ ret = finish_current_insert(trans, extent_root);
+ BUG_ON(ret);
ret = del_pending_extents(trans, extent_root);
+ BUG_ON(ret);
+
return 0;
}
alloc, mark_free);
}
-/* Removed helper: summed into *total the byte length of every
- * EXTENT_ITEM (key.offset = extent size) plus one leafsize per
- * METADATA_ITEM found by walking the extent tree from 'start',
- * leaf by leaf, until past start + len. */
-static int btrfs_count_extents_in_block_group(struct btrfs_root *root,
- struct btrfs_path *path, u64 start,
- u64 len,
- u64 *total)
-{
- struct btrfs_key key;
- struct extent_buffer *leaf;
- u64 bytes_used = 0;
- int ret;
- int slot;
-
- key.offset = 0;
- key.objectid = start;
- btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
- ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
- &key, path, 0, 0);
- if (ret < 0)
- return ret;
- while(1) {
- leaf = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- return ret;
- if (ret > 0)
- break;
- leaf = path->nodes[0];
- slot = path->slots[0];
- }
- btrfs_item_key_to_cpu(leaf, &key, slot);
- /* NOTE(review): '>' (not '>=') means an extent starting exactly at
-  * start + len was still counted -- looks off by one; moot now. */
- if (key.objectid > start + len)
- break;
- if (key.type == BTRFS_EXTENT_ITEM_KEY)
- bytes_used += key.offset;
- if (key.type == BTRFS_METADATA_ITEM_KEY)
- bytes_used += root->leafsize;
- path->slots[0]++;
- }
- *total = bytes_used;
- btrfs_release_path(root, path);
- return 0;
-}
-
-/* Removed checker: for each block group, compared the on-disk 'used'
- * field against bytes recounted from the extent tree and printed a
- * warning on mismatch. Also zeroed space_info->bytes_used as a side
- * effect. Always returned 0 (mismatch was report-only). */
-int btrfs_check_block_accounting(struct btrfs_root *root)
-{
- int ret;
- u64 start = 0;
- u64 bytes_used = 0;
- struct btrfs_path path;
- struct btrfs_block_group_cache *cache;
- struct btrfs_fs_info *fs_info = root->fs_info;
-
- btrfs_init_path(&path);
-
- while(1) {
- cache = btrfs_lookup_block_group(fs_info, start);
- if (!cache)
- break;
-
- ret = btrfs_count_extents_in_block_group(root, &path,
- cache->key.objectid,
- cache->key.offset,
- &bytes_used);
-
- if (ret == 0) {
- u64 on_disk = btrfs_block_group_used(&cache->item);
- if (on_disk != bytes_used) {
- fprintf(stderr, "bad block group accounting found %llu "
- "expected %llu block group %llu\n",
- (unsigned long long)bytes_used,
- (unsigned long long)on_disk,
- (unsigned long long)cache->key.objectid);
- }
- }
- start = cache->key.objectid + cache->key.offset;
-
- cache->space_info->bytes_used = 0;
- }
- return 0;
-}
-
/*
* Fixup block accounting. The initial block accounting created by
* make_block_groups isn't accuracy in this case.
path.slots[0]++;
}
btrfs_set_super_bytes_used(root->fs_info->super_copy, bytes_used);
- btrfs_release_path(root, &path);
+ btrfs_release_path(&path);
return 0;
}