btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
+int btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 bytes_used,
+ u64 type, u64 chunk_tree, u64 chunk_objectid,
+ u64 size);
static int cache_block_group(struct btrfs_root *root,
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
- if ((bits & BLOCK_GROUP_DATA) &&
- (cache->flags & BTRFS_BLOCK_GROUP_DATA))
- return 1;
- if ((bits & BLOCK_GROUP_METADATA) &&
- (cache->flags & BTRFS_BLOCK_GROUP_METADATA))
- return 1;
- if ((bits & BLOCK_GROUP_SYSTEM) &&
- (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
- return 1;
- return 0;
+ return (cache->flags & bits) != 0;
}
static int noinline find_search_start(struct btrfs_root *root,
return num;
}
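+/*
+ * convert on-disk BTRFS_BLOCK_GROUP_* flags into the in-memory
+ * BLOCK_GROUP_* extent state bits used in the block group cache tree
+ */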
+static int block_group_state_bits(u64 flags)
+{
+ int bits = 0;
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ bits |= BLOCK_GROUP_DATA;
+ if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ bits |= BLOCK_GROUP_METADATA;
+ if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ bits |= BLOCK_GROUP_SYSTEM;
+ return bits;
+}
+
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache
*hint, u64 search_start,
if (!owner)
factor = 8;
- bit = data;
+ bit = block_group_state_bits(data);
if (search_start && search_start < total_fs_bytes) {
struct btrfs_block_group_cache *shint;
free_check = cache->key.offset;
else
free_check = div_factor(cache->key.offset, factor);
+
if (used + cache->pinned < free_check) {
found_group = cache;
goto found;
}
+ if (full_search) {
+ printk("failed on cache %Lu used %Lu total %Lu\n",
+ cache->key.objectid, used, cache->key.offset);
+ }
cond_resched();
}
if (!full_search) {
return werr;
}
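+/*
+ * walk the per-fs list of space infos and return the one whose
+ * flags exactly match the requested allocation profile
+ */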
+static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
+ u64 flags)
+{
+ struct list_head *head = &info->space_info;
+ struct list_head *cur;
+ struct btrfs_space_info *found;
+ list_for_each(cur, head) {
+ found = list_entry(cur, struct btrfs_space_info, list);
+ if (found->flags == flags)
+ return found;
+ }
+ return NULL;
+}
+
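+/*
+ * allocate a new chunk once a space info climbs past 70% full,
+ * and record the resulting block group in the extent tree
+ */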
+static int do_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root, u64 alloc_bytes,
+ u64 flags)
+{
+ struct btrfs_space_info *space_info;
+ u64 thresh;
+ u64 start;
+ u64 num_bytes;
+ int ret;
+
+ space_info = __find_space_info(extent_root->fs_info, flags);
+ BUG_ON(!space_info);
+
+ if (space_info->full)
+ return 0;
+
+ thresh = div_factor(space_info->total_bytes, 7);
+ if ((space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
+ thresh)
+ return 0;
+
+ ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
+ if (ret == -ENOSPC) {
+printk("space info full %Lu\n", flags);
+ space_info->full = 1;
+ return 0;
+ }
+
+ BUG_ON(ret);
+
+ ret = btrfs_make_block_group(trans, extent_root, 0, flags,
+ extent_root->fs_info->chunk_root->root_key.objectid,
+ start, num_bytes);
+ BUG_ON(ret);
+ return 0;
+}
+
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc,
num_bytes = min(total, cache->key.offset - byte_in_group);
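+ /* keep the per-space_info usage counters in sync */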
if (alloc) {
old_val += num_bytes;
+ cache->space_info->bytes_used += num_bytes;
} else {
old_val -= num_bytes;
+ cache->space_info->bytes_used -= num_bytes;
if (mark_free) {
set_extent_dirty(&info->free_space_cache,
bytenr, bytenr + num_bytes - 1,
}
return 0;
}
+
static int update_pinned_extents(struct btrfs_root *root,
u64 bytenr, u64 num, int pin)
{
(bytenr - cache->key.objectid));
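+ /* pinned bytes are tracked per block group, per space info and globally */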
if (pin) {
cache->pinned += len;
+ cache->space_info->bytes_pinned += len;
fs_info->total_pinned += len;
} else {
cache->pinned -= len;
+ cache->space_info->bytes_pinned -= len;
fs_info->total_pinned -= len;
}
bytenr += len;
goto new_group;
}
- if (!(data & BLOCK_GROUP_DATA)) {
+ if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
block_group = btrfs_lookup_block_group(info, ins->objectid);
if (block_group)
trans->block_group = block_group;
struct btrfs_path *path;
struct btrfs_key keys[2];
- if (data)
- data = BLOCK_GROUP_DATA;
- else if (root == root->fs_info->chunk_root)
- data = BLOCK_GROUP_SYSTEM;
- else
- data = BLOCK_GROUP_METADATA;
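+ /* pick the block group type this allocation should come from */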
+ if (data) {
+ data = BTRFS_BLOCK_GROUP_DATA;
+ } else if (root == root->fs_info->chunk_root) {
+ data = BTRFS_BLOCK_GROUP_SYSTEM;
+ } else {
+ data = BTRFS_BLOCK_GROUP_METADATA;
+ }
+
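+ /*
+ * cow roots consume metadata while recording allocations, so
+ * make sure the metadata space has room before allocating the
+ * target group itself
+ */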
+ if (root->ref_cows) {
+ if (data != BTRFS_BLOCK_GROUP_METADATA) {
+ ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+ num_bytes,
+ BTRFS_BLOCK_GROUP_METADATA);
+ BUG_ON(ret);
+ }
+ ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+ num_bytes, data);
+ BUG_ON(ret);
+ }
new_hint = max(hint_byte, root->fs_info->alloc_start);
if (new_hint < btrfs_super_total_bytes(&info->super_copy))
return ret;
}
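+/*
+ * add bytes to an existing space info with matching flags, or
+ * create a new one if no space info exists for these flags yet
+ */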
+static int update_space_info(struct btrfs_fs_info *info, u64 flags,
+ u64 total_bytes, u64 bytes_used,
+ struct btrfs_space_info **space_info)
+{
+ struct btrfs_space_info *found;
+
+ found = __find_space_info(info, flags);
+ if (found) {
+ found->total_bytes += total_bytes;
+ found->bytes_used += bytes_used;
+ WARN_ON(found->total_bytes < found->bytes_used);
+ *space_info = found;
+ return 0;
+ }
+ found = kmalloc(sizeof(*found), GFP_NOFS);
+ if (!found)
+ return -ENOMEM;
+
+ list_add(&found->list, &info->space_info);
+ found->flags = flags;
+ found->total_bytes = total_bytes;
+ found->bytes_used = bytes_used;
+ found->bytes_pinned = 0;
+ found->full = 0;
+ *space_info = found;
+ return 0;
+}
+
int btrfs_read_block_groups(struct btrfs_root *root)
{
struct btrfs_path *path;
int bit;
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *info = root->fs_info;
+ struct btrfs_space_info *space_info;
struct extent_io_tree *block_group_cache;
struct btrfs_key key;
struct btrfs_key found_key;
bit = BLOCK_GROUP_METADATA;
}
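+ /* account this block group against the matching space info */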
+ ret = update_space_info(info, cache->flags, found_key.offset,
+ btrfs_block_group_used(&cache->item),
+ &space_info);
+ BUG_ON(ret);
+ cache->space_info = space_info;
+
/* use EXTENT_LOCKED to prevent merging */
set_extent_bits(block_group_cache, found_key.objectid,
found_key.objectid + found_key.offset - 1,
btrfs_free_path(path);
return ret;
}
+
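+/*
+ * create the in-memory and on-disk records for a new block group
+ * covering [chunk_objectid, chunk_objectid + size)
+ */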
+int btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 bytes_used,
+ u64 type, u64 chunk_tree, u64 chunk_objectid,
+ u64 size)
+{
+ int ret;
+ int bit = 0;
+ struct btrfs_root *extent_root;
+ struct btrfs_block_group_cache *cache;
+ struct extent_io_tree *block_group_cache;
+
+ extent_root = root->fs_info->extent_root;
+ block_group_cache = &root->fs_info->block_group_cache;
+
+ cache = kmalloc(sizeof(*cache), GFP_NOFS);
+ BUG_ON(!cache);
+ cache->key.objectid = chunk_objectid;
+ cache->key.offset = size;
+ cache->cached = 0;
+ cache->pinned = 0;
+ btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
+ memset(&cache->item, 0, sizeof(cache->item));
+ btrfs_set_block_group_used(&cache->item, bytes_used);
+ btrfs_set_block_group_chunk_tree(&cache->item, chunk_tree);
+ btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
+ cache->flags = type;
+ btrfs_set_block_group_flags(&cache->item, type);
+
+ ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
+ &cache->space_info);
+ BUG_ON(ret);
+
+ if (type & BTRFS_BLOCK_GROUP_DATA) {
+ bit = BLOCK_GROUP_DATA;
+ } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
+ bit = BLOCK_GROUP_SYSTEM;
+ } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
+ bit = BLOCK_GROUP_METADATA;
+ }
+ set_extent_bits(block_group_cache, chunk_objectid,
+ chunk_objectid + size - 1,
+ bit | EXTENT_LOCKED, GFP_NOFS);
+ set_state_private(block_group_cache, chunk_objectid,
+ (unsigned long)cache);
+
+ ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
+ sizeof(cache->item));
+ BUG_ON(ret);
+
+ finish_current_insert(trans, extent_root);
+ ret = del_pending_extents(trans, extent_root);
+ BUG_ON(ret);
+ return 0;
+}
btrfs_release_path(root, path);
BUG_ON(*start < search_start);
- if (*start + num_bytes >= search_end) {
+ if (*start + num_bytes > search_end) {
ret = -ENOSPC;
goto error;
}
return -ENOMEM;
ret = find_free_dev_extent(trans, device, path, num_bytes, start);
- if (ret)
+ if (ret) {
goto err;
+ }
key.objectid = device->devid;
key.offset = *start;
return ret;
}
-static struct btrfs_device *next_device(struct list_head *head,
- struct list_head *last)
-{
- struct list_head *next = last->next;
- struct btrfs_device *dev;
-
- if (list_empty(head))
- return NULL;
-
- if (next == head)
- next = next->next;
-
- dev = list_entry(next, struct btrfs_device, dev_list);
- return dev;
-}
-
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
u64 *objectid)
{
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 *start,
- u64 *num_bytes, u32 type)
+ u64 *num_bytes, u64 type)
{
u64 dev_offset;
struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
struct btrfs_stripe *stripes;
struct btrfs_device *device = NULL;
struct btrfs_chunk *chunk;
+ struct list_head private_devs;
struct list_head *dev_list = &extent_root->fs_info->devices;
- struct list_head *last_dev = extent_root->fs_info->last_device;
+ struct list_head *cur;
struct extent_map_tree *em_tree;
struct map_lookup *map;
struct extent_map *em;
u64 physical;
u64 calc_size = 1024 * 1024 * 1024;
- int num_stripes;
+ u64 avail;
+ u64 max_avail = 0;
+ int num_stripes = 1;
+ int looped = 0;
int ret;
- int index = 0;
+ int index;
struct btrfs_key key;
+ if (list_empty(dev_list))
+ return -ENOSPC;
+again:
+ INIT_LIST_HEAD(&private_devs);
+ cur = dev_list->next;
+ index = 0;
+ /* build a private list of devices we will allocate from */
+ while(index < num_stripes) {
+ device = list_entry(cur, struct btrfs_device, dev_list);
+ avail = device->total_bytes - device->bytes_used;
+ cur = cur->next;
+ if (avail > max_avail)
+ max_avail = avail;
+ if (avail >= calc_size) {
+ list_move_tail(&device->dev_list, &private_devs);
+ index++;
+ }
+ if (cur == dev_list)
+ break;
+ }
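+ /*
+ * not enough devices could hold a full calc_size stripe; put
+ * the devices back and retry once with the largest free span
+ * seen before giving up
+ */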
+ if (index < num_stripes) {
+ list_splice(&private_devs, dev_list);
+ if (!looped && max_avail > 0) {
+ looped = 1;
+ calc_size = max_avail;
+ goto again;
+ }
+ return -ENOSPC;
+ }
ret = find_next_chunk(chunk_root, &key.objectid);
if (ret)
return ret;
- num_stripes = 1;
chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
if (!chunk)
return -ENOMEM;
stripes = &chunk->stripe;
*num_bytes = calc_size;
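+ /*
+ * allocate a dev extent on each chosen device, returning each
+ * device to the main list as it is consumed
+ */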
+ index = 0;
while(index < num_stripes) {
- device = next_device(dev_list, last_dev);
- BUG_ON(!device);
- last_dev = &device->dev_list;
- extent_root->fs_info->last_device = last_dev;
+ BUG_ON(list_empty(&private_devs));
+ cur = private_devs.next;
+ device = list_entry(cur, struct btrfs_device, dev_list);
+ list_move_tail(&device->dev_list, dev_list);
ret = btrfs_alloc_dev_extent(trans, device,
key.objectid,
physical = dev_offset;
index++;
}
+ BUG_ON(!list_empty(&private_devs));
/* key.objectid was set above */
key.offset = *num_bytes;
int ret;
devid = btrfs_device_id(leaf, dev_item);
- if (btrfs_find_device(root, devid))
- return 0;
-
- device = kmalloc(sizeof(*device), GFP_NOFS);
- if (!device)
- return -ENOMEM;
+ device = btrfs_find_device(root, devid);
+ if (!device) {
+ device = kmalloc(sizeof(*device), GFP_NOFS);
+ if (!device)
+ return -ENOMEM;
+ list_add(&device->dev_list, &root->fs_info->devices);
+ }
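+ /* refresh the in-memory device from the item whether it was known or new */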
fill_device_from_item(leaf, dev_item, device);
device->dev_root = root->fs_info->dev_root;
device->bdev = root->fs_info->sb->s_bdev;
- list_add(&device->dev_list, &root->fs_info->devices);
memcpy(&device->dev_key, key, sizeof(*key));
ret = 0;
#if 0