btrfs: qgroup: allow to unreserve range without releasing other ranges
Author:     Qu Wenruo <wqu@suse.com>
AuthorDate: Wed, 8 Jul 2020 06:24:45 +0000 (14:24 +0800)
Commit:     David Sterba <dsterba@suse.com>
CommitDate: Mon, 27 Jul 2020 10:55:42 +0000 (12:55 +0200)
[PROBLEM]
Before this patch, when btrfs_qgroup_reserve_data() fails, we free all
reserved space of the changeset.

For example:
ret = btrfs_qgroup_reserve_data(inode, changeset, 0, SZ_1M);
ret = btrfs_qgroup_reserve_data(inode, changeset, SZ_1M, SZ_1M);
ret = btrfs_qgroup_reserve_data(inode, changeset, SZ_2M, SZ_1M);

If the last btrfs_qgroup_reserve_data() fails, it releases the entire
[0, 3M) range.

This behavior is mostly OK for now: when we hit -EDQUOT, we normally go
through error handling and need to release all reserved ranges anyway.

But this also means the following call is not possible:

ret = btrfs_qgroup_reserve_data();
if (ret == -EDQUOT) {
/* Do something to free some qgroup space */
ret = btrfs_qgroup_reserve_data();
}

Because if the first btrfs_qgroup_reserve_data() fails, it frees all
qgroup space already reserved in the changeset, so there is nothing
left for the retry to build on.

[CAUSE]
This is because we release all reserved ranges when
btrfs_qgroup_reserve_data() fails.

[FIX]
This patch implements a new function, qgroup_unreserve_range(), which
iterates through the ulist nodes, finds the nodes inside the failed
range, clears their EXTENT_QGROUP_RESERVED bits in the io_tree, and
decreases extent_changeset::bytes_changed accordingly, so the changeset
is reverted to its previous state.

This allows later patches to retry btrfs_qgroup_reserve_data() when
-EDQUOT happens.
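
With this in place, a later caller can retry just the failed range after
freeing some qgroup space, without disturbing ranges already tracked in
the same changeset. A minimal sketch of that pattern (not part of this
patch; free_some_qgroup_space() is a hypothetical stand-in for whatever
flushing or committing a later patch chooses to do):

	/* Sketch only: retry one range after -EDQUOT; the helper is hypothetical. */
	static int reserve_range_with_retry(struct btrfs_inode *inode,
					    struct extent_changeset **reserved,
					    u64 start, u64 len)
	{
		int ret;

		ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
		if (ret != -EDQUOT)
			return ret;

		/*
		 * Try to free some qgroup space (e.g. flush delalloc, commit a
		 * transaction), then retry. Ranges reserved earlier in @reserved
		 * stay intact thanks to qgroup_unreserve_range().
		 */
		free_some_qgroup_space(inode->root);
		return btrfs_qgroup_reserve_data(inode, reserved, start, len);
	}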

Suggested-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>

diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index ee0ad33..054f36d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3447,6 +3447,73 @@ btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
        }
 }
 
+#define rbtree_iterate_from_safe(node, next, start)                            \
+       for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
+
+static int qgroup_unreserve_range(struct btrfs_inode *inode,
+                                 struct extent_changeset *reserved, u64 start,
+                                 u64 len)
+{
+       struct rb_node *node;
+       struct rb_node *next;
+       struct ulist_node *entry = NULL;
+       int ret = 0;
+
+       node = reserved->range_changed.root.rb_node;
+       while (node) {
+               entry = rb_entry(node, struct ulist_node, rb_node);
+               if (entry->val < start)
+                       node = node->rb_right;
+               else if (entry->val > start)
+                       node = node->rb_left;
+               else
+                       break;
+       }
+
+       /* Empty changeset */
+       if (!entry)
+               return 0;
+
+       if (entry->val > start && rb_prev(&entry->rb_node))
+               entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
+                                rb_node);
+
+       rbtree_iterate_from_safe(node, next, &entry->rb_node) {
+               u64 entry_start;
+               u64 entry_end;
+               u64 entry_len;
+               int clear_ret;
+
+               entry = rb_entry(node, struct ulist_node, rb_node);
+               entry_start = entry->val;
+               entry_end = entry->aux;
+               entry_len = entry_end - entry_start + 1;
+
+               if (entry_start >= start + len)
+                       break;
+               if (entry_start + entry_len <= start)
+                       continue;
+               /*
+                * Now the entry is in [start, start + len), revert the
+                * EXTENT_QGROUP_RESERVED bit.
+                */
+               clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
+                                             entry_end, EXTENT_QGROUP_RESERVED);
+               if (!ret && clear_ret < 0)
+                       ret = clear_ret;
+
+               ulist_del(&reserved->range_changed, entry->val, entry->aux);
+               if (likely(reserved->bytes_changed >= entry_len)) {
+                       reserved->bytes_changed -= entry_len;
+               } else {
+                       WARN_ON(1);
+                       reserved->bytes_changed = 0;
+               }
+       }
+
+       return ret;
+}
+
 /*
  * Reserve qgroup space for range [start, start + len).
  *
@@ -3457,18 +3524,14 @@ btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
  * Return <0 for error (including -EDQUOT)
  *
  * NOTE: this function may sleep for memory allocation.
- *       if btrfs_qgroup_reserve_data() is called multiple times with
- *       same @reserved, caller must ensure when error happens it's OK
- *       to free *ALL* reserved space.
  */
 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
                        struct extent_changeset **reserved_ret, u64 start,
                        u64 len)
 {
        struct btrfs_root *root = inode->root;
-       struct ulist_node *unode;
-       struct ulist_iterator uiter;
        struct extent_changeset *reserved;
+       bool new_reserved = false;
        u64 orig_reserved;
        u64 to_reserve;
        int ret;
@@ -3481,6 +3544,7 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
        if (WARN_ON(!reserved_ret))
                return -EINVAL;
        if (!*reserved_ret) {
+               new_reserved = true;
                *reserved_ret = extent_changeset_alloc();
                if (!*reserved_ret)
                        return -ENOMEM;
@@ -3496,7 +3560,7 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
        trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
                                        to_reserve, QGROUP_RESERVE);
        if (ret < 0)
-               goto cleanup;
+               goto out;
        ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
        if (ret < 0)
                goto cleanup;
@@ -3504,15 +3568,13 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
        return ret;
 
 cleanup:
-       /* cleanup *ALL* already reserved ranges */
-       ULIST_ITER_INIT(&uiter);
-       while ((unode = ulist_next(&reserved->range_changed, &uiter)))
-               clear_extent_bit(&inode->io_tree, unode->val,
-                                unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
-       /* Also free data bytes of already reserved one */
-       btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
-                                 orig_reserved, BTRFS_QGROUP_RSV_DATA);
-       extent_changeset_release(reserved);
+       qgroup_unreserve_range(inode, reserved, start, len);
+out:
+       if (new_reserved) {
+               extent_changeset_release(reserved);
+               kfree(reserved);
+               *reserved_ret = NULL;
+       }
        return ret;
 }
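
For illustration, here is how the three-call example from the problem
description behaves once the fix is applied (the -EDQUOT result of the
third call is an assumption made for the example; this code is not part
of the patch):

	static void example_partial_unreserve(struct btrfs_inode *inode)
	{
		struct extent_changeset *changeset = NULL;
		int ret;

		ret = btrfs_qgroup_reserve_data(inode, &changeset, 0, SZ_1M);     /* 0, [0, 1M) reserved */
		ret = btrfs_qgroup_reserve_data(inode, &changeset, SZ_1M, SZ_1M); /* 0, [1M, 2M) reserved */
		ret = btrfs_qgroup_reserve_data(inode, &changeset, SZ_2M, SZ_1M); /* assume -EDQUOT */

		/*
		 * Only [2M, 3M) was unreserved by qgroup_unreserve_range(); the
		 * EXTENT_QGROUP_RESERVED bits and bytes_changed for [0, 2M) remain,
		 * so the caller can free some qgroup space and retry just [2M, 3M).
		 */
	}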