/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
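
/*
 * Look up the delayed node cached on @btrfs_inode, falling back to the
 * per-root radix tree, and take a reference on it.  Returns NULL if the
 * inode has no delayed node or the node is already being freed.
 */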
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
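
/*
 * Return the first node on the delayed root's node list, holding an extra
 * reference, or NULL if the list is empty.
 */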
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
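
/*
 * Drop one reference on @delayed_node.  While other references remain, the
 * node is requeued (also onto the prepare list if @mod) when it still has
 * pending items, or dequeued when it has none.  When the last reference is
 * dropped, the node is removed from the radix tree and freed.
 */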
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}
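
/*
 * Link @ins into the insertion or deletion rb-tree of @delayed_node,
 * ordered by key.  Returns -EEXIST if an item with the same key is
 * already queued.
 */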
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
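
/*
 * Account one finished delayed item: bump the sequence counter, drop the
 * pending-item counter, and wake up waiters in btrfs_balance_delayed_items()
 * when we fall below the background threshold or complete a batch.
 */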
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
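
/*
 * Move the metadata reservation for one tree operation from the
 * transaction's block reserve over to the global delayed_block_rsv, so the
 * delayed item can be applied to the tree later, outside the context of the
 * transaction that queued it.
 */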
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}
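
/*
 * Reserve space in the delayed_block_rsv for a delayed inode update, either
 * by migrating a reservation the caller already holds or by making a fresh
 * no-flush reservation.
 */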
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&inode->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &inode->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&inode->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but that might cause the task
	 * to sleep, so we set all the locked nodes in the path to blocking
	 * locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
/*
 * This helper does simple insertions that needn't extend the item for new
 * data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
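
/*
 * Delete from the leaf the batch of contiguous dir index items that match
 * the queued deletion items starting at @item, then release those delayed
 * items along with their metadata reservations.
 */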
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}
static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
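
/*
 * Copy the in-memory inode item over the on-disk one and, if a delayed
 * iref deletion was requested, delete the single remaining inode ref item
 * as well (delayed iref deletion only happens for inodes with one link).
 */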
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case where several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
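
/*
 * Worker for the delayed_workers workqueue: commit prepared delayed nodes
 * until the pending-item count drops below half of BTRFS_DELAYED_BACKGROUND,
 * or until async_work->nr nodes have been flushed when a limit was given.
 */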
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
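
/*
 * Throttle delayed item producers: once the pending-item count reaches
 * BTRFS_DELAYED_BACKGROUND, kick the async worker, and once it reaches
 * BTRFS_DELAYED_WRITEBACK, wait until the worker has made some progress.
 */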
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * we reserved enough space when we started the transaction, so
	 * reserving metadata for this item cannot fail.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * we reserved enough space when we started the transaction, so
	 * reserving metadata for this item cannot fail.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, no new directory index
	 * can be added to the delayed node and index_cnt cannot be updated
	 * now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed.
	 *
	 * Besides that, this function is used to read the dir; we do not
	 * insert/delete delayed items during this period, so we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}
void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible, so we needn't
	 * lock them. And since we hold the i_mutex of the directory, nobody
	 * can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
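
/*
 * Drop everything queued on @delayed_node: every insertion and deletion
 * item (with their metadata reservations), a pending iref deletion and a
 * dirty inode item.  Used when an inode is evicted or a root is torn down.
 */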
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			refcount_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}