// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
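
/*
 * Fill an in-memory extent descriptor.  @keep_clen only matters for
 * EX_READ extents of compressed files: when true, the caller is trimming
 * an existing extent and its compressed cluster length (c_len) must be
 * preserved; when false, c_len is cleared since the extent describes
 * ordinary, uncompressed blocks.
 */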
static void __set_extent_info(struct extent_info *ei,
				unsigned int fofs, unsigned int len,
				block_t blk, bool keep_clen,
				enum extent_type type)
{
	ei->fofs = fofs;
	ei->len = len;

	if (type == EX_READ) {
		ei->blk = blk;
		if (keep_clen)
			return;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		ei->c_len = 0;
#endif
	}
}
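
/*
 * An inode is eligible for the read extent cache only if the mount
 * enabled it, the inode has not given up caching (FI_NO_EXTENT), it is
 * a regular file, and it is not a compressed file on a writable image
 * (compressed extents are only cached on read-only images, where the
 * block layout can no longer change).
 */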
static bool __may_read_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return false;
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return false;
	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
			!f2fs_sb_has_readonly(sbi))
		return false;

	return S_ISREG(inode->i_mode);
}
static bool __may_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * for recovered files during mount do not create extents
	 * if shrinker is not registered.
	 */
	if (list_empty(&sbi->s_list))
		return false;

	if (type == EX_READ)
		return __may_read_extent_tree(inode);
	return false;
}
static void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (et->type != EX_READ)
		return;
	if (en->ei.len <= et->largest.len)
		return;

	et->largest = en->ei;
	et->largest_updated = true;
}
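
/*
 * Two read extents merge only if they are contiguous in both the file
 * and on disk: e.g. back = {fofs 0, len 4, blk 100} merges with
 * front = {fofs 4, len 2, blk 104}, but not with {fofs 4, len 2, blk 200}.
 * Compressed extents whose logical length differs from their cluster
 * length (c_len set, len != c_len) never merge.
 */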
static bool __is_extent_mergeable(struct extent_info *back,
		struct extent_info *front, enum extent_type type)
{
	if (type == EX_READ) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (back->c_len && back->len != back->c_len)
			return false;
		if (front->c_len && front->len != front->c_len)
			return false;
#endif
		return (back->fofs + back->len == front->fofs &&
				back->blk + back->len == front->blk);
	}
	return false;
}
static bool __is_back_mergeable(struct extent_info *cur,
		struct extent_info *back, enum extent_type type)
{
	return __is_extent_mergeable(back, cur, type);
}

static bool __is_front_mergeable(struct extent_info *cur,
		struct extent_info *front, enum extent_type type)
{
	return __is_extent_mergeable(cur, front, type);
}
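
/*
 * Lookup is layered: __lookup_rb_tree_fast() first probes the single
 * cached entry (the most recently hit node) and only on a miss does
 * __lookup_rb_tree_slow() walk the rb-tree from the root, so repeated
 * lookups around the same offset stay O(1).
 */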
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
					struct rb_root_cached *root,
					struct rb_node **parent,
					unsigned long long key, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (key < re->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			*leftmost = false;
		}
	}

	return p;
}
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			p = &(*p)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			p = &(*p)->rb_right;
			*leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return p;
}
/*
 * lookup rb entry in position of @ofs in rb-tree,
 * if hit, return the entry, otherwise, return NULL
 * @prev_entry: extent before ofs
 * @next_entry: extent after ofs
 * @insert_p: insert point for new extent at ofs,
 * in order to simplify the insertion after.
 * tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force, bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	if (leftmost)
		*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			pnode = &(*pnode)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			pnode = &(*pnode)->rb_right;
			if (leftmost)
				*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}
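
/*
 * Debug-only walk of the whole tree: with @check_key it verifies that
 * keys are non-decreasing from left to right; otherwise it verifies
 * that no entry's [ofs, ofs + len) range overlaps its successor's.
 * Compiled away unless CONFIG_F2FS_CHECK_FS is set.
 */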
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (check_key) {
			if (cur_re->key > next_re->key) {
				f2fs_info(sbi, "inconsistent rbtree, "
					"cur(%llu) next(%llu)",
					cur_re->key, next_re->key);
				return false;
			}
			goto next;
		}

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
				  cur_re->ofs, cur_re->len,
				  next_re->ofs, next_re->len);
			return false;
		}
next:
		cur = next;
	}
#endif
	return true;
}
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en;

	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&eti->total_ext_node);
	return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&eti->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}
/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	spin_lock(&eti->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&eti->extent_lock);

	__detach_extent_node(sbi, et, en);
}
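
/*
 * Locking order used throughout this file: et->lock (the per-tree
 * rwlock) is taken first, then eti->extent_lock (the global LRU list
 * spinlock), as in __release_extent_node() above.  The radix tree of
 * extent trees is protected separately by eti->extent_tree_lock.
 */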
static struct extent_tree *__grab_extent_tree(struct inode *inode,
						enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&eti->extent_tree_lock);
	et = radix_tree_lookup(&eti->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->type = type;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&eti->total_ext_tree);
	} else {
		atomic_dec(&eti->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&eti->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree[type] = et;

	return et;
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}
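
/*
 * The largest-extent shortcut is dropped whenever an update range
 * [fofs, fofs + len) intersects it.  E.g. largest = {fofs 10, len 5}
 * is invalidated by an update at fofs 12, len 1 (12 < 15 && 13 > 10),
 * but left untouched by one at fofs 20.
 */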
static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage,
					enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!__may_extent_tree(inode, type)) {
		/* drop largest read extent */
		if (type == EX_READ && i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		goto out;
	}

	et = __grab_extent_tree(inode, type);

	if (!i_ext || !i_ext->len)
		goto out;

	BUG_ON(type != EX_READ);

	get_read_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto unlock_out;

	en = __attach_extent_node(sbi, et, &ei, NULL,
				&et->root.rb_root.rb_node, true);
	if (en) {
		et->largest = en->ei;
		et->cached_en = en;

		spin_lock(&eti->extent_lock);
		list_add_tail(&en->list, &eti->extent_list);
		spin_unlock(&eti->extent_lock);
	}
unlock_out:
	write_unlock(&et->lock);
out:
	if (type == EX_READ && !F2FS_I(inode)->extent_tree[EX_READ])
		set_inode_flag(inode, FI_NO_EXTENT);
}
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	/* initialize read cache */
	__f2fs_init_extent_tree(inode, ipage, EX_READ);
}
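
/*
 * Lookup tries the cheapest sources first: the inode's single largest
 * read extent, then the tree's cached node, and finally a full rb-tree
 * walk.  A hit moves the node to the tail of the global LRU list so the
 * shrinker reclaims cold entries first.
 */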
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en;
	bool ret = false;

	if (!et)
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);

	read_lock(&et->lock);

	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi, type);
	else
		stat_inc_rbtree_node_hit(sbi, type);

	*ei = en->ei;
	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi, type);
	read_unlock(&et->lock);

	if (type == EX_READ)
		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
	return ret;
}
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.len += ei->len;
		if (et->type == EX_READ)
			next_ex->ei.blk = ei->blk;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	return en;
}
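
/*
 * Merging on update: a new extent is first glued onto its neighbours,
 * e.g. inserting {fofs 4, len 1, blk 104} between prev {fofs 0, len 4,
 * blk 100} and next {fofs 5, len 3, blk 105} collapses all three into
 * one node {fofs 0, len 8, blk 100}, releasing prev's rb_node.
 */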
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
						ei->fofs, &leftmost);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&eti->extent_lock);
	list_add_tail(&en->list, &eti->extent_list);
	et->cached_en = en;
	spin_unlock(&eti->extent_lock);
	return en;
}
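
/*
 * Range update algorithm: (1) find the first extent overlapping
 * [fofs, fofs + len - 1]; (2) for each overlapping extent, keep the
 * non-overlapping head and/or tail as separate extents (splitting into
 * 0, 1 or 2 parts; read extents shorter than F2FS_MIN_EXTENT_LEN are
 * not worth keeping) and release fully-covered nodes; (3) insert the
 * new mapping, merging with surviving neighbours where possible.
 * Example, with F2FS_MIN_EXTENT_LEN == 64: updating blocks [64, 65]
 * against {fofs 0, len 192} keeps {fofs 0, len 64} as head and
 * {fofs 66, len 126} as tail, plus the new 2-block extent.
 */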
static void __update_extent_tree_range(struct inode *inode,
			struct extent_info *tei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int fofs = tei->fofs, len = tei->len;
	unsigned int end = fofs + len;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	if (type == EX_READ)
		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
						tei->blk, 0);
	write_lock(&et->lock);

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
			write_unlock(&et->lock);
			return;
		}

		prev = et->largest;
		dei.len = 0;

		/*
		 * drop largest extent before lookup, in case it's already
		 * been shrunk from extent tree
		 */
		__drop_largest_extent(et, fofs, len);
	}

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, fofs >= org_end);

		if (fofs > dei.fofs && (type != EX_READ ||
				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
			en->ei.len = fofs - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && (type != EX_READ ||
				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
			if (parts) {
				__set_extent_info(&ei,
					end, org_end - end,
					end - dei.fofs + dei.blk, false,
					type);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				__set_extent_info(&en->ei,
					end, en->ei.len - (end - dei.fofs),
					en->ei.blk + (end - dei.fofs), true,
					type);
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers regarding the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in read extent cache */
	BUG_ON(type != EX_READ);

	if (tei->blk) {
		__set_extent_info(&ei, fofs, len, tei->blk, false, EX_READ);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
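
/*
 * For compressed files the cached extent also records c_len, the number
 * of blocks the cluster actually occupies on disk, so the read path can
 * tell how large the physical run backing @llen logical blocks is
 * without consulting the node page again.
 */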
#ifdef CONFIG_F2FS_FS_COMPRESSION
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
	struct extent_node *en = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
						blkaddr, c_len);

	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return;

	write_lock(&et->lock);

	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
				(struct rb_entry *)et->cached_en, fofs,
				(struct rb_entry **)&prev_en,
				(struct rb_entry **)&next_en,
				&insert_p, &insert_parent, false,
				&leftmost);
	if (en)
		goto unlock_out;

	__set_extent_info(&ei, fofs, llen, blkaddr, true, EX_READ);
	ei.c_len = c_len;

	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
				insert_p, insert_parent, leftmost);
unlock_out:
	write_unlock(&et->lock);
}
#endif
static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
{
	struct extent_info ei;

	if (!__may_extent_tree(dn->inode, type))
		return;

	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	ei.len = 1;

	if (type == EX_READ) {
		if (dn->data_blkaddr == NEW_ADDR)
			ei.blk = NULL_ADDR;
		else
			ei.blk = dn->data_blkaddr;
	}
	__update_extent_tree_range(dn->inode, &ei, type);
}
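
/*
 * Shrinking happens in two passes: first whole zombie trees (trees of
 * already-evicted inodes on eti->zombie_list) are freed, then individual
 * nodes are reclaimed from the head of the global LRU list until
 * nr_shrink entries are gone.  Both passes use trylock so the shrinker
 * never stalls behind foreground operations.
 */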
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
					enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!atomic_read(&eti->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&eti->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&eti->total_ext_tree);
		atomic_dec(&eti->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&eti->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&eti->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&eti->extent_list))
			break;
		en = list_first_entry(&eti->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &eti->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&eti->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&eti->extent_lock);
	}
	spin_unlock(&eti->extent_lock);

unlock_out:
	mutex_unlock(&eti->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);

	return node_cnt + tree_cnt;
}
/* read extent cache operations */
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_READ))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
}

void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_READ);
}

void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
		.blk = blkaddr,
	};

	if (!__may_extent_tree(dn->inode, EX_READ))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_READ);
}

unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
}
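
/*
 * Teardown comes in three strengths: __destroy_extent_node() frees the
 * cached nodes but keeps the tree; __drop_extent_tree() additionally
 * marks the inode FI_NO_EXTENT and clears the largest extent; and
 * __destroy_extent_tree() removes the tree itself, deferring to the
 * zombie list when the inode is still linked and nodes remain.
 */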
static unsigned int __destroy_extent_node(struct inode *inode,
					enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

void f2fs_destroy_extent_node(struct inode *inode)
{
	__destroy_extent_node(inode, EX_READ);
}
static void __drop_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	bool updated = false;

	if (!__may_extent_tree(inode, type))
		return;

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (type == EX_READ) {
		set_inode_flag(inode, FI_NO_EXTENT);
		if (et->largest.len) {
			et->largest.len = 0;
			updated = true;
		}
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	__drop_extent_tree(inode, EX_READ);
}
static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&eti->extent_tree_lock);
		list_add_tail(&et->list, &eti->zombie_list);
		atomic_inc(&eti->total_zombie_tree);
		mutex_unlock(&eti->extent_tree_lock);
		return;
	}

	/* free all extent info belong to this extent tree */
	node_cnt = __destroy_extent_node(inode, type);

	/* delete extent tree entry in radix tree */
	mutex_lock(&eti->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&eti->total_ext_tree);
	mutex_unlock(&eti->extent_tree_lock);

	F2FS_I(inode)->extent_tree[type] = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	__destroy_extent_tree(inode, EX_READ);
}
static void __init_extent_tree_info(struct extent_tree_info *eti)
{
	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
	mutex_init(&eti->extent_tree_lock);
	INIT_LIST_HEAD(&eti->extent_list);
	spin_lock_init(&eti->extent_lock);
	atomic_set(&eti->total_ext_tree, 0);
	INIT_LIST_HEAD(&eti->zombie_list);
	atomic_set(&eti->total_zombie_tree, 0);
	atomic_set(&eti->total_ext_node, 0);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
}
int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}