// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
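
/*
 * Generic rb_entry lookup helpers shared by the extent cache and other
 * f2fs rb-trees: try the cached entry first, then fall back to a full
 * rb-tree walk.
 */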
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}
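
/*
 * Find the insertion point for a key-ordered entry (used by trees that
 * are sorted by a 64-bit key, e.g. GC victim entries); *leftmost tracks
 * whether the new node would become the leftmost node of the tree.
 */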
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
					struct rb_root_cached *root,
					struct rb_node **parent,
					unsigned long long key, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (key < re->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			*leftmost = false;
		}
	}

	return p;
}
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			p = &(*p)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			p = &(*p)->rb_right;
			*leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return p;
}
/*
 * lookup rb entry in position of @ofs in rb-tree,
 * if hit, return the entry, otherwise, return NULL
 * @prev_ex: extent before ofs
 * @next_ex: extent after ofs
 * @insert_p: insert point for new extent at ofs
 * in order to simplify the insertion afterwards.
 * tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force, bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	if (leftmost)
		*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			pnode = &(*pnode)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			pnode = &(*pnode)->rb_right;
			if (leftmost)
				*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	/* look up the extent located at or nearest to @ofs */
	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}
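
/*
 * Debug-only sanity check: with CONFIG_F2FS_CHECK_FS, an in-order walk of
 * the tree must yield non-overlapping, ascending extents (or non-decreasing
 * keys when @check_key is set).
 */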
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (check_key) {
			if (cur_re->key > next_re->key) {
				f2fs_info(sbi, "inconsistent rbtree, "
					"cur(%llu) next(%llu)",
					cur_re->key, next_re->key);
				return false;
			}
			goto next;
		}

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}
next:
		cur = next;
	}
#endif
	return true;
}
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
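
/*
 * Allocate an extent_node for @ei and link it at the given rb-tree
 * position, updating the per-tree and per-sb node counters.
 */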
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_node *en;

	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}
/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}
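
/*
 * Get the inode's extent_tree, creating it and registering it in the
 * per-sb radix tree on first use, or pulling it off the zombie list if
 * it was left there by a previous eviction.
 */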
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* not freed until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}
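
/* Invalidate the cached largest extent if it overlaps [fofs, fofs + len). */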
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}
static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		return;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
}
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	__f2fs_init_extent_tree(inode, ipage);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);
}
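
/*
 * Lookup order: the cached largest extent first, then the cached node,
 * then a full rb-tree walk.  A hit refreshes the node's position at the
 * tail of the global LRU extent list.
 */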
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}
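
/*
 * Try to merge @ei into its immediate neighbours; if it bridges both of
 * them, the prev node is released and the next node absorbs the whole
 * range.  Returns the merged node, or NULL if no merge was possible.
 */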
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);
		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
						ei->fofs, &leftmost);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}
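
/*
 * Update the tree for [fofs, fofs + len): drop the largest-extent hint,
 * split or remove overlapping nodes, then merge or insert the new mapping
 * when blkaddr is valid.
 */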
static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers related to the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
void f2fs_update_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);

	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return;

	write_lock(&et->lock);

	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
				(struct rb_entry *)et->cached_en, fofs,
				(struct rb_entry **)&prev_en,
				(struct rb_entry **)&next_en,
				&insert_p, &insert_parent, false,
				&leftmost);
	if (en)
		goto unlock_out;

	set_extent_info(&ei, fofs, blkaddr, llen);
	ei.c_len = c_len;

	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
				insert_p, insert_parent, leftmost);
unlock_out:
	write_unlock(&et->lock);
}
#endif
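
/*
 * Memory shrinker entry: first free whole zombie trees (trees whose inodes
 * have already been evicted), then reclaim individual nodes from the global
 * LRU extent list, up to nr_shrink entries in total.
 */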
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}
unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}
void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	bool updated = false;

	if (!f2fs_may_extent_tree(inode))
		return;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (et->largest.len) {
		et->largest.len = 0;
		updated = true;
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}
int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}
void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}