// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static unsigned int count_bits(const unsigned long *addr,
			unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;

	wait_ms = gc_th->min_sleep_time;
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);

		if (kthread_should_stop())

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GC too frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions, so wait a while to collect more dirty
		 * segments before collecting them.
		 */
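		/*
		 * Urgent-high mode forces GC: use the shortest sleep interval
		 * and take gc_lock unconditionally.
		 */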
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);

		if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);

		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
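	/* the default sleep times bound the adaptive interval used by gc_thread_func() */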
	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		sbi->gc_thread = NULL;

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	kthread_stop(gc_th->f2fs_gc_task);
	sbi->gc_thread = NULL;

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
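
	/* BG_GC defaults to cost-benefit, FG_GC to greedy; sbi->gc_mode may override this below */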
	switch (sbi->gc_mode) {

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
	/*
	 * Adjust the candidate range; all dirty segments should be selectable
	 * for foreground GC and urgent GC.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* in no_heap mode, search the beginning (hot/small) space first */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC; those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time has been changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);
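
	/*
	 * Cost-benefit: the benefit grows with segment age and with the share
	 * of invalid blocks (100 - u); returning UINT_MAX minus the benefit
	 * turns the highest benefit into the lowest cost, since victim
	 * selection always picks the minimum-cost candidate.
	 */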
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);

static unsigned int count_bits(const unsigned long *addr,
			unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just picks a victim segment and does not
 * remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;

	if (p.max_search == 0)

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;

		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
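
	/*
	 * Scan the dirty bitmap in ofs_unit-sized strides and keep the
	 * minimum-cost candidate; the search stops after max_search units.
	 */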
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;

		p.offset = segno + p.ofs_unit;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * Skip selecting an invalid segno (one that failed the block
		 * validity check during GC) to avoid an endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
					get_ckpt_valid_blocks(sbi, segno) &&
					p.alloc_mode != SSR))
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {

		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);

	if (p.min_segno != NULL_SEGNO) {
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
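			/*
			 * FG_GC consumes the victim section right away, while
			 * BG_GC only marks it in victim_secmap so a later
			 * FG_GC pass can reuse it via check_bg_victims().
			 */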
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);

	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
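
/*
 * gc_inode_list caches the inode references taken during one GC pass: the
 * radix tree indexes them by ino for fast lookup, while the list lets
 * put_gc_inode() drop every reference at the end of the pass.
 */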
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {

	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		kmem_cache_free(f2fs_inode_entry_slab, ie);

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);

/*
 * This function compares the node address recorded in the summary with that
 * in the NAT. If it is valid, the node is migrated with cold status;
 * otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	bool fggc = (gc_type == FG_GC);

	start_addr = START_BLOCK(sbi, segno);
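
	/*
	 * The segment is walked in three phases: phase 0 reads ahead the NAT
	 * blocks, phase 1 reads ahead the node pages, and phase 2 actually
	 * migrates the valid node blocks.
	 */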
	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))

		if (check_valid_map(sbi, segno, off) == 0)

			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,

			f2fs_ra_node_page(sbi, nid);

		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
		stat_inc_node_blk_count(sbi, 1, gc_type);

		atomic_dec(&sbi->wb_sync_req[NODE]);

/*
 * Calculate the start block index that the given node offset refers to.
 * Be careful: the caller should pass a node offset only for direct node
 * blocks. If a node offset pointing to another type of node block, such as
 * an indirect or double indirect node block, is given, it is a caller's bug.
 */
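/*
 * For example, node_ofs 1 and 2 are the first two direct node blocks, so the
 * data they address starts at ADDRS_PER_INODE(inode) and
 * ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK(inode) respectively.
 */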
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;

	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
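
/*
 * is_alive() checks whether the block at @blkaddr is still referenced by the
 * node recorded in its summary entry; stale blocks must not be migrated.
 */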
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
						blkaddr, source_blkaddr, segno);
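
/*
 * ra_data_block() pre-reads one block of a post-read (e.g. encrypted) inode
 * into META_MAPPING so that move_data_block() can later copy the raw block
 * without decrypting it.
 */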
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.encrypted_page = NULL,

	page = f2fs_grab_cache_page(mapping, index, true);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {

	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * Don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {

	err = f2fs_submit_page_bio(&fio);
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	f2fs_put_page(fio.encrypted_page, 1);

	f2fs_put_page(page, 1);

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.encrypted_page = NULL,
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct page *page, *mpage;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);

	/*
	 * Don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
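
	/* the summary ties the relocated block back to its owning node and offset */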
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
			f2fs_put_page(mpage, 1);

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			f2fs_put_page(mpage, 1);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		f2fs_put_page(mpage, 1);

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_page(fio.encrypted_page, 1);

		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,

		up_write(&fio.sbi->io_order_lock);

	f2fs_put_page(page, 1);

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	page = f2fs_get_lock_data_page(inode, bidx, true);
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;

	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {

		set_page_dirty(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);

				set_page_dirty(page);

	f2fs_put_page(page, 1);

/*
 * This function tries to get the parent node of a victim data block and checks
 * whether the block is still valid. If it is valid, the block is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid, or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;

	start_addr = START_BLOCK(sbi, segno);
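
	/*
	 * The segment is walked in five phases: 0 readahead NAT blocks,
	 * 1 readahead dnode pages, 2 readahead inode pages, 3 get the inodes
	 * and readahead (or pre-read) their data pages, 4 move the data blocks.
	 */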
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment has become fully valid due to a
		 * race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
				get_valid_blocks(sbi, segno, true) ==

		if (check_valid_map(sbi, segno, off) == 0)

			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,

			f2fs_ra_node_page(sbi, nid);

		/* Get an inode by ino, checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))

			f2fs_ra_node_page(sbi, dni.ino);

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode)) {
				set_sbi_flag(sbi, SBI_NEED_FSCK);

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				sbi->skipped_gc_rwsem++;

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				add_gc_inode(gc_list, inode);

			data_page = f2fs_get_read_data_page(inode,
					start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);

		inode = find_gc_inode(gc_list, dni.ino);
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);

				/* wait for all in-flight AIO data */
				inode_dio_wait(inode);

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)

			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
						gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))

				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);

			stat_inc_data_blk_count(sbi, 1, gc_type);

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
	struct sit_info *sit_i = SIT_I(sbi);

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);

		unlock_page(sum_page);

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)

		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)

		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);

		/*
		 * This ordering avoids a deadlock between two paths:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)       - change_curseg()
		 *                                    - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
			submitted += gc_data_segment(sbi, sum->entries, gc_list,

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;

		f2fs_put_page(sum_page, 0);

	f2fs_submit_merged_write(sbi,
			(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;

	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {

	if (unlikely(f2fs_cp_error(sbi))) {

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can free them by writing a checkpoint.
		 * Then we secure free segments, which no longer need FG_GC.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);

		if (has_not_enough_free_secs(sbi, 0, 0))

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {

	ret = __get_victim(sbi, &segno, gc_type);

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)

	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)

		last_skipped = sbi->skipped_atomic_files[FG_GC];

	if (gc_type == FG_GC && seg_freed)
		sbi->cur_victim_sec = NULL_SEGNO;
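
	/*
	 * If there still are not enough free sections, another GC round is
	 * attempted; when the skipped rounds are dominated by atomic files,
	 * their in-memory pages are dropped first, otherwise FG_GC falls back
	 * to writing a checkpoint.
	 */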
	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);

		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);

	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				reserved_segments(sbi),
				prefree_segments(sbi));

	up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

		ret = sec_freed ? 0 : -EAGAIN;

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
			GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

static int free_segment_range(struct f2fs_sb_info *sbi,
					unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {

		if (fatal_signal_pending(current)) {

	err = f2fs_write_checkpoint(sbi, &cpc);

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
		f2fs_bug_on(sbi, 1);

	MAIN_SECS(sbi) += secs;

static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
			(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);

	up_write(&sbi->sb_lock);

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);

int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);

	if (block_count == old_block_count)

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	if (!down_write_trylock(&sbi->gc_lock))

	/* stop CP to protect MAIN_SEC in free_segment_range */
	err = free_segment_range(sbi, secs, true);
	f2fs_unlock_op(sbi);
	up_write(&sbi->gc_lock);

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

	freeze_super(sbi->sb);
	down_write(&sbi->gc_lock);
	mutex_lock(&sbi->cp_mutex);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)

		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);

	err = free_segment_range(sbi, secs, false);

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
		update_sb_metadata(sbi, secs);

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);

		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);

	mutex_unlock(&sbi->cp_mutex);
	up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);