// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
                                unsigned int offset, unsigned int len);

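/*
 * Background GC worker thread: sleeps between passes, adapting its
 * interval to filesystem idleness and the amount of invalid blocks, and
 * can be kicked early through fggc_wq (GC_MERGE foreground waiters) or
 * gc_th->gc_wake.
 */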
static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
        unsigned int wait_ms;
        struct f2fs_gc_control gc_control = {
                .victim_segno = NULL_SEGNO,
                .should_migrate_blocks = false,
                .err_gc_skipped = false };

        wait_ms = gc_th->min_sleep_time;

        set_freezable();
        do {
                bool sync_mode, foreground = false;

                wait_event_interruptible_timeout(*wq,
                                kthread_should_stop() || freezing(current) ||
                                waitqueue_active(fggc_wq) ||
                                gc_th->gc_wake,
                                msecs_to_jiffies(wait_ms));

                if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
                        foreground = true;

                /* give it a try one time */
                if (gc_th->gc_wake)
                        gc_th->gc_wake = 0;

                if (try_to_freeze()) {
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }

                if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                        f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
                        f2fs_stop_checkpoint(sbi, false);
                }

                if (!sb_start_write_trylock(sbi->sb)) {
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. IO subsystem is idle by checking the # of writeback pages.
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
                 * Note: we have to avoid triggering GC too frequently, because
                 * some segments may be invalidated soon afterwards by user
                 * updates or deletions, so we wait a while to let dirty
                 * segments accumulate.
                 */
                if (sbi->gc_mode == GC_URGENT_HIGH) {
                        spin_lock(&sbi->gc_urgent_high_lock);
                        if (sbi->gc_urgent_high_limited) {
                                if (!sbi->gc_urgent_high_remaining) {
                                        sbi->gc_urgent_high_limited = false;
                                        spin_unlock(&sbi->gc_urgent_high_lock);
                                        sbi->gc_mode = GC_NORMAL;
                                        continue;
                                }
                                sbi->gc_urgent_high_remaining--;
                        }
                        spin_unlock(&sbi->gc_urgent_high_lock);
                }

                if (sbi->gc_mode == GC_URGENT_HIGH ||
                                sbi->gc_mode == GC_URGENT_MID) {
                        wait_ms = gc_th->urgent_sleep_time;
                        f2fs_down_write(&sbi->gc_lock);
                        goto do_gc;
                }

                if (foreground) {
                        f2fs_down_write(&sbi->gc_lock);
                        goto do_gc;
                } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
                        stat_other_skip_bggc_count(sbi);
                        goto next;
                }

                if (!is_idle(sbi, GC_TIME)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        f2fs_up_write(&sbi->gc_lock);
                        stat_io_skip_bggc_count(sbi);
                        goto next;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);
do_gc:
                if (!foreground)
                        stat_inc_bggc_count(sbi->stat_info);

                sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

                /* foreground GC was triggered via f2fs_balance_fs() */
                if (foreground)
                        sync_mode = false;

                gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
                gc_control.no_bg_gc = foreground;
                gc_control.nr_free_secs = foreground ? 1 : 0;

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi, &gc_control)) {
                        /* don't adjust wait_ms when woken for foreground GC */
                        if (!foreground)
                                wait_ms = gc_th->no_gc_sleep_time;
                }

                if (foreground)
                        wake_up_all(&gc_th->fggc_wq);

                trace_f2fs_background_gc(sbi->sb, wait_ms,
                                prefree_segments(sbi), free_segments(sbi));

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi, true);
next:
                sb_end_write(sbi->sb);

        } while (!kthread_should_stop());
        return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_wake = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        init_waitqueue_head(&sbi->gc_thread->fggc_wq);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        wake_up_all(&gc_th->fggc_wq);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
        int gc_mode;

        if (gc_type == BG_GC) {
                if (sbi->am.atgc_enabled)
                        gc_mode = GC_AT;
                else
                        gc_mode = GC_CB;
        } else {
                gc_mode = GC_GREEDY;
        }

        switch (sbi->gc_mode) {
        case GC_IDLE_CB:
                gc_mode = GC_CB;
                break;
        case GC_IDLE_GREEDY:
        case GC_URGENT_HIGH:
                gc_mode = GC_GREEDY;
                break;
        case GC_IDLE_AT:
                gc_mode = GC_AT;
                break;
        }

        return gc_mode;
}

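/*
 * Set up the victim selection policy: which dirty bitmap to scan, how
 * many candidates to examine (max_search), which cost model to use, and
 * the scan unit (single segments for SSR/AT_SSR, whole sections for LFS).
 */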
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_bitmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else if (p->alloc_mode == AT_SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_bitmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi, gc_type);
                p->ofs_unit = sbi->segs_per_sec;
                if (__is_large_section(sbi)) {
                        p->dirty_bitmap = dirty_i->dirty_secmap;
                        p->max_search = count_bits(p->dirty_bitmap,
                                                0, MAIN_SECS(sbi));
                } else {
                        p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
                        p->max_search = dirty_i->nr_dirty[DIRTY];
                }
        }

        /*
         * Adjust the candidate range: all dirty segments should be eligible
         * in the foreground GC and urgent GC cases.
         */
        if (gc_type != FG_GC &&
                        (sbi->gc_mode != GC_URGENT_HIGH) &&
                        (p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
                        p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        /* select the beginning hot/small space first in no_heap mode */
        if (f2fs_need_rand_seg(sbi))
                p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
        else if (test_opt(sbi, NOHEAP) &&
                (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
                p->offset = 0;
        else
                p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

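/*
 * Upper bound on the victim cost, used to initialize p->min_cost;
 * UINT_MAX effectively disables the bound for the CB and AT modes.
 */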
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return sbi->blocks_per_seg;
        else if (p->alloc_mode == AT_SSR)
                return UINT_MAX;

        /* LFS */
        if (p->gc_mode == GC_GREEDY)
                return 2 * sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else if (p->gc_mode == GC_AT)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can select the victim segments
         * selected by background GC before; those segments are guaranteed
         * to have few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return GET_SEG_FROM_SEC(sbi, secno);
        }
        return NULL_SEGNO;
}

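/*
 * Cost-benefit score of a section: combines utilization u (percentage of
 * valid blocks) with a normalized age so that older, emptier sections
 * score better. The value is inverted against UINT_MAX because callers
 * select the minimum cost.
 */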
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
        unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;
        unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

        for (i = 0; i < usable_segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, true);

        mtime = div_u64(mtime, usable_segs_per_sec);
        vblocks = div_u64(vblocks, usable_segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle the case where the system time was changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, true);
        else if (p->gc_mode == GC_CB)
                return get_cb_cost(sbi, segno);

        f2fs_bug_on(sbi, 1);
        return 0;
}

static unsigned int count_bits(const unsigned long *addr,
                                unsigned int offset, unsigned int len)
{
        unsigned int end = offset + len, sum = 0;

        while (offset < end) {
                if (test_bit(offset++, addr))
                        ++sum;
        }
        return sum;
}

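/*
 * Allocate a victim entry for ATGC and link it into both the
 * mtime-ordered rb-tree and the flat victim list.
 */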
static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
                                unsigned long long mtime, unsigned int segno,
                                struct rb_node *parent, struct rb_node **p,
                                bool left_most)
{
        struct atgc_management *am = &sbi->am;
        struct victim_entry *ve;

        ve = f2fs_kmem_cache_alloc(victim_entry_slab,
                                GFP_NOFS, true, NULL);

        ve->mtime = mtime;
        ve->segno = segno;

        rb_link_node(&ve->rb_node, parent, p);
        rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

        list_add_tail(&ve->list, &am->victim_list);

        am->victim_count++;

        return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
                                unsigned long long mtime, unsigned int segno)
{
        struct atgc_management *am = &sbi->am;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        bool left_most = true;

        p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
        attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
        unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
        unsigned long long mtime = 0;
        unsigned int i;

        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
                if (p->gc_mode == GC_AT &&
                        get_valid_blocks(sbi, segno, true) == 0)
                        return;
        }

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        mtime = div_u64(mtime, sbi->segs_per_sec);

        /* Handle the case where the system time was changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (mtime < sit_i->dirty_min_mtime)
                sit_i->dirty_min_mtime = mtime;
        if (mtime > sit_i->dirty_max_mtime)
                sit_i->dirty_max_mtime = mtime;

        /* don't choose a young section as a candidate */
        if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
                return;

        insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        struct atgc_management *am = &sbi->am;
        struct rb_node *parent = NULL;
        bool left_most;

        f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

        return parent;
}

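/*
 * ATGC lookup: walk the candidates in mtime order and score each one by
 * a weighted sum of its normalized age and its share of invalid blocks,
 * visiting at most dirty_threshold entries.
 */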
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct atgc_management *am = &sbi->am;
        struct rb_root_cached *root = &am->root;
        struct rb_node *node;
        struct rb_entry *re;
        struct victim_entry *ve;
        unsigned long long total_time;
        unsigned long long age, u, accu;
        unsigned long long max_mtime = sit_i->dirty_max_mtime;
        unsigned long long min_mtime = sit_i->dirty_min_mtime;
        unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
        unsigned int vblocks;
        unsigned int dirty_threshold = max(am->max_candidate_count,
                                        am->candidate_ratio *
                                        am->victim_count / 100);
        unsigned int age_weight = am->age_weight;
        unsigned int cost;
        unsigned int iter = 0;

        if (max_mtime < min_mtime)
                return;

        max_mtime += 1;
        total_time = max_mtime - min_mtime;

        accu = div64_u64(ULLONG_MAX, total_time);
        accu = min_t(unsigned long long, div_u64(accu, 100),
                                        DEFAULT_ACCURACY_CLASS);

        node = rb_first_cached(root);
next:
        re = rb_entry_safe(node, struct rb_entry, rb_node);
        if (!re)
                return;

        ve = (struct victim_entry *)re;

        if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
                goto skip;

        /* age = 10000 * x% * 60 */
        age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
                                                                age_weight;

        vblocks = get_valid_blocks(sbi, ve->segno, true);
        f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

        /* u = 10000 * x% * 40 */
        u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
                                                        (100 - age_weight);

        f2fs_bug_on(sbi, age + u >= UINT_MAX);

        cost = UINT_MAX - (age + u);
        iter++;

        if (cost < p->min_cost ||
                        (cost == p->min_cost && age > p->oldest_age)) {
                p->min_cost = cost;
                p->oldest_age = age;
                p->min_segno = ve->segno;
        }
skip:
        if (iter < dirty_threshold) {
                node = rb_next(node);
                goto next;
        }
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct atgc_management *am = &sbi->am;
        struct rb_node *node;
        struct rb_entry *re;
        struct victim_entry *ve;
        unsigned long long age;
        unsigned long long max_mtime = sit_i->dirty_max_mtime;
        unsigned long long min_mtime = sit_i->dirty_min_mtime;
        unsigned int seg_blocks = sbi->blocks_per_seg;
        unsigned int vblocks;
        unsigned int dirty_threshold = max(am->max_candidate_count,
                                        am->candidate_ratio *
                                        am->victim_count / 100);
        unsigned int cost;
        unsigned int iter = 0;
        int stage = 0;

        if (max_mtime < min_mtime)
                return;
        max_mtime += 1;
next_stage:
        node = lookup_central_victim(sbi, p);
next_node:
        re = rb_entry_safe(node, struct rb_entry, rb_node);
        if (!re) {
                if (stage == 0)
                        goto skip_stage;
                return;
        }

        ve = (struct victim_entry *)re;

        if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
                goto skip_node;

        age = max_mtime - ve->mtime;

        vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
        f2fs_bug_on(sbi, !vblocks);

        /* rare case */
        if (vblocks == seg_blocks)
                goto skip_node;

        iter++;

        age = max_mtime - abs(p->age - age);
        cost = UINT_MAX - vblocks;

        if (cost < p->min_cost ||
                        (cost == p->min_cost && age > p->oldest_age)) {
                p->min_cost = cost;
                p->oldest_age = age;
                p->min_segno = ve->segno;
        }
skip_node:
        if (iter < dirty_threshold) {
                if (stage == 0)
                        node = rb_prev(node);
                else if (stage == 1)
                        node = rb_next(node);
                goto next_node;
        }
skip_stage:
        if (stage < 1) {
                stage++;
                iter = 0;
                goto next_stage;
        }
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
                                                &sbi->am.root, true));

        if (p->gc_mode == GC_AT)
                atgc_lookup_victim(sbi, p);
        else if (p->alloc_mode == AT_SSR)
                atssr_lookup_victim(sbi, p);
        else
                f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
        struct atgc_management *am = &sbi->am;
        struct victim_entry *ve, *tmp;

        list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
                list_del(&ve->list);
                kmem_cache_free(victim_entry_slab, ve);
                am->victim_count--;
        }

        am->root = RB_ROOT_CACHED;

        f2fs_bug_on(sbi, am->victim_count);
        f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

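/*
 * Pin the section containing @segno so that foreground GC skips it;
 * fails if section pinning is currently disabled.
 */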
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

        if (!dirty_i->enable_pin_section)
                return false;
        if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
                dirty_i->pinned_secmap_cnt++;
        return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
        return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
                                                unsigned int secno)
{
        return dirty_i->enable_pin_section &&
                f2fs_pinned_section_exists(dirty_i) &&
                test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
        unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

        if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
                memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
                DIRTY_I(sbi)->pinned_secmap_cnt = 0;
        }
        DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
                                                        unsigned int segno)
{
        if (!f2fs_is_pinned_file(inode))
                return 0;
        if (gc_type != FG_GC)
                return -EBUSY;
        if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
                f2fs_pin_file_control(inode, true);
        return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment and does not
 * remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        unsigned int *result, int gc_type, int type,
                        char alloc_mode, unsigned long long age)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct sit_info *sm = SIT_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
        unsigned int last_segment;
        unsigned int nsearched;
        bool is_atgc;
        int ret = 0;

        mutex_lock(&dirty_i->seglist_lock);
        last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

        p.alloc_mode = alloc_mode;
        p.age = age;
        p.age_threshold = sbi->am.age_threshold;

retry:
        select_policy(sbi, gc_type, type, &p);
        p.min_segno = NULL_SEGNO;
        p.oldest_age = 0;
        p.min_cost = get_max_cost(sbi, &p);

        is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
        nsearched = 0;

        if (is_atgc)
                SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

        if (*result != NULL_SEGNO) {
                if (!get_valid_blocks(sbi, *result, false)) {
                        ret = -ENODATA;
                        goto out;
                }

                if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
                        ret = -EBUSY;
                else
                        p.min_segno = *result;
                goto out;
        }

        ret = -ENODATA;
        if (p.max_search == 0)
                goto out;

        if (__is_large_section(sbi) && p.alloc_mode == LFS) {
                if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
                        p.min_segno = sbi->next_victim_seg[BG_GC];
                        *result = p.min_segno;
                        sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
                        goto got_result;
                }
                if (gc_type == FG_GC &&
                                sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
                        p.min_segno = sbi->next_victim_seg[FG_GC];
                        *result = p.min_segno;
                        sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
                        goto got_result;
                }
        }

        last_victim = sm->last_victim[p.gc_mode];
        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost, *dirty_bitmap;
                unsigned int unit_no, segno;

                dirty_bitmap = p.dirty_bitmap;
                unit_no = find_next_bit(dirty_bitmap,
                                last_segment / p.ofs_unit,
                                p.offset / p.ofs_unit);
                segno = unit_no * p.ofs_unit;
                if (segno >= last_segment) {
                        if (sm->last_victim[p.gc_mode]) {
                                last_segment =
                                        sm->last_victim[p.gc_mode];
                                sm->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
                /*
                 * Skip an invalid segno (i.e., one that failed the block
                 * validity check during GC) to avoid an endless GC loop in
                 * such cases.
                 */
                if (test_bit(segno, sm->invalid_segmap))
                        goto next;
#endif

                secno = GET_SEC_FROM_SEG(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        goto next;

                /* Don't touch checkpointed data */
                if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
                        if (p.alloc_mode == LFS) {
                                /*
                                 * LFS is set to find source section during GC.
                                 * The victim should have no checkpointed data.
                                 */
                                if (get_ckpt_valid_blocks(sbi, segno, true))
                                        goto next;
                        } else {
                                /*
                                 * SSR | AT_SSR are set to find a target
                                 * segment for writes, which may be filled
                                 * with both checkpointed and newly written
                                 * blocks.
                                 */
                                if (!f2fs_segment_has_free_slot(sbi, segno))
                                        goto next;
                        }
                }

                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;

                if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
                        goto next;

                if (is_atgc) {
                        add_victim_entry(sbi, &p, segno);
                        goto next;
                }

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                }
next:
                if (nsearched >= p.max_search) {
                        if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
                                sm->last_victim[p.gc_mode] =
                                        last_victim + p.ofs_unit;
                        else
                                sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
                        sm->last_victim[p.gc_mode] %=
                                (MAIN_SECS(sbi) * sbi->segs_per_sec);
                        break;
                }
        }

        /* get victim for GC_AT/AT_SSR */
        if (is_atgc) {
                lookup_victim_by_age(sbi, &p);
                release_victim_entry(sbi);
        }

        if (is_atgc && p.min_segno == NULL_SEGNO &&
                        sm->elapsed_time < p.age_threshold) {
                p.age_threshold = 0;
                goto retry;
        }

        if (p.min_segno != NULL_SEGNO) {
got_it:
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
                if (p.alloc_mode == LFS) {
                        secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                ret = 0;
        }
out:
        if (p.min_segno != NULL_SEGNO)
                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        mutex_unlock(&dirty_i->seglist_lock);

        return ret;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
                                        GFP_NOFS, true, NULL);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;

        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(f2fs_inode_entry_slab, ie);
        }
}

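/*
 * Check whether the block at (@segno, @offset) is still valid in the
 * current SIT bitmap.
 */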
static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        down_read(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        up_read(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If they match, the node is migrated with cold status;
 * otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;
        bool fggc = (gc_type == FG_GC);
        int submitted = 0;
        unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        if (fggc && phase == 2)
                atomic_inc(&sbi->wb_sync_req[NODE]);

        for (off = 0; off < usable_blks_in_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;
                int err;

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return submitted;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        f2fs_ra_node_page(sbi, nid);
                        continue;
                }

                /* phase == 2 */
                node_page = f2fs_get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during f2fs_get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                if (f2fs_get_node_info(sbi, nid, &ni, false)) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                err = f2fs_move_node_page(node_page, gc_type);
                if (!err && gc_type == FG_GC)
                        submitted++;
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (++phase < 3)
                goto next_step;

        if (fggc)
                atomic_dec(&sbi->wb_sync_req[NODE]);
        return submitted;
}

/*
 * Calculate the start block index that the given node offset indicates.
 * Be careful: the caller must pass a node offset that indicates a direct
 * node block only. Passing an offset that points to another type of node
 * block, such as an indirect or double indirect node block, is a caller's
 * bug. For example, assuming the common 4KB block size (NIDS_PER_BLOCK ==
 * 1018), node_ofs == 4 is the first direct node under the first indirect
 * node, so dec == 0 and bidx == 2.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

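/*
 * Check whether the data block at @blkaddr is still live: read its owning
 * node page, validate the node info against the NAT, and compare the
 * block address recorded in the node with @blkaddr. The node offset is
 * returned through @nofs.
 */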
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = f2fs_get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        if (f2fs_get_node_info(sbi, nid, dni, false)) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        if (sum->version != dni->version) {
                f2fs_warn(sbi, "%s: valid data with mismatched node version.",
                          __func__);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
        }

        if (f2fs_check_nid_range(sbi, dni->ino)) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
                unsigned int segno = GET_SEGNO(sbi, blkaddr);
                unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

                if (unlikely(check_valid_map(sbi, segno, offset))) {
                        if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
                                f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
                                         blkaddr, source_blkaddr, segno);
                                set_sbi_flag(sbi, SBI_NEED_FSCK);
                        }
                }
#endif
                return false;
        }
        return true;
}

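/*
 * Readahead one data block into META_MAPPING so that a later
 * move_data_block() can migrate it as raw (e.g. encrypted) data without
 * going through the inode's page cache.
 */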
static int ra_data_block(struct inode *inode, pgoff_t index)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei = {0, 0, 0};
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .ino = inode->i_ino,
                .type = DATA,
                .temp = COLD,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
                .in_list = false,
                .retry = false,
        };
        int err;

        page = f2fs_grab_cache_page(mapping, index, true);
        if (!page)
                return -ENOMEM;

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ))) {
                        err = -EFSCORRUPTED;
                        goto put_page;
                }
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_page;
        f2fs_put_dnode(&dn);

        if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
                err = -ENOENT;
                goto put_page;
        }
        if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE))) {
                err = -EFSCORRUPTED;
                goto put_page;
        }
got_it:
        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        /*
         * don't cache encrypted data into the meta inode until the previous
         * dirty data has been written back, to avoid racing between GC and
         * flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true, true);

        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
                                        dn.data_blkaddr,
                                        FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                goto put_page;
        }

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_encrypted_page;
        f2fs_put_page(fio.encrypted_page, 0);
        f2fs_put_page(page, 1);

        f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
        f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

        return 0;
put_encrypted_page:
        f2fs_put_page(fio.encrypted_page, 1);
put_page:
        f2fs_put_page(page, 1);
        return err;
}

/*
 * Move a data block via META_MAPPING while keeping its data page locked.
 * This can be used to move blocks (i.e., LBAs) directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
                                int gc_type, unsigned int segno, int off)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .ino = inode->i_ino,
                .type = DATA,
                .temp = COLD,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
                .in_list = false,
                .retry = false,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page, *mpage;
        block_t newaddr;
        int err = 0;
        bool lfs_mode = f2fs_lfs_mode(fio.sbi);
        int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
                                (fio.sbi->gc_mode != GC_URGENT_HIGH) ?
                                CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

        /* do not read out */
        page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
        if (!page)
                return -ENOMEM;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
                err = -ENOENT;
                goto out;
        }

        err = f2fs_gc_pinned_control(inode, gc_type, segno);
        if (err)
                goto out;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                ClearPageUptodate(page);
                err = -ENOENT;
                goto put_out;
        }

        /*
         * don't cache encrypted data into the meta inode until the previous
         * dirty data has been written back, to avoid racing between GC and
         * flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true, true);

        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
        if (err)
                goto put_out;

        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        if (lfs_mode)
                f2fs_down_write(&fio.sbi->io_order_lock);

        mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
                                        fio.old_blkaddr, false);
        if (!mpage) {
                err = -ENOMEM;
                goto up_out;
        }

        fio.encrypted_page = mpage;

        /* read source block in mpage */
        if (!PageUptodate(mpage)) {
                err = f2fs_submit_page_bio(&fio);
                if (err) {
                        f2fs_put_page(mpage, 1);
                        goto up_out;
                }

                f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
                f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

                lock_page(mpage);
                if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
                                                !PageUptodate(mpage))) {
                        err = -EIO;
                        f2fs_put_page(mpage, 1);
                        goto up_out;
                }
        }

        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* allocate block address */
        f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                &sum, type, NULL);

        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                f2fs_put_page(mpage, 1);
                goto recover_block;
        }

        /* write target block */
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
        memcpy(page_address(fio.encrypted_page),
                                page_address(mpage), PAGE_SIZE);
        f2fs_put_page(mpage, 1);
        invalidate_mapping_pages(META_MAPPING(fio.sbi),
                                fio.old_blkaddr, fio.old_blkaddr);
        f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

        set_page_dirty(fio.encrypted_page);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);
        ClearPageError(page);

        fio.op = REQ_OP_WRITE;
        fio.op_flags = REQ_SYNC;
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_write(&fio);
        if (fio.retry) {
                err = -EAGAIN;
                if (PageWriteback(fio.encrypted_page))
                        end_page_writeback(fio.encrypted_page);
                goto put_page_out;
        }

        f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

        f2fs_update_data_blkaddr(&dn, newaddr);
        set_inode_flag(inode, FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
recover_block:
        if (err)
                f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                        true, true, true);
up_out:
        if (lfs_mode)
                f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
        return err;
}

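/*
 * Move a data block through the regular page cache path: background GC
 * just redirties the page and tags it for GC writeback, while foreground
 * GC writes it out synchronously, retrying on -ENOMEM.
 */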
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
                                                        unsigned int segno, int off)
{
        struct page *page;
        int err = 0;

        page = f2fs_get_lock_data_page(inode, bidx, true);
        if (IS_ERR(page))
                return PTR_ERR(page);

        if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
                err = -ENOENT;
                goto out;
        }

        err = f2fs_gc_pinned_control(inode, gc_type, segno);
        if (err)
                goto out;

        if (gc_type == BG_GC) {
                if (PageWriteback(page)) {
                        err = -EAGAIN;
                        goto out;
                }
                set_page_dirty(page);
                set_page_private_gcing(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .ino = inode->i_ino,
                        .type = DATA,
                        .temp = COLD,
                        .op = REQ_OP_WRITE,
                        .op_flags = REQ_SYNC,
                        .old_blkaddr = NULL_ADDR,
                        .page = page,
                        .encrypted_page = NULL,
                        .need_lock = LOCK_REQ,
                        .io_type = FS_GC_DATA_IO,
                };
                bool is_dirty = PageDirty(page);

retry:
                f2fs_wait_on_page_writeback(page, DATA, true, true);

                set_page_dirty(page);
                if (clear_page_dirty_for_io(page)) {
                        inode_dec_dirty_pages(inode);
                        f2fs_remove_dirty_inode(inode);
                }

                set_page_private_gcing(page);

                err = f2fs_do_write_data_page(&fio);
                if (err) {
                        clear_page_private_gcing(page);
                        if (err == -ENOMEM) {
                                memalloc_retry_wait(GFP_NOFS);
                                goto retry;
                        }
                        if (is_dirty)
                                set_page_dirty(page);
                }
        }
out:
        f2fs_put_page(page, 1);
        return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
                bool force_migrate)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;
        int submitted = 0;
        unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < usable_blks_in_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;
                nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment has become fully valid due
		 * to a race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Look up the dnode info and check the block is still alive */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode))
				continue;

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

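			/*
			 * Take both i_gc_rwsem locks with trylock so GC
			 * never blocks behind a writer; on contention the
			 * block is skipped and counted in skipped_gc_rwsem.
			 */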
			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all in-flight AIO data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * On zoned devices, zone capacity can be less than zone size,
	 * leaving fewer usable segments in the zone than expected, so
	 * recompute the last segno that can be garbage collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks with contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
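				/*
				 * Drop two references: the one taken by
				 * find_get_page() just above, and the one
				 * still held from f2fs_get_sum_page().
				 */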
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
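	/*
	 * Each pass below selects one victim section, migrates it with
	 * do_garbage_collect(), and loops back to gc_more until enough
	 * sections are free or the loop decides to stop.
	 */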
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can free them by a checkpoint. That
		 * secures free segments, so foreground GC is no longer
		 * needed.
		 */
		if (prefree_segments(sbi)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching for victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (gc_control->init_gc_type == FG_GC ||
	    !has_not_enough_free_secs(sbi,
				(gc_type == FG_GC) ? sec_freed : 0, 0)) {
		if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs)
			goto go_gc_more;
		goto stop;
	}

	/*
	 * Stop FG_GC and fall back to a checkpoint if most rounds were
	 * skipped (e.g. due to i_gc_rwsem contention).
	 */
	if (gc_type == FG_GC) {
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	}

	/* Write checkpoint to reclaim prefree segments */
	if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
				prefree_segments(sbi)) {
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	if (!victim_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* serve the warm/cold data area from the slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Temporarily shrink MAIN_SECS so blocks allocated for GC land outside the range */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

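/*
 * update_sb_metadata() adjusts the on-disk superblock counters for a
 * resize by +/- secs sections; update_fs_metadata() below mirrors the
 * same change in the in-core checkpoint and free-space counters.
 */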
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}

int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* the new fs size must be aligned to the section size */
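	/*
	 * e.g. with the common 4KB block / 2MB segment geometry and one
	 * segment per section, block_count must be a multiple of
	 * BLKS_PER_SEC(sbi) = 512 blocks.
	 */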
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock))
		return -EAGAIN;

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

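	/*
	 * First pass with gc_only == true: only migrate valid blocks out
	 * of the target range; the freeze and the real shrink with a
	 * checkpoint happen below.
	 */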
	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
	if (err)
		return err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

	freeze_super(sbi->sb);
	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	return err;
}