4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/f2fs_fs.h>
13 #include <linux/bio.h>
14 #include <linux/blkdev.h>
15 #include <linux/prefetch.h>
16 #include <linux/vmalloc.h>
21 #include <trace/events/f2fs.h>
24 * This function balances dirty node and dentry pages.
25 * In addition, it controls garbage collection.
27 void f2fs_balance_fs(struct f2fs_sb_info *sbi)
 * We should do GC or end up with a checkpoint if there are too many dirty
 * dir/node pages without enough free segments.
33 if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
39 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
40 enum dirty_type dirty_type)
42 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
44 /* need not be added */
45 if (IS_CURSEG(sbi, segno))
48 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
49 dirty_i->nr_dirty[dirty_type]++;
51 if (dirty_type == DIRTY) {
52 struct seg_entry *sentry = get_seg_entry(sbi, segno);
53 enum dirty_type t = DIRTY_HOT_DATA;
55 dirty_type = sentry->type;
57 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
58 dirty_i->nr_dirty[dirty_type]++;
60 /* Only one bitmap should be set */
61 for (; t <= DIRTY_COLD_NODE; t++) {
64 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
65 dirty_i->nr_dirty[t]--;
70 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
71 enum dirty_type dirty_type)
73 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
75 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
76 dirty_i->nr_dirty[dirty_type]--;
78 if (dirty_type == DIRTY) {
79 enum dirty_type t = DIRTY_HOT_DATA;
81 /* clear all the bitmaps */
82 for (; t <= DIRTY_COLD_NODE; t++)
83 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
84 dirty_i->nr_dirty[t]--;
86 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
87 clear_bit(GET_SECNO(sbi, segno),
88 dirty_i->victim_secmap);
 * This should never fail with an error such as -ENOMEM.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
97 void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
99 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
100 unsigned short valid_blocks;
102 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
105 mutex_lock(&dirty_i->seglist_lock);
107 valid_blocks = get_valid_blocks(sbi, segno, 0);
109 if (valid_blocks == 0) {
110 __locate_dirty_segment(sbi, segno, PRE);
111 __remove_dirty_segment(sbi, segno, DIRTY);
112 } else if (valid_blocks < sbi->blocks_per_seg) {
113 __locate_dirty_segment(sbi, segno, DIRTY);
115 /* Recovery routine with SSR needs this */
116 __remove_dirty_segment(sbi, segno, DIRTY);
119 mutex_unlock(&dirty_i->seglist_lock);
 * clear_prefree_segments() should be called after the checkpoint is done.
126 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
128 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
129 unsigned int segno, offset = 0;
130 unsigned int total_segs = TOTAL_SEGS(sbi);
132 mutex_lock(&dirty_i->seglist_lock);
134 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
136 if (segno >= total_segs)
138 __set_test_and_free(sbi, segno);
141 mutex_unlock(&dirty_i->seglist_lock);
144 void clear_prefree_segments(struct f2fs_sb_info *sbi)
146 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
147 unsigned int segno, offset = 0;
148 unsigned int total_segs = TOTAL_SEGS(sbi);
150 mutex_lock(&dirty_i->seglist_lock);
152 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
154 if (segno >= total_segs)
158 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
159 dirty_i->nr_dirty[PRE]--;
162 if (test_opt(sbi, DISCARD))
163 blkdev_issue_discard(sbi->sb->s_bdev,
164 START_BLOCK(sbi, segno) <<
165 sbi->log_sectors_per_block,
166 1 << (sbi->log_sectors_per_block +
167 sbi->log_blocks_per_seg),
170 mutex_unlock(&dirty_i->seglist_lock);
173 static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
175 struct sit_info *sit_i = SIT_I(sbi);
176 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
177 sit_i->dirty_sentries++;
180 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
181 unsigned int segno, int modified)
183 struct seg_entry *se = get_seg_entry(sbi, segno);
186 __mark_sit_entry_dirty(sbi, segno);
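/*
 * Adjust the valid block accounting for the segment that owns @blkaddr:
 * apply @del to the segment's valid block count, toggle the block's bit in
 * the current valid bitmap, refresh the segment's mtime, and mark the SIT
 * entry dirty so the next checkpoint writes it out.
 */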
189 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
191 struct seg_entry *se;
192 unsigned int segno, offset;
193 long int new_vblocks;
195 segno = GET_SEGNO(sbi, blkaddr);
197 se = get_seg_entry(sbi, segno);
198 new_vblocks = se->valid_blocks + del;
199 offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
201 BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
202 (new_vblocks > sbi->blocks_per_seg)));
204 se->valid_blocks = new_vblocks;
205 se->mtime = get_mtime(sbi);
206 SIT_I(sbi)->max_mtime = se->mtime;
208 /* Update valid block bitmap */
210 if (f2fs_set_bit(offset, se->cur_valid_map))
213 if (!f2fs_clear_bit(offset, se->cur_valid_map))
216 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
217 se->ckpt_valid_blocks += del;
219 __mark_sit_entry_dirty(sbi, segno);
221 /* update total number of valid blocks to be written in ckpt area */
222 SIT_I(sbi)->written_valid_blocks += del;
224 if (sbi->segs_per_sec > 1)
225 get_sec_entry(sbi, segno)->valid_blocks += del;
228 static void refresh_sit_entry(struct f2fs_sb_info *sbi,
229 block_t old_blkaddr, block_t new_blkaddr)
231 update_sit_entry(sbi, new_blkaddr, 1);
232 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
233 update_sit_entry(sbi, old_blkaddr, -1);
236 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
238 unsigned int segno = GET_SEGNO(sbi, addr);
239 struct sit_info *sit_i = SIT_I(sbi);
241 BUG_ON(addr == NULL_ADDR);
242 if (addr == NEW_ADDR)
245 /* add it into sit main buffer */
246 mutex_lock(&sit_i->sentry_lock);
248 update_sit_entry(sbi, addr, -1);
250 /* add it into dirty seglist */
251 locate_dirty_segment(sbi, segno);
253 mutex_unlock(&sit_i->sentry_lock);
 * This function must be called with curseg_mutex held.
259 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
260 struct f2fs_summary *sum, unsigned short offset)
262 struct curseg_info *curseg = CURSEG_I(sbi, type);
263 void *addr = curseg->sum_blk;
264 addr += offset * sizeof(struct f2fs_summary);
265 memcpy(addr, sum, sizeof(struct f2fs_summary));
270 * Calculate the number of current summary pages for writing
272 int npages_for_summary_flush(struct f2fs_sb_info *sbi)
274 int total_size_bytes = 0;
275 int valid_sum_count = 0;
278 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
279 if (sbi->ckpt->alloc_type[i] == SSR)
280 valid_sum_count += sbi->blocks_per_seg;
282 valid_sum_count += curseg_blkoff(sbi, i);
285 total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
286 + sizeof(struct nat_journal) + 2
287 + sizeof(struct sit_journal) + 2;
288 sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
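	/*
	 * The compacted summaries occupy at most three meta pages: one page if
	 * everything fits below sum_space, two pages if it fits below twice
	 * sum_space, and three pages otherwise.
	 */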
289 if (total_size_bytes < sum_space)
291 else if (total_size_bytes < 2 * sum_space)
297 * Caller should put this summary page
299 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
301 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
304 static void write_sum_page(struct f2fs_sb_info *sbi,
305 struct f2fs_summary_block *sum_blk, block_t blk_addr)
307 struct page *page = grab_meta_page(sbi, blk_addr);
308 void *kaddr = page_address(page);
309 memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
310 set_page_dirty(page);
311 f2fs_put_page(page, 1);
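/*
 * Look for a whole prefree section that can safely be reused for the given
 * log type; returns the first segment number of such a section, or
 * NULL_SEGNO when no section qualifies.
 */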
314 static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
316 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
317 unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
319 unsigned int ofs = 0;
	 * If there are not enough reserved sections,
	 * we should not reuse prefree segments.
325 if (has_not_enough_free_secs(sbi, 0))
	 * A NODE page should not reuse a prefree segment,
	 * since that information is used for SPOR (sudden-power-off recovery).
332 if (IS_NODESEG(type))
335 segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
336 ofs += sbi->segs_per_sec;
338 if (segno < TOTAL_SEGS(sbi)) {
341 /* skip intermediate segments in a section */
342 if (segno % sbi->segs_per_sec)
345 /* skip if the section is currently used */
346 if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
349 /* skip if whole section is not prefree */
350 for (i = 1; i < sbi->segs_per_sec; i++)
351 if (!test_bit(segno + i, prefree_segmap))
354 /* skip if whole section was not free at the last checkpoint */
355 for (i = 0; i < sbi->segs_per_sec; i++)
356 if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
364 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
366 struct curseg_info *curseg = CURSEG_I(sbi, type);
367 unsigned int segno = curseg->segno;
368 struct free_segmap_info *free_i = FREE_I(sbi);
370 if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec)
371 return !test_bit(segno + 1, free_i->free_segmap);
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it BUGs.
379 static void get_new_segment(struct f2fs_sb_info *sbi,
380 unsigned int *newseg, bool new_sec, int dir)
382 struct free_segmap_info *free_i = FREE_I(sbi);
383 unsigned int segno, secno, zoneno;
384 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
385 unsigned int hint = *newseg / sbi->segs_per_sec;
386 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
387 unsigned int left_start = hint;
392 write_lock(&free_i->segmap_lock);
394 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
395 segno = find_next_zero_bit(free_i->free_segmap,
396 TOTAL_SEGS(sbi), *newseg + 1);
397 if (segno - *newseg < sbi->segs_per_sec -
398 (*newseg % sbi->segs_per_sec))
402 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
403 if (secno >= TOTAL_SECS(sbi)) {
404 if (dir == ALLOC_RIGHT) {
405 secno = find_next_zero_bit(free_i->free_secmap,
407 BUG_ON(secno >= TOTAL_SECS(sbi));
410 left_start = hint - 1;
416 while (test_bit(left_start, free_i->free_secmap)) {
417 if (left_start > 0) {
421 left_start = find_next_zero_bit(free_i->free_secmap,
423 BUG_ON(left_start >= TOTAL_SECS(sbi));
429 segno = secno * sbi->segs_per_sec;
430 zoneno = secno / sbi->secs_per_zone;
432 /* give up on finding another zone */
435 if (sbi->secs_per_zone == 1)
437 if (zoneno == old_zoneno)
439 if (dir == ALLOC_LEFT) {
440 if (!go_left && zoneno + 1 >= total_zones)
442 if (go_left && zoneno == 0)
445 for (i = 0; i < NR_CURSEG_TYPE; i++)
446 if (CURSEG_I(sbi, i)->zone == zoneno)
449 if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
452 hint = zoneno * sbi->secs_per_zone - 1;
453 else if (zoneno + 1 >= total_zones)
456 hint = (zoneno + 1) * sbi->secs_per_zone;
458 goto find_other_zone;
461 /* set it as dirty segment in free segmap */
462 BUG_ON(test_bit(segno, free_i->free_segmap));
463 __set_inuse(sbi, segno);
465 write_unlock(&free_i->segmap_lock);
468 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
470 struct curseg_info *curseg = CURSEG_I(sbi, type);
471 struct summary_footer *sum_footer;
473 curseg->segno = curseg->next_segno;
474 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
475 curseg->next_blkoff = 0;
476 curseg->next_segno = NULL_SEGNO;
478 sum_footer = &(curseg->sum_blk->footer);
479 memset(sum_footer, 0, sizeof(struct summary_footer));
480 if (IS_DATASEG(type))
481 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
482 if (IS_NODESEG(type))
483 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
484 __set_sit_entry_type(sbi, type, curseg->segno, modified);
488 * Allocate a current working segment.
489 * This function always allocates a free segment in LFS manner.
491 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
493 struct curseg_info *curseg = CURSEG_I(sbi, type);
494 unsigned int segno = curseg->segno;
495 int dir = ALLOC_LEFT;
497 write_sum_page(sbi, curseg->sum_blk,
498 GET_SUM_BLOCK(sbi, curseg->segno));
499 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
502 if (test_opt(sbi, NOHEAP))
505 get_new_segment(sbi, &segno, new_sec, dir);
506 curseg->next_segno = segno;
507 reset_curseg(sbi, type, 1);
508 curseg->alloc_type = LFS;
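/*
 * Find the first block offset at or after @start that is unused in both the
 * checkpoint-time and the current valid block bitmaps of the segment.
 */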
511 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
512 struct curseg_info *seg, block_t start)
514 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
516 for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
517 if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
518 && !f2fs_test_bit(ofs, se->cur_valid_map))
521 seg->next_blkoff = ofs;
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff().
529 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
530 struct curseg_info *seg)
532 if (seg->alloc_type == SSR)
533 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it must restore the existing summary information for the
 * segment's valid blocks.
542 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
544 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
545 struct curseg_info *curseg = CURSEG_I(sbi, type);
546 unsigned int new_segno = curseg->next_segno;
547 struct f2fs_summary_block *sum_node;
548 struct page *sum_page;
550 write_sum_page(sbi, curseg->sum_blk,
551 GET_SUM_BLOCK(sbi, curseg->segno));
552 __set_test_and_inuse(sbi, new_segno);
554 mutex_lock(&dirty_i->seglist_lock);
555 __remove_dirty_segment(sbi, new_segno, PRE);
556 __remove_dirty_segment(sbi, new_segno, DIRTY);
557 mutex_unlock(&dirty_i->seglist_lock);
559 reset_curseg(sbi, type, 1);
560 curseg->alloc_type = SSR;
561 __next_free_blkoff(sbi, curseg, 0);
564 sum_page = get_sum_page(sbi, new_segno);
565 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
566 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
567 f2fs_put_page(sum_page, 1);
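/*
 * Ask the victim selection policy for a partially valid segment that can be
 * reused in SSR manner; for data logs, the other data temperatures are tried
 * as well. On success, curseg->next_segno is set and a nonzero value is
 * returned.
 */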
571 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
573 struct curseg_info *curseg = CURSEG_I(sbi, type);
574 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
576 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
577 return v_ops->get_victim(sbi,
578 &(curseg)->next_segno, BG_GC, type, SSR);
580 /* For data segments, let's do SSR more intensively */
581 for (; type >= CURSEG_HOT_DATA; type--)
582 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it BUGs.
592 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
593 int type, bool force)
595 struct curseg_info *curseg = CURSEG_I(sbi, type);
598 new_curseg(sbi, type, true);
602 curseg->next_segno = check_prefree_segments(sbi, type);
604 if (curseg->next_segno != NULL_SEGNO)
605 change_curseg(sbi, type, false);
606 else if (type == CURSEG_WARM_NODE)
607 new_curseg(sbi, type, false);
608 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
609 new_curseg(sbi, type, false);
610 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
611 change_curseg(sbi, type, true);
613 new_curseg(sbi, type, false);
615 sbi->segment_count[curseg->alloc_type]++;
618 void allocate_new_segments(struct f2fs_sb_info *sbi)
620 struct curseg_info *curseg;
621 unsigned int old_curseg;
624 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
625 curseg = CURSEG_I(sbi, i);
626 old_curseg = curseg->segno;
627 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
628 locate_dirty_segment(sbi, old_curseg);
632 static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};
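/*
 * Write completion callback: walk the pages attached to the bio, mark the
 * filesystem erroneous and force it read-only on an I/O failure, end page
 * writeback, and drop the outstanding writeback page count.
 */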
636 static void f2fs_end_io_write(struct bio *bio, int err)
638 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
639 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
640 struct bio_private *p = bio->bi_private;
643 struct page *page = bvec->bv_page;
645 if (--bvec >= bio->bi_io_vec)
646 prefetchw(&bvec->bv_page->flags);
650 set_bit(AS_EIO, &page->mapping->flags);
651 set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
652 p->sbi->sb->s_flags |= MS_RDONLY;
654 end_page_writeback(page);
655 dec_page_count(p->sbi, F2FS_WRITEBACK);
656 } while (bvec >= bio->bi_io_vec);
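/*
 * Allocate a write bio for up to @npages pages together with the private
 * context consumed by the completion path; the bio allocation itself is not
 * allowed to fail (GFP_NOIO).
 */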
664 struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
667 struct bio_private *priv;
669 priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
675 /* No failure on bio allocation */
676 bio = bio_alloc(GFP_NOIO, npages);
678 bio->bi_private = priv;
682 static void do_submit_bio(struct f2fs_sb_info *sbi,
683 enum page_type type, bool sync)
685 int rw = sync ? WRITE_SYNC : WRITE;
686 enum page_type btype = type > META ? META : type;
688 if (type >= META_FLUSH)
689 rw = WRITE_FLUSH_FUA;
691 if (sbi->bio[btype]) {
692 struct bio_private *p = sbi->bio[btype]->bi_private;
694 sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
696 trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);
698 if (type == META_FLUSH) {
699 DECLARE_COMPLETION_ONSTACK(wait);
702 submit_bio(rw, sbi->bio[btype]);
703 wait_for_completion(&wait);
706 submit_bio(rw, sbi->bio[btype]);
708 sbi->bio[btype] = NULL;
712 void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
714 down_write(&sbi->bio_sem);
715 do_submit_bio(sbi, type, sync);
716 up_write(&sbi->bio_sem);
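/*
 * Queue a single page for write-out: if the page is not physically
 * contiguous with the last block queued for this page type, submit the
 * pending bio first; otherwise keep merging into it via bio_add_page().
 */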
719 static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
720 block_t blk_addr, enum page_type type)
722 struct block_device *bdev = sbi->sb->s_bdev;
724 verify_block_addr(sbi, blk_addr);
726 down_write(&sbi->bio_sem);
728 inc_page_count(sbi, F2FS_WRITEBACK);
730 if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
731 do_submit_bio(sbi, type, false);
733 if (sbi->bio[type] == NULL) {
734 sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev));
735 sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
743 if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
745 do_submit_bio(sbi, type, false);
749 sbi->last_block_in_bio[type] = blk_addr;
751 up_write(&sbi->bio_sem);
752 trace_f2fs_submit_write_page(page, blk_addr, type);
755 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
757 struct curseg_info *curseg = CURSEG_I(sbi, type);
758 if (curseg->next_blkoff < sbi->blocks_per_seg)
763 static int __get_segment_type_2(struct page *page, enum page_type p_type)
766 return CURSEG_HOT_DATA;
768 return CURSEG_HOT_NODE;
771 static int __get_segment_type_4(struct page *page, enum page_type p_type)
773 if (p_type == DATA) {
774 struct inode *inode = page->mapping->host;
776 if (S_ISDIR(inode->i_mode))
777 return CURSEG_HOT_DATA;
779 return CURSEG_COLD_DATA;
781 if (IS_DNODE(page) && !is_cold_node(page))
782 return CURSEG_HOT_NODE;
784 return CURSEG_COLD_NODE;
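/*
 * Six-log policy: directory data is hot, cold-hinted data or files are cold,
 * and other file data is warm; node pages are likewise spread over the hot,
 * warm, and cold node logs depending on whether they are direct node blocks
 * and whether the node is cold.
 */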
788 static int __get_segment_type_6(struct page *page, enum page_type p_type)
790 if (p_type == DATA) {
791 struct inode *inode = page->mapping->host;
793 if (S_ISDIR(inode->i_mode))
794 return CURSEG_HOT_DATA;
795 else if (is_cold_data(page) || is_cold_file(inode))
796 return CURSEG_COLD_DATA;
798 return CURSEG_WARM_DATA;
801 return is_cold_node(page) ? CURSEG_WARM_NODE :
804 return CURSEG_COLD_NODE;
808 static int __get_segment_type(struct page *page, enum page_type p_type)
810 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
811 switch (sbi->active_logs) {
813 return __get_segment_type_2(page, p_type);
815 return __get_segment_type_4(page, p_type);
817 /* NR_CURSEG_TYPE(6) logs by default */
818 BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
819 return __get_segment_type_6(page, p_type);
822 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
823 block_t old_blkaddr, block_t *new_blkaddr,
824 struct f2fs_summary *sum, enum page_type p_type)
826 struct sit_info *sit_i = SIT_I(sbi);
827 struct curseg_info *curseg;
828 unsigned int old_cursegno;
831 type = __get_segment_type(page, p_type);
832 curseg = CURSEG_I(sbi, type);
834 mutex_lock(&curseg->curseg_mutex);
836 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
837 old_cursegno = curseg->segno;
	 * __add_sum_entry must be called with curseg_mutex held,
	 * because this function updates a summary entry in the
	 * current summary block.
844 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
846 mutex_lock(&sit_i->sentry_lock);
847 __refresh_next_blkoff(sbi, curseg);
848 sbi->block_count[curseg->alloc_type]++;
	 * SIT information should be updated before segment allocation,
	 * since SSR needs the latest valid block information.
854 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
856 if (!__has_curseg_space(sbi, type))
857 sit_i->s_ops->allocate_segment(sbi, type, false);
859 locate_dirty_segment(sbi, old_cursegno);
860 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
861 mutex_unlock(&sit_i->sentry_lock);
864 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
866 /* writeout dirty page into bdev */
867 submit_write_page(sbi, page, *new_blkaddr, p_type);
869 mutex_unlock(&curseg->curseg_mutex);
872 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
874 set_page_writeback(page);
875 submit_write_page(sbi, page, page->index, META);
878 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
879 unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
881 struct f2fs_summary sum;
882 set_summary(&sum, nid, 0, 0);
883 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
886 void write_data_page(struct inode *inode, struct page *page,
887 struct dnode_of_data *dn, block_t old_blkaddr,
888 block_t *new_blkaddr)
890 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
891 struct f2fs_summary sum;
894 BUG_ON(old_blkaddr == NULL_ADDR);
895 get_node_info(sbi, dn->nid, &ni);
896 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
898 do_write_page(sbi, page, old_blkaddr,
899 new_blkaddr, &sum, DATA);
902 void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
903 block_t old_blk_addr)
905 submit_write_page(sbi, page, old_blk_addr, DATA);
908 void recover_data_page(struct f2fs_sb_info *sbi,
909 struct page *page, struct f2fs_summary *sum,
910 block_t old_blkaddr, block_t new_blkaddr)
912 struct sit_info *sit_i = SIT_I(sbi);
913 struct curseg_info *curseg;
914 unsigned int segno, old_cursegno;
915 struct seg_entry *se;
918 segno = GET_SEGNO(sbi, new_blkaddr);
919 se = get_seg_entry(sbi, segno);
922 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
923 if (old_blkaddr == NULL_ADDR)
924 type = CURSEG_COLD_DATA;
926 type = CURSEG_WARM_DATA;
928 curseg = CURSEG_I(sbi, type);
930 mutex_lock(&curseg->curseg_mutex);
931 mutex_lock(&sit_i->sentry_lock);
933 old_cursegno = curseg->segno;
935 /* change the current segment */
936 if (segno != curseg->segno) {
937 curseg->next_segno = segno;
938 change_curseg(sbi, type, true);
941 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
942 (sbi->blocks_per_seg - 1);
943 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
945 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
947 locate_dirty_segment(sbi, old_cursegno);
948 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
950 mutex_unlock(&sit_i->sentry_lock);
951 mutex_unlock(&curseg->curseg_mutex);
954 void rewrite_node_page(struct f2fs_sb_info *sbi,
955 struct page *page, struct f2fs_summary *sum,
956 block_t old_blkaddr, block_t new_blkaddr)
958 struct sit_info *sit_i = SIT_I(sbi);
959 int type = CURSEG_WARM_NODE;
960 struct curseg_info *curseg;
961 unsigned int segno, old_cursegno;
962 block_t next_blkaddr = next_blkaddr_of_node(page);
963 unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
965 curseg = CURSEG_I(sbi, type);
967 mutex_lock(&curseg->curseg_mutex);
968 mutex_lock(&sit_i->sentry_lock);
970 segno = GET_SEGNO(sbi, new_blkaddr);
971 old_cursegno = curseg->segno;
973 /* change the current segment */
974 if (segno != curseg->segno) {
975 curseg->next_segno = segno;
976 change_curseg(sbi, type, true);
978 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
979 (sbi->blocks_per_seg - 1);
980 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
982 /* change the current log to the next block addr in advance */
983 if (next_segno != segno) {
984 curseg->next_segno = next_segno;
985 change_curseg(sbi, type, true);
987 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
988 (sbi->blocks_per_seg - 1);
990 /* rewrite node page */
991 set_page_writeback(page);
992 submit_write_page(sbi, page, new_blkaddr, NODE);
993 f2fs_submit_bio(sbi, NODE, true);
994 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
996 locate_dirty_segment(sbi, old_cursegno);
997 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
999 mutex_unlock(&sit_i->sentry_lock);
1000 mutex_unlock(&curseg->curseg_mutex);
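/*
 * Restore the data-log summaries from the compacted summary area: the first
 * page starts with the NAT and SIT journals, followed by the packed summary
 * entries of the hot, warm, and cold data logs.
 */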
1003 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1005 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1006 struct curseg_info *seg_i;
1007 unsigned char *kaddr;
1012 start = start_sum_block(sbi);
1014 page = get_meta_page(sbi, start++);
1015 kaddr = (unsigned char *)page_address(page);
1017 /* Step 1: restore nat cache */
1018 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1019 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1021 /* Step 2: restore sit cache */
1022 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1023 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1025 offset = 2 * SUM_JOURNAL_SIZE;
1027 /* Step 3: restore summary entries */
1028 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1029 unsigned short blk_off;
1032 seg_i = CURSEG_I(sbi, i);
1033 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1034 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1035 seg_i->next_segno = segno;
1036 reset_curseg(sbi, i, 0);
1037 seg_i->alloc_type = ckpt->alloc_type[i];
1038 seg_i->next_blkoff = blk_off;
1040 if (seg_i->alloc_type == SSR)
1041 blk_off = sbi->blocks_per_seg;
1043 for (j = 0; j < blk_off; j++) {
1044 struct f2fs_summary *s;
1045 s = (struct f2fs_summary *)(kaddr + offset);
1046 seg_i->sum_blk->entries[j] = *s;
1047 offset += SUMMARY_SIZE;
1048 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1052 f2fs_put_page(page, 1);
1055 page = get_meta_page(sbi, start++);
1056 kaddr = (unsigned char *)page_address(page);
1060 f2fs_put_page(page, 1);
1064 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1066 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1067 struct f2fs_summary_block *sum;
1068 struct curseg_info *curseg;
1070 unsigned short blk_off;
1071 unsigned int segno = 0;
1072 block_t blk_addr = 0;
1074 /* get segment number and block addr */
1075 if (IS_DATASEG(type)) {
1076 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1077 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1079 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1080 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1082 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1084 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1086 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1088 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1089 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1090 type - CURSEG_HOT_NODE);
1092 blk_addr = GET_SUM_BLOCK(sbi, segno);
1095 new = get_meta_page(sbi, blk_addr);
1096 sum = (struct f2fs_summary_block *)page_address(new);
1098 if (IS_NODESEG(type)) {
1099 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
1100 struct f2fs_summary *ns = &sum->entries[0];
1102 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1104 ns->ofs_in_node = 0;
1107 if (restore_node_summary(sbi, segno, sum)) {
1108 f2fs_put_page(new, 1);
1114 /* set uncompleted segment to curseg */
1115 curseg = CURSEG_I(sbi, type);
1116 mutex_lock(&curseg->curseg_mutex);
1117 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1118 curseg->next_segno = segno;
1119 reset_curseg(sbi, type, 0);
1120 curseg->alloc_type = ckpt->alloc_type[type];
1121 curseg->next_blkoff = blk_off;
1122 mutex_unlock(&curseg->curseg_mutex);
1123 f2fs_put_page(new, 1);
1127 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1129 int type = CURSEG_HOT_DATA;
1131 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1132 /* restore for compacted data summary */
1133 if (read_compacted_summaries(sbi))
1135 type = CURSEG_HOT_NODE;
1138 for (; type <= CURSEG_COLD_NODE; type++)
1139 if (read_normal_summaries(sbi, type))
1144 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1147 unsigned char *kaddr;
1148 struct f2fs_summary *summary;
1149 struct curseg_info *seg_i;
1150 int written_size = 0;
1153 page = grab_meta_page(sbi, blkaddr++);
1154 kaddr = (unsigned char *)page_address(page);
1156 /* Step 1: write nat cache */
1157 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1158 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1159 written_size += SUM_JOURNAL_SIZE;
1161 /* Step 2: write sit cache */
1162 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1163 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1165 written_size += SUM_JOURNAL_SIZE;
1167 set_page_dirty(page);
1169 /* Step 3: write summary entries */
1170 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1171 unsigned short blkoff;
1172 seg_i = CURSEG_I(sbi, i);
1173 if (sbi->ckpt->alloc_type[i] == SSR)
1174 blkoff = sbi->blocks_per_seg;
1176 blkoff = curseg_blkoff(sbi, i);
1178 for (j = 0; j < blkoff; j++) {
1180 page = grab_meta_page(sbi, blkaddr++);
1181 kaddr = (unsigned char *)page_address(page);
1184 summary = (struct f2fs_summary *)(kaddr + written_size);
1185 *summary = seg_i->sum_blk->entries[j];
1186 written_size += SUMMARY_SIZE;
1187 set_page_dirty(page);
1189 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1193 f2fs_put_page(page, 1);
1198 f2fs_put_page(page, 1);
1201 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1202 block_t blkaddr, int type)
1205 if (IS_DATASEG(type))
1206 end = type + NR_CURSEG_DATA_TYPE;
1208 end = type + NR_CURSEG_NODE_TYPE;
1210 for (i = type; i < end; i++) {
1211 struct curseg_info *sum = CURSEG_I(sbi, i);
1212 mutex_lock(&sum->curseg_mutex);
1213 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1214 mutex_unlock(&sum->curseg_mutex);
1218 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1220 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
1221 write_compacted_summaries(sbi, start_blk);
1223 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1226 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1228 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
1229 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
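/*
 * Search the NAT or SIT journal kept in the current summary block for an
 * entry matching @val (a nid or a segment number); when @alloc is set and no
 * match exists, a new journal slot is reserved instead. Returns the entry
 * index, or a negative value if nothing is found or can be allocated.
 */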
1233 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1234 unsigned int val, int alloc)
1238 if (type == NAT_JOURNAL) {
1239 for (i = 0; i < nats_in_cursum(sum); i++) {
1240 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1243 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1244 return update_nats_in_cursum(sum, 1);
1245 } else if (type == SIT_JOURNAL) {
1246 for (i = 0; i < sits_in_cursum(sum); i++)
1247 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1249 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1250 return update_sits_in_cursum(sum, 1);
1255 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1258 struct sit_info *sit_i = SIT_I(sbi);
1259 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1260 block_t blk_addr = sit_i->sit_base_addr + offset;
1262 check_seg_range(sbi, segno);
1264 /* calculate sit block address */
1265 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1266 blk_addr += sit_i->sit_blocks;
1268 return get_meta_page(sbi, blk_addr);
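/*
 * SIT blocks are kept in two copies; copy the current SIT block covering
 * @start into the alternate location and flip its tracking bit so that this
 * checkpoint writes the updated version there.
 */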
1271 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1274 struct sit_info *sit_i = SIT_I(sbi);
1275 struct page *src_page, *dst_page;
1276 pgoff_t src_off, dst_off;
1277 void *src_addr, *dst_addr;
1279 src_off = current_sit_addr(sbi, start);
1280 dst_off = next_sit_addr(sbi, src_off);
1282 /* get current sit block page without lock */
1283 src_page = get_meta_page(sbi, src_off);
1284 dst_page = grab_meta_page(sbi, dst_off);
1285 BUG_ON(PageDirty(src_page));
1287 src_addr = page_address(src_page);
1288 dst_addr = page_address(dst_page);
1289 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1291 set_page_dirty(dst_page);
1292 f2fs_put_page(src_page, 1);
1294 set_to_next_sit(sit_i, start);
1299 static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
1301 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1302 struct f2fs_summary_block *sum = curseg->sum_blk;
	 * If the journal area in the current summary is full of sit entries,
	 * all the sit entries will be flushed. Otherwise, the sit entries
	 * cannot be replaced with newly hot sit entries.
1310 if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
1311 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1313 segno = le32_to_cpu(segno_in_journal(sum, i));
1314 __mark_sit_entry_dirty(sbi, segno);
1316 update_sits_in_cursum(sum, -sits_in_cursum(sum));
1323 * CP calls this function, which flushes SIT entries including sit_journal,
1324 * and moves prefree segs to free segs.
1326 void flush_sit_entries(struct f2fs_sb_info *sbi)
1328 struct sit_info *sit_i = SIT_I(sbi);
1329 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1330 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1331 struct f2fs_summary_block *sum = curseg->sum_blk;
1332 unsigned long nsegs = TOTAL_SEGS(sbi);
1333 struct page *page = NULL;
1334 struct f2fs_sit_block *raw_sit = NULL;
1335 unsigned int start = 0, end = 0;
1336 unsigned int segno = -1;
1339 mutex_lock(&curseg->curseg_mutex);
1340 mutex_lock(&sit_i->sentry_lock);
1343 * "flushed" indicates whether sit entries in journal are flushed
1344 * to the SIT area or not.
1346 flushed = flush_sits_in_journal(sbi);
1348 while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
1349 struct seg_entry *se = get_seg_entry(sbi, segno);
1350 int sit_offset, offset;
1352 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1357 offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
1359 segno_in_journal(sum, offset) = cpu_to_le32(segno);
1360 seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
1364 if (!page || (start > segno) || (segno > end)) {
1366 f2fs_put_page(page, 1);
1370 start = START_SEGNO(sit_i, segno);
1371 end = start + SIT_ENTRY_PER_BLOCK - 1;
1373 /* read sit block that will be updated */
1374 page = get_next_sit_page(sbi, start);
1375 raw_sit = page_address(page);
		/* update entry in SIT block */
1379 seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
1381 __clear_bit(segno, bitmap);
1382 sit_i->dirty_sentries--;
1384 mutex_unlock(&sit_i->sentry_lock);
1385 mutex_unlock(&curseg->curseg_mutex);
	/* write out the last modified SIT block */
1388 f2fs_put_page(page, 1);
1390 set_prefree_as_free_segments(sbi);
1393 static int build_sit_info(struct f2fs_sb_info *sbi)
1395 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1396 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1397 struct sit_info *sit_i;
1398 unsigned int sit_segs, start;
1399 char *src_bitmap, *dst_bitmap;
1400 unsigned int bitmap_size;
1402 /* allocate memory for SIT information */
1403 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1407 SM_I(sbi)->sit_info = sit_i;
1409 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1410 if (!sit_i->sentries)
1413 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1414 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1415 if (!sit_i->dirty_sentries_bitmap)
1418 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1419 sit_i->sentries[start].cur_valid_map
1420 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1421 sit_i->sentries[start].ckpt_valid_map
1422 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1423 if (!sit_i->sentries[start].cur_valid_map
1424 || !sit_i->sentries[start].ckpt_valid_map)
1428 if (sbi->segs_per_sec > 1) {
1429 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
1430 sizeof(struct sec_entry));
1431 if (!sit_i->sec_entries)
1435 /* get information related with SIT */
1436 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
	/* set up the SIT bitmap from the checkpoint pack */
1439 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1440 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1442 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1446 /* init SIT information */
1447 sit_i->s_ops = &default_salloc_ops;
1449 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1450 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1451 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1452 sit_i->sit_bitmap = dst_bitmap;
1453 sit_i->bitmap_size = bitmap_size;
1454 sit_i->dirty_sentries = 0;
1455 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1456 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1457 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1458 mutex_init(&sit_i->sentry_lock);
1462 static int build_free_segmap(struct f2fs_sb_info *sbi)
1464 struct f2fs_sm_info *sm_info = SM_I(sbi);
1465 struct free_segmap_info *free_i;
1466 unsigned int bitmap_size, sec_bitmap_size;
1468 /* allocate memory for free segmap information */
1469 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1473 SM_I(sbi)->free_info = free_i;
1475 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1476 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1477 if (!free_i->free_segmap)
1480 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1481 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1482 if (!free_i->free_secmap)
1485 /* set all segments as dirty temporarily */
1486 memset(free_i->free_segmap, 0xff, bitmap_size);
1487 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1489 /* init free segmap information */
1490 free_i->start_segno =
1491 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
1492 free_i->free_segments = 0;
1493 free_i->free_sections = 0;
1494 rwlock_init(&free_i->segmap_lock);
1498 static int build_curseg(struct f2fs_sb_info *sbi)
1500 struct curseg_info *array;
1503 array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
1507 SM_I(sbi)->curseg_array = array;
1509 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1510 mutex_init(&array[i].curseg_mutex);
1511 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1512 if (!array[i].sum_blk)
1514 array[i].segno = NULL_SEGNO;
1515 array[i].next_blkoff = 0;
1517 return restore_curseg_summaries(sbi);
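/*
 * Populate the in-memory seg_entry array at mount time: prefer the copy in
 * the SIT journal of the cold data summary when present, otherwise read the
 * entry from its on-disk SIT block, then fold per-segment counts into the
 * section entries when sections span multiple segments.
 */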
1520 static void build_sit_entries(struct f2fs_sb_info *sbi)
1522 struct sit_info *sit_i = SIT_I(sbi);
1523 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1524 struct f2fs_summary_block *sum = curseg->sum_blk;
1527 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1528 struct seg_entry *se = &sit_i->sentries[start];
1529 struct f2fs_sit_block *sit_blk;
1530 struct f2fs_sit_entry sit;
1534 mutex_lock(&curseg->curseg_mutex);
1535 for (i = 0; i < sits_in_cursum(sum); i++) {
1536 if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
1537 sit = sit_in_journal(sum, i);
1538 mutex_unlock(&curseg->curseg_mutex);
1542 mutex_unlock(&curseg->curseg_mutex);
1543 page = get_current_sit_page(sbi, start);
1544 sit_blk = (struct f2fs_sit_block *)page_address(page);
1545 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1546 f2fs_put_page(page, 1);
1548 check_block_count(sbi, start, &sit);
1549 seg_info_from_raw_sit(se, &sit);
1550 if (sbi->segs_per_sec > 1) {
1551 struct sec_entry *e = get_sec_entry(sbi, start);
1552 e->valid_blocks += se->valid_blocks;
1557 static void init_free_segmap(struct f2fs_sb_info *sbi)
1562 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1563 struct seg_entry *sentry = get_seg_entry(sbi, start);
1564 if (!sentry->valid_blocks)
1565 __set_free(sbi, start);
	/* mark the current segments as in use */
1569 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1570 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1571 __set_test_and_inuse(sbi, curseg_t->segno);
1575 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1577 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1578 struct free_segmap_info *free_i = FREE_I(sbi);
1579 unsigned int segno = 0, offset = 0;
1580 unsigned short valid_blocks;
1582 while (segno < TOTAL_SEGS(sbi)) {
1583 /* find dirty segment based on free segmap */
1584 segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
1585 if (segno >= TOTAL_SEGS(sbi))
1588 valid_blocks = get_valid_blocks(sbi, segno, 0);
1589 if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
1591 mutex_lock(&dirty_i->seglist_lock);
1592 __locate_dirty_segment(sbi, segno, DIRTY);
1593 mutex_unlock(&dirty_i->seglist_lock);
1597 static int init_victim_secmap(struct f2fs_sb_info *sbi)
1599 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1600 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1602 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1603 if (!dirty_i->victim_secmap)
1608 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1610 struct dirty_seglist_info *dirty_i;
1611 unsigned int bitmap_size, i;
1613 /* allocate memory for dirty segments list information */
1614 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1618 SM_I(sbi)->dirty_info = dirty_i;
1619 mutex_init(&dirty_i->seglist_lock);
1621 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1623 for (i = 0; i < NR_DIRTY_TYPE; i++) {
1624 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
1625 if (!dirty_i->dirty_segmap[i])
1629 init_dirty_segmap(sbi);
1630 return init_victim_secmap(sbi);
1634 * Update min, max modified time for cost-benefit GC algorithm
1636 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
1638 struct sit_info *sit_i = SIT_I(sbi);
1641 mutex_lock(&sit_i->sentry_lock);
1643 sit_i->min_mtime = LLONG_MAX;
1645 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
1647 unsigned long long mtime = 0;
1649 for (i = 0; i < sbi->segs_per_sec; i++)
1650 mtime += get_seg_entry(sbi, segno + i)->mtime;
1652 mtime = div_u64(mtime, sbi->segs_per_sec);
1654 if (sit_i->min_mtime > mtime)
1655 sit_i->min_mtime = mtime;
1657 sit_i->max_mtime = get_mtime(sbi);
1658 mutex_unlock(&sit_i->sentry_lock);
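/*
 * Top-level segment manager setup: read the layout from the raw superblock
 * and checkpoint, then build the SIT, the free segmap, the current segments,
 * and the dirty segmap, and finally initialize the mtime range used by GC.
 */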
1661 int build_segment_manager(struct f2fs_sb_info *sbi)
1663 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1664 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1665 struct f2fs_sm_info *sm_info;
1668 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
1673 sbi->sm_info = sm_info;
1674 INIT_LIST_HEAD(&sm_info->wblist_head);
1675 spin_lock_init(&sm_info->wblist_lock);
1676 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1677 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1678 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
1679 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1680 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1681 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
1682 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
1684 err = build_sit_info(sbi);
1687 err = build_free_segmap(sbi);
1690 err = build_curseg(sbi);
1694 /* reinit free segmap based on SIT */
1695 build_sit_entries(sbi);
1697 init_free_segmap(sbi);
1698 err = build_dirty_segmap(sbi);
1702 init_min_max_mtime(sbi);
1706 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
1707 enum dirty_type dirty_type)
1709 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1711 mutex_lock(&dirty_i->seglist_lock);
1712 kfree(dirty_i->dirty_segmap[dirty_type]);
1713 dirty_i->nr_dirty[dirty_type] = 0;
1714 mutex_unlock(&dirty_i->seglist_lock);
1717 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
1719 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1720 kfree(dirty_i->victim_secmap);
1723 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
1725 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1731 /* discard pre-free/dirty segments list */
1732 for (i = 0; i < NR_DIRTY_TYPE; i++)
1733 discard_dirty_segmap(sbi, i);
1735 destroy_victim_secmap(sbi);
1736 SM_I(sbi)->dirty_info = NULL;
1740 static void destroy_curseg(struct f2fs_sb_info *sbi)
1742 struct curseg_info *array = SM_I(sbi)->curseg_array;
1747 SM_I(sbi)->curseg_array = NULL;
1748 for (i = 0; i < NR_CURSEG_TYPE; i++)
1749 kfree(array[i].sum_blk);
1753 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
1755 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
1758 SM_I(sbi)->free_info = NULL;
1759 kfree(free_i->free_segmap);
1760 kfree(free_i->free_secmap);
1764 static void destroy_sit_info(struct f2fs_sb_info *sbi)
1766 struct sit_info *sit_i = SIT_I(sbi);
1772 if (sit_i->sentries) {
1773 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1774 kfree(sit_i->sentries[start].cur_valid_map);
1775 kfree(sit_i->sentries[start].ckpt_valid_map);
1778 vfree(sit_i->sentries);
1779 vfree(sit_i->sec_entries);
1780 kfree(sit_i->dirty_sentries_bitmap);
1782 SM_I(sbi)->sit_info = NULL;
1783 kfree(sit_i->sit_bitmap);
1787 void destroy_segment_manager(struct f2fs_sb_info *sbi)
1789 struct f2fs_sm_info *sm_info = SM_I(sbi);
1790 destroy_dirty_segmap(sbi);
1791 destroy_curseg(sbi);
1792 destroy_free_segmap(sbi);
1793 destroy_sit_info(sbi);
1794 sbi->sm_info = NULL;