/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

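/*
 * Roll forward recovery replays blocks allocated after the last
 * checkpoint, so check that those allocations still fit within the
 * user-visible block budget before committing to recovery.
 */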
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

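/*
 * Instantiate the inode for @ino, charge it to quota, and queue it on
 * @head so the recovery passes can replay its fsync'd dnodes.  On
 * failure the inode reference is dropped and an ERR_PTR is returned.
 */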
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

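/*
 * Re-link a fsync'd inode into its parent directory using the name and
 * i_pino recorded in the raw inode block.  A stale entry with the same
 * name but a different ino is unlinked via the orphan mechanism and the
 * lookup is retried until the dentry points at this inode.
 */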
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

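/*
 * Scenario #1 above: a newer inode block exists past the checkpoint, so
 * refresh the in-memory inode metadata (mode, size, times, flags) from
 * the on-disk copy.
 */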
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);

	recover_inline_flags(inode, raw);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
}

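/*
 * Pass 1: walk the warm node chain starting right after the last
 * checkpointed position and collect every inode that carries a fsync
 * mark, so pass 2 knows which dnodes to replay.
 */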
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
							bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
				blkaddr == next_blkaddr_of_node(page)) {
			f2fs_msg(sbi->sb, KERN_NOTICE,
				"%s: detect looped node chain, "
				"blkaddr:%u, next:%u",
				__func__, blkaddr, next_blkaddr_of_node(page));
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}

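/*
 * The destination block of a to-be-recovered index may still be claimed
 * by a dnode written before the crash.  Look up the previous owner via
 * the segment summary and truncate its stale index so the block can be
 * reused safely for recovery.
 */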
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

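/*
 * Replay one fsync'd dnode: xattrs first, then inline data, then every
 * data index carried by the node, reusing or re-allocating destination
 * blocks as needed.
 */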
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			inode->i_ino, ofs_of_node(dn.node_page),
			ofs_of_node(page));
		err = -EFAULT;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}

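/*
 * Pass 2: walk the same node chain as find_fsync_dnodes(), but this
 * time replay inode updates, dentries and data indices for each entry
 * collected in pass 1.
 */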
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

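/*
 * Entry point for roll forward recovery at mount time.  With
 * @check_only set, it only reports whether there is anything to recover
 * (returning 1); otherwise it runs the two passes above and writes a
 * CP_RECOVERY checkpoint on success.
 */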
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}
	clear_sbi_flag(sbi, SBI_POR_DOING);

	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret: err;
}