// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
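
/*
 * Illustrative walk of scenario 3 above (a sketch with made-up block
 * addresses, not taken from a real log):
 *
 *	blk 100: dnode(F) for ino 7	-> replayed, it carries fsync_mark
 *	blk 101: inode(x) for ino 7	-> dropped, it was never fsynced
 *
 * Only fsync-marked nodes are durable by contract, so recovery applies
 * the dnode(F) at blk 100 and ignores the trailing inode(x).
 */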
static struct kmem_cache *fsync_entry_slab;

#ifdef CONFIG_UNICODE
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
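
/*
 * For reference while reading this file: each inode being tracked for
 * recovery is wrapped in a struct fsync_inode_entry. The definition
 * lives in f2fs.h; the fields are paraphrased here:
 *
 *	struct fsync_inode_entry {
 *		struct list_head list;	// node in the recovery list
 *		struct inode *inode;	// VFS inode being recovered
 *		block_t blkaddr;	// block of the last fsynced dnode
 *		block_t last_dentry;	// block of the last dentry mark
 *	};
 */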
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = f2fs_dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab,
					GFP_F2FS_ZERO, true, NULL);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
		/*
		 * In this case the hash isn't computable without the key,
		 * so it was saved on-disk.
		 */
		if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
			return -EINVAL;
		fname->hash = get_unaligned((f2fs_hash_t *)
				&raw_inode->i_name[fname->disk_name.len]);
	} else if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page = NULL;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = f2fs_dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
						quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}
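
/*
 * Note on the loop bound in find_fsync_dnodes(): the chain of node
 * blocks written since the last checkpoint can span at most
 * 'free_blocks' distinct blocks (all of main area minus the blocks that
 * were already valid at checkpoint time). Visiting more blocks than
 * that, or a block whose next_blkaddr points at itself, proves the
 * chain loops, so the scan aborts with -EINVAL instead of spinning.
 */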
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = f2fs_dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
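
/*
 * Why check_index_in_prev_nodes() exists, as a sketch with made-up
 * addresses: the fsynced dnode may say index i of inode A now lives at
 * blkaddr 500, while at checkpoint time blkaddr 500 still belonged to
 * index j of inode B. Before handing the block to A, the segment
 * summary is used to find B's dnode and truncate the stale mapping, so
 * the SIT bitmap stays consistent after f2fs_replace_block().
 */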
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}
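
/*
 * Summary of the per-index policy implemented by step 3 above, where
 * src is the address currently in the on-disk dnode and dest is the
 * address recorded in the fsynced node page being replayed:
 *
 *	dest		action
 *	---------	-------------------------------------------------
 *	== src		nothing to recover
 *	NULL_ADDR	invalidate src
 *	NEW_ADDR	invalidate src, reserve a fresh block
 *	valid addr	reserve a block if src is NULL_ADDR, evict any
 *			stale owner via check_index_in_prev_nodes(), then
 *			f2fs_replace_block() from src to dest
 */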
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	down_write(&sbi->cp_global_sem);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else
		f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	up_write(&sbi->cp_global_sem);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}
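
/*
 * Mount-time usage, sketched as a hedged paraphrase of the caller in
 * super.c (details such as error unwinding are simplified):
 *
 *	if (!f2fs_readonly(sb) && !skip_recovery)
 *		err = f2fs_recover_fsync_data(sbi, false);
 *	else {
 *		err = f2fs_recover_fsync_data(sbi, true);
 *		if (!f2fs_readonly(sb) && err > 0)
 *			f2fs_err(sbi, "Need to recover fsync data");
 *	}
 *
 * A positive return from the check_only probe means fsync data exists
 * but was not replayed; a negative value is a hard failure; zero means
 * either nothing to recover or a successful replay followed by the
 * CP_RECOVERY checkpoint above.
 */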
int __init f2fs_create_recovery_cache(void)
{
	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
					sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_recovery_cache(void)
{
	kmem_cache_destroy(fsync_entry_slab);
}