/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
static struct kmem_cache *fsync_entry_slab;
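
/*
 * Roll-forward recovery replays fsynced blocks on top of the last
 * checkpoint, so it may only proceed while the extra blocks still fit
 * into the volume.
 */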
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
        if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
                        > sbi->user_block_count)
                return false;
        return true;
}
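
/* Look up an already-collected inode in the fsync list by inode number. */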
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                                nid_t ino)
{
        struct list_head *this;
        struct fsync_inode_entry *entry;

        list_for_each(this, head) {
                entry = list_entry(this, struct fsync_inode_entry, list);
                if (entry->inode->i_ino == ino)
                        return entry;
        }
        return NULL;
}
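
/*
 * Re-link a recovered inode into its parent directory.  The parent comes
 * from i_pino in the raw inode; if it is not already tracked as a dirty
 * dir inode, it is loaded with f2fs_iget() and its iput() is deferred
 * via FI_DELAY_IPUT.
 */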
static int recover_dentry(struct page *ipage, struct inode *inode)
{
        void *kaddr = page_address(ipage);
        struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
        struct f2fs_inode *raw_inode = &(raw_node->i);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct qstr name;
        struct page *page;
        struct inode *dir;
        int err = 0;

        dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
        if (!dir) {
                dir = f2fs_iget(inode->i_sb, pino);
                if (IS_ERR(dir)) {
                        err = PTR_ERR(dir);
                        goto out;
                }
                set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
        }

        name.len = le32_to_cpu(raw_inode->i_namelen);
        name.name = raw_inode->i_name;

        if (f2fs_find_entry(dir, &name, &page)) {
                kunmap(page);
                f2fs_put_page(page, 0);
        } else {
                err = __f2fs_add_link(dir, &name, inode);
        }
out:
        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
                        "ino = %x, name = %s, dir = %lx, err = %d",
                        ino_of_node(ipage), raw_inode->i_name,
                        IS_ERR(dir) ? 0UL : dir->i_ino, err);
        return err;
}
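
/*
 * Restore the basic inode metadata (mode, size, timestamps) from the
 * fsynced node page.  Dentry dnodes also get their directory entry
 * re-created.
 */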
static int recover_inode(struct inode *inode, struct page *node_page)
{
        void *kaddr = page_address(node_page);
        struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
        struct f2fs_inode *raw_inode = &(raw_node->i);

        if (!IS_INODE(node_page))
                return 0;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_size_write(inode, le64_to_cpu(raw_inode->i_size));
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

        if (is_dent_dnode(node_page))
                return recover_dentry(node_page, inode);

        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
                        ino_of_node(node_page), raw_inode->i_name);
        return 0;
}
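
/*
 * Step #1 of roll-forward recovery: follow the warm node chain from the
 * block right after the last checkpointed position and collect every
 * inode that owns fsynced dnodes.  The walk stops once a node page
 * stamped with an older checkpoint version is met.
 */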
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
        unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
        struct curseg_info *curseg;
        struct page *page;
        block_t blkaddr;
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

        /* read node page; alloc_page() returns NULL on failure */
        page = alloc_page(GFP_F2FS_ZERO);
        if (!page)
                return -ENOMEM;
        lock_page(page);

        while (1) {
                struct fsync_inode_entry *entry;

                err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
                if (err)
                        goto out;

                /* the page is unlocked on read completion */
                lock_page(page);

                if (cp_ver != cpver_of_node(page))
                        break;
                if (!is_fsync_dnode(page))
                        goto next;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (entry) {
                        if (IS_INODE(page) && is_dent_dnode(page))
                                set_inode_flag(F2FS_I(entry->inode),
                                                        FI_INC_LINK);
                } else {
                        if (IS_INODE(page) && is_dent_dnode(page)) {
                                err = recover_inode_page(sbi, page);
                                if (err)
                                        break;
                        }
                        /* add this fsync inode to the list */
                        entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
                        if (!entry) {
                                err = -ENOMEM;
                                break;
                        }
                        entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
                        if (IS_ERR(entry->inode)) {
                                err = PTR_ERR(entry->inode);
                                kmem_cache_free(fsync_entry_slab, entry);
                                break;
                        }
                        list_add_tail(&entry->list, head);
                }
                entry->blkaddr = blkaddr;

                err = recover_inode(entry->inode, page);
                if (err && err != -ENOENT)
                        break;
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
        }
        unlock_page(page);
out:
        __free_pages(page, 0);
        return err;
}
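
/* Drop every remaining fsync list entry together with its inode reference. */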
static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
                                        struct list_head *head)
{
        struct fsync_inode_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, list) {
                iput(entry->inode);
                list_del(&entry->list);
                kmem_cache_free(fsync_entry_slab, entry);
        }
}
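
/*
 * If the block at blkaddr is still marked valid, an older node page maps
 * it.  Find that owner through the (in-memory or on-disk) summary entry
 * and punch a one-block hole in it, so the block can be reused for the
 * recovered data.
 */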
static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                                        block_t blkaddr)
{
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
                                        (sbi->blocks_per_seg - 1);
        struct f2fs_summary sum;
        nid_t ino;
        void *kaddr;
        struct inode *inode;
        struct page *node_page;
        block_t bidx;
        int i;

        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
                return;

        /* Get the previous summary */
        for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);
                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
                        break;
                }
        }
        if (i > CURSEG_COLD_DATA) {
                struct page *sum_page = get_sum_page(sbi, segno);
                struct f2fs_summary_block *sum_node;
                kaddr = page_address(sum_page);
                sum_node = (struct f2fs_summary_block *)kaddr;
                sum = sum_node->entries[blkoff];
                f2fs_put_page(sum_page, 1);
        }

        /* Get the node page */
        node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
        bidx = start_bidx_of_node(ofs_of_node(node_page)) +
                                le16_to_cpu(sum.ofs_in_node);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);

        /* Deallocate previous index in the node page */
        inode = f2fs_iget(sbi->sb, ino);
        if (IS_ERR(inode))
                return;

        truncate_hole(inode, bidx, bidx + 1);
        iput(inode);
}
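
/*
 * Replay one fsynced dnode: for every block pointer that differs from
 * the checkpointed dnode, release the previous owner of the target
 * block, fix up the summary and extent cache, write the data page back
 * in place, and finally rewrite the node page itself at blkaddr.
 */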
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page, block_t blkaddr)
{
        unsigned int start, end;
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        int err = 0, recovered = 0;
        int ilock;

        start = start_bidx_of_node(ofs_of_node(page));
        if (IS_INODE(page))
                end = start + ADDRS_PER_INODE;
        else
                end = start + ADDRS_PER_BLOCK;

        ilock = mutex_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);

        err = get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                mutex_unlock_op(sbi, ilock);
                return err;
        }

        wait_on_page_writeback(dn.node_page);

        get_node_info(sbi, dn.nid, &ni);
        BUG_ON(ni.ino != ino_of_node(page));
        BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

        for (; start < end; start++) {
                block_t src, dest;

                src = datablock_addr(dn.node_page, dn.ofs_in_node);
                dest = datablock_addr(page, dn.ofs_in_node);

                if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
                        if (src == NULL_ADDR) {
                                int err = reserve_new_block(&dn);
                                /* We should not get -ENOSPC */
                                BUG_ON(err);
                        }

                        /* Check the previous node page having this index */
                        check_index_in_prev_nodes(sbi, dest);

                        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

                        /* write dummy data page */
                        recover_data_page(sbi, NULL, &sum, src, dest);
                        update_extent_cache(dest, &dn);
                        recovered++;
                }
                dn.ofs_in_node++;
        }

        /* write node page in place */
        set_summary(&sum, dn.nid, 0, 0);
        if (IS_INODE(dn.node_page))
                sync_inode_page(&dn);

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);

        recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
        f2fs_put_dnode(&dn);
        mutex_unlock_op(sbi, ilock);

        f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
                        "recovered_data = %d blocks",
                        inode->i_ino, recovered);
        return 0;
}
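
/*
 * Step #2 of roll-forward recovery: walk the node chain once more and
 * replay the data of every inode collected in step #1.  An entry is
 * released as soon as its last fsynced dnode (entry->blkaddr) has been
 * processed.
 */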
static int recover_data(struct f2fs_sb_info *sbi,
                                struct list_head *head, int type)
{
        unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
        struct curseg_info *curseg;
        struct page *page;
        int err = 0;
        block_t blkaddr;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, type);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        page = alloc_page(GFP_NOFS | __GFP_ZERO);
        if (!page)
                return -ENOMEM;
        lock_page(page);

        while (1) {
                struct fsync_inode_entry *entry;

                err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
                if (err)
                        goto out;
                lock_page(page);

                if (cp_ver != cpver_of_node(page))
                        break;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry)
                        goto next;

                err = do_recover_data(sbi, entry->inode, page, blkaddr);
                if (err)
                        break;

                if (entry->blkaddr == blkaddr) {
                        iput(entry->inode);
                        list_del(&entry->list);
                        kmem_cache_free(fsync_entry_slab, entry);
                }
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
        }
        unlock_page(page);
out:
        __free_pages(page, 0);

        if (!err)
                allocate_new_segments(sbi);
        return err;
}
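
/*
 * Entry point of roll-forward recovery, called after mounting when the
 * checkpoint says fsynced data may exist beyond it.  When the replay
 * succeeds, a new checkpoint makes the recovered state durable.
 */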
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
        struct list_head inode_list;
        int err;

        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                        sizeof(struct fsync_inode_entry), NULL);
        if (unlikely(!fsync_entry_slab))
                return -ENOMEM;

        INIT_LIST_HEAD(&inode_list);

        /* step #1: find fsynced inode numbers */
        sbi->por_doing = 1;     /* power-off recovery is in progress */
        err = find_fsync_dnodes(sbi, &inode_list);
        if (err)
                goto out;

        if (list_empty(&inode_list))
                goto out;

        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
        BUG_ON(!list_empty(&inode_list));
out:
        destroy_fsync_dnodes(sbi, &inode_list);
        kmem_cache_destroy(fsync_entry_slab);
        sbi->por_doing = 0;
        if (!err)
                write_checkpoint(sbi, false);
        return err;
}