/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
static struct kmem_cache *fsync_entry_slab;
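
/*
 * Check that the blocks to be replayed by roll-forward recovery still
 * fit within the user-visible block budget.
 */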
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}
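
/* Find an already-collected fsync inode entry by inode number. */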
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}
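
/*
 * If the fsynced dnode carries dentry information (i.e. the file was
 * fsynced right after creation), re-link the recovered inode into its
 * parent directory; otherwise there is nothing to do.
 */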
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
	struct f2fs_inode *raw_inode = &(raw_node->i);
	struct qstr name;
	struct f2fs_dir_entry *de;
	struct page *page;
	struct inode *dir;
	int err = 0;

	if (!is_dent_dnode(ipage))
		goto out;

	dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	de = f2fs_find_entry(dir, &name, &page);
	if (de) {
		kunmap(page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &name, inode);
	}
	iput(dir);
out:
	kunmap(ipage);
	return err;
}
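
/* Restore inode metadata (mode, size, timestamps) from the fsynced node page. */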
static int recover_inode(struct inode *inode, struct page *node_page)
{
	void *kaddr = page_address(node_page);
	struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
	struct f2fs_inode *raw_inode = &(raw_node->i);

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	return recover_dentry(node_page, inode);
}
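
/*
 * Step #1 of roll-forward recovery: follow the chain of node pages written
 * after the last checkpoint, starting from the current warm node segment,
 * and collect every inode that has fsynced dnodes.  A node page whose
 * checkpoint version no longer matches marks the end of the chain.
 */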
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* alloc_page() returns NULL on failure, not an ERR_PTR */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			goto unlock_out;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			entry->blkaddr = blkaddr;
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
								FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					goto unlock_out;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				goto unlock_out;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				goto unlock_out;
			}
			list_add_tail(&entry->list, head);
			entry->blkaddr = blkaddr;
		}
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err == -ENOENT) {
				goto next;
			} else if (err) {
				err = -EINVAL;
				goto unlock_out;
			}
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
unlock_out:
	unlock_page(page);
out:
	__free_pages(page, 0);
	return err;
}
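
/* Release all collected entries along with the inode references they hold. */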
static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
					struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
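
/*
 * A block address being replayed may still be claimed by a stale dnode
 * written before the crash.  Look up that owner through the segment
 * summary and punch out its old index so the block is not referenced twice.
 */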
static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Get the node page */
	node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
	bidx = start_bidx_of_node(ofs_of_node(node_page)) +
				le16_to_cpu(sum.ofs_in_node);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return;

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
}
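
/*
 * Replay a single fsynced dnode: for each block address that differs
 * from what the current dnode holds, drop the stale owner, write the
 * data block back in place, and finally rewrite the node page itself.
 */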
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0;
	int ilock;

	start = start_bidx_of_node(ofs_of_node(page));
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE;
	else
		end = start + ADDRS_PER_BLOCK;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		return err;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			check_index_in_prev_nodes(sbi, dest);

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);
	return 0;
}
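
/*
 * Step #2 of roll-forward recovery: walk the node chain again and replay
 * the dnodes that belong to the inodes collected in step #1.
 */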
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* alloc_page() returns NULL on failure, not an ERR_PTR */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			goto unlock_out;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			goto out;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
unlock_out:
	unlock_page(page);
out:
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}
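
/*
 * Mount-time entry point of roll-forward recovery: collect fsynced
 * inodes, replay their dnodes, and checkpoint the recovered state.
 */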
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (unlikely(!fsync_entry_slab))
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	sbi->por_doing = 1;
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	sbi->por_doing = 0;
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(sbi, &inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	write_checkpoint(sbi, false);
	return err;
}