f2fs: use inner macro GFP_F2FS_ZERO for simplification
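The commit title refers to replacing open-coded gfp flags with the f2fs-internal helper macro GFP_F2FS_ZERO; the result is visible in the alloc_page() call inside ra_sum_pages() below. A minimal sketch of the simplification, assuming GFP_F2FS_ZERO is defined in fs/f2fs/f2fs.h as (GFP_NOFS | __GFP_ZERO):

    /* helper macro from fs/f2fs/f2fs.h (assumed definition) */
    #define GFP_F2FS_ZERO	(GFP_NOFS | __GFP_ZERO)

    /* zeroed, filesystem-safe page allocation with the macro ... */
    page = alloc_page(GFP_F2FS_ZERO);
    /* ... instead of the open-coded flags */
    page = alloc_page(GFP_NOFS | __GFP_ZERO);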
1 /*
2  * fs/f2fs/node.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/mpage.h>
14 #include <linux/backing-dev.h>
15 #include <linux/blkdev.h>
16 #include <linux/pagevec.h>
17 #include <linux/swap.h>
18
19 #include "f2fs.h"
20 #include "node.h"
21 #include "segment.h"
22 #include <trace/events/f2fs.h>
23
24 static struct kmem_cache *nat_entry_slab;
25 static struct kmem_cache *free_nid_slab;
26
27 static void clear_node_page_dirty(struct page *page)
28 {
29         struct address_space *mapping = page->mapping;
30         struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
31         unsigned long flags;
32
33         if (PageDirty(page)) {
34                 spin_lock_irqsave(&mapping->tree_lock, flags);
35                 radix_tree_tag_clear(&mapping->page_tree,
36                                 page_index(page),
37                                 PAGECACHE_TAG_DIRTY);
38                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
39
40                 clear_page_dirty_for_io(page);
41                 dec_page_count(sbi, F2FS_DIRTY_NODES);
42         }
43         ClearPageUptodate(page);
44 }
45
46 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
47 {
48         pgoff_t index = current_nat_addr(sbi, nid);
49         return get_meta_page(sbi, index);
50 }
51
52 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
53 {
54         struct page *src_page;
55         struct page *dst_page;
56         pgoff_t src_off;
57         pgoff_t dst_off;
58         void *src_addr;
59         void *dst_addr;
60         struct f2fs_nm_info *nm_i = NM_I(sbi);
61
62         src_off = current_nat_addr(sbi, nid);
63         dst_off = next_nat_addr(sbi, src_off);
64
65         /* get current nat block page with lock */
66         src_page = get_meta_page(sbi, src_off);
67
68         /* Dirty src_page means that it is already the new target NAT page. */
69         if (PageDirty(src_page))
70                 return src_page;
71
72         dst_page = grab_meta_page(sbi, dst_off);
73
74         src_addr = page_address(src_page);
75         dst_addr = page_address(dst_page);
76         memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
77         set_page_dirty(dst_page);
78         f2fs_put_page(src_page, 1);
79
80         set_to_next_nat(nm_i, nid);
81
82         return dst_page;
83 }
84
85 /*
86  * Readahead NAT pages
87  */
88 static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
89 {
90         struct address_space *mapping = sbi->meta_inode->i_mapping;
91         struct f2fs_nm_info *nm_i = NM_I(sbi);
92         struct page *page;
93         pgoff_t index;
94         int i;
95
96         for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
97                 if (nid >= nm_i->max_nid)
98                         nid = 0;
99                 index = current_nat_addr(sbi, nid);
100
101                 page = grab_cache_page(mapping, index);
102                 if (!page)
103                         continue;
104                 if (PageUptodate(page)) {
105                         mark_page_accessed(page);
106                         f2fs_put_page(page, 1);
107                         continue;
108                 }
109                 f2fs_submit_page_mbio(sbi, page, index, META, READ);
110                 mark_page_accessed(page);
111                 f2fs_put_page(page, 0);
112         }
113         f2fs_submit_merged_bio(sbi, META, true, READ);
114 }
115
116 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
117 {
118         return radix_tree_lookup(&nm_i->nat_root, n);
119 }
120
121 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
122                 nid_t start, unsigned int nr, struct nat_entry **ep)
123 {
124         return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
125 }
126
127 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
128 {
129         list_del(&e->list);
130         radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
131         nm_i->nat_cnt--;
132         kmem_cache_free(nat_entry_slab, e);
133 }
134
135 int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
136 {
137         struct f2fs_nm_info *nm_i = NM_I(sbi);
138         struct nat_entry *e;
139         int is_cp = 1;
140
141         read_lock(&nm_i->nat_tree_lock);
142         e = __lookup_nat_cache(nm_i, nid);
143         if (e && !e->checkpointed)
144                 is_cp = 0;
145         read_unlock(&nm_i->nat_tree_lock);
146         return is_cp;
147 }
148
149 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
150 {
151         struct nat_entry *new;
152
153         new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
154         if (!new)
155                 return NULL;
156         if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
157                 kmem_cache_free(nat_entry_slab, new);
158                 return NULL;
159         }
160         memset(new, 0, sizeof(struct nat_entry));
161         nat_set_nid(new, nid);
162         list_add_tail(&new->list, &nm_i->nat_entries);
163         nm_i->nat_cnt++;
164         return new;
165 }
166
167 static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
168                                                 struct f2fs_nat_entry *ne)
169 {
170         struct nat_entry *e;
171 retry:
172         write_lock(&nm_i->nat_tree_lock);
173         e = __lookup_nat_cache(nm_i, nid);
174         if (!e) {
175                 e = grab_nat_entry(nm_i, nid);
176                 if (!e) {
177                         write_unlock(&nm_i->nat_tree_lock);
178                         goto retry;
179                 }
180                 nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
181                 nat_set_ino(e, le32_to_cpu(ne->ino));
182                 nat_set_version(e, ne->version);
183                 e->checkpointed = true;
184         }
185         write_unlock(&nm_i->nat_tree_lock);
186 }
187
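/*
 * Point the cached NAT entry for ni->nid at new_blkaddr, creating the
 * entry if it is not cached yet, and mark it dirty so it is written back
 * by the next checkpoint.
 */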
188 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
189                         block_t new_blkaddr)
190 {
191         struct f2fs_nm_info *nm_i = NM_I(sbi);
192         struct nat_entry *e;
193 retry:
194         write_lock(&nm_i->nat_tree_lock);
195         e = __lookup_nat_cache(nm_i, ni->nid);
196         if (!e) {
197                 e = grab_nat_entry(nm_i, ni->nid);
198                 if (!e) {
199                         write_unlock(&nm_i->nat_tree_lock);
200                         goto retry;
201                 }
202                 e->ni = *ni;
203                 e->checkpointed = true;
204                 f2fs_bug_on(ni->blk_addr == NEW_ADDR);
205         } else if (new_blkaddr == NEW_ADDR) {
206                 /*
207                  * when a nid is reallocated,
208                  * the previous nat entry can remain in the nat cache,
209                  * so reinitialize it with new information.
210                  */
211                 e->ni = *ni;
212                 f2fs_bug_on(ni->blk_addr != NULL_ADDR);
213         }
214
215         if (new_blkaddr == NEW_ADDR)
216                 e->checkpointed = false;
217
218         /* sanity check */
219         f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
220         f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
221                         new_blkaddr == NULL_ADDR);
222         f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
223                         new_blkaddr == NEW_ADDR);
224         f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
225                         nat_get_blkaddr(e) != NULL_ADDR &&
226                         new_blkaddr == NEW_ADDR);
227
228         /* increment version number as the node is removed */
229         if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
230                 unsigned char version = nat_get_version(e);
231                 nat_set_version(e, inc_node_version(version));
232         }
233
234         /* change address */
235         nat_set_blkaddr(e, new_blkaddr);
236         __set_nat_cache_dirty(nm_i, e);
237         write_unlock(&nm_i->nat_tree_lock);
238 }
239
240 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
241 {
242         struct f2fs_nm_info *nm_i = NM_I(sbi);
243
244         if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
245                 return 0;
246
247         write_lock(&nm_i->nat_tree_lock);
248         while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
249                 struct nat_entry *ne;
250                 ne = list_first_entry(&nm_i->nat_entries,
251                                         struct nat_entry, list);
252                 __del_from_nat_cache(nm_i, ne);
253                 nr_shrink--;
254         }
255         write_unlock(&nm_i->nat_tree_lock);
256         return nr_shrink;
257 }
258
259 /*
260  * This function always returns success
261  */
262 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
263 {
264         struct f2fs_nm_info *nm_i = NM_I(sbi);
265         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
266         struct f2fs_summary_block *sum = curseg->sum_blk;
267         nid_t start_nid = START_NID(nid);
268         struct f2fs_nat_block *nat_blk;
269         struct page *page = NULL;
270         struct f2fs_nat_entry ne;
271         struct nat_entry *e;
272         int i;
273
274         memset(&ne, 0, sizeof(struct f2fs_nat_entry));
275         ni->nid = nid;
276
277         /* Check nat cache */
278         read_lock(&nm_i->nat_tree_lock);
279         e = __lookup_nat_cache(nm_i, nid);
280         if (e) {
281                 ni->ino = nat_get_ino(e);
282                 ni->blk_addr = nat_get_blkaddr(e);
283                 ni->version = nat_get_version(e);
284         }
285         read_unlock(&nm_i->nat_tree_lock);
286         if (e)
287                 return;
288
289         /* Check current segment summary */
290         mutex_lock(&curseg->curseg_mutex);
291         i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
292         if (i >= 0) {
293                 ne = nat_in_journal(sum, i);
294                 node_info_from_raw_nat(ni, &ne);
295         }
296         mutex_unlock(&curseg->curseg_mutex);
297         if (i >= 0)
298                 goto cache;
299
300         /* Fill node_info from nat page */
301         page = get_current_nat_page(sbi, start_nid);
302         nat_blk = (struct f2fs_nat_block *)page_address(page);
303         ne = nat_blk->entries[nid - start_nid];
304         node_info_from_raw_nat(ni, &ne);
305         f2fs_put_page(page, 1);
306 cache:
307         /* cache nat entry */
308         cache_nat_entry(NM_I(sbi), nid, &ne);
309 }
310
311 /*
312  * The maximum depth is four.
313  * Offset[0] will have the raw inode offset.
314  */
315 static int get_node_path(struct f2fs_inode_info *fi, long block,
316                                 int offset[4], unsigned int noffset[4])
317 {
318         const long direct_index = ADDRS_PER_INODE(fi);
319         const long direct_blks = ADDRS_PER_BLOCK;
320         const long dptrs_per_blk = NIDS_PER_BLOCK;
321         const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
322         const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
323         int n = 0;
324         int level = 0;
325
326         noffset[0] = 0;
327
328         if (block < direct_index) {
329                 offset[n] = block;
330                 goto got;
331         }
332         block -= direct_index;
333         if (block < direct_blks) {
334                 offset[n++] = NODE_DIR1_BLOCK;
335                 noffset[n] = 1;
336                 offset[n] = block;
337                 level = 1;
338                 goto got;
339         }
340         block -= direct_blks;
341         if (block < direct_blks) {
342                 offset[n++] = NODE_DIR2_BLOCK;
343                 noffset[n] = 2;
344                 offset[n] = block;
345                 level = 1;
346                 goto got;
347         }
348         block -= direct_blks;
349         if (block < indirect_blks) {
350                 offset[n++] = NODE_IND1_BLOCK;
351                 noffset[n] = 3;
352                 offset[n++] = block / direct_blks;
353                 noffset[n] = 4 + offset[n - 1];
354                 offset[n] = block % direct_blks;
355                 level = 2;
356                 goto got;
357         }
358         block -= indirect_blks;
359         if (block < indirect_blks) {
360                 offset[n++] = NODE_IND2_BLOCK;
361                 noffset[n] = 4 + dptrs_per_blk;
362                 offset[n++] = block / direct_blks;
363                 noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
364                 offset[n] = block % direct_blks;
365                 level = 2;
366                 goto got;
367         }
368         block -= indirect_blks;
369         if (block < dindirect_blks) {
370                 offset[n++] = NODE_DIND_BLOCK;
371                 noffset[n] = 5 + (dptrs_per_blk * 2);
372                 offset[n++] = block / indirect_blks;
373                 noffset[n] = 6 + (dptrs_per_blk * 2) +
374                               offset[n - 1] * (dptrs_per_blk + 1);
375                 offset[n++] = (block / direct_blks) % dptrs_per_blk;
376                 noffset[n] = 7 + (dptrs_per_blk * 2) +
377                               offset[n - 2] * (dptrs_per_blk + 1) +
378                               offset[n - 1];
379                 offset[n] = block % direct_blks;
380                 level = 3;
381                 goto got;
382         } else {
383                 BUG();
384         }
385 got:
386         return level;
387 }
388
389 /*
390  * Caller should call f2fs_put_dnode(dn).
391  * Also, it should grab and release a mutex by calling mutex_lock_op() and
392  * mutex_unlock_op() only if ro is not set to RDONLY_NODE.
393  * In the case of RDONLY_NODE, we don't need to care about the mutex.
394  */
395 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
396 {
397         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
398         struct page *npage[4];
399         struct page *parent;
400         int offset[4];
401         unsigned int noffset[4];
402         nid_t nids[4];
403         int level, i;
404         int err = 0;
405
406         level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
407
408         nids[0] = dn->inode->i_ino;
409         npage[0] = dn->inode_page;
410
411         if (!npage[0]) {
412                 npage[0] = get_node_page(sbi, nids[0]);
413                 if (IS_ERR(npage[0]))
414                         return PTR_ERR(npage[0]);
415         }
416         parent = npage[0];
417         if (level != 0)
418                 nids[1] = get_nid(parent, offset[0], true);
419         dn->inode_page = npage[0];
420         dn->inode_page_locked = true;
421
422         /* get indirect or direct nodes */
423         for (i = 1; i <= level; i++) {
424                 bool done = false;
425
426                 if (!nids[i] && mode == ALLOC_NODE) {
427                         /* alloc new node */
428                         if (!alloc_nid(sbi, &(nids[i]))) {
429                                 err = -ENOSPC;
430                                 goto release_pages;
431                         }
432
433                         dn->nid = nids[i];
434                         npage[i] = new_node_page(dn, noffset[i], NULL);
435                         if (IS_ERR(npage[i])) {
436                                 alloc_nid_failed(sbi, nids[i]);
437                                 err = PTR_ERR(npage[i]);
438                                 goto release_pages;
439                         }
440
441                         set_nid(parent, offset[i - 1], nids[i], i == 1);
442                         alloc_nid_done(sbi, nids[i]);
443                         done = true;
444                 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
445                         npage[i] = get_node_page_ra(parent, offset[i - 1]);
446                         if (IS_ERR(npage[i])) {
447                                 err = PTR_ERR(npage[i]);
448                                 goto release_pages;
449                         }
450                         done = true;
451                 }
452                 if (i == 1) {
453                         dn->inode_page_locked = false;
454                         unlock_page(parent);
455                 } else {
456                         f2fs_put_page(parent, 1);
457                 }
458
459                 if (!done) {
460                         npage[i] = get_node_page(sbi, nids[i]);
461                         if (IS_ERR(npage[i])) {
462                                 err = PTR_ERR(npage[i]);
463                                 f2fs_put_page(npage[0], 0);
464                                 goto release_out;
465                         }
466                 }
467                 if (i < level) {
468                         parent = npage[i];
469                         nids[i + 1] = get_nid(parent, offset[i], false);
470                 }
471         }
472         dn->nid = nids[level];
473         dn->ofs_in_node = offset[level];
474         dn->node_page = npage[level];
475         dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
476         return 0;
477
478 release_pages:
479         f2fs_put_page(parent, 1);
480         if (i > 1)
481                 f2fs_put_page(npage[0], 0);
482 release_out:
483         dn->inode_page = NULL;
484         dn->node_page = NULL;
485         return err;
486 }
487
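/*
 * Release the on-disk block of the node at dn->nid: invalidate its block
 * address, drop the valid node (and, for an inode, valid inode) count,
 * clear its NAT mapping, and finally drop the now-clean node page.
 */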
488 static void truncate_node(struct dnode_of_data *dn)
489 {
490         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
491         struct node_info ni;
492
493         get_node_info(sbi, dn->nid, &ni);
494         if (dn->inode->i_blocks == 0) {
495                 f2fs_bug_on(ni.blk_addr != NULL_ADDR);
496                 goto invalidate;
497         }
498         f2fs_bug_on(ni.blk_addr == NULL_ADDR);
499
500         /* Deallocate node address */
501         invalidate_blocks(sbi, ni.blk_addr);
502         dec_valid_node_count(sbi, dn->inode);
503         set_node_addr(sbi, &ni, NULL_ADDR);
504
505         if (dn->nid == dn->inode->i_ino) {
506                 remove_orphan_inode(sbi, dn->nid);
507                 dec_valid_inode_count(sbi);
508         } else {
509                 sync_inode_page(dn);
510         }
511 invalidate:
512         clear_node_page_dirty(dn->node_page);
513         F2FS_SET_SB_DIRT(sbi);
514
515         f2fs_put_page(dn->node_page, 1);
516         dn->node_page = NULL;
517         trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
518 }
519
520 static int truncate_dnode(struct dnode_of_data *dn)
521 {
522         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
523         struct page *page;
524
525         if (dn->nid == 0)
526                 return 1;
527
528         /* get direct node */
529         page = get_node_page(sbi, dn->nid);
530         if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
531                 return 1;
532         else if (IS_ERR(page))
533                 return PTR_ERR(page);
534
535         /* Make dnode_of_data for parameter */
536         dn->node_page = page;
537         dn->ofs_in_node = 0;
538         truncate_data_blocks(dn);
539         truncate_node(dn);
540         return 1;
541 }
542
543 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
544                                                 int ofs, int depth)
545 {
546         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
547         struct dnode_of_data rdn = *dn;
548         struct page *page;
549         struct f2fs_node *rn;
550         nid_t child_nid;
551         unsigned int child_nofs;
552         int freed = 0;
553         int i, ret;
554
555         if (dn->nid == 0)
556                 return NIDS_PER_BLOCK + 1;
557
558         trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
559
560         page = get_node_page(sbi, dn->nid);
561         if (IS_ERR(page)) {
562                 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
563                 return PTR_ERR(page);
564         }
565
566         rn = F2FS_NODE(page);
567         if (depth < 3) {
568                 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
569                         child_nid = le32_to_cpu(rn->in.nid[i]);
570                         if (child_nid == 0)
571                                 continue;
572                         rdn.nid = child_nid;
573                         ret = truncate_dnode(&rdn);
574                         if (ret < 0)
575                                 goto out_err;
576                         set_nid(page, i, 0, false);
577                 }
578         } else {
579                 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
580                 for (i = ofs; i < NIDS_PER_BLOCK; i++) {
581                         child_nid = le32_to_cpu(rn->in.nid[i]);
582                         if (child_nid == 0) {
583                                 child_nofs += NIDS_PER_BLOCK + 1;
584                                 continue;
585                         }
586                         rdn.nid = child_nid;
587                         ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
588                         if (ret == (NIDS_PER_BLOCK + 1)) {
589                                 set_nid(page, i, 0, false);
590                                 child_nofs += ret;
591                         } else if (ret < 0 && ret != -ENOENT) {
592                                 goto out_err;
593                         }
594                 }
595                 freed = child_nofs;
596         }
597
598         if (!ofs) {
599                 /* remove current indirect node */
600                 dn->node_page = page;
601                 truncate_node(dn);
602                 freed++;
603         } else {
604                 f2fs_put_page(page, 1);
605         }
606         trace_f2fs_truncate_nodes_exit(dn->inode, freed);
607         return freed;
608
609 out_err:
610         f2fs_put_page(page, 1);
611         trace_f2fs_truncate_nodes_exit(dn->inode, ret);
612         return ret;
613 }
614
615 static int truncate_partial_nodes(struct dnode_of_data *dn,
616                         struct f2fs_inode *ri, int *offset, int depth)
617 {
618         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
619         struct page *pages[2];
620         nid_t nid[3];
621         nid_t child_nid;
622         int err = 0;
623         int i;
624         int idx = depth - 2;
625
626         nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
627         if (!nid[0])
628                 return 0;
629
630         /* get indirect nodes in the path */
631         for (i = 0; i < depth - 1; i++) {
632                 /* the reference count will be increased */
633                 pages[i] = get_node_page(sbi, nid[i]);
634                 if (IS_ERR(pages[i])) {
635                         depth = i + 1;
636                         err = PTR_ERR(pages[i]);
637                         goto fail;
638                 }
639                 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
640         }
641
642         /* free direct nodes linked to a partial indirect node */
643         for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
644                 child_nid = get_nid(pages[idx], i, false);
645                 if (!child_nid)
646                         continue;
647                 dn->nid = child_nid;
648                 err = truncate_dnode(dn);
649                 if (err < 0)
650                         goto fail;
651                 set_nid(pages[idx], i, 0, false);
652         }
653
654         if (offset[depth - 1] == 0) {
655                 dn->node_page = pages[idx];
656                 dn->nid = nid[idx];
657                 truncate_node(dn);
658         } else {
659                 f2fs_put_page(pages[idx], 1);
660         }
661         offset[idx]++;
662         offset[depth - 1] = 0;
663 fail:
664         for (i = depth - 3; i >= 0; i--)
665                 f2fs_put_page(pages[i], 1);
666
667         trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
668
669         return err;
670 }
671
672 /*
673  * All the block addresses of data and nodes should be nullified.
674  */
675 int truncate_inode_blocks(struct inode *inode, pgoff_t from)
676 {
677         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
678         struct address_space *node_mapping = sbi->node_inode->i_mapping;
679         int err = 0, cont = 1;
680         int level, offset[4], noffset[4];
681         unsigned int nofs = 0;
682         struct f2fs_node *rn;
683         struct dnode_of_data dn;
684         struct page *page;
685
686         trace_f2fs_truncate_inode_blocks_enter(inode, from);
687
688         level = get_node_path(F2FS_I(inode), from, offset, noffset);
689 restart:
690         page = get_node_page(sbi, inode->i_ino);
691         if (IS_ERR(page)) {
692                 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
693                 return PTR_ERR(page);
694         }
695
696         set_new_dnode(&dn, inode, page, NULL, 0);
697         unlock_page(page);
698
699         rn = F2FS_NODE(page);
700         switch (level) {
701         case 0:
702         case 1:
703                 nofs = noffset[1];
704                 break;
705         case 2:
706                 nofs = noffset[1];
707                 if (!offset[level - 1])
708                         goto skip_partial;
709                 err = truncate_partial_nodes(&dn, &rn->i, offset, level);
710                 if (err < 0 && err != -ENOENT)
711                         goto fail;
712                 nofs += 1 + NIDS_PER_BLOCK;
713                 break;
714         case 3:
715                 nofs = 5 + 2 * NIDS_PER_BLOCK;
716                 if (!offset[level - 1])
717                         goto skip_partial;
718                 err = truncate_partial_nodes(&dn, &rn->i, offset, level);
719                 if (err < 0 && err != -ENOENT)
720                         goto fail;
721                 break;
722         default:
723                 BUG();
724         }
725
726 skip_partial:
727         while (cont) {
728                 dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
729                 switch (offset[0]) {
730                 case NODE_DIR1_BLOCK:
731                 case NODE_DIR2_BLOCK:
732                         err = truncate_dnode(&dn);
733                         break;
734
735                 case NODE_IND1_BLOCK:
736                 case NODE_IND2_BLOCK:
737                         err = truncate_nodes(&dn, nofs, offset[1], 2);
738                         break;
739
740                 case NODE_DIND_BLOCK:
741                         err = truncate_nodes(&dn, nofs, offset[1], 3);
742                         cont = 0;
743                         break;
744
745                 default:
746                         BUG();
747                 }
748                 if (err < 0 && err != -ENOENT)
749                         goto fail;
750                 if (offset[1] == 0 &&
751                                 rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
752                         lock_page(page);
753                         if (page->mapping != node_mapping) {
754                                 f2fs_put_page(page, 1);
755                                 goto restart;
756                         }
757                         wait_on_page_writeback(page);
758                         rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
759                         set_page_dirty(page);
760                         unlock_page(page);
761                 }
762                 offset[1] = 0;
763                 offset[0]++;
764                 nofs += err;
765         }
766 fail:
767         f2fs_put_page(page, 0);
768         trace_f2fs_truncate_inode_blocks_exit(inode, err);
769         return err > 0 ? 0 : err;
770 }
771
772 int truncate_xattr_node(struct inode *inode, struct page *page)
773 {
774         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
775         nid_t nid = F2FS_I(inode)->i_xattr_nid;
776         struct dnode_of_data dn;
777         struct page *npage;
778
779         if (!nid)
780                 return 0;
781
782         npage = get_node_page(sbi, nid);
783         if (IS_ERR(npage))
784                 return PTR_ERR(npage);
785
786         F2FS_I(inode)->i_xattr_nid = 0;
787
788         /* need to do checkpoint during fsync */
789         F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
790
791         set_new_dnode(&dn, inode, page, npage, nid);
792
793         if (page)
794                 dn.inode_page_locked = true;
795         truncate_node(&dn);
796         return 0;
797 }
798
799 /*
800  * Caller should grab and release a mutex by calling mutex_lock_op() and
801  * mutex_unlock_op().
802  */
803 void remove_inode_page(struct inode *inode)
804 {
805         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
806         struct page *page;
807         nid_t ino = inode->i_ino;
808         struct dnode_of_data dn;
809
810         page = get_node_page(sbi, ino);
811         if (IS_ERR(page))
812                 return;
813
814         if (truncate_xattr_node(inode, page)) {
815                 f2fs_put_page(page, 1);
816                 return;
817         }
818         /* 0 is possible, after f2fs_new_inode() has failed */
819         f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
820         set_new_dnode(&dn, inode, page, page, ino);
821         truncate_node(&dn);
822 }
823
824 struct page *new_inode_page(struct inode *inode, const struct qstr *name)
825 {
826         struct dnode_of_data dn;
827
828         /* allocate inode page for new inode */
829         set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
830
831         /* caller should f2fs_put_page(page, 1); */
832         return new_node_page(&dn, 0, NULL);
833 }
834
835 struct page *new_node_page(struct dnode_of_data *dn,
836                                 unsigned int ofs, struct page *ipage)
837 {
838         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
839         struct address_space *mapping = sbi->node_inode->i_mapping;
840         struct node_info old_ni, new_ni;
841         struct page *page;
842         int err;
843
844         if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
845                 return ERR_PTR(-EPERM);
846
847         page = grab_cache_page(mapping, dn->nid);
848         if (!page)
849                 return ERR_PTR(-ENOMEM);
850
851         if (!inc_valid_node_count(sbi, dn->inode)) {
852                 err = -ENOSPC;
853                 goto fail;
854         }
855
856         get_node_info(sbi, dn->nid, &old_ni);
857
858         /* Reinitialize old_ni with new node page */
859         f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
860         new_ni = old_ni;
861         new_ni.ino = dn->inode->i_ino;
862         set_node_addr(sbi, &new_ni, NEW_ADDR);
863
864         fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
865         set_cold_node(dn->inode, page);
866         SetPageUptodate(page);
867         set_page_dirty(page);
868
869         if (ofs == XATTR_NODE_OFFSET)
870                 F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
871
872         dn->node_page = page;
873         if (ipage)
874                 update_inode(dn->inode, ipage);
875         else
876                 sync_inode_page(dn);
877         if (ofs == 0)
878                 inc_valid_inode_count(sbi);
879
880         return page;
881
882 fail:
883         clear_node_page_dirty(page);
884         f2fs_put_page(page, 1);
885         return ERR_PTR(err);
886 }
887
888 /*
889  * Depending on the return value, the caller should do the following:
890  * 0: f2fs_put_page(page, 0)
891  * LOCKED_PAGE: f2fs_put_page(page, 1)
892  * error: nothing
893  */
894 static int read_node_page(struct page *page, int rw)
895 {
896         struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
897         struct node_info ni;
898
899         get_node_info(sbi, page->index, &ni);
900
901         if (ni.blk_addr == NULL_ADDR) {
902                 f2fs_put_page(page, 1);
903                 return -ENOENT;
904         }
905
906         if (PageUptodate(page))
907                 return LOCKED_PAGE;
908
909         return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
910 }
911
912 /*
913  * Readahead a node page
914  */
915 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
916 {
917         struct address_space *mapping = sbi->node_inode->i_mapping;
918         struct page *apage;
919         int err;
920
921         apage = find_get_page(mapping, nid);
922         if (apage && PageUptodate(apage)) {
923                 f2fs_put_page(apage, 0);
924                 return;
925         }
926         f2fs_put_page(apage, 0);
927
928         apage = grab_cache_page(mapping, nid);
929         if (!apage)
930                 return;
931
932         err = read_node_page(apage, READA);
933         if (err == 0)
934                 f2fs_put_page(apage, 0);
935         else if (err == LOCKED_PAGE)
936                 f2fs_put_page(apage, 1);
937 }
938
939 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
940 {
941         struct address_space *mapping = sbi->node_inode->i_mapping;
942         struct page *page;
943         int err;
944 repeat:
945         page = grab_cache_page(mapping, nid);
946         if (!page)
947                 return ERR_PTR(-ENOMEM);
948
949         err = read_node_page(page, READ_SYNC);
950         if (err < 0)
951                 return ERR_PTR(err);
952         else if (err == LOCKED_PAGE)
953                 goto got_it;
954
955         lock_page(page);
956         if (!PageUptodate(page)) {
957                 f2fs_put_page(page, 1);
958                 return ERR_PTR(-EIO);
959         }
960         if (page->mapping != mapping) {
961                 f2fs_put_page(page, 1);
962                 goto repeat;
963         }
964 got_it:
965         f2fs_bug_on(nid != nid_of_node(page));
966         mark_page_accessed(page);
967         return page;
968 }
969
970 /*
971  * Return a locked page for the desired node page.
972  * Also, readahead up to MAX_RA_NODE node pages.
973  */
974 struct page *get_node_page_ra(struct page *parent, int start)
975 {
976         struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
977         struct address_space *mapping = sbi->node_inode->i_mapping;
978         struct blk_plug plug;
979         struct page *page;
980         int err, i, end;
981         nid_t nid;
982
983         /* First, try getting the desired direct node. */
984         nid = get_nid(parent, start, false);
985         if (!nid)
986                 return ERR_PTR(-ENOENT);
987 repeat:
988         page = grab_cache_page(mapping, nid);
989         if (!page)
990                 return ERR_PTR(-ENOMEM);
991
992         err = read_node_page(page, READ_SYNC);
993         if (err < 0)
994                 return ERR_PTR(err);
995         else if (err == LOCKED_PAGE)
996                 goto page_hit;
997
998         blk_start_plug(&plug);
999
1000         /* Then, try readahead for siblings of the desired node */
1001         end = start + MAX_RA_NODE;
1002         end = min(end, NIDS_PER_BLOCK);
1003         for (i = start + 1; i < end; i++) {
1004                 nid = get_nid(parent, i, false);
1005                 if (!nid)
1006                         continue;
1007                 ra_node_page(sbi, nid);
1008         }
1009
1010         blk_finish_plug(&plug);
1011
1012         lock_page(page);
1013         if (page->mapping != mapping) {
1014                 f2fs_put_page(page, 1);
1015                 goto repeat;
1016         }
1017 page_hit:
1018         if (!PageUptodate(page)) {
1019                 f2fs_put_page(page, 1);
1020                 return ERR_PTR(-EIO);
1021         }
1022         mark_page_accessed(page);
1023         return page;
1024 }
1025
1026 void sync_inode_page(struct dnode_of_data *dn)
1027 {
1028         if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
1029                 update_inode(dn->inode, dn->node_page);
1030         } else if (dn->inode_page) {
1031                 if (!dn->inode_page_locked)
1032                         lock_page(dn->inode_page);
1033                 update_inode(dn->inode, dn->inode_page);
1034                 if (!dn->inode_page_locked)
1035                         unlock_page(dn->inode_page);
1036         } else {
1037                 update_inode_page(dn->inode);
1038         }
1039 }
1040
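/*
 * Write back dirty node pages. With ino == 0, all node pages are flushed
 * in three steps (indirect nodes, dentry dnodes, file dnodes); with a
 * non-zero ino (the fsync path), the scan starts at the last step and only
 * pages belonging to that inode are written.
 */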
1041 int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1042                                         struct writeback_control *wbc)
1043 {
1044         struct address_space *mapping = sbi->node_inode->i_mapping;
1045         pgoff_t index, end;
1046         struct pagevec pvec;
1047         int step = ino ? 2 : 0;
1048         int nwritten = 0, wrote = 0;
1049
1050         pagevec_init(&pvec, 0);
1051
1052 next_step:
1053         index = 0;
1054         end = LONG_MAX;
1055
1056         while (index <= end) {
1057                 int i, nr_pages;
1058                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1059                                 PAGECACHE_TAG_DIRTY,
1060                                 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1061                 if (nr_pages == 0)
1062                         break;
1063
1064                 for (i = 0; i < nr_pages; i++) {
1065                         struct page *page = pvec.pages[i];
1066
1067                         /*
1068                          * flushing sequence with step:
1069                          * 0. indirect nodes
1070                          * 1. dentry dnodes
1071                          * 2. file dnodes
1072                          */
1073                         if (step == 0 && IS_DNODE(page))
1074                                 continue;
1075                         if (step == 1 && (!IS_DNODE(page) ||
1076                                                 is_cold_node(page)))
1077                                 continue;
1078                         if (step == 2 && (!IS_DNODE(page) ||
1079                                                 !is_cold_node(page)))
1080                                 continue;
1081
1082                         /*
1083                          * In fsync mode,
1084                          * we should not skip writing node pages.
1085                          */
1086                         if (ino && ino_of_node(page) == ino)
1087                                 lock_page(page);
1088                         else if (!trylock_page(page))
1089                                 continue;
1090
1091                         if (unlikely(page->mapping != mapping)) {
1092 continue_unlock:
1093                                 unlock_page(page);
1094                                 continue;
1095                         }
1096                         if (ino && ino_of_node(page) != ino)
1097                                 goto continue_unlock;
1098
1099                         if (!PageDirty(page)) {
1100                                 /* someone wrote it for us */
1101                                 goto continue_unlock;
1102                         }
1103
1104                         if (!clear_page_dirty_for_io(page))
1105                                 goto continue_unlock;
1106
1107                         /* called by fsync() */
1108                         if (ino && IS_DNODE(page)) {
1109                                 int mark = !is_checkpointed_node(sbi, ino);
1110                                 set_fsync_mark(page, 1);
1111                                 if (IS_INODE(page))
1112                                         set_dentry_mark(page, mark);
1113                                 nwritten++;
1114                         } else {
1115                                 set_fsync_mark(page, 0);
1116                                 set_dentry_mark(page, 0);
1117                         }
1118                         mapping->a_ops->writepage(page, wbc);
1119                         wrote++;
1120
1121                         if (--wbc->nr_to_write == 0)
1122                                 break;
1123                 }
1124                 pagevec_release(&pvec);
1125                 cond_resched();
1126
1127                 if (wbc->nr_to_write == 0) {
1128                         step = 2;
1129                         break;
1130                 }
1131         }
1132
1133         if (step < 2) {
1134                 step++;
1135                 goto next_step;
1136         }
1137
1138         if (wrote)
1139                 f2fs_submit_merged_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL,
1140                                                                         WRITE);
1141         return nwritten;
1142 }
1143
1144 int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
1145 {
1146         struct address_space *mapping = sbi->node_inode->i_mapping;
1147         pgoff_t index = 0, end = LONG_MAX;
1148         struct pagevec pvec;
1149         int nr_pages;
1150         int ret2 = 0, ret = 0;
1151
1152         pagevec_init(&pvec, 0);
1153         while ((index <= end) &&
1154                         (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1155                         PAGECACHE_TAG_WRITEBACK,
1156                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
1157                 unsigned i;
1158
1159                 for (i = 0; i < nr_pages; i++) {
1160                         struct page *page = pvec.pages[i];
1161
1162                         /* until radix tree lookup accepts end_index */
1163                         if (page->index > end)
1164                                 continue;
1165
1166                         if (ino && ino_of_node(page) == ino) {
1167                                 wait_on_page_writeback(page);
1168                                 if (TestClearPageError(page))
1169                                         ret = -EIO;
1170                         }
1171                 }
1172                 pagevec_release(&pvec);
1173                 cond_resched();
1174         }
1175
1176         if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
1177                 ret2 = -ENOSPC;
1178         if (test_and_clear_bit(AS_EIO, &mapping->flags))
1179                 ret2 = -EIO;
1180         if (!ret)
1181                 ret = ret2;
1182         return ret;
1183 }
1184
1185 static int f2fs_write_node_page(struct page *page,
1186                                 struct writeback_control *wbc)
1187 {
1188         struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
1189         nid_t nid;
1190         block_t new_addr;
1191         struct node_info ni;
1192
1193         if (sbi->por_doing)
1194                 goto redirty_out;
1195
1196         wait_on_page_writeback(page);
1197
1198         /* get old block addr of this node page */
1199         nid = nid_of_node(page);
1200         f2fs_bug_on(page->index != nid);
1201
1202         get_node_info(sbi, nid, &ni);
1203
1204         /* This page is already truncated */
1205         if (ni.blk_addr == NULL_ADDR) {
1206                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1207                 unlock_page(page);
1208                 return 0;
1209         }
1210
1211         if (wbc->for_reclaim)
1212                 goto redirty_out;
1213
1214         mutex_lock(&sbi->node_write);
1215         set_page_writeback(page);
1216         write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
1217         set_node_addr(sbi, &ni, new_addr);
1218         dec_page_count(sbi, F2FS_DIRTY_NODES);
1219         mutex_unlock(&sbi->node_write);
1220         unlock_page(page);
1221         return 0;
1222
1223 redirty_out:
1224         dec_page_count(sbi, F2FS_DIRTY_NODES);
1225         wbc->pages_skipped++;
1226         set_page_dirty(page);
1227         return AOP_WRITEPAGE_ACTIVATE;
1228 }
1229
1230 /*
1231  * It is very important to gather dirty pages and write them at once, so that
1232  * we can submit a big bio without interfering with other data writes.
1233  * By default, 512 pages (2MB) * 3 node types is a reasonable amount.
1234  */
1235 #define COLLECT_DIRTY_NODES     1536
1236 static int f2fs_write_node_pages(struct address_space *mapping,
1237                             struct writeback_control *wbc)
1238 {
1239         struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1240         long nr_to_write = wbc->nr_to_write;
1241
1242         /* balancing f2fs's metadata in background */
1243         f2fs_balance_fs_bg(sbi);
1244
1245         /* collect a number of dirty node pages and write together */
1246         if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
1247                 return 0;
1248
1249         /* if mounting failed, skip writing node pages */
1250         wbc->nr_to_write = 3 * max_hw_blocks(sbi);
1251         sync_node_pages(sbi, 0, wbc);
1252         wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
1253                                                 wbc->nr_to_write);
1254         return 0;
1255 }
1256
1257 static int f2fs_set_node_page_dirty(struct page *page)
1258 {
1259         struct address_space *mapping = page->mapping;
1260         struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1261
1262         trace_f2fs_set_page_dirty(page, NODE);
1263
1264         SetPageUptodate(page);
1265         if (!PageDirty(page)) {
1266                 __set_page_dirty_nobuffers(page);
1267                 inc_page_count(sbi, F2FS_DIRTY_NODES);
1268                 SetPagePrivate(page);
1269                 return 1;
1270         }
1271         return 0;
1272 }
1273
1274 static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
1275                                       unsigned int length)
1276 {
1277         struct inode *inode = page->mapping->host;
1278         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
1279         if (PageDirty(page))
1280                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1281         ClearPagePrivate(page);
1282 }
1283
1284 static int f2fs_release_node_page(struct page *page, gfp_t wait)
1285 {
1286         ClearPagePrivate(page);
1287         return 1;
1288 }
1289
1290 /*
1291  * Structure of the f2fs node operations
1292  */
1293 const struct address_space_operations f2fs_node_aops = {
1294         .writepage      = f2fs_write_node_page,
1295         .writepages     = f2fs_write_node_pages,
1296         .set_page_dirty = f2fs_set_node_page_dirty,
1297         .invalidatepage = f2fs_invalidate_node_page,
1298         .releasepage    = f2fs_release_node_page,
1299 };
1300
1301 static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
1302 {
1303         struct list_head *this;
1304         struct free_nid *i;
1305         list_for_each(this, head) {
1306                 i = list_entry(this, struct free_nid, list);
1307                 if (i->nid == n)
1308                         return i;
1309         }
1310         return NULL;
1311 }
1312
1313 static void __del_from_free_nid_list(struct free_nid *i)
1314 {
1315         list_del(&i->list);
1316         kmem_cache_free(free_nid_slab, i);
1317 }
1318
1319 static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
1320 {
1321         struct free_nid *i;
1322         struct nat_entry *ne;
1323         bool allocated = false;
1324
1325         if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
1326                 return -1;
1327
1328         /* 0 nid should not be used */
1329         if (nid == 0)
1330                 return 0;
1331
1332         if (build) {
1333                 /* do not add allocated nids */
1334                 read_lock(&nm_i->nat_tree_lock);
1335                 ne = __lookup_nat_cache(nm_i, nid);
1336                 if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
1337                         allocated = true;
1338                 read_unlock(&nm_i->nat_tree_lock);
1339                 if (allocated)
1340                         return 0;
1341         }
1342
1343         i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1344         i->nid = nid;
1345         i->state = NID_NEW;
1346
1347         spin_lock(&nm_i->free_nid_list_lock);
1348         if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
1349                 spin_unlock(&nm_i->free_nid_list_lock);
1350                 kmem_cache_free(free_nid_slab, i);
1351                 return 0;
1352         }
1353         list_add_tail(&i->list, &nm_i->free_nid_list);
1354         nm_i->fcnt++;
1355         spin_unlock(&nm_i->free_nid_list_lock);
1356         return 1;
1357 }
1358
1359 static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
1360 {
1361         struct free_nid *i;
1362         spin_lock(&nm_i->free_nid_list_lock);
1363         i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1364         if (i && i->state == NID_NEW) {
1365                 __del_from_free_nid_list(i);
1366                 nm_i->fcnt--;
1367         }
1368         spin_unlock(&nm_i->free_nid_list_lock);
1369 }
1370
1371 static void scan_nat_page(struct f2fs_nm_info *nm_i,
1372                         struct page *nat_page, nid_t start_nid)
1373 {
1374         struct f2fs_nat_block *nat_blk = page_address(nat_page);
1375         block_t blk_addr;
1376         int i;
1377
1378         i = start_nid % NAT_ENTRY_PER_BLOCK;
1379
1380         for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
1381
1382                 if (start_nid >= nm_i->max_nid)
1383                         break;
1384
1385                 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1386                 f2fs_bug_on(blk_addr == NEW_ADDR);
1387                 if (blk_addr == NULL_ADDR) {
1388                         if (add_free_nid(nm_i, start_nid, true) < 0)
1389                                 break;
1390                 }
1391         }
1392 }
1393
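/*
 * Refill the free nid list by scanning NAT pages starting at next_scan_nid
 * and then the NAT entries journaled in the current hot data summary block.
 */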
1394 static void build_free_nids(struct f2fs_sb_info *sbi)
1395 {
1396         struct f2fs_nm_info *nm_i = NM_I(sbi);
1397         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1398         struct f2fs_summary_block *sum = curseg->sum_blk;
1399         int i = 0;
1400         nid_t nid = nm_i->next_scan_nid;
1401
1402         /* Enough entries */
1403         if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
1404                 return;
1405
1406         /* readahead nat pages to be scanned */
1407         ra_nat_pages(sbi, nid);
1408
1409         while (1) {
1410                 struct page *page = get_current_nat_page(sbi, nid);
1411
1412                 scan_nat_page(nm_i, page, nid);
1413                 f2fs_put_page(page, 1);
1414
1415                 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
1416                 if (nid >= nm_i->max_nid)
1417                         nid = 0;
1418
1419                 if (i++ == FREE_NID_PAGES)
1420                         break;
1421         }
1422
1423         /* go to the next free nat pages to find more free nids */
1424         nm_i->next_scan_nid = nid;
1425
1426         /* find free nids from current sum_pages */
1427         mutex_lock(&curseg->curseg_mutex);
1428         for (i = 0; i < nats_in_cursum(sum); i++) {
1429                 block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
1430                 nid = le32_to_cpu(nid_in_journal(sum, i));
1431                 if (addr == NULL_ADDR)
1432                         add_free_nid(nm_i, nid, true);
1433                 else
1434                         remove_free_nid(nm_i, nid);
1435         }
1436         mutex_unlock(&curseg->curseg_mutex);
1437 }
1438
1439 /*
1440  * If this function returns success, the caller can obtain a new nid
1441  * from the second parameter of this function.
1442  * The returned nid could be used as an ino as well as a nid when an inode is created.
1443  */
1444 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1445 {
1446         struct f2fs_nm_info *nm_i = NM_I(sbi);
1447         struct free_nid *i = NULL;
1448         struct list_head *this;
1449 retry:
1450         if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
1451                 return false;
1452
1453         spin_lock(&nm_i->free_nid_list_lock);
1454
1455         /* We should not use stale free nids created by build_free_nids */
1456         if (nm_i->fcnt && !sbi->on_build_free_nids) {
1457                 f2fs_bug_on(list_empty(&nm_i->free_nid_list));
1458                 list_for_each(this, &nm_i->free_nid_list) {
1459                         i = list_entry(this, struct free_nid, list);
1460                         if (i->state == NID_NEW)
1461                                 break;
1462                 }
1463
1464                 f2fs_bug_on(i->state != NID_NEW);
1465                 *nid = i->nid;
1466                 i->state = NID_ALLOC;
1467                 nm_i->fcnt--;
1468                 spin_unlock(&nm_i->free_nid_list_lock);
1469                 return true;
1470         }
1471         spin_unlock(&nm_i->free_nid_list_lock);
1472
1473         /* Let's scan the nat pages and their caches to get free nids */
1474         mutex_lock(&nm_i->build_lock);
1475         sbi->on_build_free_nids = true;
1476         build_free_nids(sbi);
1477         sbi->on_build_free_nids = false;
1478         mutex_unlock(&nm_i->build_lock);
1479         goto retry;
1480 }
1481
1482 /*
1483  * alloc_nid() should be called prior to this function.
1484  */
1485 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1486 {
1487         struct f2fs_nm_info *nm_i = NM_I(sbi);
1488         struct free_nid *i;
1489
1490         spin_lock(&nm_i->free_nid_list_lock);
1491         i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1492         f2fs_bug_on(!i || i->state != NID_ALLOC);
1493         __del_from_free_nid_list(i);
1494         spin_unlock(&nm_i->free_nid_list_lock);
1495 }
1496
1497 /*
1498  * alloc_nid() should be called prior to this function.
1499  */
1500 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1501 {
1502         struct f2fs_nm_info *nm_i = NM_I(sbi);
1503         struct free_nid *i;
1504
1505         if (!nid)
1506                 return;
1507
1508         spin_lock(&nm_i->free_nid_list_lock);
1509         i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1510         f2fs_bug_on(!i || i->state != NID_ALLOC);
1511         if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
1512                 __del_from_free_nid_list(i);
1513         } else {
1514                 i->state = NID_NEW;
1515                 nm_i->fcnt++;
1516         }
1517         spin_unlock(&nm_i->free_nid_list_lock);
1518 }
1519
1520 void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
1521                 struct f2fs_summary *sum, struct node_info *ni,
1522                 block_t new_blkaddr)
1523 {
1524         rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
1525         set_node_addr(sbi, ni, new_blkaddr);
1526         clear_node_page_dirty(page);
1527 }
1528
1529 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
1530 {
1531         struct address_space *mapping = sbi->node_inode->i_mapping;
1532         struct f2fs_node *src, *dst;
1533         nid_t ino = ino_of_node(page);
1534         struct node_info old_ni, new_ni;
1535         struct page *ipage;
1536
1537         ipage = grab_cache_page(mapping, ino);
1538         if (!ipage)
1539                 return -ENOMEM;
1540
1541         /* Should not use this inode from the free nid list */
1542         remove_free_nid(NM_I(sbi), ino);
1543
1544         get_node_info(sbi, ino, &old_ni);
1545         SetPageUptodate(ipage);
1546         fill_node_footer(ipage, ino, ino, 0, true);
1547
1548         src = F2FS_NODE(page);
1549         dst = F2FS_NODE(ipage);
1550
1551         memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
1552         dst->i.i_size = 0;
1553         dst->i.i_blocks = cpu_to_le64(1);
1554         dst->i.i_links = cpu_to_le32(1);
1555         dst->i.i_xattr_nid = 0;
1556
1557         new_ni = old_ni;
1558         new_ni.ino = ino;
1559
1560         if (!inc_valid_node_count(sbi, NULL))
1561                 WARN_ON(1);
1562         set_node_addr(sbi, &new_ni, NEW_ADDR);
1563         inc_valid_inode_count(sbi);
1564         f2fs_put_page(ipage, 1);
1565         return 0;
1566 }
1567
1568 /*
1569  * ra_sum_pages() merges contiguous pages into one bio and submits it.
1570  * These pre-read pages are linked into the pages list.
1571  */
1572 static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
1573                                 int start, int nrpages)
1574 {
1575         struct page *page;
1576         int page_idx = start;
1577
1578         for (; page_idx < start + nrpages; page_idx++) {
1579                 /* alloc a temporary page for reading node summary info */
1580                 page = alloc_page(GFP_F2FS_ZERO);
1581                 if (!page) {
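                             /* allocation failed: release every page queued so far */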
1582                         struct page *tmp;
1583                         list_for_each_entry_safe(page, tmp, pages, lru) {
1584                                 list_del(&page->lru);
1585                                 unlock_page(page);
1586                                 __free_pages(page, 0);
1587                         }
1588                         return -ENOMEM;
1589                 }
1590
1591                 lock_page(page);
1592                 page->index = page_idx;
1593                 list_add_tail(&page->lru, pages);
1594         }
1595
1596         list_for_each_entry(page, pages, lru)
1597                 f2fs_submit_page_mbio(sbi, page, page->index, META, READ);
1598
1599         f2fs_submit_merged_bio(sbi, META, true, READ);
1600         return 0;
1601 }
1602
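     /*
      * Rebuild the node summary entries of @segno by reading the segment's
      * node blocks and taking each nid from the node footer.
      */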
1603 int restore_node_summary(struct f2fs_sb_info *sbi,
1604                         unsigned int segno, struct f2fs_summary_block *sum)
1605 {
1606         struct f2fs_node *rn;
1607         struct f2fs_summary *sum_entry;
1608         struct page *page, *tmp;
1609         block_t addr;
1610         int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
1611         int i, last_offset, nrpages, err = 0;
1612         LIST_HEAD(page_list);
1613
1614         /* scan the node segment */
1615         last_offset = sbi->blocks_per_seg;
1616         addr = START_BLOCK(sbi, segno);
1617         sum_entry = &sum->entries[0];
1618
1619         for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
1620                 nrpages = min(last_offset - i, bio_blocks);
1621
1622                 /* read ahead node pages */
1623                 err = ra_sum_pages(sbi, &page_list, addr, nrpages);
1624                 if (err)
1625                         return err;
1626
1627                 list_for_each_entry_safe(page, tmp, &page_list, lru) {
1628
1629                         lock_page(page);
1630                         if (PageUptodate(page)) {
1631                                 rn = F2FS_NODE(page);
1632                                 sum_entry->nid = rn->footer.nid;
1633                                 sum_entry->version = 0;
1634                                 sum_entry->ofs_in_node = 0;
1635                                 sum_entry++;
1636                         } else {
1637                                 err = -EIO;
1638                         }
1639
1640                         list_del(&page->lru);
1641                         unlock_page(page);
1642                         __free_pages(page, 0);
1643                 }
1644         }
1645         return err;
1646 }
1647
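     /*
      * If the NAT journal in the current hot data summary block is full,
      * move all of its entries into the NAT cache as dirty entries so that
      * flush_nat_entries() writes them back through NAT blocks instead.
      * Returns true when the journal has been drained this way.
      */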
1648 static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1649 {
1650         struct f2fs_nm_info *nm_i = NM_I(sbi);
1651         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1652         struct f2fs_summary_block *sum = curseg->sum_blk;
1653         int i;
1654
1655         mutex_lock(&curseg->curseg_mutex);
1656
1657         if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1658                 mutex_unlock(&curseg->curseg_mutex);
1659                 return false;
1660         }
1661
1662         for (i = 0; i < nats_in_cursum(sum); i++) {
1663                 struct nat_entry *ne;
1664                 struct f2fs_nat_entry raw_ne;
1665                 nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1666
1667                 raw_ne = nat_in_journal(sum, i);
1668 retry:
1669                 write_lock(&nm_i->nat_tree_lock);
1670                 ne = __lookup_nat_cache(nm_i, nid);
1671                 if (ne) {
1672                         __set_nat_cache_dirty(nm_i, ne);
1673                         write_unlock(&nm_i->nat_tree_lock);
1674                         continue;
1675                 }
1676                 ne = grab_nat_entry(nm_i, nid);
1677                 if (!ne) {
1678                         write_unlock(&nm_i->nat_tree_lock);
1679                         goto retry;
1680                 }
1681                 nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
1682                 nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
1683                 nat_set_version(ne, raw_ne.version);
1684                 __set_nat_cache_dirty(nm_i, ne);
1685                 write_unlock(&nm_i->nat_tree_lock);
1686         }
1687         update_nats_in_cursum(sum, -i);
1688         mutex_unlock(&curseg->curseg_mutex);
1689         return true;
1690 }
1691
1692 /*
1693  * Flush all dirty NAT entries to the journal or NAT blocks at checkpoint.
1694  */
1695 void flush_nat_entries(struct f2fs_sb_info *sbi)
1696 {
1697         struct f2fs_nm_info *nm_i = NM_I(sbi);
1698         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1699         struct f2fs_summary_block *sum = curseg->sum_blk;
1700         struct list_head *cur, *n;
1701         struct page *page = NULL;
1702         struct f2fs_nat_block *nat_blk = NULL;
1703         nid_t start_nid = 0, end_nid = 0;
1704         bool flushed;
1705
1706         flushed = flush_nats_in_journal(sbi);
1707
1708         if (!flushed)
1709                 mutex_lock(&curseg->curseg_mutex);
1710
1711         /* 1) flush dirty nat caches */
1712         list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
1713                 struct nat_entry *ne;
1714                 nid_t nid;
1715                 struct f2fs_nat_entry raw_ne;
1716                 int offset = -1;
1717                 block_t new_blkaddr;
1718
1719                 ne = list_entry(cur, struct nat_entry, list);
1720                 nid = nat_get_nid(ne);
1721
1722                 if (nat_get_blkaddr(ne) == NEW_ADDR)
1723                         continue;
1724                 if (flushed)
1725                         goto to_nat_page;
1726
1727                 /* if there is room for nat entries in the curseg summary block */
1728                 offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1729                 if (offset >= 0) {
1730                         raw_ne = nat_in_journal(sum, offset);
1731                         goto flush_now;
1732                 }
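                     /*
                      * No room in the journal (or it was just drained): write
                      * the entry into its NAT block, grabbing a new block page
                      * whenever nid falls outside the cached range.
                      */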
1733 to_nat_page:
1734                 if (!page || (start_nid > nid || nid > end_nid)) {
1735                         if (page) {
1736                                 f2fs_put_page(page, 1);
1737                                 page = NULL;
1738                         }
1739                         start_nid = START_NID(nid);
1740                         end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1741
1742                         /*
1743                          * get the nat block page with the dirty flag set,
1744                          * reference count increased, mapped and locked
1745                          */
1746                         page = get_next_nat_page(sbi, start_nid);
1747                         nat_blk = page_address(page);
1748                 }
1749
1750                 f2fs_bug_on(!nat_blk);
1751                 raw_ne = nat_blk->entries[nid - start_nid];
1752 flush_now:
1753                 new_blkaddr = nat_get_blkaddr(ne);
1754
1755                 raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
1756                 raw_ne.block_addr = cpu_to_le32(new_blkaddr);
1757                 raw_ne.version = nat_get_version(ne);
1758
1759                 if (offset < 0) {
1760                         nat_blk->entries[nid - start_nid] = raw_ne;
1761                 } else {
1762                         nat_in_journal(sum, offset) = raw_ne;
1763                         nid_in_journal(sum, offset) = cpu_to_le32(nid);
1764                 }
1765
1766                 if (nat_get_blkaddr(ne) == NULL_ADDR &&
1767                                 add_free_nid(NM_I(sbi), nid, false) <= 0) {
1768                         write_lock(&nm_i->nat_tree_lock);
1769                         __del_from_nat_cache(nm_i, ne);
1770                         write_unlock(&nm_i->nat_tree_lock);
1771                 } else {
1772                         write_lock(&nm_i->nat_tree_lock);
1773                         __clear_nat_cache_dirty(nm_i, ne);
1774                         ne->checkpointed = true;
1775                         write_unlock(&nm_i->nat_tree_lock);
1776                 }
1777         }
1778         if (!flushed)
1779                 mutex_unlock(&curseg->curseg_mutex);
1780         f2fs_put_page(page, 1);
1781
1782         /* 2) shrink nat caches if necessary */
1783         try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
1784 }
1785
1786 static int init_node_manager(struct f2fs_sb_info *sbi)
1787 {
1788         struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1789         struct f2fs_nm_info *nm_i = NM_I(sbi);
1790         unsigned char *version_bitmap;
1791         unsigned int nat_segs, nat_blocks;
1792
1793         nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1794
1795         /* segment_count_nat includes the pair segment, so divide by 2. */
1796         nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1797         nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
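             /* each NAT block covers NAT_ENTRY_PER_BLOCK node ids */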
1798         nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
1799         nm_i->fcnt = 0;
1800         nm_i->nat_cnt = 0;
1801
1802         INIT_LIST_HEAD(&nm_i->free_nid_list);
1803         INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1804         INIT_LIST_HEAD(&nm_i->nat_entries);
1805         INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1806
1807         mutex_init(&nm_i->build_lock);
1808         spin_lock_init(&nm_i->free_nid_list_lock);
1809         rwlock_init(&nm_i->nat_tree_lock);
1810
1811         nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
1812         nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1813         version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1814         if (!version_bitmap)
1815                 return -EFAULT;
1816
1817         nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1818                                         GFP_KERNEL);
1819         if (!nm_i->nat_bitmap)
1820                 return -ENOMEM;
1821         return 0;
1822 }
1823
1824 int build_node_manager(struct f2fs_sb_info *sbi)
1825 {
1826         int err;
1827
1828         sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1829         if (!sbi->nm_info)
1830                 return -ENOMEM;
1831
1832         err = init_node_manager(sbi);
1833         if (err)
1834                 return err;
1835
1836         build_free_nids(sbi);
1837         return 0;
1838 }
1839
1840 void destroy_node_manager(struct f2fs_sb_info *sbi)
1841 {
1842         struct f2fs_nm_info *nm_i = NM_I(sbi);
1843         struct free_nid *i, *next_i;
1844         struct nat_entry *natvec[NATVEC_SIZE];
1845         nid_t nid = 0;
1846         unsigned int found;
1847
1848         if (!nm_i)
1849                 return;
1850
1851         /* destroy free nid list */
1852         spin_lock(&nm_i->free_nid_list_lock);
1853         list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1854                 f2fs_bug_on(i->state == NID_ALLOC);
1855                 __del_from_free_nid_list(i);
1856                 nm_i->fcnt--;
1857         }
1858         f2fs_bug_on(nm_i->fcnt);
1859         spin_unlock(&nm_i->free_nid_list_lock);
1860
1861         /* destroy nat cache */
1862         write_lock(&nm_i->nat_tree_lock);
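             /* free cached nat entries in batches of NATVEC_SIZE via gang lookup */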
1863         while ((found = __gang_lookup_nat_cache(nm_i,
1864                                         nid, NATVEC_SIZE, natvec))) {
1865                 unsigned idx;
1866                 for (idx = 0; idx < found; idx++) {
1867                         struct nat_entry *e = natvec[idx];
1868                         nid = nat_get_nid(e) + 1;
1869                         __del_from_nat_cache(nm_i, e);
1870                 }
1871         }
1872         f2fs_bug_on(nm_i->nat_cnt);
1873         write_unlock(&nm_i->nat_tree_lock);
1874
1875         kfree(nm_i->nat_bitmap);
1876         sbi->nm_info = NULL;
1877         kfree(nm_i);
1878 }
1879
1880 int __init create_node_manager_caches(void)
1881 {
1882         nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1883                         sizeof(struct nat_entry), NULL);
1884         if (!nat_entry_slab)
1885                 return -ENOMEM;
1886
1887         free_nid_slab = f2fs_kmem_cache_create("free_nid",
1888                         sizeof(struct free_nid), NULL);
1889         if (!free_nid_slab) {
1890                 kmem_cache_destroy(nat_entry_slab);
1891                 return -ENOMEM;
1892         }
1893         return 0;
1894 }
1895
1896 void destroy_node_manager_caches(void)
1897 {
1898         kmem_cache_destroy(free_nid_slab);
1899         kmem_cache_destroy(nat_entry_slab);
1900 }