/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/mtd/mtd.h>
20 #include <linux/mm.h> /* kvfree() */
23 static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
24 struct jffs2_inode_cache *, struct jffs2_full_dirent **);
26 static inline struct jffs2_inode_cache *
27 first_inode_chain(int *i, struct jffs2_sb_info *c)
29 for (; *i < c->inocache_hashsize; (*i)++) {
30 if (c->inocache_list[*i])
31 return c->inocache_list[*i];
36 static inline struct jffs2_inode_cache *
37 next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
39 /* More in this chain? */
43 return first_inode_chain(i, c);
/* Iterate over every inocache in the hash table, bucket by bucket. */
#define for_each_inode(i, c, ic)	for (i = 0, ic = first_inode_chain(&i, (c));	\
					     ic;					\
					     ic = next_inode(&i, ic, (c)))
52 static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
53 struct jffs2_inode_cache *ic,
56 struct jffs2_full_dirent *fd;
58 dbg_fsbuild("building directory inode #%u\n", ic->ino);
60 /* For each child, increase nlink */
61 for(fd = ic->scan_dents; fd; fd = fd->next) {
62 struct jffs2_inode_cache *child_ic;
66 /* we can get high latency here with huge directories */
68 child_ic = jffs2_get_ino_cache(c, fd->ino);
70 dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
71 fd->name, fd->ino, ic->ino);
72 jffs2_mark_node_obsolete(c, fd->raw);
73 /* Clear the ic/raw union so it doesn't cause problems later. */
78 /* From this point, fd->raw is no longer used so we can set fd->ic */
80 child_ic->pino_nlink++;
81 /* If we appear (at this stage) to have hard-linked directories,
82 * set a flag to trigger a scan later */
83 if (fd->type == DT_DIR) {
84 child_ic->flags |= INO_FLAGS_IS_DIR;
85 if (child_ic->pino_nlink > 1)
89 dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
90 /* Can't free scan_dents so far. We might need them in pass 2 */
95 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
96 - Scan directory tree from top down, setting nlink in inocaches
97 - Scan inocaches for inodes with nlink==0
99 static int jffs2_build_filesystem(struct jffs2_sb_info *c)
101 int ret, i, dir_hardlinks = 0;
102 struct jffs2_inode_cache *ic;
103 struct jffs2_full_dirent *fd;
104 struct jffs2_full_dirent *dead_fds = NULL;
106 dbg_fsbuild("build FS data structures\n");
108 /* First, scan the medium and build all the inode caches with
109 lists of physical nodes */
111 c->flags |= JFFS2_SB_FLAG_SCANNING;
112 ret = jffs2_scan_medium(c);
113 c->flags &= ~JFFS2_SB_FLAG_SCANNING;
117 dbg_fsbuild("scanned flash completely\n");
118 jffs2_dbg_dump_block_lists_nolock(c);
120 dbg_fsbuild("pass 1 starting\n");
121 c->flags |= JFFS2_SB_FLAG_BUILDING;
122 /* Now scan the directory tree, increasing nlink according to every dirent found. */
123 for_each_inode(i, c, ic) {
124 if (ic->scan_dents) {
125 jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
130 dbg_fsbuild("pass 1 complete\n");
132 /* Next, scan for inodes with nlink == 0 and remove them. If
133 they were directories, then decrement the nlink of their
134 children too, and repeat the scan. As that's going to be
135 a fairly uncommon occurrence, it's not so evil to do it this
136 way. Recursion bad. */
137 dbg_fsbuild("pass 2 starting\n");
139 for_each_inode(i, c, ic) {
143 jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
147 dbg_fsbuild("pass 2a starting\n");
153 ic = jffs2_get_ino_cache(c, fd->ino);
156 jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
157 jffs2_free_full_dirent(fd);
160 dbg_fsbuild("pass 2a complete\n");
163 /* If we detected directory hardlinks earlier, *hopefully*
164 * they are gone now because some of the links were from
165 * dead directories which still had some old dirents lying
166 * around and not yet garbage-collected, but which have
167 * been discarded above. So clear the pino_nlink field
168 * in each directory, so that the final scan below can
169 * print appropriate warnings. */
170 for_each_inode(i, c, ic) {
171 if (ic->flags & INO_FLAGS_IS_DIR)
175 dbg_fsbuild("freeing temporary data structures\n");
177 /* Finally, we can scan again and free the dirent structs */
178 for_each_inode(i, c, ic) {
179 while(ic->scan_dents) {
181 ic->scan_dents = fd->next;
182 /* We do use the pino_nlink field to count nlink of
183 * directories during fs build, so set it to the
184 * parent ino# now. Now that there's hopefully only
186 if (fd->type == DT_DIR) {
188 /* We'll have complained about it and marked the coresponding
189 raw node obsolete already. Just skip it. */
193 /* We *have* to have set this in jffs2_build_inode_pass1() */
194 BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
196 /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
197 * is set. Otherwise, we know this should never trigger anyway, so
198 * we don't do the check. And ic->pino_nlink still contains the nlink
199 * value (which is 1). */
200 if (dir_hardlinks && fd->ic->pino_nlink) {
201 JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
202 fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
203 /* Should we unlink it from its previous parent? */
206 /* For directories, ic->pino_nlink holds that parent inode # */
207 fd->ic->pino_nlink = ic->ino;
209 jffs2_free_full_dirent(fd);
211 ic->scan_dents = NULL;
214 ret = jffs2_build_xattr_subsystem(c);
218 c->flags &= ~JFFS2_SB_FLAG_BUILDING;
220 dbg_fsbuild("FS build complete\n");
222 /* Rotate the lists by some number to ensure wear levelling */
223 jffs2_rotate_lists(c);
229 for_each_inode(i, c, ic) {
230 while(ic->scan_dents) {
232 ic->scan_dents = fd->next;
233 jffs2_free_full_dirent(fd);
236 jffs2_clear_xattr_subsystem(c);
242 static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
243 struct jffs2_inode_cache *ic,
244 struct jffs2_full_dirent **dead_fds)
246 struct jffs2_raw_node_ref *raw;
247 struct jffs2_full_dirent *fd;
249 dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);
252 while (raw != (void *)ic) {
253 struct jffs2_raw_node_ref *next = raw->next_in_ino;
254 dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
255 jffs2_mark_node_obsolete(c, raw);
259 if (ic->scan_dents) {
261 dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);
263 while(ic->scan_dents) {
264 struct jffs2_inode_cache *child_ic;
267 ic->scan_dents = fd->next;
270 /* It's a deletion dirent. Ignore it */
271 dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
272 jffs2_free_full_dirent(fd);
278 dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);
280 child_ic = jffs2_get_ino_cache(c, fd->ino);
282 dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
284 jffs2_free_full_dirent(fd);
288 /* Reduce nlink of the child. If it's now zero, stick it on the
289 dead_fds list to be cleaned up later. Else just free the fd */
290 child_ic->pino_nlink--;
292 if (!child_ic->pino_nlink) {
293 dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
295 fd->next = *dead_fds;
298 dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
299 fd->ino, fd->name, child_ic->pino_nlink);
300 jffs2_free_full_dirent(fd);
306 We don't delete the inocache from the hash list and free it yet.
307 The erase code will do that, when all the nodes are completely gone.
311 static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
315 /* Deletion should almost _always_ be allowed. We're fairly
316 buggered once we stop allowing people to delete stuff
317 because there's not enough free space... */
318 c->resv_blocks_deletion = 2;
320 /* Be conservative about how much space we need before we allow writes.
321 On top of that which is required for deletia, require an extra 2%
322 of the medium to be available, for overhead caused by nodes being
323 split across blocks, etc. */
325 size = c->flash_size / 50; /* 2% of flash size */
326 size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
327 size += c->sector_size - 1; /* ... and round up */
329 c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
331 /* When do we let the GC thread run in the background */
333 c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
335 /* When do we allow garbage collection to merge nodes to make
336 long-term progress at the expense of short-term space exhaustion? */
337 c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
339 /* When do we allow garbage collection to eat from bad blocks rather
340 than actually making progress? */
341 c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
343 /* What number of 'very dirty' eraseblocks do we allow before we
344 trigger the GC thread even if we don't _need_ the space. When we
345 can't mark nodes obsolete on the medium, the old dirty nodes cause
346 performance problems because we have to inspect and discard them. */
347 c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
348 if (jffs2_can_mark_obsolete(c))
349 c->vdirty_blocks_gctrigger *= 10;
351 /* If there's less than this amount of dirty space, don't bother
352 trying to GC to make more space. It'll be a fruitless task */
353 c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
355 dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
356 c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
357 dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
358 c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
359 dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
360 c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
361 dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
362 c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
363 dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
364 c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
365 dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
366 c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
367 dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
368 c->nospc_dirty_size);
369 dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
370 c->vdirty_blocks_gctrigger);
373 int jffs2_do_mount_fs(struct jffs2_sb_info *c)
379 c->free_size = c->flash_size;
380 c->nr_blocks = c->flash_size / c->sector_size;
381 size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
383 if (jffs2_blocks_use_vmalloc(c))
384 c->blocks = vzalloc(size);
387 c->blocks = kzalloc(size, GFP_KERNEL);
391 for (i=0; i<c->nr_blocks; i++) {
392 INIT_LIST_HEAD(&c->blocks[i].list);
393 c->blocks[i].offset = i * c->sector_size;
394 c->blocks[i].free_size = c->sector_size;
397 INIT_LIST_HEAD(&c->clean_list);
398 INIT_LIST_HEAD(&c->very_dirty_list);
399 INIT_LIST_HEAD(&c->dirty_list);
400 INIT_LIST_HEAD(&c->erasable_list);
401 INIT_LIST_HEAD(&c->erasing_list);
402 INIT_LIST_HEAD(&c->erase_checking_list);
403 INIT_LIST_HEAD(&c->erase_pending_list);
404 INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
405 INIT_LIST_HEAD(&c->erase_complete_list);
406 INIT_LIST_HEAD(&c->free_list);
407 INIT_LIST_HEAD(&c->bad_list);
408 INIT_LIST_HEAD(&c->bad_used_list);
412 ret = jffs2_sum_init(c);
416 if (jffs2_build_filesystem(c)) {
417 dbg_fsbuild("build_fs failed\n");
418 jffs2_free_ino_caches(c);
419 jffs2_free_raw_node_refs(c);
424 jffs2_calc_trigger_levels(c);