/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"
/*
 * Check whether the user is allowed to write.
 */
static int jffs2_rp_can_write(struct jffs2_sb_info *c)
{
	uint32_t avail;
	struct jffs2_mount_opts *opts = &c->mount_opts;

	avail = c->dirty_size + c->free_size + c->unchecked_size +
		c->erasing_size - c->resv_blocks_write * c->sector_size
		- c->nospc_dirty_size;
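	/*
	 * A worked example (hypothetical numbers, for illustration only):
	 * with 64KiB sectors, resv_blocks_write == 2, and
	 * dirty + free + unchecked + erasing == 1MiB with
	 * nospc_dirty_size == 32KiB, avail is
	 * 0x100000 - 2*0x10000 - 0x8000 == 0xd8000. Ordinary users are
	 * refused once avail drops to rp_size (the reserved pool) or below.
	 */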
	if (avail < 2 * opts->rp_size)
		jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
			  "erasing_size %u, unchecked_size %u, "
			  "nr_erasing_blocks %u, avail %u, resrv %u\n",
			  opts->rp_size, c->dirty_size, c->free_size,
			  c->erasing_size, c->unchecked_size,
			  c->nr_erasing_blocks, avail, c->nospc_dirty_size);

	if (avail > opts->rp_size)
		return 1;

	/* Always allow root */
	if (capable(CAP_SYS_RESOURCE))
		return 1;

	jffs2_dbg(1, "forbid writing\n");
	return 0;
}

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Space to reserve for the eraseblock summary, or
 *		  JFFS2_SUMMARY_NOSUM_SIZE to disable summary collection
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;

	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);
	/*
	 * Check if the free space is greater than the size of the reserved pool.
	 * If not, only allow root to proceed with writing.
	 */
	if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
		ret = -ENOSPC;
		goto out;
	}

	/* this needs a little more thought (true <tglx> :)) */
	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
			 * again with c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
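			/*
			 * Illustrative sketch (hypothetical numbers): a 64KiB
			 * block on erase_pending_list adds 0x10000 to
			 * dirty_size; once its erase starts, that 0x10000
			 * shows up in erasing_size instead. Either way it is
			 * counted once in nr_erasing_blocks, so subtracting
			 * nr_erasing_blocks * sector_size cancels it in both
			 * states, and only blocks on erasable_list keep
			 * contributing to 'dirty'.
			 */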
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x (incl. unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calculate the possibly available space. 'Possibly available' means that we
			 * don't know whether the unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
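			/*
			 * As used here (a reading of the surrounding code,
			 * not a full statement of the GC pass contract):
			 * 0 means the pass made progress, -EAGAIN means
			 * "call me again" (e.g. erases still outstanding),
			 * and any other error aborts this reservation.
			 */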
			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);

					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}

out:
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
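
/*
 * Typical caller pattern (a sketch; variable names here are illustrative,
 * see the actual callers in write.c for the authoritative sequence):
 *
 *	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (!ret) {
 *		... write the node at the allocated location ...
 *		... jffs2_add_physical_node_ref() for the new node ...
 *		jffs2_complete_reservation(c);
 *	}
 */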

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret;

	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	while (true) {
		spin_lock(&c->erase_completion_lock);
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
		spin_unlock(&c->erase_completion_lock);

		if (ret == -EAGAIN)
			cond_resched();
		else
			break;
	}
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}

	c->nextblock = NULL;
}
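
/*
 * A note on the thresholds used above (paraphrased from nodelist.h; treat
 * the exact values as an assumption): ISDIRTY() tests whether an amount of
 * reclaimable space is large enough to be worth garbage collecting at all,
 * and VERYDIRTY() tests whether roughly half the eraseblock is reclaimable,
 * which makes the block a prime GC candidate.
 */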

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
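	/*
	 * 0xffffffff serves as an "invalid" wbuf offset here: if the write
	 * buffer is empty and was left exactly at an eraseblock boundary,
	 * poisoning wbuf_ofs makes the wbuf code re-base the buffer on the
	 * next write rather than assuming it continues the old block. (This
	 * describes the intent of the workaround above; the matching check
	 * lives in wbuf.c.)
	 */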
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}
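
		/*
		 * Rough end-of-block layout being reserved for (a sketch;
		 * see summary.c for the authoritative format):
		 *
		 *   | nodes ... | free | collected summary | summary frame |
		 *                       ^-- reserved_size covers this tail
		 *
		 * The frame at the very end lets the mount-time scan locate
		 * the summary without reading the whole eraseblock.
		 */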

		/* Is there enough space for writing out the current node, or do we have to
		   write out summary information now, close this jeb and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information: disable summary for this jeb and free the
				   collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep always valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;
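			/*
			 * Why link an obsolete ref for space we never wrote?
			 * The per-block ref list is also how JFFS2 accounts
			 * for every byte of the eraseblock; covering the
			 * skipped tail with a REF_OBSOLETE placeholder keeps
			 * the offset and ref_totlen() accounting contiguous.
			 * (Rationale inferred from the surrounding code.)
			 */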

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {
		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: offset of the node on flash, with the REF_* state in the low two bits
 *	@len: length of this physical node
 *	@ic: inode cache the node belongs to, if any
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space().
 *
 *	Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
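	/*
	 * The low two bits of 'ofs' carry the ref's REF_* state (which is
	 * why the debug output above splits it into 'ofs & ~3' and
	 * 'ofs & 3'); the real flash offset is always 4-byte aligned. See
	 * the REF_* definitions in nodelist.h.
	 */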

	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}

	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize = 0;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
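	/*
	 * Note why an in-place overwrite works here at all: clearing the
	 * ACCURATE bit only flips a 1 to a 0, which NOR flash can do without
	 * an erase cycle. That is what the jffs2_can_mark_obsolete() check
	 * above gated on; on NAND (and other write-buffered setups) this
	 * path is never taken.
	 */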
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
	 * again with c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}