1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (c) 2012 Linutronix GmbH
4 * Copyright (c) 2014 sigma star gmbh
5 * Author: Richard Weinberger <richard@nod.at>
11 #include <dm/devres.h>
12 #include <linux/crc32.h>
13 #include <linux/err.h>
14 #include <u-boot/crc.h>
18 #include <ubi_uboot.h>
21 #include <linux/compat.h>
22 #include <linux/math64.h>
26 * init_seen - allocate memory used for debugging.
27 * @ubi: UBI device description object
29 static inline int *init_seen(struct ubi_device *ubi)
33 if (!ubi_dbg_chk_fastmap(ubi))
36 ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
38 return ERR_PTR(-ENOMEM);
44 * free_seen - free the seen logic integer array.
45 * @seen: integer array of @ubi->peb_count size
47 static inline void free_seen(int *seen)
53 * set_seen - mark a PEB as seen.
54 * @ubi: UBI device description object
55 * @pnum: The PEB to be marked as seen
56 * @seen: integer array of @ubi->peb_count size
58 static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
60 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
67 * self_check_seen - check whether all PEBs have been seen by fastmap.
68 * @ubi: UBI device description object
69 * @seen: integer array of @ubi->peb_count size
71 static int self_check_seen(struct ubi_device *ubi, int *seen)
75 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
78 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
79 if (!seen[pnum] && ubi->lookuptbl[pnum]) {
80 ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
89 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
90 * @ubi: UBI device description object
92 size_t ubi_calc_fm_size(struct ubi_device *ubi)
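/*
 * A fastmap consists of: the fastmap super block, the fastmap header,
 * two scan pools (the regular pool and the WL pool), one ubi_fm_ec entry
 * per PEB, the EBA section (one ubi_fm_eba header plus, in the worst
 * case, one __be32 LEB-to-PEB entry per PEB across all volumes) and one
 * volume header per possible volume. The sum is rounded up to whole
 * LEBs because the fastmap occupies complete LEBs on flash.
 */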
96 size = sizeof(struct ubi_fm_sb) +
97 sizeof(struct ubi_fm_hdr) +
98 sizeof(struct ubi_fm_scan_pool) +
99 sizeof(struct ubi_fm_scan_pool) +
100 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
101 (sizeof(struct ubi_fm_eba) +
102 (ubi->peb_count * sizeof(__be32))) +
103 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
104 return roundup(size, ubi->leb_size);
109 * new_fm_vhdr - allocate a new volume header for fastmap usage.
110 * @ubi: UBI device description object
111 * @vol_id: the VID of the new header
113 * Returns a new struct ubi_vid_hdr on success.
114 * NULL indicates out of memory.
116 static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
118 struct ubi_vid_hdr *new;
120 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
124 new->vol_type = UBI_VID_DYNAMIC;
125 new->vol_id = cpu_to_be32(vol_id);
127 /* UBI implementations without fastmap support have to delete the fastmap. */
130 new->compat = UBI_COMPAT_DELETE;
137 * add_aeb - create and add an attach erase block to a given list.
138 * @ai: UBI attach info object
139 * @list: the target list
140 * @pnum: PEB number of the new attach erase block
141 * @ec: erase counter of the new PEB
142 * @scrub: scrub this PEB after attaching
144 * Returns 0 on success, < 0 indicates an internal error.
146 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
147 int pnum, int ec, int scrub)
149 struct ubi_ainf_peb *aeb;
151 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
159 aeb->copy_flag = aeb->sqnum = 0;
161 ai->ec_sum += aeb->ec;
164 if (ai->max_ec < aeb->ec)
165 ai->max_ec = aeb->ec;
167 if (ai->min_ec > aeb->ec)
168 ai->min_ec = aeb->ec;
170 list_add_tail(&aeb->u.list, list);
176 * add_vol - create and add a new volume to ubi_attach_info.
177 * @ai: ubi_attach_info object
178 * @vol_id: VID of the new volume
179 * @used_ebs: number of used EBs
180 * @data_pad: data padding value of the new volume
181 * @vol_type: volume type
182 * @last_eb_bytes: number of bytes in the last LEB
184 * Returns the new struct ubi_ainf_volume on success.
185 * NULL indicates an error.
187 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
188 int used_ebs, int data_pad, u8 vol_type,
191 struct ubi_ainf_volume *av;
192 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
196 av = rb_entry(parent, struct ubi_ainf_volume, rb);
198 if (vol_id > av->vol_id)
200 else if (vol_id < av->vol_id)
203 return ERR_PTR(-EINVAL);
206 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
210 av->highest_lnum = av->leb_count = av->used_ebs = 0;
212 av->data_pad = data_pad;
213 av->last_data_size = last_eb_bytes;
215 av->vol_type = vol_type;
217 if (av->vol_type == UBI_STATIC_VOLUME)
218 av->used_ebs = used_ebs;
220 dbg_bld("found volume (ID %i)", vol_id);
222 rb_link_node(&av->rb, parent, p);
223 rb_insert_color(&av->rb, &ai->volumes);
230 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
231 * from its original list.
232 * @ai: ubi_attach_info object
233 * @aeb: the SEB to be assigned
234 * @av: target scan volume
236 static void assign_aeb_to_av(struct ubi_attach_info *ai,
237 struct ubi_ainf_peb *aeb,
238 struct ubi_ainf_volume *av)
240 struct ubi_ainf_peb *tmp_aeb;
241 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
243 p = &av->root.rb_node;
247 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
248 if (aeb->lnum != tmp_aeb->lnum) {
249 if (aeb->lnum < tmp_aeb->lnum)
259 list_del(&aeb->u.list);
262 rb_link_node(&aeb->u.rb, parent, p);
263 rb_insert_color(&aeb->u.rb, &av->root);
267 * update_vol - inserts or updates a LEB which was found in a pool.
268 * @ubi: the UBI device object
269 * @ai: attach info object
270 * @av: the volume this LEB belongs to
271 * @new_vh: the volume header derived from new_aeb
272 * @new_aeb: the AEB to be examined
274 * Returns 0 on success, < 0 indicates an internal error.
276 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
277 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
278 struct ubi_ainf_peb *new_aeb)
280 struct rb_node **p = &av->root.rb_node, *parent = NULL;
281 struct ubi_ainf_peb *aeb, *victim;
286 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
288 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
289 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
297 /* This case can happen if the fastmap gets written
298 * because of a volume change (creation, deletion, ..).
299 * Then a PEB can be within the persistent EBA and the pool.
301 if (aeb->pnum == new_aeb->pnum) {
302 ubi_assert(aeb->lnum == new_aeb->lnum);
303 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
308 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
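/*
 * ubi_compare_lebs() tells us which copy of this LEB is newer:
 * bit 0 set means the pool PEB (new_aeb) holds the newer copy,
 * bit 0 cleared means the copy already recorded in the EBA is newer.
 */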
312 /* new_aeb is newer */
314 victim = kmem_cache_alloc(ai->aeb_slab_cache,
319 victim->ec = aeb->ec;
320 victim->pnum = aeb->pnum;
321 list_add_tail(&victim->u.list, &ai->erase);
323 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
325 be32_to_cpu(new_vh->data_size);
327 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
328 av->vol_id, aeb->lnum, new_aeb->pnum);
330 aeb->ec = new_aeb->ec;
331 aeb->pnum = new_aeb->pnum;
332 aeb->copy_flag = new_vh->copy_flag;
333 aeb->scrub = new_aeb->scrub;
334 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
336 /* new_aeb is older */
338 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
339 av->vol_id, aeb->lnum, new_aeb->pnum);
340 list_add_tail(&new_aeb->u.list, &ai->erase);
345 /* This LEB is new, let's add it to the volume */
347 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
348 av->highest_lnum = be32_to_cpu(new_vh->lnum);
349 av->last_data_size = be32_to_cpu(new_vh->data_size);
352 if (av->vol_type == UBI_STATIC_VOLUME)
353 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
357 rb_link_node(&new_aeb->u.rb, parent, p);
358 rb_insert_color(&new_aeb->u.rb, &av->root);
364 * process_pool_aeb - we found a non-empty PEB in a pool.
365 * @ubi: UBI device object
366 * @ai: attach info object
367 * @new_vh: the volume header derived from new_aeb
368 * @new_aeb: the AEB to be examined
370 * Returns 0 on success, < 0 indicates an internal error.
372 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
373 struct ubi_vid_hdr *new_vh,
374 struct ubi_ainf_peb *new_aeb)
376 struct ubi_ainf_volume *av, *tmp_av = NULL;
377 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
380 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
381 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
382 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
387 /* Find the volume this SEB belongs to */
390 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
392 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
394 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
405 ubi_err(ubi, "orphaned volume in fastmap pool!");
406 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
407 return UBI_BAD_FASTMAP;
410 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
412 return update_vol(ubi, ai, av, new_vh, new_aeb);
416 * unmap_peb - unmap a PEB.
417 * If fastmap detects a free PEB in the pool it has to check whether
418 * this PEB has been unmapped after writing the fastmap.
420 * @ai: UBI attach info object
421 * @pnum: The PEB to be unmapped
423 static void unmap_peb(struct ubi_attach_info *ai, int pnum)
425 struct ubi_ainf_volume *av;
426 struct rb_node *node, *node2;
427 struct ubi_ainf_peb *aeb;
429 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
430 av = rb_entry(node, struct ubi_ainf_volume, rb);
432 for (node2 = rb_first(&av->root); node2;
433 node2 = rb_next(node2)) {
434 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
435 if (aeb->pnum == pnum) {
436 rb_erase(&aeb->u.rb, &av->root);
438 kmem_cache_free(ai->aeb_slab_cache, aeb);
446 * scan_pool - scans a pool for changed (i.e. no longer empty) PEBs.
447 * @ubi: UBI device object
448 * @ai: attach info object
449 * @pebs: an array of all PEB numbers in the pool to be scanned
450 * @pool_size: size of the pool (number of entries in @pebs)
451 * @max_sqnum: pointer to the maximal sequence number
452 * @free: list of PEBs which are most likely free (and go into @ai->free)
454 * Returns 0 on success, UBI_BAD_FASTMAP if the pool is unusable.
455 * < 0 indicates an internal error.
462 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
463 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
464 struct list_head *free)
467 struct ubi_vid_hdr *vh;
468 struct ubi_ec_hdr *ech;
469 struct ubi_ainf_peb *new_aeb;
470 int i, pnum, err, ret = 0;
472 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
476 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
482 dbg_bld("scanning fastmap pool: size = %i", pool_size);
485 * Now scan all PEBs in the pool to find changes which have been made
486 * after the creation of the fastmap
488 for (i = 0; i < pool_size; i++) {
492 pnum = be32_to_cpu(pebs[i]);
494 if (ubi_io_is_bad(ubi, pnum)) {
495 ubi_err(ubi, "bad PEB in fastmap pool!");
496 ret = UBI_BAD_FASTMAP;
500 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
501 if (err && err != UBI_IO_BITFLIPS) {
502 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
504 ret = err > 0 ? UBI_BAD_FASTMAP : err;
506 } else if (err == UBI_IO_BITFLIPS)
510 * Older UBI implementations have image_seq set to zero, so
511 * we shouldn't fail if image_seq == 0.
513 image_seq = be32_to_cpu(ech->image_seq);
515 if (image_seq && (image_seq != ubi->image_seq)) {
516 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
517 be32_to_cpu(ech->image_seq), ubi->image_seq);
518 ret = UBI_BAD_FASTMAP;
522 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
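/*
 * An empty (all 0xFF) VID header area means the PEB was handed out from
 * the pool but never written, so it is still free; a readable VID header
 * means the PEB now carries LEB data and must be merged into the attach
 * info. Bitflips in either case mark the PEB for scrubbing.
 */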
523 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
524 unsigned long long ec = be64_to_cpu(ech->ec);
526 dbg_bld("Adding PEB to free: %i", pnum);
527 if (err == UBI_IO_FF_BITFLIPS)
528 add_aeb(ai, free, pnum, ec, 1);
530 add_aeb(ai, free, pnum, ec, 0);
532 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
533 dbg_bld("Found non empty PEB:%i in pool", pnum);
535 if (err == UBI_IO_BITFLIPS)
538 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
545 new_aeb->ec = be64_to_cpu(ech->ec);
546 new_aeb->pnum = pnum;
547 new_aeb->lnum = be32_to_cpu(vh->lnum);
548 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
549 new_aeb->copy_flag = vh->copy_flag;
550 new_aeb->scrub = scrub;
552 if (*max_sqnum < new_aeb->sqnum)
553 *max_sqnum = new_aeb->sqnum;
555 err = process_pool_aeb(ubi, ai, vh, new_aeb);
557 ret = err > 0 ? UBI_BAD_FASTMAP : err;
561 /* We are paranoid and fall back to scanning mode */
562 ubi_err(ubi, "fastmap pool contains damaged PEBs!");
563 ret = err > 0 ? UBI_BAD_FASTMAP : err;
570 ubi_free_vid_hdr(ubi, vh);
576 * count_fastmap_pebs - Counts the PEBs found by fastmap.
577 * @ai: The UBI attach info object
579 static int count_fastmap_pebs(struct ubi_attach_info *ai)
581 struct ubi_ainf_peb *aeb;
582 struct ubi_ainf_volume *av;
583 struct rb_node *rb1, *rb2;
586 list_for_each_entry(aeb, &ai->erase, u.list)
589 list_for_each_entry(aeb, &ai->free, u.list)
592 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
593 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
600 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
601 * @ubi: UBI device object
602 * @ai: UBI attach info object
603 * @fm: the fastmap to be attached
605 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
606 * < 0 indicates an internal error.
608 static int ubi_attach_fastmap(struct ubi_device *ubi,
609 struct ubi_attach_info *ai,
610 struct ubi_fastmap_layout *fm)
612 struct list_head used, free;
613 struct ubi_ainf_volume *av;
614 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
615 struct ubi_fm_sb *fmsb;
616 struct ubi_fm_hdr *fmhdr;
617 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
618 struct ubi_fm_ec *fmec;
619 struct ubi_fm_volhdr *fmvhdr;
620 struct ubi_fm_eba *fm_eba;
621 int ret, i, j, pool_size, wl_pool_size;
622 size_t fm_pos = 0, fm_size = ubi->fm_size;
623 unsigned long long max_sqnum = 0;
624 void *fm_raw = ubi->fm_buf;
626 INIT_LIST_HEAD(&used);
627 INIT_LIST_HEAD(&free);
628 ai->min_ec = UBI_MAX_ERASECOUNTER;
630 fmsb = (struct ubi_fm_sb *)(fm_raw);
631 ai->max_sqnum = fmsb->sqnum;
632 fm_pos += sizeof(struct ubi_fm_sb);
633 if (fm_pos >= fm_size)
636 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
637 fm_pos += sizeof(*fmhdr);
638 if (fm_pos >= fm_size)
641 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
642 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
643 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
647 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
648 fm_pos += sizeof(*fmpl);
649 if (fm_pos >= fm_size)
651 if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
652 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
653 be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
657 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
658 fm_pos += sizeof(*fmpl_wl);
659 if (fm_pos >= fm_size)
661 if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
662 ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
663 be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
667 pool_size = be16_to_cpu(fmpl->size);
668 wl_pool_size = be16_to_cpu(fmpl_wl->size);
669 fm->max_pool_size = be16_to_cpu(fmpl->max_size);
670 fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
672 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
673 ubi_err(ubi, "bad pool size: %i", pool_size);
677 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
678 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
683 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
684 fm->max_pool_size < 0) {
685 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
689 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
690 fm->max_wl_pool_size < 0) {
691 ubi_err(ubi, "bad maximal WL pool size: %i",
692 fm->max_wl_pool_size);
696 /* read EC values from free list */
697 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
698 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
699 fm_pos += sizeof(*fmec);
700 if (fm_pos >= fm_size)
703 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
704 be32_to_cpu(fmec->ec), 0);
707 /* read EC values from used list */
708 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
709 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
710 fm_pos += sizeof(*fmec);
711 if (fm_pos >= fm_size)
714 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
715 be32_to_cpu(fmec->ec), 0);
718 /* read EC values from scrub list */
719 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
720 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
721 fm_pos += sizeof(*fmec);
722 if (fm_pos >= fm_size)
725 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
726 be32_to_cpu(fmec->ec), 1);
729 /* read EC values from erase list */
730 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
731 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
732 fm_pos += sizeof(*fmec);
733 if (fm_pos >= fm_size)
736 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
737 be32_to_cpu(fmec->ec), 1);
740 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
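/*
 * ec_sum and ec_count were accumulated by add_aeb() above; the resulting
 * mean erase counter is used by the rest of the attach code, e.g. as a
 * fallback EC value for PEBs whose own counter is unknown.
 */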
741 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
743 /* Iterate over all volumes and read their EBA table */
744 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
745 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
746 fm_pos += sizeof(*fmvhdr);
747 if (fm_pos >= fm_size)
750 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
751 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
752 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
756 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
757 be32_to_cpu(fmvhdr->used_ebs),
758 be32_to_cpu(fmvhdr->data_pad),
760 be32_to_cpu(fmvhdr->last_eb_bytes));
764 if (PTR_ERR(av) == -EINVAL) {
765 ubi_err(ubi, "volume (ID %i) already exists",
771 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
772 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
774 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
775 fm_pos += sizeof(*fm_eba);
776 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
777 if (fm_pos >= fm_size)
780 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
781 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
782 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
786 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
787 int pnum = be32_to_cpu(fm_eba->pnum[j]);
789 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
793 list_for_each_entry(tmp_aeb, &used, u.list) {
794 if (tmp_aeb->pnum == pnum) {
801 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
807 if (av->highest_lnum <= aeb->lnum)
808 av->highest_lnum = aeb->lnum;
810 assign_aeb_to_av(ai, aeb, av);
812 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
813 aeb->pnum, aeb->lnum, av->vol_id);
817 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
821 ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
825 if (max_sqnum > ai->max_sqnum)
826 ai->max_sqnum = max_sqnum;
828 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
829 list_move_tail(&tmp_aeb->u.list, &ai->free);
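/*
 * PEBs still sitting on the local used list were listed by the fastmap
 * but are no longer referenced by any volume's EBA table, so their
 * contents are stale and they are scheduled for erasure.
 */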
831 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
832 list_move_tail(&tmp_aeb->u.list, &ai->erase);
834 ubi_assert(list_empty(&free));
837 * If fastmap is leaking PEBs (must not happen), raise a
838 * fat warning and fall back to scanning mode.
839 * We do this here because in ubi_wl_init() it's too late
840 * and we cannot fall back to scanning.
#ifndef __UBOOT__
843 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
844 ai->bad_peb_count - fm->used_blocks))
#else
847 if (count_fastmap_pebs(ai) != ubi->peb_count -
848 ai->bad_peb_count - fm->used_blocks) {
#endif
857 ret = UBI_BAD_FASTMAP;
859 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
860 list_del(&tmp_aeb->u.list);
861 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
863 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
864 list_del(&tmp_aeb->u.list);
865 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
872 * ubi_scan_fastmap - scan the fastmap.
873 * @ubi: UBI device object
874 * @ai: UBI attach info to be filled
875 * @fm_anchor: The fastmap starts at this PEB
877 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
878 * UBI_BAD_FASTMAP if one was found but is not usable.
879 * < 0 indicates an internal error.
881 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
884 struct ubi_fm_sb *fmsb, *fmsb2;
885 struct ubi_vid_hdr *vh;
886 struct ubi_ec_hdr *ech;
887 struct ubi_fastmap_layout *fm;
888 int i, used_blocks, pnum, ret = 0;
891 unsigned long long sqnum = 0;
893 down_write(&ubi->fm_protect);
894 memset(ubi->fm_buf, 0, ubi->fm_size);
896 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
902 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
909 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
910 if (ret && ret != UBI_IO_BITFLIPS)
912 else if (ret == UBI_IO_BITFLIPS)
913 fm->to_be_tortured[0] = 1;
915 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
916 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
917 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
918 ret = UBI_BAD_FASTMAP;
922 if (fmsb->version != UBI_FM_FMT_VERSION) {
923 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
924 fmsb->version, UBI_FM_FMT_VERSION);
925 ret = UBI_BAD_FASTMAP;
929 used_blocks = be32_to_cpu(fmsb->used_blocks);
930 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
931 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
933 ret = UBI_BAD_FASTMAP;
937 fm_size = ubi->leb_size * used_blocks;
938 if (fm_size != ubi->fm_size) {
939 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
940 fm_size, ubi->fm_size);
941 ret = UBI_BAD_FASTMAP;
945 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
951 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
957 for (i = 0; i < used_blocks; i++) {
960 pnum = be32_to_cpu(fmsb->block_loc[i]);
962 if (ubi_io_is_bad(ubi, pnum)) {
963 ret = UBI_BAD_FASTMAP;
967 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
968 if (ret && ret != UBI_IO_BITFLIPS) {
969 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
972 ret = UBI_BAD_FASTMAP;
974 } else if (ret == UBI_IO_BITFLIPS)
975 fm->to_be_tortured[i] = 1;
977 image_seq = be32_to_cpu(ech->image_seq);
979 ubi->image_seq = image_seq;
982 * Older UBI implementations have image_seq set to zero, so
983 * we shouldn't fail if image_seq == 0.
985 if (image_seq && (image_seq != ubi->image_seq)) {
986 ubi_err(ubi, "wrong image seq:%d instead of %d",
987 be32_to_cpu(ech->image_seq), ubi->image_seq);
988 ret = UBI_BAD_FASTMAP;
992 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
993 if (ret && ret != UBI_IO_BITFLIPS) {
994 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
1000 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1001 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
1002 be32_to_cpu(vh->vol_id),
1003 UBI_FM_SB_VOLUME_ID);
1004 ret = UBI_BAD_FASTMAP;
1008 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1009 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1010 be32_to_cpu(vh->vol_id),
1011 UBI_FM_DATA_VOLUME_ID);
1012 ret = UBI_BAD_FASTMAP;
1017 if (sqnum < be64_to_cpu(vh->sqnum))
1018 sqnum = be64_to_cpu(vh->sqnum);
1020 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
1021 ubi->leb_start, ubi->leb_size);
1022 if (ret && ret != UBI_IO_BITFLIPS) {
1023 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1024 "err: %i)", i, pnum, ret);
1032 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
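/*
 * The CRC stored in the super block was computed with the data_crc field
 * itself zeroed, so save the stored value, zero the field and recompute
 * the CRC over the whole fastmap buffer before comparing.
 */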
1033 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1034 fmsb2->data_crc = 0;
1035 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1036 if (crc != tmp_crc) {
1037 ubi_err(ubi, "fastmap data CRC is invalid");
1038 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1040 ret = UBI_BAD_FASTMAP;
1044 fmsb2->sqnum = sqnum;
1046 fm->used_blocks = used_blocks;
1048 ret = ubi_attach_fastmap(ubi, ai, fm);
1051 ret = UBI_BAD_FASTMAP;
1055 for (i = 0; i < used_blocks; i++) {
1056 struct ubi_wl_entry *e;
1058 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1067 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1068 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1073 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1074 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1075 ubi_msg(ubi, "attached by fastmap");
1076 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1077 ubi_msg(ubi, "fastmap WL pool size: %d",
1078 ubi->fm_wl_pool.max_size);
1079 ubi->fm_disabled = 0;
1081 ubi_free_vid_hdr(ubi, vh);
1084 up_write(&ubi->fm_protect);
1085 if (ret == UBI_BAD_FASTMAP)
1086 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1090 ubi_free_vid_hdr(ubi, vh);
1099 * ubi_write_fastmap - writes a fastmap.
1100 * @ubi: UBI device object
1101 * @new_fm: the fastmap to be written
1103 * Returns 0 on success, < 0 indicates an internal error.
1105 static int ubi_write_fastmap(struct ubi_device *ubi,
1106 struct ubi_fastmap_layout *new_fm)
1110 struct ubi_fm_sb *fmsb;
1111 struct ubi_fm_hdr *fmh;
1112 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1113 struct ubi_fm_ec *fec;
1114 struct ubi_fm_volhdr *fvh;
1115 struct ubi_fm_eba *feba;
1116 struct ubi_wl_entry *wl_e;
1117 struct ubi_volume *vol;
1118 struct ubi_vid_hdr *avhdr, *dvhdr;
1119 struct ubi_work *ubi_wrk;
1120 struct rb_node *tmp_rb;
1121 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1122 int scrub_peb_count, erase_peb_count;
1123 int *seen_pebs = NULL;
1125 fm_raw = ubi->fm_buf;
1126 memset(ubi->fm_buf, 0, ubi->fm_size);
1128 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1134 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1140 seen_pebs = init_seen(ubi);
1141 if (IS_ERR(seen_pebs)) {
1142 ret = PTR_ERR(seen_pebs);
1146 spin_lock(&ubi->volumes_lock);
1147 spin_lock(&ubi->wl_lock);
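/*
 * Both locks stay held while the in-RAM state (pools, wear-leveling
 * trees, volume EBA tables) is serialized below, so the fastmap is a
 * consistent snapshot of the device state.
 */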
1149 fmsb = (struct ubi_fm_sb *)fm_raw;
1150 fm_pos += sizeof(*fmsb);
1151 ubi_assert(fm_pos <= ubi->fm_size);
1153 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1154 fm_pos += sizeof(*fmh);
1155 ubi_assert(fm_pos <= ubi->fm_size);
1157 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1158 fmsb->version = UBI_FM_FMT_VERSION;
1159 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1160 /* the max sqnum will be filled in while *reading* the fastmap */
1163 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1166 scrub_peb_count = 0;
1167 erase_peb_count = 0;
1170 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1171 fm_pos += sizeof(*fmpl);
1172 fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1173 fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1174 fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1176 for (i = 0; i < ubi->fm_pool.size; i++) {
1177 fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1178 set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1181 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1182 fm_pos += sizeof(*fmpl_wl);
1183 fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1184 fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1185 fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1187 for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1188 fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1189 set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1192 ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1193 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1195 fec->pnum = cpu_to_be32(wl_e->pnum);
1196 set_seen(ubi, wl_e->pnum, seen_pebs);
1197 fec->ec = cpu_to_be32(wl_e->ec);
1200 fm_pos += sizeof(*fec);
1201 ubi_assert(fm_pos <= ubi->fm_size);
1203 fmh->free_peb_count = cpu_to_be32(free_peb_count);
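/*
 * Wear-leveling "used" PEBs and PEBs under erase-counter protection are
 * both recorded under used_peb_count below; PEBs marked for scrubbing
 * are recorded separately so that an attach can re-queue them for
 * scrubbing.
 */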
1205 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1206 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1208 fec->pnum = cpu_to_be32(wl_e->pnum);
1209 set_seen(ubi, wl_e->pnum, seen_pebs);
1210 fec->ec = cpu_to_be32(wl_e->ec);
1213 fm_pos += sizeof(*fec);
1214 ubi_assert(fm_pos <= ubi->fm_size);
1217 ubi_for_each_protected_peb(ubi, i, wl_e) {
1218 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1220 fec->pnum = cpu_to_be32(wl_e->pnum);
1221 set_seen(ubi, wl_e->pnum, seen_pebs);
1222 fec->ec = cpu_to_be32(wl_e->ec);
1225 fm_pos += sizeof(*fec);
1226 ubi_assert(fm_pos <= ubi->fm_size);
1228 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1230 ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1231 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1233 fec->pnum = cpu_to_be32(wl_e->pnum);
1234 set_seen(ubi, wl_e->pnum, seen_pebs);
1235 fec->ec = cpu_to_be32(wl_e->ec);
1238 fm_pos += sizeof(*fec);
1239 ubi_assert(fm_pos <= ubi->fm_size);
1241 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
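/*
 * PEBs with a pending erase work are recorded as well; on the next
 * attach they end up on the erase list and are erased again.
 */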
1244 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1245 if (ubi_is_erase_work(ubi_wrk)) {
1249 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1251 fec->pnum = cpu_to_be32(wl_e->pnum);
1252 set_seen(ubi, wl_e->pnum, seen_pebs);
1253 fec->ec = cpu_to_be32(wl_e->ec);
1256 fm_pos += sizeof(*fec);
1257 ubi_assert(fm_pos <= ubi->fm_size);
1260 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1262 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1263 vol = ubi->volumes[i];
1270 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1271 fm_pos += sizeof(*fvh);
1272 ubi_assert(fm_pos <= ubi->fm_size);
1274 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1275 fvh->vol_id = cpu_to_be32(vol->vol_id);
1276 fvh->vol_type = vol->vol_type;
1277 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1278 fvh->data_pad = cpu_to_be32(vol->data_pad);
1279 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1281 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1282 vol->vol_type == UBI_STATIC_VOLUME);
1284 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1285 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1286 ubi_assert(fm_pos <= ubi->fm_size);
1288 for (j = 0; j < vol->reserved_pebs; j++)
1289 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1291 feba->reserved_pebs = cpu_to_be32(j);
1292 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1294 fmh->vol_count = cpu_to_be32(vol_count);
1295 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1297 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1300 spin_unlock(&ubi->wl_lock);
1301 spin_unlock(&ubi->volumes_lock);
1303 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1304 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1306 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1310 for (i = 0; i < new_fm->used_blocks; i++) {
1311 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1312 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1313 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1317 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1320 for (i = 1; i < new_fm->used_blocks; i++) {
1321 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1322 dvhdr->lnum = cpu_to_be32(i);
1323 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1324 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1325 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1327 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1328 new_fm->e[i]->pnum);
1333 for (i = 0; i < new_fm->used_blocks; i++) {
1334 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1335 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1337 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1338 new_fm->e[i]->pnum);
1346 ret = self_check_seen(ubi, seen_pebs);
1347 dbg_bld("fastmap written!");
1350 ubi_free_vid_hdr(ubi, avhdr);
1351 ubi_free_vid_hdr(ubi, dvhdr);
1352 free_seen(seen_pebs);
1358 * erase_block - Manually erase a PEB.
1359 * @ubi: UBI device object
1360 * @pnum: PEB to be erased
1362 * Returns the new EC value on success, < 0 indicates an internal error.
1364 static int erase_block(struct ubi_device *ubi, int pnum)
1367 struct ubi_ec_hdr *ec_hdr;
1370 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1374 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1377 else if (ret && ret != UBI_IO_BITFLIPS) {
1382 ret = ubi_io_sync_erase(ubi, pnum, 0);
1386 ec = be64_to_cpu(ec_hdr->ec);
1388 if (ec > UBI_MAX_ERASECOUNTER) {
1393 ec_hdr->ec = cpu_to_be64(ec);
1394 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1405 * invalidate_fastmap - destroys a fastmap.
1406 * @ubi: UBI device object
1408 * This function ensures that upon next UBI attach a full scan
1409 * is issued. We need this if UBI is about to write a new fastmap
1410 * but is unable to do so. In this case we have two options:
1411 * a) Make sure that the current fastmap will not be used upon
1412 * attach time and continue, or b) fall back to RO mode to have the
1413 * current fastmap in a valid state.
1414 * Returns 0 on success, < 0 indicates an internal error.
1416 static int invalidate_fastmap(struct ubi_device *ubi)
1419 struct ubi_fastmap_layout *fm;
1420 struct ubi_wl_entry *e;
1421 struct ubi_vid_hdr *vh = NULL;
1429 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1433 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1438 e = ubi_wl_get_fm_peb(ubi, 1);
1443 * Create fake fastmap such that UBI will fall back to scanning mode.
1446 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1447 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
1449 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1453 fm->used_blocks = 1;
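/*
 * Only a fresh anchor VID header was written above; it carries a new
 * sequence number but no valid fastmap data behind it, so the next
 * attach cannot use it and falls back to a full scan.
 */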
1459 ubi_free_vid_hdr(ubi, vh);
1468 * return_fm_pebs - returns all PEBs used by a fastmap back to the WL sub-system.
1470 * @ubi: UBI device object
1471 * @fm: fastmap layout object
1473 static void return_fm_pebs(struct ubi_device *ubi,
1474 struct ubi_fastmap_layout *fm)
1481 for (i = 0; i < fm->used_blocks; i++) {
1483 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1484 fm->to_be_tortured[i]);
1491 * ubi_update_fastmap - will be called by UBI if a volume changes or
1492 * a fastmap pool becomes full.
1493 * @ubi: UBI device object
1495 * Returns 0 on success, < 0 indicates an internal error.
1497 int ubi_update_fastmap(struct ubi_device *ubi)
1500 struct ubi_fastmap_layout *new_fm, *old_fm;
1501 struct ubi_wl_entry *tmp_e;
1503 down_write(&ubi->fm_protect);
1505 ubi_refill_pools(ubi);
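/*
 * Refill both pools first so that the fastmap written below describes
 * freshly filled pools.
 */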
1507 if (ubi->ro_mode || ubi->fm_disabled) {
1508 up_write(&ubi->fm_protect);
1512 ret = ubi_ensure_anchor_pebs(ubi);
1514 up_write(&ubi->fm_protect);
1518 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1520 up_write(&ubi->fm_protect);
1524 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1528 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1529 ubi_err(ubi, "fastmap too large");
1534 for (i = 1; i < new_fm->used_blocks; i++) {
1535 spin_lock(&ubi->wl_lock);
1536 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1537 spin_unlock(&ubi->wl_lock);
1540 if (old_fm && old_fm->e[i]) {
1541 ret = erase_block(ubi, old_fm->e[i]->pnum);
1543 ubi_err(ubi, "could not erase old fastmap PEB");
1545 for (j = 1; j < i; j++) {
1546 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1548 new_fm->e[j] = NULL;
1552 new_fm->e[i] = old_fm->e[i];
1553 old_fm->e[i] = NULL;
1555 ubi_err(ubi, "could not get any free erase block");
1557 for (j = 1; j < i; j++) {
1558 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1559 new_fm->e[j] = NULL;
1566 new_fm->e[i] = tmp_e;
1568 if (old_fm && old_fm->e[i]) {
1569 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1570 old_fm->to_be_tortured[i]);
1571 old_fm->e[i] = NULL;
1576 /* Old fastmap is larger than the new one */
1577 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1578 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1579 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1580 old_fm->to_be_tortured[i]);
1581 old_fm->e[i] = NULL;
1585 spin_lock(&ubi->wl_lock);
1586 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1587 spin_unlock(&ubi->wl_lock);
1590 /* no fresh anchor PEB was found, reuse the old one */
1592 ret = erase_block(ubi, old_fm->e[0]->pnum);
1594 ubi_err(ubi, "could not erase old anchor PEB");
1596 for (i = 1; i < new_fm->used_blocks; i++) {
1597 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1599 new_fm->e[i] = NULL;
1603 new_fm->e[0] = old_fm->e[0];
1604 new_fm->e[0]->ec = ret;
1605 old_fm->e[0] = NULL;
1607 /* we've got a new anchor PEB, return the old one */
1608 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1609 old_fm->to_be_tortured[0]);
1610 new_fm->e[0] = tmp_e;
1611 old_fm->e[0] = NULL;
1615 ubi_err(ubi, "could not find any anchor PEB");
1617 for (i = 1; i < new_fm->used_blocks; i++) {
1618 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1619 new_fm->e[i] = NULL;
1625 new_fm->e[0] = tmp_e;
1628 down_write(&ubi->work_sem);
1629 down_write(&ubi->fm_eba_sem);
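/*
 * Hold work_sem and fm_eba_sem across the write so that no work is
 * processed and no EBA table changes while the new fastmap is written.
 */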
1630 ret = ubi_write_fastmap(ubi, new_fm);
1631 up_write(&ubi->fm_eba_sem);
1632 up_write(&ubi->work_sem);
1638 up_write(&ubi->fm_protect);
1643 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1645 ret = invalidate_fastmap(ubi);
1647 ubi_err(ubi, "Unable to invalidate current fastmap!");
1650 return_fm_pebs(ubi, old_fm);
1651 return_fm_pebs(ubi, new_fm);