static ulong spl_nand_fit_read(struct spl_load_info *load, ulong offs,
			       ulong size, void *dst)
{
+	ulong sector;
	int ret;

+	sector = *(int *)load->priv;
+	offs = sector + nand_spl_adjust_offset(sector, offs - sector);
	ret = nand_spl_load_image(offs, size, dst);
	if (!ret)
		return size;

	return 0;
}

		debug("Found FIT\n");
		load.dev = NULL;
-		load.priv = NULL;
+		load.priv = &offset;
		load.filename = NULL;
		load.bl_len = 1;
		load.read = spl_nand_fit_read;
+/**
+ * nand_spl_adjust_offset - Adjust offset from a starting sector
+ * @sector: Address of the sector
+ * @offs: Offset starting from @sector
+ *
+ * If one or more bad blocks are in the address space between @sector
+ * and @sector + @offs, @offs is increased by the NAND block size for
+ * each bad block found.
+ */
+u32 nand_spl_adjust_offset(u32 sector, u32 offs)
+{
+	unsigned int block, lastblock;
+
+	block = sector / CONFIG_SYS_NAND_BLOCK_SIZE;
+	lastblock = (sector + offs) / CONFIG_SYS_NAND_BLOCK_SIZE;
+
+	while (block <= lastblock) {
+		if (nand_is_bad_block(block)) {
+			offs += CONFIG_SYS_NAND_BLOCK_SIZE;
+			lastblock++;
+		}
+
+		block++;
+	}
+
+	return offs;
+}
+
#ifdef CONFIG_SPL_UBI
/*
* Temporary storage for non NAND page aligned and non NAND page sized
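As an illustration of the bad-block skip added above, here is a minimal standalone sketch that is not part of the patch: BLOCK_SIZE and fake_is_bad_block() are made-up stand-ins for CONFIG_SYS_NAND_BLOCK_SIZE and nand_is_bad_block(), and all addresses are hypothetical. It replays the same loop as nand_spl_adjust_offset() and the sector-relative translation that spl_nand_fit_read() performs.

/* Standalone sketch only -- not U-Boot code. */
#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE 0x20000u	/* hypothetical 128 KiB erase block */

/* hypothetical bad-block table: only block 0x11 is bad */
static int fake_is_bad_block(unsigned int block)
{
	return block == 0x11;
}

/* same skip loop as nand_spl_adjust_offset(), against the fake table */
static uint32_t adjust_offset(uint32_t sector, uint32_t offs)
{
	unsigned int block = sector / BLOCK_SIZE;
	unsigned int lastblock = (sector + offs) / BLOCK_SIZE;

	while (block <= lastblock) {
		if (fake_is_bad_block(block)) {
			offs += BLOCK_SIZE;	/* data shifted past the bad block */
			lastblock++;		/* scan one more block */
		}
		block++;
	}

	return offs;
}

int main(void)
{
	uint32_t sector = 0x200000;	/* start of the FIT image on NAND */
	uint32_t offs = 0x240000;	/* logical offset requested by the FIT loader */

	/* the translation spl_nand_fit_read() applies before reading */
	uint32_t phys = sector + adjust_offset(sector, offs - sector);

	printf("logical 0x%x -> physical 0x%x\n",
	       (unsigned int)offs, (unsigned int)phys);

	return 0;
}

With block 0x11 (0x220000..0x23ffff) marked bad in this made-up layout, the logical offset 0x240000 inside the FIT is read from physical offset 0x260000, one erase block further out.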
		int allexcept);
int nand_get_lock_status(struct mtd_info *mtd, loff_t offset);
+u32 nand_spl_adjust_offset(u32 sector, u32 offs);
int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst);
int nand_spl_read_block(int block, int offset, int len, void *dst);
void nand_deselect(void);