struct _ext_info ext_info;
/* added for slc */
struct _fip_info fip_info;
+ uint32_t ddrp_start_page;
};
-
union nand_core_clk {
/*raw register data */
uint32_t d32;
#define NAND_ENV_BLOCK_NUM 8
#define NAND_KEY_BLOCK_NUM 8
#define NAND_DTB_BLOCK_NUM 4
+#define NAND_DDR_BLOCK_NUM 2
#define AML_CHIP_NONE_RB 4
#define AML_INTERLEAVING_MODE 8
#define KEY_NAND_MAGIC "nkey"
#define SEC_NAND_MAGIC "nsec"
#define DTB_NAND_MAGIC "ndtb"
+#define DDR_NAND_MAGIC "nddr"
#define NAND_SYS_PART_SIZE 0x8000000
struct nand_menson_key {
struct aml_nandrsv_info_t *aml_nandenv_info;
struct aml_nandrsv_info_t *aml_nandkey_info;
struct aml_nandrsv_info_t *aml_nanddtb_info;
+ struct aml_nandrsv_info_t *aml_nandddr_info;
struct aml_nand_bch_desc *bch_desc;
/*global variable for retry support*/
int aml_nand_key_check(struct mtd_info *mtd);
+int aml_nand_ddr_check(struct mtd_info *mtd);
+
/*int aml_nand_free_valid_env(struct mtd_info *mtd);*/
int aml_nand_save_bbt(struct mtd_info *mtd, u_char *buf);
#endif
aml_nand_key_check(mtd);
aml_nand_dtb_check(mtd);
+ aml_nand_ddr_check(mtd);
if (aml_chip->support_new_nand == 1) {
if ((new_nand_info->type)
struct nand_setup *p_nand_setup = NULL;
int each_boot_pages, boot_num, bbt_pages;
uint32_t pages_per_blk_shift, bbt_size;
+ uint32_t ddrp_start_block = 0;
pages_per_blk_shift = (chip->phys_erase_shift - chip->page_shift);
aml_chip_normal = mtd_to_nand_chip(nand_info[1]);
p_ext_info->bbt_occupy_pages = bbt_pages;
p_ext_info->bbt_start_block =
(BOOT_TOTAL_PAGES >> pages_per_blk_shift) + NAND_GAP_BLOCK_NUM;
+ ddrp_start_block = aml_chip_normal->aml_nandddr_info->start_block;
+ p_nand_page0->ddrp_start_page =
+ (ddrp_start_block << pages_per_blk_shift)
+ + aml_chip_normal->aml_nandddr_info->valid_node->phy_page_addr;
+ pr_info("ddrp_start_page = 0x%x ddr_start_block = 0x%x\n",
+ p_nand_page0->ddrp_start_page, ddrp_start_block);
/* fill descrete infos */
if (aml_chip->bl_mode) {
p_fip_info->version = 1;
aml_chip->aml_nanddtb_info->size = aml_chip->dtbsize;
memcpy(aml_chip->aml_nanddtb_info->name, DTB_NAND_MAGIC, 4);
+ aml_chip->aml_nandddr_info =
+ kzalloc(sizeof(struct aml_nandrsv_info_t), GFP_KERNEL);
+ if (aml_chip->aml_nandddr_info == NULL)
+ return -ENOMEM;
+ aml_chip->aml_nandddr_info->mtd = mtd;
+ aml_chip->aml_nandddr_info->valid_node =
+ kzalloc(sizeof(struct valid_node_t), GFP_KERNEL);
+ if (aml_chip->aml_nandddr_info->valid_node == NULL)
+ return -ENOMEM;
+
+ aml_chip->aml_nandddr_info->valid_node->phy_blk_addr = -1;
+ aml_chip->aml_nandddr_info->start_block =
+ aml_chip->aml_nanddtb_info->end_block;
+ aml_chip->aml_nandddr_info->end_block =
+ aml_chip->aml_nanddtb_info->end_block + NAND_DDR_BLOCK_NUM;
+ aml_chip->aml_nandddr_info->size = aml_chip->dtbsize;
+ memcpy(aml_chip->aml_nandddr_info->name, DDR_NAND_MAGIC, 4);
+
if ((vernier - (BOOT_TOTAL_PAGES >> pages_per_blk_shift)) >
RESERVED_BLOCK_NUM) {
pr_info("ERROR: total blk number is over the limit\n");
aml_chip->aml_nandkey_info->start_block);
pr_info("dtb_start=%d\n",
aml_chip->aml_nanddtb_info->start_block);
+ pr_info("ddr_start=%d\n",
+ aml_chip->aml_nandddr_info->start_block);
return 0;
}
return ret;
}
+/*
+ * aml_nand_ddr_check - scan NAND for the DDR parameter reserved area.
+ * @mtd: MTD device holding the reserved-info regions.
+ *
+ * Scans the reserved-info region described by aml_nandddr_info (set up
+ * during chip init) for a valid "nddr" copy. Logs when the scan returns
+ * an unexpected error, and when no valid DDR copy exists on flash.
+ *
+ * Return: result of aml_nand_scan_rsv_info() (0 on success, -1 when no
+ * valid copy is found, other negative values on scan error).
+ */
+int aml_nand_ddr_check(struct mtd_info *mtd)
+{
+	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
+	int ret = 0;
+
+	ret = aml_nand_scan_rsv_info(mtd, aml_chip->aml_nandddr_info);
+	/* -1 means "not found", which is reported separately below */
+	if (ret != 0 && ret != -1)
+		pr_info("%s %d\n", __func__, __LINE__);
+
+	/* was "NO dtb exist" — copy-paste from aml_nand_dtb_check() */
+	if (aml_chip->aml_nandddr_info->valid == 0)
+		pr_info("%s %d NO ddr exist\n", __func__, __LINE__);
+
+	return ret;
+}
+
int aml_nand_bbt_check(struct mtd_info *mtd)
{
struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);