From 8c44111fb034cbd8f454ea2d5db7342a8df2ed62 Mon Sep 17 00:00:00 2001 From: "yi.zeng" Date: Sat, 29 Jul 2017 23:57:50 +0800 Subject: [PATCH] mtd: nand: add nandkey and secure storge PD#148390: mtd: nand: add nandkey and secure storage Change-Id: I220688cd14d8bf16105871e0bf0b233591b1bd33 Signed-off-by: yi.zeng --- MAINTAINERS | 3 + drivers/amlogic/mtd_meson8b/Makefile | 5 +- drivers/amlogic/mtd_meson8b/aml_mtd.h | 38 +- drivers/amlogic/mtd_meson8b/aml_nand.c | 13 +- drivers/amlogic/mtd_meson8b/env_old.c | 3 +- drivers/amlogic/mtd_meson8b/key_old.c | 114 --- drivers/amlogic/mtd_meson8b/m3_nand.c | 63 +- drivers/amlogic/mtd_meson8b/nand_key.c | 993 +++++++++++++++++++++++++++ drivers/amlogic/mtd_meson8b/secure_storage.c | 743 ++++++++++++++++++++ 9 files changed, 1837 insertions(+), 138 deletions(-) delete mode 100644 drivers/amlogic/mtd_meson8b/key_old.c create mode 100644 drivers/amlogic/mtd_meson8b/nand_key.c create mode 100644 drivers/amlogic/mtd_meson8b/secure_storage.c diff --git a/MAINTAINERS b/MAINTAINERS index 3e64440..1a3a4d3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13885,6 +13885,9 @@ F: sound/soc/codecs/amlogic/aml_pmu3.h AMLOGIC MTD MESON8B DRIVER M: Yonghui Yu F: drivers/amlogic/mtd_meson8b/ +M: Yi.Zeng +F: drivers/amlogic/mtd_meson8b/nand_key.c +F: drivers/amlogic/mtd_meson8b/secure_storage.c AMLOGIC PM/SLEEP M8B DRIVER SUPPORT M: Qiufang Dai diff --git a/drivers/amlogic/mtd_meson8b/Makefile b/drivers/amlogic/mtd_meson8b/Makefile index 24d982a..b0ae301 100644 --- a/drivers/amlogic/mtd_meson8b/Makefile +++ b/drivers/amlogic/mtd_meson8b/Makefile @@ -5,7 +5,8 @@ obj-$(CONFIG_AMLOGIC_M8B_NAND) += aml_nand.o \ m3_nand.o \ env_old.o \ - key_old.o \ + nand_key.o \ nand_flash.o \ mtd_driver.o \ - new_nand.o + new_nand.o \ + secure_storage.o diff --git a/drivers/amlogic/mtd_meson8b/aml_mtd.h b/drivers/amlogic/mtd_meson8b/aml_mtd.h index 59125ab..6266011 100644 --- a/drivers/amlogic/mtd_meson8b/aml_mtd.h +++ b/drivers/amlogic/mtd_meson8b/aml_mtd.h @@ -117,19 +117,23 @@ struct _ext_info { uint32_t read_info; uint32_t new_type; uint32_t page_per_blk; - uint32_t xlc; + uint32_t secure_block; + uint32_t secure_start_blk; + //uint32_t xlc; uint32_t ce_mask; - uint32_t boot_num; - uint32_t each_boot_pages; - uint32_t rsv[2]; + //uint32_t boot_num; + //uint32_t each_boot_pages; + //uint32_t rsv[2]; + uint32_t secure_end_blk; /* add new below, */ + uint32_t reserved; }; /*max size is 384 bytes*/ struct _nand_page0 { struct nand_setup nand_setup; unsigned char page_list[16]; - struct _nand_cmd retry_usr[32]; + struct _nand_cmd retry_usr[164]; struct _ext_info ext_info; }; @@ -416,6 +420,9 @@ struct aml_nand_bch_desc { #define NAND_MINI_PART_BLOCKNUM 2 +#define NAND_KEY_SAVE_MULTI_BLOCK //key save in multi block same time + + struct aml_nand_read_retry { u8 flag; u8 reg_cnt; @@ -484,7 +491,16 @@ struct aml_nandkey_info_t { int start_block; int end_block; }; - +struct aml_nandsecure_info_t { + struct mtd_info *mtd; + struct env_valid_node_t *secure_valid_node; + struct env_free_node_t *secure_free_node; + u_char secure_valid; + u_char secure_init; + u_char part_num_before_sys; + int start_block; + int end_block; +}; struct aml_nand_chip { struct mtd_info *mtd; struct nand_chip chip; @@ -541,6 +557,8 @@ struct aml_nand_chip { struct aml_nandenv_info_t *aml_nandenv_info; unsigned int update_env_flag; struct aml_nandkey_info_t *aml_nandkey_info; + struct aml_nandsecure_info_t *aml_nandsecure_info; + unsigned int secure_protect; #else struct aml_nandrsv_info_t *aml_nandbbt_info; struct 
aml_nandrsv_info_t *aml_nandenv_info; @@ -800,11 +818,11 @@ int aml_key_init(struct aml_nand_chip *aml_chip); int aml_nand_rsv_erase_protect(struct mtd_info *mtd, unsigned int block_addr); -int aml_nand_save_key(struct mtd_info *mtd, u_char *buf); +//int aml_nand_save_key(struct mtd_info *mtd, u_char *buf); -int aml_nand_read_key(struct mtd_info *mtd, size_t offset, u_char *buf); +//int aml_nand_read_key(struct mtd_info *mtd, size_t offset, u_char *buf); -int aml_nand_key_check(struct mtd_info *mtd); +//int aml_nand_key_check(struct mtd_info *mtd); /*int aml_nand_free_valid_env(struct mtd_info *mtd);*/ @@ -859,6 +877,8 @@ extern uint8_t nand_boot_flag; /*external defined variable*/ extern int info_disprotect; extern struct aml_nand_flash_dev aml_nand_flash_ids[]; +extern int secure_device_init(struct mtd_info *mtd); +extern bool meson_secure_enabled(void); void aml_nand_new_nand_param_init(struct mtd_info *mtd, struct aml_nand_flash_dev *type); diff --git a/drivers/amlogic/mtd_meson8b/aml_nand.c b/drivers/amlogic/mtd_meson8b/aml_nand.c index f403b17..dc5e17f 100644 --- a/drivers/amlogic/mtd_meson8b/aml_nand.c +++ b/drivers/amlogic/mtd_meson8b/aml_nand.c @@ -306,6 +306,7 @@ static int aml_nand_add_partition(struct aml_nand_chip *aml_chip) unsigned int bad_blk_addr[128]; #ifdef CONFIG_AMLOGIC_M8B_NANDKEY uint64_t key_block; + uint64_t secure_block = 0; #endif mini_part_size = @@ -340,6 +341,10 @@ static int aml_nand_add_partition(struct aml_nand_chip *aml_chip) #ifdef CONFIG_AMLOGIC_M8B_NANDKEY key_block = aml_chip->aml_nandkey_info->end_block - aml_chip->aml_nandkey_info->start_block + 1; + if (meson_secure_enabled()) + secure_block = + aml_chip->aml_nandsecure_info->end_block + - aml_chip->aml_nandsecure_info->start_block + 1; #endif @@ -457,6 +462,8 @@ static int aml_nand_add_partition(struct aml_nand_chip *aml_chip) } else { temp_parts->size -= key_block*mtd->erasesize; } + if (meson_secure_enabled()) + temp_parts->size -= secure_block*mtd->erasesize; #endif } @@ -2617,7 +2624,11 @@ int aml_nand_init(struct aml_nand_chip *aml_chip) err = aml_key_init(aml_chip); if (err) pr_err("invalid nand key\n"); - + if (meson_secure_enabled()) { + err = secure_device_init(mtd); + if (err) + pr_err("invalid secure device\n"); + } #endif /* CONFIG_AMLOGIC_M8B_NANDKEY */ #endif /* CONFIG_AMLOGIC_M8B_NAND */ diff --git a/drivers/amlogic/mtd_meson8b/env_old.c b/drivers/amlogic/mtd_meson8b/env_old.c index c58db8e..1bed74c 100644 --- a/drivers/amlogic/mtd_meson8b/env_old.c +++ b/drivers/amlogic/mtd_meson8b/env_old.c @@ -58,8 +58,9 @@ int aml_nand_rsv_erase_protect(struct mtd_info *mtd, unsigned int block_addr) { if (!_aml_rsv_isprotect()) return 0; - pr_err(" %s() %d: fixme\n", __func__, __LINE__); #if 0 + pr_err(" %s() %d: fixme\n", __func__, __LINE__); + if (aml_chip->aml_nandkey_info != NULL) { if (aml_chip->aml_nandkey_info->valid) if ((block_addr >= aml_chip->aml_nandkey_info->start_block) diff --git a/drivers/amlogic/mtd_meson8b/key_old.c b/drivers/amlogic/mtd_meson8b/key_old.c deleted file mode 100644 index b9df942..0000000 --- a/drivers/amlogic/mtd_meson8b/key_old.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * drivers/amlogic/mtd_old/key_old.c - * - * Copyright (C) 2017 Amlogic, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - */ -#include "key_old.h" -#include "env_old.h" - -static struct aml_nand_chip *aml_chip_key; - -int aml_nand_key_check(struct mtd_info *mtd) -{ - int ret = 0; -#if 0 - struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); - - ret = aml_nand_scan_rsv_info(mtd, aml_chip->aml_nandkey_info); - if ((ret != 0) && ((ret != (-1)))) - pr_info("%s %d\n", __func__, __LINE__); - - if (aml_chip->aml_nandkey_info->valid == 0) - pr_info("%s %d NO key exist\n", __func__, __LINE__); -#endif - pr_info("%s() %d\n", __func__, __LINE__); - return ret; -} - -int aml_nand_key_init(struct mtd_info *mtd) -{ - int ret = 0, error; - struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); - //struct nand_chip *chip = &aml_chip->chip; - struct aml_nandkey_info_t *key_info; - int remain_start_block, remain_total_block; - int remain_block = 0, bad_blk_cnt = 0; - int phys_erase_shift, max_key_blk; - loff_t offset; - - aml_chip->aml_nandkey_info = - kzalloc(sizeof(struct aml_nandkey_info_t), - GFP_KERNEL); - if (aml_chip->aml_nandkey_info == NULL) { - pr_err("%s() %d: ENOMEM\n", __func__, __LINE__); - return -ENOMEM; - } - key_info = aml_chip->aml_nandkey_info; - - key_info->env_init = 0; - key_info->mtd = mtd; - /*key_info->env_valid_node->phy_blk_addr = -1;*/ - - - phys_erase_shift = fls(mtd->erasesize) - 1; - max_key_blk = (NAND_MINIKEY_PART_SIZE >> phys_erase_shift); - if (max_key_blk < NAND_MINIKEY_PART_BLOCKNUM) - max_key_blk = NAND_MINIKEY_PART_BLOCKNUM; - - offset = mtd->size - mtd->erasesize; - remain_start_block = (int)(offset >> phys_erase_shift); - remain_total_block = REMAIN_TAIL_BLOCK_NUM; - key_info->start_block = remain_start_block; - key_info->end_block = remain_start_block; - bad_blk_cnt = 0; - do { - offset = mtd->erasesize; - offset *= remain_start_block; - error = mtd->_block_isbad(mtd, offset); - if (error) { - key_info->nand_bbt_info.nand_bbt[bad_blk_cnt++] = - remain_start_block; - if (bad_blk_cnt >= MAX_BAD_BLK_NUM) { - pr_err("bad block too much,%s\n", __func__); - return -ENOMEM; - } - key_info->start_block--; - remain_start_block--; - continue; - } - remain_start_block--; - } while (++remain_block < remain_total_block); - - key_info->start_block -= (remain_block-1); - - pr_info("%s()%d: key [%d,%d]\n", __func__, __LINE__, - key_info->start_block, key_info->end_block); - - return ret; -} - -int aml_key_init(struct aml_nand_chip *aml_chip) -{ - int ret = 0; - struct mtd_info *mtd = aml_chip->mtd; - /* avoid null */ - aml_chip_key = aml_chip; - - ret = aml_nand_key_init(mtd); - /* fixme, NOOP key */ - /* storage_ops_read(amlnf_key_read); */ - /* storage_ops_write(amlnf_key_write); */ - return ret; -} - diff --git a/drivers/amlogic/mtd_meson8b/m3_nand.c b/drivers/amlogic/mtd_meson8b/m3_nand.c index 19c725f..be28576 100644 --- a/drivers/amlogic/mtd_meson8b/m3_nand.c +++ b/drivers/amlogic/mtd_meson8b/m3_nand.c @@ -1022,20 +1022,32 @@ void __attribute__((unused)) nand_info_page_prepare( **/ u32 devops_len = (BOOT_PAGES_PER_COPY-1) * aml_chip->page_size; /* struct hw_controller *controller = &(aml_chip->controller); */ - int i, nand_read_info; + int i, nand_read_info, ran_mode = 0; u32 en_slc, configure_data; - u32 boot_num = 1, each_boot_pages; + u32 boot_num = 1; u32 valid_pages = BOOT_TOTAL_PAGES; + unsigned char chip_num = 0, plane_num = 0, 
micron_nand = 0; + int k, secure_block, valid_chip_num = 0; struct _nand_page0 *p_nand_page0 = NULL; struct _ext_info *p_ext_info = NULL; struct nand_setup *p_nand_setup = NULL; + struct aml_nand_chip *aml_chip_device1 = NULL; + struct aml_nand_platform *plat = NULL; p_nand_page0 = (struct _nand_page0 *) page0_buf; p_nand_setup = &p_nand_page0->nand_setup; p_ext_info = &p_nand_page0->ext_info; + for (i = 0; i < aml_nand_mid_device.dev_num; i++) { + plat = &aml_nand_mid_device.aml_nand_platform[i]; + if (!strncmp((char *)plat->name, NAND_NORMAL_NAME, + strlen((const char *)NAND_NORMAL_NAME))) { + aml_chip_device1 = plat->aml_chip; + break; + } + } configure_data = NFC_CMD_N2M(aml_chip->ran_mode, aml_chip->bch_mode, 0, (chip->ecc.size >> 3), chip->ecc.steps); @@ -1059,8 +1071,8 @@ void __attribute__((unused)) nand_info_page_prepare( 0, NAND_PAGELIST_CNT); /* chip_num occupy the lowest 2 bit */ - nand_read_info = controller->chip_num; - + /* nand_read_info = controller->chip_num; */ + nand_read_info = 1; /* *make it *1)calu the number of boot saved and pages each boot needs. @@ -1078,21 +1090,50 @@ void __attribute__((unused)) nand_info_page_prepare( else break; } - each_boot_pages = valid_pages/boot_num; + /* each_boot_pages = valid_pages/boot_num; */ p_ext_info->read_info = nand_read_info; p_ext_info->page_per_blk = aml_chip->block_size / aml_chip->page_size; /* fixme, only ce0 is enabled! */ p_ext_info->ce_mask = 0x01; + p_ext_info->new_type = aml_chip->new_nand_info.type; /* xlc is not in using for now */ - p_ext_info->xlc = 1; - p_ext_info->boot_num = boot_num; - p_ext_info->each_boot_pages = each_boot_pages; - + /* p_ext_info->xlc = 1; */ + /* p_ext_info->boot_num = boot_num; */ + /* p_ext_info->each_boot_pages = each_boot_pages; */ +#if 1 + if (meson_secure_enabled()) { + p_ext_info->secure_start_blk = + aml_chip_device1->aml_nandsecure_info->start_block; + p_ext_info->secure_end_blk = + aml_chip_device1->aml_nandsecure_info->end_block; + + valid_chip_num = 0; + for (k = 0; k < 0; k++) { + if (aml_chip_device1->valid_chip[k]) + valid_chip_num++; + } + chip_num = valid_chip_num; + if (aml_chip_device1->plane_num == 2) + plane_num = 1; + ran_mode = aml_chip_device1->ran_mode; + if ((aml_chip_device1->mfr_type == NAND_MFR_MICRON) || + (aml_chip_device1->mfr_type == NAND_MFR_INTEL)) + micron_nand = 1; + nand_read_info = chip_num | (plane_num << 2) | + (ran_mode << 3) | (micron_nand << 4); + p_ext_info->read_info = nand_read_info; + secure_block = + aml_chip_device1->aml_nandsecure_info->start_block; + p_ext_info->secure_block = secure_block; + } +#endif /* pr_info("new_type = 0x%x\n", p_ext_info->new_type); */ pr_info("page_per_blk = 0x%x\n", p_ext_info->page_per_blk); - pr_info("boot_num = %d each_boot_pages = %d", boot_num, - each_boot_pages); + /* + *pr_info("boot_num = %d each_boot_pages = %d", boot_num, + * each_boot_pages); + */ } static int m3_nand_boot_write_page_hwecc(struct mtd_info *mtd, diff --git a/drivers/amlogic/mtd_meson8b/nand_key.c b/drivers/amlogic/mtd_meson8b/nand_key.c new file mode 100644 index 0000000..0a7c0ec --- /dev/null +++ b/drivers/amlogic/mtd_meson8b/nand_key.c @@ -0,0 +1,993 @@ +/* + * drivers/amlogic/mtd_meson8b/nand_key.c + * + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +/* #include "key_old.h" */ +#include "aml_mtd.h" +#include "env_old.h" +#include "linux/crc32.h" +#include + +#define KEYSIZE (CONFIG_KEYSIZE - (sizeof(uint32_t))) + +/*error code*/ +#define NAND_KEY_RD_ERR 2 +#define NAND_KEY_CONFIG_ERR 3 + +#define REMAIN_TAIL_BLOCK_NUM 8 + + +/* static struct aml_nand_chip *aml_chip_key; */ +static int default_keyironment_size = + (KEYSIZE - sizeof(struct aml_nand_bbt_info)); + +struct mesonkey_t { + uint32_t crc;/* CRC32 over data bytes */ + uint8_t data[KEYSIZE];/* Environment data */ +}; + +static struct env_free_node_t *alloc_fn(void) +{ + struct env_free_node_t *fn; + + fn = kzalloc(sizeof(struct env_free_node_t), GFP_KERNEL); + return fn; +} + +static int aml_nand_read_key(struct mtd_info *mtd, + uint64_t offset, u_char *buf) +{ + struct env_oobinfo_t *key_oobinfo; + int error = 0; + uint64_t addr = offset; + size_t amount_loaded = 0; + size_t len; + struct mtd_oob_ops oob_ops; + struct mtd_oob_region aml_oob_region; + uint8_t *data_buf; + uint8_t key_oob_buf[sizeof(struct env_oobinfo_t)]; + + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + + if (!aml_chip->aml_nandkey_info->env_valid) + return 1; + + data_buf = kzalloc(mtd->writesize, GFP_KERNEL); + if (data_buf == NULL) + return -ENOMEM; + + key_oobinfo = (struct env_oobinfo_t *)key_oob_buf; + + error = mtd->ooblayout->free(mtd, 0, + (struct mtd_oob_region *)&aml_oob_region); + if (error != 0) { + pr_err("read oob free failed %s() %d\n", + __func__, __LINE__); + error = NAND_KEY_RD_ERR; + goto exit; + } + + while (amount_loaded < CONFIG_KEYSIZE) { + + oob_ops.mode = MTD_OPS_AUTO_OOB; + oob_ops.len = mtd->writesize; + oob_ops.ooblen = sizeof(struct env_oobinfo_t); + oob_ops.ooboffs = aml_oob_region.offset; + oob_ops.datbuf = data_buf; + oob_ops.oobbuf = key_oob_buf; + + memset((uint8_t *)oob_ops.datbuf, 0x0, mtd->writesize); + memset((uint8_t *)oob_ops.oobbuf, 0x0, oob_ops.ooblen); + + error = mtd->_read_oob(mtd, addr, &oob_ops); + if ((error != 0) && (error != -EUCLEAN)) { + pr_err("blk check failed: %llx, %d\n", + (uint64_t)addr, error); + error = NAND_KEY_RD_ERR; + goto exit; + } + + if (memcmp(key_oobinfo->name, ENV_KEY_MAGIC, 4)) + pr_err("invalid key magic: %llx\n", (uint64_t)addr); + + addr += mtd->writesize; + len = min(mtd->writesize, CONFIG_KEYSIZE - amount_loaded); + memcpy(buf + amount_loaded, data_buf, len); + amount_loaded += mtd->writesize; + } + if (amount_loaded < CONFIG_KEYSIZE) { + error = NAND_KEY_CONFIG_ERR; + pr_err("key size err, w:%d,c %d, %s\n", + mtd->writesize, CONFIG_KEYSIZE, __func__); + goto exit; + } +exit: + kfree(data_buf); + return error; +} + + +static int aml_nand_write_key(struct mtd_info *mtd, + uint64_t offset, u_char *buf) +{ + struct env_oobinfo_t *key_oobinfo; + int error = 0; + uint64_t addr = 0; + size_t amount_saved = 0; + size_t len; + struct mtd_oob_ops oob_ops; + struct mtd_oob_region oob_reg; + uint8_t *data_buf; + uint8_t key_oob_buf[sizeof(struct env_oobinfo_t)]; + + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + + data_buf = kzalloc(mtd->writesize, GFP_KERNEL); + if (data_buf == NULL) + return -ENOMEM; + + addr = offset; + key_oobinfo = (struct env_oobinfo_t *)key_oob_buf; + memcpy(key_oobinfo->name, ENV_KEY_MAGIC, 4); + key_oobinfo->ec = 
aml_chip->aml_nandkey_info->env_valid_node->ec; + key_oobinfo->timestamp = + aml_chip->aml_nandkey_info->env_valid_node->timestamp; + key_oobinfo->status_page = 1; + + error = mtd->ooblayout->free(mtd, 0, (struct mtd_oob_region *)&oob_reg); + if (error != 0) { + pr_err("read oob free failed: %s() %d\n", __func__, __LINE__); + error = 1; + goto exit; + } + + while (amount_saved < CONFIG_KEYSIZE) { + + oob_ops.mode = MTD_OPS_AUTO_OOB; + oob_ops.len = mtd->writesize; + oob_ops.ooblen = sizeof(struct env_oobinfo_t); + oob_ops.ooboffs = oob_reg.offset; + oob_ops.datbuf = data_buf; + oob_ops.oobbuf = key_oob_buf; + + memset((uint8_t *)oob_ops.datbuf, 0x0, mtd->writesize); + len = min(mtd->writesize, CONFIG_KEYSIZE - amount_saved); + memcpy((uint8_t *)oob_ops.datbuf, buf + amount_saved, len); + + error = mtd->_write_oob(mtd, addr, &oob_ops); + if (error) { + pr_err("blk write failed: %llx, %d\n", + (uint64_t)addr, error); + error = 1; + goto exit; + } + + addr += mtd->writesize; + amount_saved += mtd->writesize; + } + if (amount_saved < CONFIG_KEYSIZE) { + error = 1; + pr_err("amount_saved < CONFIG_KEYSIZE, %s\n", __func__); + goto exit; + } + + aml_chip->aml_nandkey_info->env_valid = 1; +exit: + kfree(data_buf); + return error; +} + +static int aml_nand_get_key(struct mtd_info *mtd, u_char *buf) +{ + struct aml_nand_bbt_info *nand_bbt_info; + int error = 0, flag = 0; + uint64_t addr = 0; + struct mesonkey_t *key_ptr = (struct mesonkey_t *)buf; + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct env_valid_node_t *cur_valid_node, *tail_valid_node; + + cur_valid_node = aml_chip->aml_nandkey_info->env_valid_node; + tail_valid_node = cur_valid_node->next; + do { + while (tail_valid_node != NULL) { + if (cur_valid_node->rd_flag == NAND_KEY_RD_ERR) { + cur_valid_node = tail_valid_node; + tail_valid_node = tail_valid_node->next; + } else + break; + } + if (cur_valid_node->rd_flag == NAND_KEY_RD_ERR) { + error = NAND_KEY_RD_ERR; + pr_err("no valid block get key,%s\n", __func__); + goto exit; + } + + addr = cur_valid_node->phy_blk_addr; + addr *= mtd->erasesize; + addr += cur_valid_node->phy_page_addr * mtd->writesize; + pr_info("read:addr:0x%llx,phy_blk_addr:%d,phy_page_addr:%d,%s:%d\n", + addr, + cur_valid_node->phy_blk_addr, + cur_valid_node->phy_page_addr, + __func__, __LINE__); + + error = aml_nand_read_key(mtd, addr, (u_char *)key_ptr); + if (error) { + pr_err("nand key read fail,%s\n", __func__); + if (error == NAND_KEY_RD_ERR) { + cur_valid_node->rd_flag = NAND_KEY_RD_ERR; + flag = 1; + } else + goto exit; + } else + flag = 0; + } while (flag); + nand_bbt_info = &aml_chip->aml_nandkey_info->nand_bbt_info; + memcpy(nand_bbt_info->bbt_head_magic, + key_ptr->data + default_keyironment_size, + sizeof(struct aml_nand_bbt_info)); +exit: + return error; +} + + +static int aml_nand_save_key(struct mtd_info *mtd, u_char *buf) +{ + struct aml_nand_bbt_info *nand_bbt_info; + struct env_free_node_t *env_free_node, *key_tmp_node; + int error = 0, pages_per_blk, i = 1; + uint64_t addr = 0; + struct erase_info aml_key_erase_info; + struct mesonkey_t *key_ptr = (struct mesonkey_t *)buf; + int group_block_count = 0; + int group_max_block = NAND_MINIKEY_PART_BLOCKNUM; + struct env_valid_node_t *tmp_valid_node, *tail_valid_node; + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct aml_nandkey_info_t *keyinfo; + + if (!aml_chip->aml_nandkey_info->env_init) + return 1; + + keyinfo = aml_chip->aml_nandkey_info; + pages_per_blk = mtd->erasesize / mtd->writesize; + if ((mtd->writesize < 
CONFIG_KEYSIZE) && + (keyinfo->env_valid == 1)) + i = (CONFIG_KEYSIZE + mtd->writesize - 1) / mtd->writesize; + + if (keyinfo->env_valid) { + keyinfo->env_valid_node->phy_page_addr += i; + tail_valid_node = keyinfo->env_valid_node->next; + while (tail_valid_node) { + tail_valid_node->phy_page_addr += i; + tail_valid_node = tail_valid_node->next; + } + + if ((keyinfo->env_valid_node->phy_page_addr + + i) > pages_per_blk) { + + env_free_node = alloc_fn(); + if (env_free_node == NULL) + return -ENOMEM; + + env_free_node->phy_blk_addr = + keyinfo->env_valid_node->phy_blk_addr; + env_free_node->ec = keyinfo->env_valid_node->ec; + key_tmp_node = keyinfo->env_free_node; + if (keyinfo->env_free_node == NULL) + keyinfo->env_free_node = env_free_node; + else { + while (key_tmp_node->next != NULL) + key_tmp_node = key_tmp_node->next; + key_tmp_node->next = env_free_node; + } + tail_valid_node = keyinfo->env_valid_node->next; + while (tail_valid_node) { + env_free_node = alloc_fn(); + if (env_free_node == NULL) + return -ENOMEM; + + env_free_node->phy_blk_addr = + tail_valid_node->phy_blk_addr; + env_free_node->ec = tail_valid_node->ec; + keyinfo->env_valid_node->next = + tail_valid_node->next; + kfree(tail_valid_node); + tail_valid_node = + keyinfo->env_valid_node->next; + + key_tmp_node = keyinfo->env_free_node; + while (key_tmp_node->next != NULL) + key_tmp_node = key_tmp_node->next; + key_tmp_node->next = env_free_node; + } + + key_tmp_node = keyinfo->env_free_node; + keyinfo->env_valid_node->phy_blk_addr = + key_tmp_node->phy_blk_addr; + keyinfo->env_valid_node->phy_page_addr = 0; + keyinfo->env_valid_node->ec = key_tmp_node->ec; + keyinfo->env_valid_node->timestamp += 1; + keyinfo->env_valid_node->next = NULL; + keyinfo->env_free_node = key_tmp_node->next; + kfree(key_tmp_node); + + group_block_count++; + tail_valid_node = keyinfo->env_valid_node; + key_tmp_node = keyinfo->env_free_node; + while (key_tmp_node && + group_block_count < group_max_block) { + tmp_valid_node = + kzalloc(sizeof(struct env_valid_node_t), + GFP_KERNEL); + if (tmp_valid_node == NULL) + return -ENOMEM; + + tmp_valid_node->ec = key_tmp_node->ec; + tmp_valid_node->phy_blk_addr = + key_tmp_node->phy_blk_addr; + tmp_valid_node->phy_page_addr = 0; + tmp_valid_node->timestamp += 1; + tmp_valid_node->next = NULL; + keyinfo->env_free_node = key_tmp_node->next; + kfree(key_tmp_node); + key_tmp_node = keyinfo->env_free_node; + while (tail_valid_node->next != NULL) + tail_valid_node = tail_valid_node->next; + tail_valid_node->next = tmp_valid_node; + group_block_count++; + } + } + } else { + + key_tmp_node = keyinfo->env_free_node; + keyinfo->env_valid_node->phy_blk_addr = + key_tmp_node->phy_blk_addr; + keyinfo->env_valid_node->phy_page_addr = 0; + keyinfo->env_valid_node->ec = key_tmp_node->ec; + keyinfo->env_valid_node->timestamp += 1; + keyinfo->env_valid_node->next = NULL; + keyinfo->env_free_node = key_tmp_node->next; + kfree(key_tmp_node); + group_block_count++; + tail_valid_node = keyinfo->env_valid_node; + key_tmp_node = keyinfo->env_free_node; + while (key_tmp_node && + group_block_count < group_max_block) { + tmp_valid_node = + kzalloc(sizeof(struct env_valid_node_t), + GFP_KERNEL); + tmp_valid_node->ec = key_tmp_node->ec; + tmp_valid_node->phy_blk_addr = + key_tmp_node->phy_blk_addr; + tmp_valid_node->phy_page_addr = 0; + tmp_valid_node->timestamp += 1; + tmp_valid_node->next = NULL; + keyinfo->env_free_node = key_tmp_node->next; + kfree(key_tmp_node); + key_tmp_node = keyinfo->env_free_node; + while 
(tail_valid_node->next != NULL) + tail_valid_node = tail_valid_node->next; + tail_valid_node->next = tmp_valid_node; + group_block_count++; + } + } + + tail_valid_node = keyinfo->env_valid_node; + while (tail_valid_node != NULL) { + addr = tail_valid_node->phy_blk_addr; + addr *= mtd->erasesize; + addr += tail_valid_node->phy_page_addr * + mtd->writesize; + pr_info( + "write:addr:0x%llx,phy_blk_addr:%d,phy_page_addr:%d,%s:%d\n", + addr, + tail_valid_node->phy_blk_addr, + tail_valid_node->phy_page_addr, + __func__, __LINE__); + + if (tail_valid_node->phy_page_addr == 0) { + memset(&aml_key_erase_info, 0, + sizeof(struct erase_info)); + aml_key_erase_info.mtd = mtd; + aml_key_erase_info.addr = addr; + aml_key_erase_info.len = mtd->erasesize; + error = mtd->_erase(mtd, &aml_key_erase_info); + if (error) { + pr_err("key free blk erase failed %d\n", error); + mtd->_block_markbad(mtd, addr); + return error; + } + tail_valid_node->ec++; + } + nand_bbt_info = &keyinfo->nand_bbt_info; + if ((!memcmp(nand_bbt_info->bbt_head_magic, + BBT_HEAD_MAGIC, 4)) && + (!memcmp(nand_bbt_info->bbt_tail_magic, + BBT_TAIL_MAGIC, 4))) { + + memcpy(key_ptr->data + default_keyironment_size, + keyinfo->nand_bbt_info.bbt_head_magic, + sizeof(struct aml_nand_bbt_info)); + key_ptr->crc = + (crc32((0 ^ 0xffffffffL), + key_ptr->data, KEYSIZE) ^ 0xffffffffL); + } + + if (aml_nand_write_key(mtd, + addr, (u_char *) key_ptr)) { + pr_err("update nand key FAILED!\n"); + return 1; + } + tail_valid_node = tail_valid_node->next; + } + + return error; +} + +static struct env_free_node_t *get_free_tail(struct env_free_node_t *head) +{ + struct env_free_node_t *fn = head; + + while (fn->next != NULL) + fn = fn->next; + return fn; +} + +static int aml_nand_key_init(struct mtd_info *mtd) +{ + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct nand_chip *chip = &aml_chip->chip; + struct env_oobinfo_t *key_oobinfo; + /* free_node; */ + struct env_free_node_t *fn; + /* tmp_node; */ + struct env_free_node_t *tn; + struct env_free_node_t *key_prev_node; + struct aml_nandkey_info_t *k_info; + #ifdef NAND_KEY_SAVE_MULTI_BLOCK + struct env_valid_node_t *env_valid_node; + struct env_valid_node_t *tmp_valid_node; + struct env_free_node_t *multi_free_node; + struct env_free_node_t *free_tmp_node; + int have_env_free_node_flag; + #endif + struct mtd_oob_region aml_oob_region; + int error = 0, start_blk, key_blk, i; + int pages_per_blk, bad_blk_cnt = 0; + int max_key_blk, phys_erase_shift; + uint64_t offset; + uint8_t *data_buf; + struct mtd_oob_ops aml_oob_ops; + uint8_t key_oob_buf[sizeof(struct env_oobinfo_t)]; + int remain_block = 0; + int remain_start_block; + int remain_total_blk; + int key_end; + uint32_t env_node_size; + + data_buf = kzalloc(mtd->writesize, GFP_KERNEL); + if (data_buf == NULL) + return -ENOMEM; + + aml_chip->aml_nandkey_info = + kzalloc(sizeof(struct aml_nandkey_info_t), GFP_KERNEL); + + if (aml_chip->aml_nandkey_info == NULL) { + kfree(data_buf); + data_buf = NULL; + return -ENOMEM; + } + env_node_size = sizeof(struct env_free_node_t); + k_info = aml_chip->aml_nandkey_info; + k_info->env_init = 0; + k_info->mtd = mtd; + k_info->env_valid_node = + kzalloc(sizeof(struct env_valid_node_t), GFP_KERNEL); + if (k_info->env_valid_node == NULL) { + kfree(data_buf); + data_buf = NULL; + kfree(k_info); + k_info = NULL; + return -ENOMEM; + } + k_info->env_valid_node->phy_blk_addr = -1; + + phys_erase_shift = fls(mtd->erasesize) - 1; + max_key_blk = (NAND_MINIKEY_PART_SIZE >> phys_erase_shift); + if (max_key_blk < 
NAND_MINIKEY_PART_BLOCKNUM) + max_key_blk = NAND_MINIKEY_PART_BLOCKNUM; + +#ifdef NEW_NAND_SUPPORT + if ((aml_chip->new_nand_info.type) + && (aml_chip->new_nand_info.type < 10)) + offset += RETRY_NAND_BLK_NUM * mtd->erasesize; +#endif + + /* + *start_blk = (int)(offset >> phys_erase_shift); + *total_blk = (int)(mtd->size >> phys_erase_shift); + *aml_chip->aml_nandkey_info->start_block=start_blk; + *pr_info("start_blk=%d\n",aml_chip->aml_nandkey_info->start_block); + *aml_chip->aml_nandkey_info->end_block=start_blk; + */ + pages_per_blk = (1 << + (chip->phys_erase_shift - chip->page_shift)); + key_oobinfo = (struct env_oobinfo_t *)key_oob_buf; + /* + *if ((default_keyironment_size + * + sizeof(struct aml_nand_bbt_info)) > KEYSIZE) + *total_blk = start_blk + max_key_blk; + */ + +#define REMAIN_TAIL_BLOCK_NUM 8 + offset = mtd->size - mtd->erasesize; + remain_start_block = + (int)(offset >> phys_erase_shift); + remain_total_blk = REMAIN_TAIL_BLOCK_NUM; + k_info->start_block = remain_start_block; + k_info->end_block = remain_start_block; + bad_blk_cnt = 0; + do { + offset = mtd->erasesize; + offset *= remain_start_block; + error = mtd->_block_isbad(mtd, offset); + if (error) { + k_info->nand_bbt_info.nand_bbt[bad_blk_cnt++] + = remain_start_block; + if (bad_blk_cnt >= MAX_BAD_BLK_NUM) { + pr_err("bad block too much,%s\n", __func__); + return -ENOMEM; + } + k_info->start_block--; + remain_start_block--; + continue; + } + remain_start_block--; + } while (++remain_block < remain_total_blk); + k_info->start_block -= (remain_block - 1); + pr_info( + "key start_blk=%d,end_blk=%d,%s:%d\n", + k_info->start_block, + k_info->end_block, __func__, __LINE__); + + key_blk = 0; + start_blk = k_info->start_block; + + error = mtd->ooblayout->free(mtd, 0, + (struct mtd_oob_region *)&aml_oob_region); + if (error != 0) { + pr_err(" oob free failed: %s() %d\n", + __func__, __LINE__); + return -ERANGE; + } + key_end = remain_total_blk + k_info->start_block; + do { + offset = mtd->erasesize; + offset *= start_blk; + error = mtd->_block_isbad(mtd, offset); + if (error) { + start_blk++; + continue; + } + aml_oob_ops.mode = MTD_OPS_AUTO_OOB; + aml_oob_ops.len = mtd->writesize; + aml_oob_ops.ooblen = sizeof(struct env_oobinfo_t); + aml_oob_ops.ooboffs = aml_oob_region.offset; + /* aml_oob_ops.ooboffs = mtd->ecclayout->oobfree[0].offset; */ + aml_oob_ops.datbuf = data_buf; + aml_oob_ops.oobbuf = key_oob_buf; + memset((uint8_t *)aml_oob_ops.datbuf, + 0x0, mtd->writesize); + memset((uint8_t *)aml_oob_ops.oobbuf, + 0x0, aml_oob_ops.ooblen); + + error = mtd->_read_oob(mtd, offset, &aml_oob_ops); + if ((error != 0) && (error != -EUCLEAN)) { + pr_err( + "blk check good but read failed: %llx, %d\n", + (uint64_t)offset, error); + continue; + } + + k_info->env_init = 1; + if (!memcmp(key_oobinfo->name, ENV_KEY_MAGIC, 4)) { + k_info->env_valid = 1; + if (k_info->env_valid_node->phy_blk_addr >= 0) { + fn = alloc_fn(); + if (fn == NULL) + return -ENOMEM; + + fn->dirty_flag = 1; + have_env_free_node_flag = 0; + if (key_oobinfo->timestamp > + k_info->env_valid_node->timestamp) { + + fn->phy_blk_addr = + k_info->env_valid_node->phy_blk_addr; + fn->ec = + k_info->env_valid_node->ec; + fn->next = NULL; + have_env_free_node_flag = 1; + + tmp_valid_node = + k_info->env_valid_node->next; + while (tmp_valid_node != NULL) { + k_info->env_valid_node->next = + tmp_valid_node->next; + multi_free_node = + kzalloc(env_node_size, + GFP_KERNEL); + multi_free_node->phy_blk_addr = + tmp_valid_node->phy_blk_addr; + multi_free_node->ec = + 
tmp_valid_node->ec; + kfree(tmp_valid_node); + multi_free_node->dirty_flag = 1; + free_tmp_node = fn; + free_tmp_node = + get_free_tail(free_tmp_node); + free_tmp_node->next = + multi_free_node; + tmp_valid_node = + k_info->env_valid_node->next; + } + k_info->env_valid_node->phy_blk_addr = + start_blk; + k_info->env_valid_node->phy_page_addr + = 0; + k_info->env_valid_node->ec = + key_oobinfo->ec; + k_info->env_valid_node->timestamp = + key_oobinfo->timestamp; + k_info->env_valid_node->next = NULL; + + } else if (key_oobinfo->timestamp == + k_info->env_valid_node->timestamp) { + tmp_valid_node = + k_info->env_valid_node; + env_valid_node = + kzalloc(sizeof(struct env_valid_node_t), + GFP_KERNEL); + if (env_valid_node == NULL) + return -ENOMEM; + env_valid_node->phy_blk_addr + = start_blk; + env_valid_node->phy_page_addr = 0; + env_valid_node->timestamp = + key_oobinfo->timestamp; + env_valid_node->ec = key_oobinfo->ec; + while (tmp_valid_node->next != NULL) { + tmp_valid_node = + tmp_valid_node->next; + } + tmp_valid_node->next = env_valid_node; + } else { + fn->phy_blk_addr + = start_blk; + fn->ec = key_oobinfo->ec; + have_env_free_node_flag = 1; + } + + if (have_env_free_node_flag) { + if (k_info->env_free_node == NULL) + k_info->env_free_node = fn; + else { + tn = + k_info->env_free_node; + tn = get_free_tail(tn); + tn->next = fn; + } + } else { + kfree(fn); + fn = NULL; + } + } else { + k_info->env_valid_node->phy_blk_addr = + start_blk; + k_info->env_valid_node->phy_page_addr = 0; + k_info->env_valid_node->ec = + key_oobinfo->ec; + k_info->env_valid_node->timestamp = + key_oobinfo->timestamp; + } + } else if (key_blk < max_key_blk) { + fn = alloc_fn(); + if (fn == NULL) + return -ENOMEM; + + fn->phy_blk_addr = start_blk; + fn->ec = key_oobinfo->ec; + if (k_info->env_free_node == NULL) + k_info->env_free_node = fn; + else { + tn = k_info->env_free_node; + key_prev_node = tn; + while (tn != NULL) { + if (tn->dirty_flag == 1) + break; + key_prev_node = tn; + tn = tn->next; + } + if (key_prev_node == tn) { + fn->next = tn; + k_info->env_free_node = fn; + } else { + key_prev_node->next = fn; + fn->next = tn; + } + } + } + key_blk++; + if ((key_blk >= max_key_blk) && (k_info->env_valid == 1)) + break; + + } while ((++start_blk) < key_end); + if (start_blk >= key_end) { + memcpy(k_info->nand_bbt_info.bbt_head_magic, + BBT_HEAD_MAGIC, 4); + memcpy(k_info->nand_bbt_info.bbt_tail_magic, + BBT_TAIL_MAGIC, 4); + } + + if (k_info->env_valid == 1) { + + aml_oob_ops.mode = MTD_OPS_AUTO_OOB; + aml_oob_ops.len = mtd->writesize; + aml_oob_ops.ooblen = sizeof(struct env_oobinfo_t); + aml_oob_ops.ooboffs = aml_oob_region.offset; + aml_oob_ops.datbuf = data_buf; + aml_oob_ops.oobbuf = key_oob_buf; + + for (i = 0; i < pages_per_blk; i++) { + + memset((uint8_t *)aml_oob_ops.datbuf, + 0x0, mtd->writesize); + memset((uint8_t *)aml_oob_ops.oobbuf, + 0x0, aml_oob_ops.ooblen); + + offset = k_info->env_valid_node->phy_blk_addr; + offset *= mtd->erasesize; + offset += i * mtd->writesize; + error = mtd->_read_oob(mtd, offset, &aml_oob_ops); + if ((error != 0) && (error != -EUCLEAN)) { + pr_err( + "read failed: %llx, %d\n", + (uint64_t)offset, error); + continue; + } + + #ifdef NAND_KEY_SAVE_MULTI_BLOCK + if (!memcmp(key_oobinfo->name, ENV_KEY_MAGIC, 4)) { + k_info->env_valid_node->phy_page_addr = i; + tmp_valid_node = k_info->env_valid_node->next; + while (tmp_valid_node != NULL) { + tmp_valid_node->phy_page_addr = i; + tmp_valid_node = tmp_valid_node->next; + } + } else + break; + #else + if 
(!memcmp(key_oobinfo->name, ENV_KEY_MAGIC, 4)) + k_info->env_valid_node->phy_page_addr = i; + else + break; + #endif + } + } + #ifdef NAND_KEY_SAVE_MULTI_BLOCK + if ((mtd->writesize < CONFIG_KEYSIZE) + && (k_info->env_valid == 1)) { + i = (CONFIG_KEYSIZE + mtd->writesize - 1) / mtd->writesize; + k_info->env_valid_node->phy_page_addr -= (i - 1); + + tmp_valid_node = k_info->env_valid_node->next; + while (tmp_valid_node != NULL) { + tmp_valid_node->phy_page_addr -= (i - 1); + tmp_valid_node = tmp_valid_node->next; + } + } + #else + if ((mtd->writesize < CONFIG_KEYSIZE) + && (k_info->env_valid == 1)) { + i = (CONFIG_KEYSIZE + mtd->writesize - 1) / mtd->writesize; + k_info->env_valid_node->phy_page_addr -= (i - 1); + } + #endif + + offset = + k_info->env_valid_node->phy_blk_addr; + offset *= mtd->erasesize; + offset += + k_info->env_valid_node->phy_page_addr + * mtd->writesize; + if (k_info->env_valid_node->phy_blk_addr < 0) + pr_err("aml nand key not have valid addr: not wrote\n"); + else + pr_info("aml nand key valid addr: %llx\n", offset); + #ifdef NAND_KEY_SAVE_MULTI_BLOCK + tmp_valid_node = k_info->env_valid_node->next; + while (tmp_valid_node != NULL) { + offset = tmp_valid_node->phy_blk_addr; + offset *= mtd->erasesize; + offset += tmp_valid_node->phy_page_addr * mtd->writesize; + pr_info("aml nand key valid addr: %llx\n", (uint64_t)offset); + tmp_valid_node = tmp_valid_node->next; + } + #endif + + pr_info( + "CONFIG_KEYSIZE=%d;KEYSIZE=%d;bbt=%d;default_keyironment_size=%d\n", + CONFIG_KEYSIZE, KEYSIZE, + sizeof(struct aml_nand_bbt_info), + default_keyironment_size); + kfree(data_buf); + + return 0; +} + + +static int aml_nand_key_check(struct mtd_info *mtd) +{ + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct aml_nand_platform *plat = aml_chip->platform; + struct platform_nand_chip *chip = + &plat->platform_nand_data.chip; + struct aml_nand_bbt_info *nand_bbt_info; + struct aml_nandkey_info_t *k_info; + struct mesonkey_t *key_ptr; + int error = 0, start_blk, total_blk, update_key_flag = 0; + int i, j, nr, phys_erase_shift; + uint64_t offset; + + chip = chip; + nr = 0; + error = aml_nand_key_init(mtd); + if (error) + return error; + key_ptr = kzalloc(sizeof(struct mesonkey_t), GFP_KERNEL); + if (key_ptr == NULL) + return -ENOMEM; + k_info = aml_chip->aml_nandkey_info; + + if (k_info->env_valid == 1) + pr_info("%s() %d, inited\n", __func__, __LINE__); + else { + update_key_flag = 1; + nand_bbt_info = (struct aml_nand_bbt_info *) + (key_ptr->data + default_keyironment_size); + + memcpy(nand_bbt_info->nand_bbt, + k_info->nand_bbt_info.nand_bbt, + MAX_BAD_BLK_NUM * sizeof(int16_t)); + memcpy(nand_bbt_info->bbt_head_magic, + BBT_HEAD_MAGIC, 4); + memcpy(nand_bbt_info->bbt_tail_magic, + BBT_TAIL_MAGIC, 4); + phys_erase_shift = fls(mtd->erasesize) - 1; + offset = k_info->start_block; + offset *= mtd->erasesize; + + start_blk = (int)(offset >> phys_erase_shift); + total_blk = (int)(mtd->size >> phys_erase_shift); + for (i = start_blk; i < total_blk; i++) { + aml_chip->block_status[i] = NAND_BLOCK_GOOD; + for (j = 0; j < MAX_BAD_BLK_NUM; j++) { + if (nand_bbt_info->nand_bbt[j] == i) { + aml_chip->block_status[i] = + NAND_BLOCK_BAD; + break; + } + } + } + memcpy(k_info->nand_bbt_info.bbt_head_magic, + key_ptr->data + default_keyironment_size, + sizeof(struct aml_nand_bbt_info)); + } + + + if (update_key_flag) { + error = aml_nand_save_key(mtd, (u_char *)key_ptr); + if (error) { + pr_err("nand key save failed: %d\n", error); + goto exit; + } + } + +exit: + kfree(key_ptr); + return 
0; +} + +static struct mtd_info *nand_key_mtd; + +static int32_t nand_key_read(uint8_t *buf, + uint32_t len, uint32_t *actual_length) +{ + struct mesonkey_t *key_ptr = NULL; + int error = 0; + uint32_t *length; + + length = actual_length; + *length = 0; + if (len > default_keyironment_size) { + pr_err("key data len too much,%s\n", __func__); + return -EFAULT; + } + key_ptr = kzalloc(CONFIG_KEYSIZE, GFP_KERNEL); + if (key_ptr == NULL) + return -ENOMEM; + memset(key_ptr, 0, CONFIG_KEYSIZE); + error = aml_nand_get_key(nand_key_mtd, (u_char *)key_ptr); + if (error) { + pr_err("read key error,%s\n", __func__); + error = -EFAULT; + goto exit; + } + memcpy(buf, key_ptr->data + 0, len); + *length = len; +exit: + kfree(key_ptr); + return 0; +} + +static int32_t nand_key_write(uint8_t *buf, + uint32_t len, uint32_t *actual_length) +{ + struct mesonkey_t *key_ptr = NULL; + int error = 0; + uint32_t *length; + + length = actual_length; + *length = 0; + if (len > default_keyironment_size) { + pr_err("key data len error,%s\n", __func__); + return -EFAULT; + } + key_ptr = kzalloc(CONFIG_KEYSIZE, GFP_KERNEL); + if (key_ptr == NULL) + return -ENOMEM; + memset(key_ptr, 0, CONFIG_KEYSIZE); + memcpy(key_ptr->data + 0, buf, len); + + error = aml_nand_save_key(nand_key_mtd, (u_char *)key_ptr); + if (error) { + pr_err("save key error,%s\n", __func__); + error = -EFAULT; + goto exit; + } + *length = len; + +exit: + kfree(key_ptr); + return error; +} +int aml_key_init(struct aml_nand_chip *aml_chip) +{ + int err = 0; + + pr_info("nand key: nand_key_probe.\n"); + err = aml_nand_key_check(aml_chip->mtd); + if (err) + pr_err("invalid nand key\n"); + + nand_key_mtd = aml_chip->mtd; + + storage_ops_read(nand_key_read); + storage_ops_write(nand_key_write); + pr_info("nand key init ok! %s()%d\n", __func__, __LINE__); + + return err; +} diff --git a/drivers/amlogic/mtd_meson8b/secure_storage.c b/drivers/amlogic/mtd_meson8b/secure_storage.c new file mode 100644 index 0000000..7c54421 --- /dev/null +++ b/drivers/amlogic/mtd_meson8b/secure_storage.c @@ -0,0 +1,743 @@ +/* + * drivers/amlogic/mtd_meson8b/secure_storage.c + * + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include "aml_mtd.h" +#include "env_old.h" + +#define CONFIG_SECURE_SIZE (0x10000*2)/* 128k */ +#define SECURE_SIZE (CONFIG_SECURE_SIZE - (sizeof(uint32_t))) +#define SECURE_STORE_MAGIC (0x6365736e) +#define REMAIN_BLOCK_NUM 4 +#define NAND_SECURE_BLK 2 + + + +struct secure_t { + uint32_t crc; /* CRC32 over data bytes */ + uint8_t data[SECURE_SIZE]; /* Environment data */ +}; + +struct secure_oobinfo_t { + int32_t name; + uint32_t timestamp; +}; + +struct mtd_info *nand_secure_mtd; +static int aml_nand_read_secure(struct mtd_info *mtd, + loff_t offset, uint8_t *buf) +{ + struct secure_oobinfo_t *secure_oobinfo; + struct mtd_oob_ops *aml_oob_ops = NULL; + struct mtd_oob_region aml_oob_region; + int error = 0, err; + loff_t addr = 0, len; + size_t amount_loaded = 0; + uint8_t *data_buf = NULL; + uint8_t secure_oob_buf[sizeof(struct secure_oobinfo_t)]; + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct aml_nandsecure_info_t *s_info; + + s_info = aml_chip->aml_nandsecure_info; + if (!s_info->secure_valid) + return 1; + err = mtd->ooblayout->free(mtd, 0, + (struct mtd_oob_region *)&aml_oob_region); + if (err != 0) { + pr_err("read oob free failed :func : %s,line : %d\n", + __func__, __LINE__); + err = -ENOMEM; + goto exit; + } + + addr = s_info->secure_valid_node->phy_blk_addr; + addr *= mtd->erasesize; + addr += s_info->secure_valid_node->phy_page_addr + * mtd->writesize; + pr_err("%s(): valid addr: %llx at block %d page %d\n", + __func__, (uint64_t)addr, + s_info->secure_valid_node->phy_blk_addr, + s_info->secure_valid_node->phy_page_addr); + + data_buf = kzalloc(mtd->writesize, GFP_KERNEL); + if (data_buf == NULL) { + err = -ENOMEM; + goto exit; + } + + aml_oob_ops = kzalloc(sizeof(struct mtd_oob_ops), GFP_KERNEL); + if (aml_oob_ops == NULL) { + err = -ENOMEM; + goto exit; + } + + secure_oobinfo = (struct secure_oobinfo_t *)secure_oob_buf; + while (amount_loaded < CONFIG_SECURE_SIZE) { + aml_oob_ops->mode = MTD_OPS_AUTO_OOB; + aml_oob_ops->len = mtd->writesize; + aml_oob_ops->ooblen = sizeof(struct secure_oobinfo_t); + aml_oob_ops->ooboffs = aml_oob_region.offset; + aml_oob_ops->datbuf = data_buf; + aml_oob_ops->oobbuf = secure_oob_buf; + + memset((uint8_t *)aml_oob_ops->datbuf, 0x0, + mtd->writesize); + memset((uint8_t *)aml_oob_ops->oobbuf, 0x0, + aml_oob_ops->ooblen); + + error = mtd->_read_oob(mtd, addr, aml_oob_ops); + if ((error != 0) && (error != -EUCLEAN)) { + pr_err("blk check good but read failed: %llx, %d\n", + (uint64_t)addr, error); + err = -EIO; + goto exit; + } + + if (secure_oobinfo->name != SECURE_STORE_MAGIC) + pr_err("%s() invalid magic: %llx, magic = ox%x\n", + __func__, + (uint64_t)addr, secure_oobinfo->name); + + addr += mtd->writesize; + len = min(mtd->writesize, CONFIG_SECURE_SIZE - amount_loaded); + memcpy(buf + amount_loaded, data_buf, len); + amount_loaded += mtd->writesize; + } + if (amount_loaded < CONFIG_SECURE_SIZE) { + err = -EIO; + goto exit; + } + + kfree(data_buf); + kfree(aml_oob_ops); + return 0; + +exit: + kfree(aml_oob_ops); + aml_oob_ops = NULL; + kfree(data_buf); + data_buf = NULL; + return err; +} + +static int aml_nand_write_secure(struct mtd_info *mtd, + loff_t offset, uint8_t *buf) +{ + struct secure_oobinfo_t *secure_oobinfo; + struct mtd_oob_ops *aml_oob_ops = NULL; + struct mtd_oob_region aml_oob_region; + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + uint8_t secure_oob_buf[sizeof(struct secure_oobinfo_t)]; + size_t len, amount_saved = 0; + uint8_t *data_buf = NULL; + int error = 0, err; + loff_t addr = 
0; + + aml_oob_ops = kzalloc(sizeof(struct mtd_oob_ops), GFP_KERNEL); + if (aml_oob_ops == NULL) { + err = -ENOMEM; + goto exit; + } + data_buf = kzalloc(mtd->writesize, GFP_KERNEL); + if (data_buf == NULL) { + err = -ENOMEM; + goto exit; + } + err = mtd->ooblayout->free(mtd, 0, + (struct mtd_oob_region *)&aml_oob_region); + if (err != 0) { + pr_err("%s() %d: read oob failed\n", __func__, __LINE__); + err = -ENOMEM; + goto exit; + } + + addr = offset; + secure_oobinfo = (struct secure_oobinfo_t *)secure_oob_buf; + secure_oobinfo->name = SECURE_STORE_MAGIC; + secure_oobinfo->timestamp = + aml_chip->aml_nandsecure_info->secure_valid_node->timestamp; + + while (amount_saved < CONFIG_SECURE_SIZE) { + aml_oob_ops->mode = MTD_OPS_AUTO_OOB; + aml_oob_ops->len = mtd->writesize; + aml_oob_ops->ooblen = sizeof(struct secure_oobinfo_t); + aml_oob_ops->ooboffs = aml_oob_region.offset; + aml_oob_ops->datbuf = data_buf; + aml_oob_ops->oobbuf = secure_oob_buf; + memset((uint8_t *)aml_oob_ops->datbuf, 0x0, mtd->writesize); + len = min(mtd->writesize, CONFIG_SECURE_SIZE - amount_saved); + memcpy((uint8_t *)aml_oob_ops->datbuf, buf + amount_saved, len); + + error = mtd->_write_oob(mtd, addr, aml_oob_ops); + if (error) { + pr_err("write failed: %llx, %d\n", + (uint64_t)addr, error); + err = 1; + goto exit; + } + + addr += mtd->writesize; + amount_saved += mtd->writesize; + } + + if (amount_saved < CONFIG_SECURE_SIZE) { + err = 1; + goto exit; + } + + kfree(data_buf); + kfree(aml_oob_ops); + return 0; + +exit: + kfree(aml_oob_ops); + aml_oob_ops = NULL; + kfree(data_buf); + data_buf = NULL; + return err; +} + +/* + * alloc free node + */ +static struct env_free_node_t *alloc_fn(void) +{ + return kzalloc(sizeof(struct env_free_node_t), GFP_KERNEL); +} + +int aml_nand_save_secure(struct mtd_info *mtd, uint8_t *buf) +{ + struct env_free_node_t *fn = NULL; + struct env_free_node_t *tn; + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct erase_info *nand_erase_info; + int error = 0, pages_per_blk, i = 1; + int16_t blk; + loff_t addr = 0; + struct secure_t *secure_ptr = (struct secure_t *)buf; + struct aml_nandsecure_info_t *sinfo; + struct env_valid_node_t *svn; + + sinfo = aml_chip->aml_nandsecure_info; + if (!sinfo->secure_init) + return 1; + nand_erase_info = kzalloc(sizeof(struct erase_info), GFP_KERNEL); + if (nand_erase_info == NULL) { + pr_err("%s %d no mem for nand_erase_info\n", + __func__, __LINE__); + error = -ENOMEM; + goto exit; + } + + pages_per_blk = mtd->erasesize / mtd->writesize; + if ((mtd->writesize < CONFIG_SECURE_SIZE) + && (sinfo->secure_valid == 1)) + i = (CONFIG_SECURE_SIZE + mtd->writesize - 1) / mtd->writesize; + + svn = sinfo->secure_valid_node; + if (sinfo->secure_valid) { + svn->phy_page_addr += i; + /* if current valid node block is full, get a free one */ + if ((svn->phy_page_addr + i) > pages_per_blk) { + + fn = alloc_fn(); + if (fn == NULL) { + error = -ENOMEM; + goto exit; + } + tn = alloc_fn(); + if (tn == NULL) { + pr_err("%s %d no mem for secure_tmp_node\n", + __func__, __LINE__); + error = -ENOMEM; + goto exit; + } + #if 0 + /* fixme, bug here! 
*/ + tn = (struct env_free_node_t *)(svn); + + fn = sinfo->secure_free_node; + + svn->phy_blk_addr = fn->phy_blk_addr; + svn->phy_page_addr = 0; + svn->timestamp += 1; + sinfo->secure_free_node = tn; + #endif + blk = svn->phy_blk_addr; + fn = sinfo->secure_free_node; + svn->phy_blk_addr = fn->phy_blk_addr; + svn->phy_page_addr = 0; + svn->timestamp += 1; + sinfo->secure_free_node->phy_blk_addr = blk; + } + } else { + /* get a free node from free list */ + tn = sinfo->secure_free_node; + svn->phy_blk_addr = tn->phy_blk_addr; + svn->phy_page_addr = 0; + svn->timestamp += 1; + sinfo->secure_free_node = tn->next; + kfree(tn); + } + + addr = svn->phy_blk_addr; + addr *= mtd->erasesize; + addr += svn->phy_page_addr * mtd->writesize; + + if (svn->phy_page_addr == 0) { + + memset(nand_erase_info, 0, sizeof(struct erase_info)); + nand_erase_info->mtd = mtd; + nand_erase_info->addr = addr; + nand_erase_info->len = mtd->erasesize; + + aml_chip->key_protect = 1; + aml_chip->secure_protect = 1; + + error = mtd->_erase(mtd, nand_erase_info); + if (error) { + pr_err("secure free blk erase failed %d\n", error); + mtd->_block_markbad(mtd, addr); + goto exit; + } + aml_chip->secure_protect = 0; + aml_chip->key_protect = 0; + + } + + if (aml_nand_write_secure(mtd, addr, (uint8_t *) secure_ptr)) { + pr_err("nand secure info update FAILED!\n"); + error = 1; + goto exit; + } + pr_err("nand secure info save Ok\ns"); + kfree(nand_erase_info); + return error; + +exit: + kfree(nand_erase_info); + nand_erase_info = NULL; + kfree(fn); + fn = NULL; + return error; +} + + +static int aml_nand_secure_init(struct mtd_info *mtd) +{ + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct nand_chip *chip = &aml_chip->chip; + struct secure_oobinfo_t *secure_oobinfo; + /* free node */ + struct env_free_node_t *fn; + /* temp node */ + struct env_free_node_t *tn; + /* previous node */ + struct env_free_node_t *pn; + int error = 0, err, start_blk, tmp_blk; + int secure_blk, i, pages_per_blk; + int max_secure_blk, phys_erase_shift; + loff_t offset; + uint8_t *data_buf = NULL; + uint32_t remain_start_block, remain_tatol_block; + uint32_t remain_block, total_blk; + struct mtd_oob_ops *aml_oob_ops = NULL; + struct mtd_oob_region aml_oob_region; + uint8_t secure_oob_buf[sizeof(struct secure_oobinfo_t)]; + struct aml_nandsecure_info_t *sinfo; + struct env_valid_node_t *svn; + + err = mtd->ooblayout->free(mtd, 0, + (struct mtd_oob_region *)&aml_oob_region); + + if (err != 0) { + pr_err("%s() %d: read oob free failed\n", + __func__, __LINE__); + err = -ENOMEM; + goto exit; + } + + aml_oob_ops = kzalloc(sizeof(struct mtd_oob_ops), GFP_KERNEL); + if (aml_oob_ops == NULL) { + err = -ENOMEM; + goto exit; + } + data_buf = kzalloc(mtd->writesize, GFP_KERNEL); + if (data_buf == NULL) { + err = -ENOMEM; + goto exit; + } + aml_chip->aml_nandsecure_info + = kzalloc(sizeof(struct aml_nandsecure_info_t), GFP_KERNEL); + if (aml_chip->aml_nandsecure_info == NULL) { + err = -ENOMEM; + goto exit; + } + + sinfo = aml_chip->aml_nandsecure_info; + sinfo->mtd = mtd; + + sinfo->secure_valid_node + = kzalloc(sizeof(struct env_valid_node_t), GFP_KERNEL); + if (sinfo->secure_valid_node == NULL) { + pr_err("%s %d no mem\n", __func__, __LINE__); + err = -ENOMEM; + goto exit; + } + svn = sinfo->secure_valid_node; + svn->phy_blk_addr = -1; + + phys_erase_shift = fls(mtd->erasesize) - 1; + max_secure_blk = NAND_SECURE_BLK; + + offset = mtd->size - mtd->erasesize; + total_blk = (int)(offset >> phys_erase_shift); + + pages_per_blk = (1 << 
(chip->phys_erase_shift - chip->page_shift)); + secure_oobinfo = (struct secure_oobinfo_t *)secure_oob_buf; +#if 1 + remain_tatol_block = REMAIN_BLOCK_NUM; + remain_block = 0; + remain_start_block = aml_chip->aml_nandkey_info->start_block - 1; + sinfo->end_block = remain_start_block; + do { + offset = mtd->erasesize; + offset *= remain_start_block; + pr_err(">>>> off 0x%llx\n", offset); + error = mtd->_block_isbad(mtd, offset); + if (error == FACTORY_BAD_BLOCK_ERROR) { + remain_start_block--; + continue; + } + remain_start_block--; + } while (++remain_block < remain_tatol_block); + + sinfo->start_block = (int)(offset >> phys_erase_shift); + pr_err("%s,%d : secure start blk=%d end_blk=%d\n", + __func__, __LINE__, + sinfo->start_block, + sinfo->end_block); +#else + int bad_blk_cnt = 0; + + offset = mtd->size - mtd->erasesize; + /* if without keys, scan secureblocks @ the tails */ + remain_start_block = (int)(offset >> phys_erase_shift); + remain_block = 0; + remain_tatol_block = REMAIN_BLOCK_NUM; + sinfo->start_block = remain_start_block; + sinfo->end_block = remain_start_block; + bad_blk_cnt = 0; + do { + offset = mtd->erasesize; + offset *= remain_start_block; + error = mtd->_block_isbad(mtd, offset); + if (error == FACTORY_BAD_BLOCK_ERROR) { + sinfo->start_block--; + remain_start_block--; + continue; + } + remain_start_block--; + } while (++remain_block < remain_tatol_block); + sinfo->start_block -= (remain_block - 1); + pr_err("secure start_blk=%d,end_blk=%d,%s:%d\n", + sinfo->start_block, + sinfo->end_block, + __func__, __LINE__); +#endif + + tmp_blk = start_blk = sinfo->start_block; + secure_blk = 0; + do { + + offset = mtd->erasesize; + offset *= start_blk; + error = mtd->_block_isbad(mtd, offset); + if (error) + continue; + aml_oob_ops->mode = MTD_OPS_AUTO_OOB; + aml_oob_ops->len = mtd->writesize; + aml_oob_ops->ooblen = sizeof(struct secure_oobinfo_t); + aml_oob_ops->ooboffs = aml_oob_region.offset; + aml_oob_ops->datbuf = data_buf; + aml_oob_ops->oobbuf = secure_oob_buf; + + memset((uint8_t *)aml_oob_ops->datbuf, + 0x0, mtd->writesize); + memset((uint8_t *)aml_oob_ops->oobbuf, + 0x0, aml_oob_ops->ooblen); + + error = mtd->_read_oob(mtd, offset, aml_oob_ops); + if ((error != 0) && (error != -EUCLEAN)) { + pr_err("blk check good but read failed: %llx, %d\n", + (uint64_t)offset, error); + continue; + } + + sinfo->secure_init = 1; + if ((secure_oobinfo->name + == SECURE_STORE_MAGIC)) { + sinfo->secure_valid = 1; + if (svn->phy_blk_addr >= 0) { + fn = alloc_fn(); + if (fn == NULL) { + pr_err("%s %d no mem for secure_free_node\n", + __func__, __LINE__); + err = -ENOMEM; + goto exit; + } + fn->dirty_flag = 1; + if (secure_oobinfo->timestamp + > svn->timestamp) { + fn->phy_blk_addr + = svn->phy_blk_addr; + svn->phy_blk_addr + = start_blk; + svn->phy_page_addr = 0; + svn->timestamp + = secure_oobinfo->timestamp; + } else + fn->phy_blk_addr = start_blk; + if (sinfo->secure_free_node == NULL) + sinfo->secure_free_node = fn; + else { + tn = sinfo->secure_free_node; + while (tn->next != NULL) + tn = tn->next; + tn->next = fn; + } + } else { + svn->phy_blk_addr = start_blk; + svn->phy_page_addr = 0; + svn->timestamp = secure_oobinfo->timestamp; + } + } else if (secure_blk < max_secure_blk) { + fn = alloc_fn(); + if (fn == NULL) { + err = -ENOMEM; + goto exit; + } + fn->phy_blk_addr = start_blk; + if (sinfo->secure_free_node == NULL) { + sinfo->secure_free_node = fn; + } else { + tn = sinfo->secure_free_node; + pn = tn; + while (tn != NULL) { + if (tn->dirty_flag == 1) + break; + pn = tn; + tn = 
tn->next; + } + if (pn == tn) { + fn->next = tn; + sinfo->secure_free_node = fn; + } else { + pn->next = fn; + fn->next = tn; + } + } + } + secure_blk++; + + if ((secure_blk >= max_secure_blk) + && (sinfo->secure_valid == 1)) + break; + } while ((++start_blk) <= sinfo->end_block); + + if (sinfo->secure_valid == 1) { + aml_oob_ops->mode = MTD_OPS_AUTO_OOB; + aml_oob_ops->len = mtd->writesize; + aml_oob_ops->ooblen = sizeof(struct secure_oobinfo_t); + aml_oob_ops->ooboffs = aml_oob_region.offset; + aml_oob_ops->datbuf = data_buf; + aml_oob_ops->oobbuf = secure_oob_buf; + + for (i = 0; i < pages_per_blk; i++) { + + memset((uint8_t *)aml_oob_ops->datbuf, + 0x0, mtd->writesize); + memset((uint8_t *)aml_oob_ops->oobbuf, + 0x0, aml_oob_ops->ooblen); + + offset = svn->phy_blk_addr; + offset *= mtd->erasesize; + offset += i * mtd->writesize; + error = mtd->_read_oob(mtd, offset, aml_oob_ops); + if ((error != 0) && (error != -EUCLEAN)) { + pr_err("blk check good but read failed: %llx, %d\n", + (uint64_t)offset, error); + continue; + } + + if (secure_oobinfo->name == SECURE_STORE_MAGIC) + svn->phy_page_addr = i; + else + break; + } + } + if ((mtd->writesize < CONFIG_SECURE_SIZE) + && (sinfo->secure_valid == 1)) { + i = (CONFIG_SECURE_SIZE + mtd->writesize - 1) / mtd->writesize; + svn->phy_page_addr -= (i - 1); + } + + pr_err("secure_valid_node->add =%d\n", + svn->phy_blk_addr); + if (sinfo->secure_free_node) + pr_err("secure_free_node->add =%d\n", + sinfo->secure_free_node->phy_blk_addr); + + offset = svn->phy_blk_addr; + offset *= mtd->erasesize; + offset += mtd->writesize + * svn->phy_page_addr; + pr_err("aml nand secure info valid addr: %llx\n", (uint64_t)offset); + pr_err("CONFIG_SECURE_SIZE=0x%x;\n", CONFIG_SECURE_SIZE); + + kfree(data_buf); + kfree(aml_oob_ops); + return 0; + +exit: + kfree(data_buf); + data_buf = NULL; + kfree(aml_oob_ops); + aml_oob_ops = NULL; + kfree(aml_chip->aml_nandsecure_info->secure_valid_node); + aml_chip->aml_nandsecure_info->secure_valid_node = NULL; + kfree(aml_chip->aml_nandsecure_info); + aml_chip->aml_nandsecure_info = NULL; + kfree(fn); + fn = NULL; + return err; + +} + +static int secure_info_check(struct mtd_info *mtd) +{ + struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd); + struct secure_t *secure_ptr; + int error = 0; + + error = aml_nand_secure_init(mtd); + if (error) + return error; + + secure_ptr = kzalloc(sizeof(struct secure_t), GFP_KERNEL); + if (secure_ptr == NULL) + return -ENOMEM; + /* default secure data set to a5; */ + memset(secure_ptr, 0xa5, sizeof(struct secure_t)); + + if (aml_chip->aml_nandsecure_info->secure_valid == 1) { + + goto exit; + } else { + pr_info("nand secure info save\n"); + error = aml_nand_save_secure(mtd, (uint8_t *)secure_ptr); + if (error) + pr_err("nand secure info save failed\n"); + } + +exit: + kfree(secure_ptr); + return 0; + +} + +int secure_storage_nand_read(char *buf, uint32_t len) +{ + int err, size; + uint8_t *storage_buf = NULL; + + if (nand_secure_mtd == NULL) { + pr_err("secure storage is init fail\n"); + return 1; + } + storage_buf = kzalloc(CONFIG_SECURE_SIZE, GFP_KERNEL); + if (storage_buf == NULL) + return -ENOMEM; + err = aml_nand_read_secure(nand_secure_mtd, 0, storage_buf); + if (err < 0) { + pr_err("%s:%d,read fail\n", __func__, __LINE__); + kfree(storage_buf); + return err; + } else if (err == 1) { + pr_err("%s:%d,no any key\n", __func__, __LINE__); + return 0; + } + + if (len > CONFIG_SECURE_SIZE) { + size = CONFIG_SECURE_SIZE; + pr_info("%s() %d: len %d, %d, act %d\n", + __func__, __LINE__, + 
len, CONFIG_SECURE_SIZE, + CONFIG_SECURE_SIZE); + } else + size = len; + + memcpy(buf, storage_buf, size); + kfree(storage_buf); + return 0; +} + +int secure_storage_nand_write(char *buf, uint32_t len) +{ + int err = 0, size; + uint8_t *storage_buf = NULL; + + if (nand_secure_mtd == NULL) { + pr_err("secure storage init fail\n"); + return 1; + } + storage_buf = kzalloc(CONFIG_SECURE_SIZE, GFP_KERNEL); + if (storage_buf == NULL) + return -ENOMEM; + + if (len > CONFIG_SECURE_SIZE) { + size = CONFIG_SECURE_SIZE; + pr_err("%s()%d: len:%x %x, act %x byte\n", + __func__, __LINE__, + len, CONFIG_SECURE_SIZE, + CONFIG_SECURE_SIZE); + } else + size = len; + memcpy(storage_buf, buf, size); + err = aml_nand_save_secure(nand_secure_mtd, storage_buf); + if (err) + pr_err("%s:%d,secure storage write fail\n", + __func__, __LINE__); + + kfree(storage_buf); + return err; +} + +int secure_device_init(struct mtd_info *mtd) +{ + int ret = 0; + + nand_secure_mtd = mtd; + pr_info("%s(): %d\n", __func__, __LINE__); + + ret = secure_info_check(mtd); + if (ret) + pr_err("invalid secure info\n"); + + return ret; + +} + -- 2.7.4
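
Note (not part of the commit): secure_storage.c exports secure_storage_nand_read() and secure_storage_nand_write() as the in-kernel entry points once secure_device_init() has been called from aml_nand_init(). Below is a minimal kernel-side sketch of a write/read-back round trip through these helpers; the caller name, buffer size and fill pattern are illustrative assumptions and are not taken from the patch.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/errno.h>

/* Prototypes as added by this patch in secure_storage.c. */
int secure_storage_nand_read(char *buf, uint32_t len);
int secure_storage_nand_write(char *buf, uint32_t len);

/* Hypothetical caller: write a small blob, then read it back. */
static int secure_blob_roundtrip_example(void)
{
	char *blob;
	int ret;

	/* 256 bytes, well below CONFIG_SECURE_SIZE (128 KiB). */
	blob = kzalloc(256, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	memset(blob, 0x5a, 256);
	/* Erases/programs the current secure block via aml_nand_save_secure(). */
	ret = secure_storage_nand_write(blob, 256);
	if (ret)
		goto out;

	memset(blob, 0, 256);
	/* Reads back from the valid node located by aml_nand_secure_init(). */
	ret = secure_storage_nand_read(blob, 256);
	if (ret)
		pr_err("secure storage read-back failed: %d\n", ret);
out:
	kfree(blob);
	return ret;
}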