// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/mtd/spinand.h>
#endif
/* SPI NAND index visible in MTD names */
static int spi_nand_idx;
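
/*
 * On devices with more than one plane per LUN, the plane number is encoded
 * in the column address used by the cache operations. This helper folds the
 * plane bits of the request position into the column; single-plane chips
 * are left untouched.
 */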
static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
	struct nand_device *nand = spinand_to_nand(spinand);
	if (nand->memorg.planes_per_lun < 2)
	/* The plane number is passed in the MSBs just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
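
/*
 * Feature register accessors: GET/SET FEATURE operations go through the
 * DMA-safe scratch buffer, since spi-mem expects DMA-able data buffers.
 */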
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	ret = spi_mem_exec_op(spinand->slave, &op);
	*val = *spinand->scratchbuf;
static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
static int spinand_read_status(struct spinand_device *spinand, u8 *status)
	return spinand_read_reg_op(spinand, REG_STATUS, status);
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
	struct nand_device *nand = spinand_to_nand(spinand);
	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
	*cfg = spinand->cfg_cache[spinand->cur_target];
static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
	struct nand_device *nand = spinand_to_nand(spinand);
	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
	if (spinand->cfg_cache[spinand->cur_target] == cfg)
	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	spinand->cfg_cache[spinand->cur_target] = cfg;
/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
	ret = spinand_get_cfg(spinand, &cfg);
	return spinand_set_cfg(spinand, cfg);
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
	struct nand_device *nand = spinand_to_nand(spinand);
	if (WARN_ON(target >= nand->memorg.ntargets))
	if (spinand->cur_target == target)
	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
	ret = spinand->select_target(spinand, target);
	spinand->cur_target = target;
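
/*
 * Cache the CFG register of every target once at probe time, so that later
 * spinand_get_cfg()/spinand_upd_cfg() calls can avoid a register read on
 * the bus when nothing has changed.
 */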
static int spinand_init_cfg_cache(struct spinand_device *spinand)
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
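
/*
 * The quad-enable (QE) bit only needs to be set when one of the selected
 * I/O templates actually transfers data on four lines; otherwise it is
 * cleared so the chip stays in standard SPI mode.
 */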
static int spinand_init_quad_enable(struct spinand_device *spinand)
	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
static int spinand_write_enable_op(struct spinand_device *spinand)
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
	return spi_mem_exec_op(spinand->slave, &op);
static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
	return spi_mem_exec_op(spinand->slave, &op);
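
/*
 * Transfer the page that was loaded into the chip's cache back into the
 * driver's bounce buffers, splitting the READ FROM CACHE access into
 * several chunks when the controller cannot handle a full page + OOB in
 * one go, then copy the requested data/OOB ranges to the user buffers.
 */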
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op.addr.val = column;
	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		ret = spi_mem_exec_op(spinand->slave, &op);
		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
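
/*
 * Mirror image of the read path: the user data/OOB is first assembled in
 * the 0xff-filled bounce buffers, then pushed to the chip's cache with the
 * write_cache/update_cache templates, split into chunks if the controller
 * limits the transfer size.
 */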
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		nbytes += nanddev_per_page_oobsize(nand);
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op = *spinand->op_templates.write_cache;
	op.addr.val = column;
	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * RANDOM LOAD CACHE ops.
	 */
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		ret = spi_mem_exec_op(spinand->slave, &op);
		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache content.
		 */
		column = op.addr.val;
		op = *spinand->op_templates.update_cache;
		op.addr.val = column;
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
	return spi_mem_exec_op(spinand->slave, &op);
static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
	return spi_mem_exec_op(spinand->slave, &op);
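
/*
 * Poll the status register until the chip reports ready or the timeout
 * expires, optionally handing the last status value back to the caller.
 */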
static int spinand_wait(struct spinand_device *spinand, u8 *s)
	unsigned long start, stop;
	start = get_timer(0);
		ret = spinand_read_status(spinand, &status);
		if (!(status & STATUS_BUSY))
	} while (get_timer(start) < stop);
	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	ret = spi_mem_exec_op(spinand->slave, &op);
	memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
static int spinand_reset_op(struct spinand_device *spinand)
	struct spi_mem_op op = SPINAND_RESET_OP;
	ret = spi_mem_exec_op(spinand->slave, &op);
	return spinand_wait(spinand, NULL);
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
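
/*
 * Translate the chip's ECC status bits into the MTD convention: 0 or the
 * number of corrected bitflips on success, -EBADMSG on an uncorrectable
 * error. Chips with a finer-grained status can override this through
 * eccinfo.get_status().
 */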
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
	struct nand_device *nand = spinand_to_nand(spinand);
	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;
	case STATUS_ECC_UNCOR_ERROR:
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
	ret = spinand_load_page_op(spinand, req);
	ret = spinand_wait(spinand, &status);
	ret = spinand_read_from_cache_op(spinand, req);
	return spinand_check_ecc_status(spinand, status);
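
/*
 * Program one page: WRITE ENABLE, fill the chip's cache, issue PROGRAM
 * EXECUTE and wait for completion, failing the write if the chip flags a
 * program failure in the status register.
 */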
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
	ret = spinand_write_enable_op(spinand);
	ret = spinand_write_to_cache_op(spinand, req);
	ret = spinand_program_op(spinand, req);
	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
	mutex_lock(&spinand->lock);
	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		ret = spinand_ecc_enable(spinand, enable_ecc);
		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
		if (ret == -EBADMSG) {
			mtd->ecc_stats.failed++;
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	mutex_unlock(&spinand->lock);
	if (ecc_failed && !ret)
	return ret ? ret : max_bitflips;
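
/*
 * Write path counterpart of spinand_mtd_read(): iterate over the requested
 * pages, select the right target and ECC mode, and program them one by one.
 */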
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
	mutex_lock(&spinand->lock);
	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		ret = spinand_ecc_enable(spinand, enable_ecc);
		ret = spinand_write_page(spinand, &iter.req);
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	mutex_unlock(&spinand->lock);
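
/*
 * Bad-block check used by the generic NAND core: a block is considered bad
 * when the first two OOB bytes of its first page, read with ECC disabled,
 * are not both 0xff.
 */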
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.oobbuf.in = spinand->oobbuf,
	memset(spinand->oobbuf, 0, 2);
	ret = spinand_select_target(spinand, pos->target);
	ret = spinand_read_page(spinand, &req, false);
	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.oobbuf.out = spinand->oobbuf,
	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	ret = spinand_write_enable_op(spinand);
	ret = spinand_erase_op(spinand, pos);
	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);
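
/*
 * Erase callbacks: spinand_erase() performs the actual WRITE ENABLE +
 * BLOCK ERASE sequence on one block, while spinand_mtd_erase() is the MTD
 * entry point that walks the requested range under the device lock.
 */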
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
	struct spinand_device *spinand = nand_to_spinand(nand);
	ret = spinand_select_target(spinand, pos->target);
	ret = spinand_write_enable_op(spinand);
	ret = spinand_erase_op(spinand, pos);
	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);
static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);
const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
static int spinand_manufacturer_detect(struct spinand_device *spinand)
	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
			spinand->manufacturer = spinand_manufacturers[i];
		} else if (ret < 0) {
static int spinand_manufacturer_init(struct spinand_device *spinand)
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);
static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
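
/*
 * Walk a table of op variants and return the first one the controller can
 * execute, making sure a full page + OOB can be transferred once
 * spi_mem_adjust_op_size() has been applied.
 */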
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
	struct nand_device *nand = spinand_to_nand(spinand);
	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (!spi_mem_supports_op(spinand->slave, &op))
			nbytes -= op.data.nbytes;
			return &variants->ops[i];
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID to match against the table entries
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
	struct nand_device *nand = spinand_to_nand(spinand);
	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;
		if (devid != info->devid)
		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;
		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		spinand->op_templates.read_cache = op;
		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		spinand->op_templates.write_cache = op;
		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;
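
/*
 * Reset the chip, read its ID and let each registered manufacturer driver
 * try to recognize it. Multi-die chips must provide a ->select_target()
 * hook, otherwise detection is rejected.
 */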
static int spinand_detect(struct spinand_device *spinand)
	struct nand_device *nand = spinand_to_nand(spinand);
	ret = spinand_reset_op(spinand);
	ret = spinand_read_id_op(spinand, spinand->id.data);
	spinand->id.len = SPINAND_MAX_ID_LEN;
	ret = spinand_manufacturer_detect(spinand);
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
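
/*
 * Default OOB layout used when the manufacturer driver does not provide
 * one: no ECC bytes are reserved and everything except the two bad-block
 * marker bytes is handed to the user.
 */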
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
	/* Reserve 2 bytes for the BBM. */
	region->length = 62;
static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
static int spinand_init(struct spinand_device *spinand)
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * the buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
	ret = spinand_detect(spinand);
	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
	ret = spinand_init_cfg_cache(spinand);
	ret = spinand_init_quad_enable(spinand);
	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	ret = spinand_manufacturer_init(spinand);
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
		goto err_manuf_cleanup;
	/*
	 * Right now, we don't support ECC, so let the whole OOB
	 * area be available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
	ret = mtd_ooblayout_count_freebytes(mtd);
		goto err_cleanup_nanddev;
	mtd->oobavail = ret;
err_cleanup_nanddev:
	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
static void spinand_cleanup(struct spinand_device *spinand)
	struct nand_device *nand = spinand_to_nand(spinand);
	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
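
/*
 * Driver-model probe entry point: name the MTD device "spi-nandN", attach
 * the SPI slave, run the common spinand_init() sequence and register the
 * resulting MTD device.
 */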
static int spinand_probe(struct udevice *dev)
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	mtd->name = malloc(20);
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif
	ret = spinand_init(spinand);
#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
		goto err_spinand_cleanup;
err_spinand_cleanup:
	spinand_cleanup(spinand);
#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	ret = mtd_device_unregister(mtd);
	spinand_cleanup(spinand);
static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
static struct spi_mem_driver spinand_drv = {
	.id_table = spinand_ids,
	.of_match_table = of_match_ptr(spinand_of_ids),
	.probe = spinand_probe,
	.remove = spinand_remove,
module_spi_mem_driver(spinand_drv);
MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */
static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
U_BOOT_DRIVER(spinand) = {
	.of_match = spinand_ids,
	.priv_auto = sizeof(struct spinand_device),
	.probe = spinand_probe,