1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2016-2017 Micron Technology, Inc.
6 * Peter Pan <peterpandong@micron.com>
7 * Boris Brezillon <boris.brezillon@bootlin.com>
10 #define pr_fmt(fmt) "spi-nand: " fmt
13 #include <linux/device.h>
14 #include <linux/jiffies.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mtd/spinand.h>
19 #include <linux/slab.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-mem.h>
27 #include <linux/mtd/spinand.h>
30 /* SPI NAND index visible in MTD names */
31 static int spi_nand_idx;
33 static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
34 const struct nand_page_io_req *req,
37 struct nand_device *nand = spinand_to_nand(spinand);
40 if (nand->memorg.planes_per_lun < 2)
43 /* The plane number is passed in MSB just above the column address */
44 shift = fls(nand->memorg.pagesize);
45 *column |= req->pos.plane << shift;
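/*
 * Worked example (illustrative): with a hypothetical 2048-byte page,
 * fls(2048) == 12, so plane 1 ends up in bit 12 of the column address,
 * i.e. *column |= 1 << 12 (0x1000).
 */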
48 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
50 struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
54 ret = spi_mem_exec_op(spinand->slave, &op);
58 *val = *spinand->scratchbuf;
62 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
64 struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
67 *spinand->scratchbuf = val;
68 return spi_mem_exec_op(spinand->slave, &op);
71 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
73 return spinand_read_reg_op(spinand, REG_STATUS, status);
76 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
78 struct nand_device *nand = spinand_to_nand(spinand);
80 if (WARN_ON(spinand->cur_target < 0 ||
81 spinand->cur_target >= nand->memorg.ntargets))
84 *cfg = spinand->cfg_cache[spinand->cur_target];
88 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
90 struct nand_device *nand = spinand_to_nand(spinand);
93 if (WARN_ON(spinand->cur_target < 0 ||
94 spinand->cur_target >= nand->memorg.ntargets))
97 if (spinand->cfg_cache[spinand->cur_target] == cfg)
100 ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
104 spinand->cfg_cache[spinand->cur_target] = cfg;
109 * spinand_upd_cfg() - Update the configuration register
110 * @spinand: the spinand device
111 * @mask: the mask encoding the bits to update in the config reg
112 * @val: the new value to apply
114 * Update the configuration register.
116 * Return: 0 on success, a negative error code otherwise.
118 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
123 ret = spinand_get_cfg(spinand, &cfg);
130 return spinand_set_cfg(spinand, cfg);
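/*
 * Typical usage is a read-modify-write of a single CFG field, e.g. toggling
 * the on-die ECC bit as spinand_ecc_enable() does below:
 *
 *	ret = spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
 *			      enable ? CFG_ECC_ENABLE : 0);
 *
 * Only the bits set in @mask are modified, and thanks to the per-target
 * config cache the register is not rewritten when the value is unchanged.
 */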
134 * spinand_select_target() - Select a specific NAND target/die
135 * @spinand: the spinand device
136 * @target: the target/die to select
138 * Select a new target/die. If the chip only has one die, this function is a NOOP.
140 * Return: 0 on success, a negative error code otherwise.
142 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
144 struct nand_device *nand = spinand_to_nand(spinand);
147 if (WARN_ON(target >= nand->memorg.ntargets))
150 if (spinand->cur_target == target)
153 if (nand->memorg.ntargets == 1) {
154 spinand->cur_target = target;
158 ret = spinand->select_target(spinand, target);
162 spinand->cur_target = target;
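/*
 * Typical usage (illustrative): per-die operations iterate over all targets
 * and select each one first, as spinand_init_cfg_cache() (just below) and
 * the block-unlock loop in spinand_init() do:
 *
 *	for (target = 0; target < nand->memorg.ntargets; target++) {
 *		ret = spinand_select_target(spinand, target);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */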
166 static int spinand_init_cfg_cache(struct spinand_device *spinand)
168 struct nand_device *nand = spinand_to_nand(spinand);
169 struct udevice *dev = spinand->slave->dev;
173 spinand->cfg_cache = devm_kzalloc(dev,
174 sizeof(*spinand->cfg_cache) *
175 nand->memorg.ntargets,
177 if (!spinand->cfg_cache)
180 for (target = 0; target < nand->memorg.ntargets; target++) {
181 ret = spinand_select_target(spinand, target);
186 * We use spinand_read_reg_op() instead of spinand_get_cfg()
187 * here to bypass the config cache.
189 ret = spinand_read_reg_op(spinand, REG_CFG,
190 &spinand->cfg_cache[target]);
198 static int spinand_init_quad_enable(struct spinand_device *spinand)
202 if (!(spinand->flags & SPINAND_HAS_QE_BIT))
205 if (spinand->op_templates.read_cache->data.buswidth == 4 ||
206 spinand->op_templates.write_cache->data.buswidth == 4 ||
207 spinand->op_templates.update_cache->data.buswidth == 4)
210 return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
211 enable ? CFG_QUAD_ENABLE : 0);
214 static int spinand_ecc_enable(struct spinand_device *spinand,
217 return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
218 enable ? CFG_ECC_ENABLE : 0);
221 static int spinand_write_enable_op(struct spinand_device *spinand)
223 struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
225 return spi_mem_exec_op(spinand->slave, &op);
228 static int spinand_load_page_op(struct spinand_device *spinand,
229 const struct nand_page_io_req *req)
231 struct nand_device *nand = spinand_to_nand(spinand);
232 unsigned int row = nanddev_pos_to_row(nand, &req->pos);
233 struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
235 return spi_mem_exec_op(spinand->slave, &op);
238 static int spinand_read_from_cache_op(struct spinand_device *spinand,
239 const struct nand_page_io_req *req)
241 struct spi_mem_op op = *spinand->op_templates.read_cache;
242 struct nand_device *nand = spinand_to_nand(spinand);
243 struct mtd_info *mtd = nanddev_to_mtd(nand);
244 struct nand_page_io_req adjreq = *req;
245 unsigned int nbytes = 0;
251 adjreq.datalen = nanddev_page_size(nand);
253 adjreq.databuf.in = spinand->databuf;
254 buf = spinand->databuf;
255 nbytes = adjreq.datalen;
259 adjreq.ooblen = nanddev_per_page_oobsize(nand);
261 adjreq.oobbuf.in = spinand->oobbuf;
262 nbytes += nanddev_per_page_oobsize(nand);
264 buf = spinand->oobbuf;
265 column = nanddev_page_size(nand);
269 spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
270 op.addr.val = column;
273 * Some controllers are limited in terms of max RX data size. In this
274 * case, just repeat the READ_CACHE operation after updating the
278 op.data.buf.in = buf;
279 op.data.nbytes = nbytes;
280 ret = spi_mem_adjust_op_size(spinand->slave, &op);
284 ret = spi_mem_exec_op(spinand->slave, &op);
288 buf += op.data.nbytes;
289 nbytes -= op.data.nbytes;
290 op.addr.val += op.data.nbytes;
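/*
 * Illustrative example: if a controller (hypothetically) capped a single
 * transfer at 512 bytes, a full 2048 + 64 byte page + OOB read would be
 * split into several READ_CACHE ops, with the buffer pointer, the remaining
 * byte count and the column address each advancing by op.data.nbytes per
 * iteration.
 */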
294 memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
298 if (req->mode == MTD_OPS_AUTO_OOB)
299 mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
304 memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
311 static int spinand_write_to_cache_op(struct spinand_device *spinand,
312 const struct nand_page_io_req *req)
314 struct spi_mem_op op = *spinand->op_templates.write_cache;
315 struct nand_device *nand = spinand_to_nand(spinand);
316 struct mtd_info *mtd = nanddev_to_mtd(nand);
317 struct nand_page_io_req adjreq = *req;
318 unsigned int nbytes = 0;
323 memset(spinand->databuf, 0xff,
324 nanddev_page_size(nand) +
325 nanddev_per_page_oobsize(nand));
328 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
331 adjreq.datalen = nanddev_page_size(nand);
332 adjreq.databuf.out = spinand->databuf;
333 nbytes = adjreq.datalen;
334 buf = spinand->databuf;
338 if (req->mode == MTD_OPS_AUTO_OOB)
339 mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
344 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
347 adjreq.ooblen = nanddev_per_page_oobsize(nand);
349 nbytes += nanddev_per_page_oobsize(nand);
351 buf = spinand->oobbuf;
352 column = nanddev_page_size(nand);
356 spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
358 op = *spinand->op_templates.write_cache;
359 op.addr.val = column;
362 * Some controllers are limited in terms of max TX data size. In this
363 * case, split the operation into one LOAD CACHE and one or more
367 op.data.buf.out = buf;
368 op.data.nbytes = nbytes;
370 ret = spi_mem_adjust_op_size(spinand->slave, &op);
374 ret = spi_mem_exec_op(spinand->slave, &op);
378 buf += op.data.nbytes;
379 nbytes -= op.data.nbytes;
380 op.addr.val += op.data.nbytes;
383 * We need to use the RANDOM LOAD CACHE operation if there's
384 * more than one iteration, because the LOAD operation resets
388 column = op.addr.val;
389 op = *spinand->op_templates.update_cache;
390 op.addr.val = column;
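/*
 * In other words (per the comment above): the first chunk is sent with the
 * write_cache (LOAD CACHE) template, and every subsequent chunk switches to
 * the update_cache (RANDOM LOAD CACHE) template at the updated column, so
 * the data already loaded into the cache is preserved.
 */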
397 static int spinand_program_op(struct spinand_device *spinand,
398 const struct nand_page_io_req *req)
400 struct nand_device *nand = spinand_to_nand(spinand);
401 unsigned int row = nanddev_pos_to_row(nand, &req->pos);
402 struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
404 return spi_mem_exec_op(spinand->slave, &op);
407 static int spinand_erase_op(struct spinand_device *spinand,
408 const struct nand_pos *pos)
410 struct nand_device *nand = &spinand->base;
411 unsigned int row = nanddev_pos_to_row(nand, pos);
412 struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
414 return spi_mem_exec_op(spinand->slave, &op);
417 static int spinand_wait(struct spinand_device *spinand, u8 *s)
419 unsigned long start, stop;
423 start = get_timer(0);
426 ret = spinand_read_status(spinand, &status);
430 if (!(status & STATUS_BUSY))
432 } while (get_timer(start) < stop);
435 * Extra read, just in case the STATUS_READY bit has changed
436 * since our last check
438 ret = spinand_read_status(spinand, &status);
446 return status & STATUS_BUSY ? -ETIMEDOUT : 0;
449 static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
451 struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
455 ret = spi_mem_exec_op(spinand->slave, &op);
457 memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
462 static int spinand_reset_op(struct spinand_device *spinand)
464 struct spi_mem_op op = SPINAND_RESET_OP;
467 ret = spi_mem_exec_op(spinand->slave, &op);
471 return spinand_wait(spinand, NULL);
474 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
476 return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
479 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
481 struct nand_device *nand = spinand_to_nand(spinand);
483 if (spinand->eccinfo.get_status)
484 return spinand->eccinfo.get_status(spinand, status);
486 switch (status & STATUS_ECC_MASK) {
487 case STATUS_ECC_NO_BITFLIPS:
490 case STATUS_ECC_HAS_BITFLIPS:
492 * We have no way to know exactly how many bitflips have been
493 * fixed, so let's return the maximum possible value so that
494 * wear-leveling layers move the data immediately.
496 return nand->eccreq.strength;
498 case STATUS_ECC_UNCOR_ERROR:
508 static int spinand_read_page(struct spinand_device *spinand,
509 const struct nand_page_io_req *req,
515 ret = spinand_load_page_op(spinand, req);
519 ret = spinand_wait(spinand, &status);
523 ret = spinand_read_from_cache_op(spinand, req);
530 return spinand_check_ecc_status(spinand, status);
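/*
 * The ECC status decoded by spinand_check_ecc_status() propagates as this
 * function's return value and is consumed by spinand_mtd_read() below: a
 * positive value is the (worst-case) number of corrected bitflips, feeding
 * mtd->ecc_stats.corrected and max_bitflips, while -EBADMSG flags an
 * uncorrectable page and bumps mtd->ecc_stats.failed.
 */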
533 static int spinand_write_page(struct spinand_device *spinand,
534 const struct nand_page_io_req *req)
539 ret = spinand_write_enable_op(spinand);
543 ret = spinand_write_to_cache_op(spinand, req);
547 ret = spinand_program_op(spinand, req);
551 ret = spinand_wait(spinand, &status);
552 if (!ret && (status & STATUS_PROG_FAILED))
558 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
559 struct mtd_oob_ops *ops)
561 struct spinand_device *spinand = mtd_to_spinand(mtd);
562 struct nand_device *nand = mtd_to_nanddev(mtd);
563 unsigned int max_bitflips = 0;
564 struct nand_io_iter iter;
565 bool enable_ecc = false;
566 bool ecc_failed = false;
569 if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
573 mutex_lock(&spinand->lock);
576 nanddev_io_for_each_page(nand, from, ops, &iter) {
577 ret = spinand_select_target(spinand, iter.req.pos.target);
581 ret = spinand_ecc_enable(spinand, enable_ecc);
585 ret = spinand_read_page(spinand, &iter.req, enable_ecc);
586 if (ret < 0 && ret != -EBADMSG)
589 if (ret == -EBADMSG) {
591 mtd->ecc_stats.failed++;
594 mtd->ecc_stats.corrected += ret;
595 max_bitflips = max_t(unsigned int, max_bitflips, ret);
598 ops->retlen += iter.req.datalen;
599 ops->oobretlen += iter.req.ooblen;
603 mutex_unlock(&spinand->lock);
605 if (ecc_failed && !ret)
608 return ret ? ret : max_bitflips;
611 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
612 struct mtd_oob_ops *ops)
614 struct spinand_device *spinand = mtd_to_spinand(mtd);
615 struct nand_device *nand = mtd_to_nanddev(mtd);
616 struct nand_io_iter iter;
617 bool enable_ecc = false;
620 if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
624 mutex_lock(&spinand->lock);
627 nanddev_io_for_each_page(nand, to, ops, &iter) {
628 ret = spinand_select_target(spinand, iter.req.pos.target);
632 ret = spinand_ecc_enable(spinand, enable_ecc);
636 ret = spinand_write_page(spinand, &iter.req);
640 ops->retlen += iter.req.datalen;
641 ops->oobretlen += iter.req.ooblen;
645 mutex_unlock(&spinand->lock);
651 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
653 struct spinand_device *spinand = nand_to_spinand(nand);
654 struct nand_page_io_req req = {
658 .oobbuf.in = spinand->oobbuf,
663 memset(spinand->oobbuf, 0, 2);
664 ret = spinand_select_target(spinand, pos->target);
668 ret = spinand_read_page(spinand, &req, false);
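	/*
	 * A block is reported bad if either of the first two OOB bytes (the
	 * bad-block markers) is not 0xff.
	 */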
672 if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
678 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
680 struct nand_device *nand = mtd_to_nanddev(mtd);
682 struct spinand_device *spinand = nand_to_spinand(nand);
687 nanddev_offs_to_pos(nand, offs, &pos);
689 mutex_lock(&spinand->lock);
691 ret = nanddev_isbad(nand, &pos);
693 mutex_unlock(&spinand->lock);
698 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
700 struct spinand_device *spinand = nand_to_spinand(nand);
701 struct nand_page_io_req req = {
705 .oobbuf.out = spinand->oobbuf,
709 /* Erase block before marking it bad. */
710 ret = spinand_select_target(spinand, pos->target);
714 ret = spinand_write_enable_op(spinand);
718 ret = spinand_erase_op(spinand, pos);
722 memset(spinand->oobbuf, 0, 2);
723 return spinand_write_page(spinand, &req);
726 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
728 struct nand_device *nand = mtd_to_nanddev(mtd);
730 struct spinand_device *spinand = nand_to_spinand(nand);
735 nanddev_offs_to_pos(nand, offs, &pos);
737 mutex_lock(&spinand->lock);
739 ret = nanddev_markbad(nand, &pos);
741 mutex_unlock(&spinand->lock);
746 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
748 struct spinand_device *spinand = nand_to_spinand(nand);
752 ret = spinand_select_target(spinand, pos->target);
756 ret = spinand_write_enable_op(spinand);
760 ret = spinand_erase_op(spinand, pos);
764 ret = spinand_wait(spinand, &status);
765 if (!ret && (status & STATUS_ERASE_FAILED))
771 static int spinand_mtd_erase(struct mtd_info *mtd,
772 struct erase_info *einfo)
775 struct spinand_device *spinand = mtd_to_spinand(mtd);
780 mutex_lock(&spinand->lock);
782 ret = nanddev_mtd_erase(mtd, einfo);
784 mutex_unlock(&spinand->lock);
790 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
793 struct spinand_device *spinand = mtd_to_spinand(mtd);
795 struct nand_device *nand = mtd_to_nanddev(mtd);
799 nanddev_offs_to_pos(nand, offs, &pos);
801 mutex_lock(&spinand->lock);
803 ret = nanddev_isreserved(nand, &pos);
805 mutex_unlock(&spinand->lock);
811 const struct spi_mem_op *
812 spinand_find_supported_op(struct spinand_device *spinand,
813 const struct spi_mem_op *ops,
818 for (i = 0; i < nops; i++) {
819 if (spi_mem_supports_op(spinand->slave, &ops[i]))
826 static const struct nand_ops spinand_ops = {
827 .erase = spinand_erase,
828 .markbad = spinand_markbad,
829 .isbad = spinand_isbad,
832 static const struct spinand_manufacturer *spinand_manufacturers[] = {
833 &macronix_spinand_manufacturer,
834 &micron_spinand_manufacturer,
835 &winbond_spinand_manufacturer,
838 static int spinand_manufacturer_detect(struct spinand_device *spinand)
843 for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
844 ret = spinand_manufacturers[i]->ops->detect(spinand);
846 spinand->manufacturer = spinand_manufacturers[i];
848 } else if (ret < 0) {
856 static int spinand_manufacturer_init(struct spinand_device *spinand)
858 if (spinand->manufacturer->ops->init)
859 return spinand->manufacturer->ops->init(spinand);
864 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
866 /* Release manufacturer private data */
867 if (spinand->manufacturer->ops->cleanup)
868 return spinand->manufacturer->ops->cleanup(spinand);
871 static const struct spi_mem_op *
872 spinand_select_op_variant(struct spinand_device *spinand,
873 const struct spinand_op_variants *variants)
875 struct nand_device *nand = spinand_to_nand(spinand);
878 for (i = 0; i < variants->nops; i++) {
879 struct spi_mem_op op = variants->ops[i];
883 nbytes = nanddev_per_page_oobsize(nand) +
884 nanddev_page_size(nand);
887 op.data.nbytes = nbytes;
888 ret = spi_mem_adjust_op_size(spinand->slave, &op);
892 if (!spi_mem_supports_op(spinand->slave, &op))
895 nbytes -= op.data.nbytes;
899 return &variants->ops[i];
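/*
 * Variants are expected to be listed from the most to the least desirable
 * (e.g. quad I/O before dual before single), so the first template the
 * controller can execute for a full page + OOB transfer is the one used.
 */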
906 * spinand_match_and_init() - Try to find a match between a device ID and an
907 * entry in a spinand_info table
908 * @spinand: SPI NAND object
909 * @table: SPI NAND device description table
910 * @table_size: size of the device description table
912 * Should be used by SPI NAND manufacturer drivers when they want to find a
913 * match between a device ID retrieved through the READ_ID command and an
914 * entry in the SPI NAND description table. If a match is found, the spinand
915 * object will be initialized with information provided by the matching
916 * spinand_info entry.
918 * Return: 0 on success, a negative error code otherwise.
920 int spinand_match_and_init(struct spinand_device *spinand,
921 const struct spinand_info *table,
922 unsigned int table_size, u8 devid)
924 struct nand_device *nand = spinand_to_nand(spinand);
927 for (i = 0; i < table_size; i++) {
928 const struct spinand_info *info = &table[i];
929 const struct spi_mem_op *op;
931 if (devid != info->devid)
934 nand->memorg = table[i].memorg;
935 nand->eccreq = table[i].eccreq;
936 spinand->eccinfo = table[i].eccinfo;
937 spinand->flags = table[i].flags;
938 spinand->select_target = table[i].select_target;
940 op = spinand_select_op_variant(spinand,
941 info->op_variants.read_cache);
945 spinand->op_templates.read_cache = op;
947 op = spinand_select_op_variant(spinand,
948 info->op_variants.write_cache);
952 spinand->op_templates.write_cache = op;
954 op = spinand_select_op_variant(spinand,
955 info->op_variants.update_cache);
956 spinand->op_templates.update_cache = op;
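/*
 * Sketch of a manufacturer ->detect() hook built on this helper (the "foo"
 * names are hypothetical, following the pattern used by the micron/winbond
 * drivers): check the manufacturer ID byte, then match the device ID
 * against the driver's table.
 *
 *	static int foo_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[1] != SPINAND_MFR_FOO)
 *			return 0;
 *
 *		ret = spinand_match_and_init(spinand, foo_spinand_table,
 *					     ARRAY_SIZE(foo_spinand_table),
 *					     id[2]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;
 *	}
 */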
964 static int spinand_detect(struct spinand_device *spinand)
966 struct nand_device *nand = spinand_to_nand(spinand);
969 ret = spinand_reset_op(spinand);
973 ret = spinand_read_id_op(spinand, spinand->id.data);
977 spinand->id.len = SPINAND_MAX_ID_LEN;
979 ret = spinand_manufacturer_detect(spinand);
981 dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
986 if (nand->memorg.ntargets > 1 && !spinand->select_target) {
988 "SPI NANDs with more than one die must implement ->select_target()\n");
992 dev_info(spinand->slave->dev,
993 "%s SPI NAND was found.\n", spinand->manufacturer->name);
994 dev_info(spinand->slave->dev,
995 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
996 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
997 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1002 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
1003 struct mtd_oob_region *region)
1008 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
1009 struct mtd_oob_region *region)
1014 /* Reserve 2 bytes for the BBM. */
1016 region->length = 62;
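	/*
	 * Note: this fallback layout hardcodes a 64-byte OOB area, i.e. the
	 * 2 BBM bytes plus 62 free bytes exposed to the user.
	 */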
1021 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
1022 .ecc = spinand_noecc_ooblayout_ecc,
1023 .free = spinand_noecc_ooblayout_free,
1026 static int spinand_init(struct spinand_device *spinand)
1028 struct mtd_info *mtd = spinand_to_mtd(spinand);
1029 struct nand_device *nand = mtd_to_nanddev(mtd);
1033 * We need a scratch buffer because the spi_mem interface requires that
1034 * buf passed in spi_mem_op->data.buf be DMA-able.
1036 spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
1037 if (!spinand->scratchbuf)
1040 ret = spinand_detect(spinand);
1045 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
1046 * may use this buffer for DMA access.
1047 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
1049 spinand->databuf = kzalloc(nanddev_page_size(nand) +
1050 nanddev_per_page_oobsize(nand),
1052 if (!spinand->databuf) {
1057 spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
1059 ret = spinand_init_cfg_cache(spinand);
1063 ret = spinand_init_quad_enable(spinand);
1067 ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1071 ret = spinand_manufacturer_init(spinand);
1074 "Failed to initialize the SPI NAND chip (err = %d)\n",
1079 /* After power up, all blocks are locked, so unlock them here. */
1080 for (i = 0; i < nand->memorg.ntargets; i++) {
1081 ret = spinand_select_target(spinand, i);
1085 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1090 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1092 goto err_manuf_cleanup;
1095 * Right now, we don't support ECC, so the whole OOB
1096 * area is available to the user.
1098 mtd->_read_oob = spinand_mtd_read;
1099 mtd->_write_oob = spinand_mtd_write;
1100 mtd->_block_isbad = spinand_mtd_block_isbad;
1101 mtd->_block_markbad = spinand_mtd_block_markbad;
1102 mtd->_block_isreserved = spinand_mtd_block_isreserved;
1103 mtd->_erase = spinand_mtd_erase;
1105 if (spinand->eccinfo.ooblayout)
1106 mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
1108 mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
1110 ret = mtd_ooblayout_count_freebytes(mtd);
1112 goto err_cleanup_nanddev;
1114 mtd->oobavail = ret;
1118 err_cleanup_nanddev:
1119 nanddev_cleanup(nand);
1122 spinand_manufacturer_cleanup(spinand);
1125 kfree(spinand->databuf);
1126 kfree(spinand->scratchbuf);
1130 static void spinand_cleanup(struct spinand_device *spinand)
1132 struct nand_device *nand = spinand_to_nand(spinand);
1134 nanddev_cleanup(nand);
1135 spinand_manufacturer_cleanup(spinand);
1136 kfree(spinand->databuf);
1137 kfree(spinand->scratchbuf);
1140 static int spinand_probe(struct udevice *dev)
1142 struct spinand_device *spinand = dev_get_priv(dev);
1143 struct spi_slave *slave = dev_get_parent_priv(dev);
1144 struct mtd_info *mtd = dev_get_uclass_priv(dev);
1145 struct nand_device *nand = spinand_to_nand(spinand);
1149 spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1154 spinand->spimem = mem;
1155 spi_mem_set_drvdata(mem, spinand);
1156 spinand_set_of_node(spinand, mem->spi->dev.of_node);
1157 mutex_init(&spinand->lock);
1159 mtd = spinand_to_mtd(spinand);
1160 mtd->dev.parent = &mem->spi->dev;
1165 mtd->name = malloc(20);
1168 sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
1169 spinand->slave = slave;
1170 spinand_set_of_node(spinand, dev->node.np);
1173 ret = spinand_init(spinand);
1178 ret = mtd_device_register(mtd, NULL, 0);
1180 ret = add_mtd_device(mtd);
1183 goto err_spinand_cleanup;
1187 err_spinand_cleanup:
1188 spinand_cleanup(spinand);
1194 static int spinand_remove(struct udevice *slave)
1196 struct spinand_device *spinand;
1197 struct mtd_info *mtd;
1200 spinand = spi_mem_get_drvdata(slave);
1201 mtd = spinand_to_mtd(spinand);
1204 ret = mtd_device_unregister(mtd);
1208 spinand_cleanup(spinand);
1213 static const struct spi_device_id spinand_ids[] = {
1214 { .name = "spi-nand" },
1219 static const struct of_device_id spinand_of_ids[] = {
1220 { .compatible = "spi-nand" },
1225 static struct spi_mem_driver spinand_drv = {
1227 .id_table = spinand_ids,
1230 .of_match_table = of_match_ptr(spinand_of_ids),
1233 .probe = spinand_probe,
1234 .remove = spinand_remove,
1236 module_spi_mem_driver(spinand_drv);
1238 MODULE_DESCRIPTION("SPI NAND framework");
1239 MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
1240 MODULE_LICENSE("GPL v2");
1241 #endif /* __UBOOT__ */
1243 static const struct udevice_id spinand_ids[] = {
1244 { .compatible = "spi-nand" },
1248 U_BOOT_DRIVER(spinand) = {
1251 .of_match = spinand_ids,
1252 .priv_auto_alloc_size = sizeof(struct spinand_device),
1253 .probe = spinand_probe,