// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <spi.h>
#include <spi-mem.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;

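/*
 * On devices with more than one plane per LUN, cache operations expect
 * the plane index in the column address, one bit above the in-page
 * offset. Example: with a 2048-byte page, fls(2048) = 12, so plane 1
 * sets bit 12 of the column address.
 */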
static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
					   const struct nand_page_io_req *req,
					   u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}

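/*
 * Feature/status registers are accessed with the GET/SET FEATURE
 * commands. The single data byte is bounced through
 * spinand->scratchbuf, since spi-mem requires DMA-able buffers.
 */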
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;

	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];

	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;

	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

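/*
 * Typical use of spinand_upd_cfg() is flipping a single flag, e.g.
 * what spinand_ecc_enable() below does:
 *
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
 *			enable ? CFG_ECC_ENABLE : 0);
 */
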
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;

	return 0;
}

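/*
 * The configuration register is cached per target: spinand_get_cfg()
 * reads from spinand->cfg_cache and spinand_set_cfg() only issues a
 * SET FEATURE when the value actually changes, saving one register
 * access per page I/O.
 */
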
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

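/*
 * Read data and/or OOB bytes out of the on-die page cache. The full
 * page is always transferred into spinand->databuf/oobbuf and the
 * caller's request is served from these bounce buffers, which lets the
 * transfer be split into as many chunks as the controller requires.
 */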
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

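/*
 * Fill the on-die cache before a program operation. The bounce buffer
 * is pre-filled with 0xff so that bytes the caller did not provide
 * keep their erased value, then the whole page + OOB is sent.
 */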
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * RANDOM LOAD CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		column = op.addr.val;
		op = *spinand->op_templates.update_cache;
		op.addr.val = column;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;	/* wait at most 400 ms for the chip to become ready */
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

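/*
 * Program one page: WRITE ENABLE, fill the on-die cache, issue
 * PROG EXECUTE, then poll the status register and report -EIO if the
 * chip flags a program failure.
 */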
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
	};
	int ret;

	memset(spinand->oobbuf, 0, 2);
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_read_page(spinand, &req, false);
	if (ret)
		return ret;

	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	memset(spinand->oobbuf, 0, 2);

	return spinand_write_page(spinand, &req);
}

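/*
 * The bad block marker lives in the first two OOB bytes of the block's
 * first page: anything other than 0xffff (the erased value) is treated
 * as a bad-block indication.
 */
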
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

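/*
 * Pick the first variant that the controller can use to transfer a
 * full page + OOB, possibly split over several operations. Variant
 * tables are expected to list the fastest (widest) operations first,
 * so the first supported one is also the best one.
 */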
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID byte retrieved with the READ_ID command
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

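/*
 * For illustration, a manufacturer driver's ->detect() hook usually
 * just forwards the relevant ID byte to this helper (sketch only, all
 * "foo" identifiers are hypothetical):
 *
 *	static int foo_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[1] != SPINAND_MFR_FOO)
 *			return 0;
 *
 *		ret = spinand_match_and_init(spinand, foo_spinand_table,
 *					     ARRAY_SIZE(foo_spinand_table),
 *					     id[2]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;
 *	}
 */
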
static int spinand_detect(struct spinand_device *spinand)
{
	struct udevice *dev = spinand->slave->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
};

static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int i;
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so make the whole OOB
	 * area available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);

	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;

	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.priv_auto_alloc_size = sizeof(struct spinand_device),
	.probe = spinand_probe,
};