1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <linux/errno.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/types.h>
20 #include "pxa3xx_nand.h"
22 DECLARE_GLOBAL_DATA_PTR;
24 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
/* Poll limit (used against get_timer(), so ms) for command/ready waits */
25 #define CHIP_DELAY_TIMEOUT 200
26 #define NAND_STOP_DELAY 40
/* The controller transfers large pages in chunks of this many data bytes */
27 #define PAGE_CHUNK_SIZE (2048)
30 * Define a buffer size for the initial command that detects the flash device:
31 * STATUS, READID and PARAM.
32 * ONFI param page is 256 bytes, and there are three redundant copies
33 * to be read. JEDEC param page is 512 bytes, and there are also three
34 * redundant copies to be read.
35 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
37 #define INIT_BUFFER_SIZE 2048
39 /* registers and bit definitions */
40 #define NDCR (0x00) /* Control register */
41 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
42 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
43 #define NDSR (0x14) /* Status Register */
44 #define NDPCR (0x18) /* Page Count Register */
45 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
46 #define NDBDR1 (0x20) /* Bad Block Register 1 */
47 #define NDECCCTRL (0x28) /* ECC control */
48 #define NDDB (0x40) /* Data Buffer */
49 #define NDCB0 (0x48) /* Command Buffer0 */
50 #define NDCB1 (0x4C) /* Command Buffer1 */
51 #define NDCB2 (0x50) /* Command Buffer2 */
53 #define NDCR_SPARE_EN (0x1 << 31)
54 #define NDCR_ECC_EN (0x1 << 30)
55 #define NDCR_DMA_EN (0x1 << 29)
56 #define NDCR_ND_RUN (0x1 << 28)
57 #define NDCR_DWIDTH_C (0x1 << 27)
58 #define NDCR_DWIDTH_M (0x1 << 26)
59 #define NDCR_PAGE_SZ (0x1 << 24)
60 #define NDCR_NCSX (0x1 << 23)
61 #define NDCR_ND_MODE (0x3 << 21)
62 #define NDCR_NAND_MODE (0x0)
63 #define NDCR_CLR_PG_CNT (0x1 << 20)
64 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
65 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
66 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
68 #define NDCR_RA_START (0x1 << 15)
69 #define NDCR_PG_PER_BLK (0x1 << 14)
70 #define NDCR_ND_ARB_EN (0x1 << 12)
/* NDCR[11:0] are interrupt-mask bits: setting a bit masks that event (see disable_int()) */
71 #define NDCR_INT_MASK (0xFFF)
73 #define NDSR_MASK (0xfff)
74 #define NDSR_ERR_CNT_OFF (16)
75 #define NDSR_ERR_CNT_MASK (0x1f)
76 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
77 #define NDSR_RDY (0x1 << 12)
78 #define NDSR_FLASH_RDY (0x1 << 11)
79 #define NDSR_CS0_PAGED (0x1 << 10)
80 #define NDSR_CS1_PAGED (0x1 << 9)
81 #define NDSR_CS0_CMDD (0x1 << 8)
82 #define NDSR_CS1_CMDD (0x1 << 7)
83 #define NDSR_CS0_BBD (0x1 << 6)
84 #define NDSR_CS1_BBD (0x1 << 5)
85 #define NDSR_UNCORERR (0x1 << 4)
86 #define NDSR_CORERR (0x1 << 3)
87 #define NDSR_WRDREQ (0x1 << 2)
88 #define NDSR_RDDREQ (0x1 << 1)
89 #define NDSR_WRCMDREQ (0x1)
91 #define NDCB0_LEN_OVRD (0x1 << 28)
92 #define NDCB0_ST_ROW_EN (0x1 << 26)
93 #define NDCB0_AUTO_RS (0x1 << 25)
94 #define NDCB0_CSEL (0x1 << 24)
95 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
96 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
97 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
98 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
99 #define NDCB0_NC (0x1 << 20)
100 #define NDCB0_DBC (0x1 << 19)
101 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
102 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
103 #define NDCB0_CMD2_MASK (0xff << 8)
104 #define NDCB0_CMD1_MASK (0xff)
105 #define NDCB0_ADDR_CYC_SHIFT (16)
/* Extended command types used in NDCB0 when a page is split across chunks */
107 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
108 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
109 #define EXT_CMD_TYPE_READ 4 /* Read */
110 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
111 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
112 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
113 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
115 /* macros for registers read/write */
116 #define nand_writel(info, off, val) \
117 writel((val), (info)->mmio_base + (off))
119 #define nand_readl(info, off) \
120 readl((info)->mmio_base + (off))
122 /* error code and state */
/* Supported controller generations: NFCv1 (PXA SoCs), NFCv2 (Armada 370/XP) */
145 enum pxa3xx_nand_variant {
146 PXA3XX_NAND_VARIANT_PXA,
147 PXA3XX_NAND_VARIANT_ARMADA370,
/* Per-chip-select state: one instance for each attached NAND chip */
150 struct pxa3xx_nand_host {
151 struct nand_chip chip;
152 struct mtd_info *mtd;
155 /* page size of attached chip */
159 /* calculated from pxa3xx_nand_flash data */
160 unsigned int col_addr_cycles;
161 unsigned int row_addr_cycles;
162 size_t read_id_bytes;
/* Controller-wide driver state, shared by all chip selects */
166 struct pxa3xx_nand_info {
167 struct nand_hw_control controller;
168 struct pxa3xx_nand_platform_data *pdata;
171 void __iomem *mmio_base;
172 unsigned long mmio_phys;
/* Completion flags polled by cmdfunc/waitfunc instead of IRQ completions */
173 int cmd_complete, dev_ready;
/* Cursors into the staging buffers below */
175 unsigned int buf_start;
176 unsigned int buf_count;
177 unsigned int buf_size;
178 unsigned int data_buff_pos;
179 unsigned int oob_buff_pos;
181 unsigned char *data_buff;
182 unsigned char *oob_buff;
184 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
188 * This driver supports NFCv1 (as found in PXA SoC)
189 * and NFCv2 (as found in Armada 370/XP SoC).
191 enum pxa3xx_nand_variant variant;
194 int use_ecc; /* use HW ECC ? */
195 int ecc_bch; /* using BCH ECC? */
196 int use_spare; /* use spare ? */
199 unsigned int data_size; /* data to be read from FIFO */
200 unsigned int chunk_size; /* split commands chunk size */
201 unsigned int oob_size;
202 unsigned int spare_size;
203 unsigned int ecc_size;
204 unsigned int ecc_err_cnt;
205 unsigned int max_bitflips;
208 /* cached register value */
213 /* generated NDCBx register values */
/*
 * Built-in timing sets (nanosecond values, converted to controller clock
 * cycles by ns2cycle() in pxa3xx_nand_set_timing()).
 */
220 static struct pxa3xx_nand_timing timing[] = {
221 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
222 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
223 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
224 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
/*
 * Known parts: { chip_id (two READID bytes), flash_width, dfc_width, timing };
 * matched against the READID result in pxa3xx_nand_init_timings().
 */
227 static struct pxa3xx_nand_flash builtin_flash_types[] = {
228 { 0x46ec, 16, 16, &timing[1] },
229 { 0xdaec, 8, 8, &timing[1] },
230 { 0xd7ec, 8, 8, &timing[1] },
231 { 0xa12c, 8, 8, &timing[2] },
232 { 0xb12c, 16, 16, &timing[2] },
233 { 0xdc2c, 8, 8, &timing[2] },
234 { 0xcc2c, 16, 16, &timing[2] },
235 { 0xba20, 16, 16, &timing[3] },
238 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* Marker patterns identifying the main ("MVBbt0") and mirror BBT blocks */
239 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
240 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
242 static struct nand_bbt_descr bbt_main_descr = {
243 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
244 | NAND_BBT_2BIT | NAND_BBT_VERSION,
248 .maxblocks = 8, /* Last 8 blocks in each chip */
249 .pattern = bbt_pattern
252 static struct nand_bbt_descr bbt_mirror_descr = {
253 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
254 | NAND_BBT_2BIT | NAND_BBT_VERSION,
258 .maxblocks = 8, /* Last 8 blocks in each chip */
259 .pattern = bbt_mirror_pattern
/* OOB/ECC byte layouts for the BCH geometries selected in pxa_ecc_init() */
263 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
266 32, 33, 34, 35, 36, 37, 38, 39,
267 40, 41, 42, 43, 44, 45, 46, 47,
268 48, 49, 50, 51, 52, 53, 54, 55,
269 56, 57, 58, 59, 60, 61, 62, 63},
270 .oobfree = { {2, 30} }
273 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
276 32, 33, 34, 35, 36, 37, 38, 39,
277 40, 41, 42, 43, 44, 45, 46, 47,
278 48, 49, 50, 51, 52, 53, 54, 55,
279 56, 57, 58, 59, 60, 61, 62, 63,
280 96, 97, 98, 99, 100, 101, 102, 103,
281 104, 105, 106, 107, 108, 109, 110, 111,
282 112, 113, 114, 115, 116, 117, 118, 119,
283 120, 121, 122, 123, 124, 125, 126, 127},
284 /* Bootrom looks in bytes 0 & 5 for bad blocks */
285 .oobfree = { {6, 26}, { 64, 32} }
/* 8-bit BCH consumes all spare bytes: no oobfree region is declared */
288 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
291 32, 33, 34, 35, 36, 37, 38, 39,
292 40, 41, 42, 43, 44, 45, 46, 47,
293 48, 49, 50, 51, 52, 53, 54, 55,
294 56, 57, 58, 59, 60, 61, 62, 63},
/* Clamp a cycle count and shift it into its NDTR0/NDTR1 bitfield */
298 #define NDTR0_tCH(c) (min((c), 7) << 19)
299 #define NDTR0_tCS(c) (min((c), 7) << 16)
300 #define NDTR0_tWH(c) (min((c), 7) << 11)
301 #define NDTR0_tWP(c) (min((c), 7) << 8)
302 #define NDTR0_tRH(c) (min((c), 7) << 3)
303 #define NDTR0_tRP(c) (min((c), 7) << 0)
305 #define NDTR1_tR(c) (min((c), 65535) << 16)
306 #define NDTR1_tWHR(c) (min((c), 15) << 4)
307 #define NDTR1_tAR(c) (min((c), 15) << 0)
309 /* convert nano-seconds to nand flash controller clock cycles */
/* NOTE(review): pure integer math — truncates, and a clk below 1 MHz yields 0 */
310 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/*
 * Report which controller variant this driver runs on. This U-Boot port
 * is hard-wired to the NFCv2 found in the Armada SoCs.
 */
312 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
314 /* We only support the Armada 370/XP/38x for now */
315 return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program the CS0 timing registers from a builtin flash-table entry,
 * converting each nanosecond value into cycles of the current NAND clock.
 */
318 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
319 const struct pxa3xx_nand_timing *t)
321 struct pxa3xx_nand_info *info = host->info_data;
322 unsigned long nand_clk = mvebu_get_nand_clock();
323 uint32_t ndtr0, ndtr1;
325 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
326 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
327 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
328 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
329 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
330 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
332 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
333 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
334 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
/* Cache so nand_cmdfunc() can restore the timings on a chip-select change */
336 info->ndtr0cs0 = ndtr0;
337 info->ndtr1cs0 = ndtr1;
338 nand_writel(info, NDTR0CS0, ndtr0);
339 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program the CS0 timing registers from ONFI SDR timings. The SDR
 * fields are in picoseconds, hence the DIV_ROUND_UP(..., 1000)
 * conversions to nanoseconds before ns2cycle().
 */
342 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
343 const struct nand_sdr_timings *t)
345 struct pxa3xx_nand_info *info = host->info_data;
346 struct nand_chip *chip = &host->chip;
347 unsigned long nand_clk = mvebu_get_nand_clock();
348 uint32_t ndtr0, ndtr1;
350 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
351 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
352 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
/*
 * NOTE(review): tWH_min/tREH_min are already converted to ns here, yet
 * they are subtracted from picosecond fields (tWC_min/tRC_min). The
 * mainline driver subtracts the raw t->tWH_min/t->tREH_min (ps) values
 * instead — verify against the full source before trusting these.
 */
353 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
354 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
355 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
/* chip_delay is in us; tR is kept in ns like the other values */
356 u32 tR = chip->chip_delay * 1000;
357 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
358 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
360 /* fallback to a default value if tR = 0 */
364 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
365 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
366 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
367 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
368 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
369 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
371 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
372 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
373 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
/* Cache for restore on chip-select change, then program the registers */
375 info->ndtr0cs0 = ndtr0;
376 info->ndtr1cs0 = ndtr1;
377 nand_writel(info, NDTR0CS0, ndtr0);
378 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Derive and apply chip timings: use the chip's ONFI async timing mode
 * when it reports one, otherwise fall back to matching READID bytes
 * against the builtin flash table. Also records the bus widths in NDCR.
 */
381 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
383 const struct nand_sdr_timings *timings;
384 struct nand_chip *chip = &host->chip;
385 struct pxa3xx_nand_info *info = host->info_data;
386 const struct pxa3xx_nand_flash *f = NULL;
387 int mode, id, ntypes, i;
389 mode = onfi_get_async_timing_mode(chip);
390 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
391 ntypes = ARRAY_SIZE(builtin_flash_types);
/* Identify the part from its first two READID bytes */
393 chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
395 id = chip->read_byte(host->mtd);
396 id |= chip->read_byte(host->mtd) << 0x8;
398 for (i = 0; i < ntypes; i++) {
399 f = &builtin_flash_types[i];
401 if (f->chip_id == id)
406 dev_err(&info->pdev->dev, "Error: timings not found\n");
410 pxa3xx_nand_set_timing(host, f->timing);
412 if (f->flash_width == 16) {
413 info->reg_ndcr |= NDCR_DWIDTH_M;
414 chip->options |= NAND_BUSWIDTH_16;
417 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
/* ONFI path: pick the highest supported async timing mode */
419 mode = fls(mode) - 1;
423 timings = onfi_async_timing_mode_to_sdr_timings(mode);
425 return PTR_ERR(timings);
427 pxa3xx_nand_set_sdr_timing(host, timings);
434 * Set the data and OOB size, depending on the selected
435 * spare and ECC configuration.
436 * Only applicable to READ0, READOOB and PAGEPROG commands.
438 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
439 struct mtd_info *mtd)
441 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
443 info->data_size = mtd->writesize;
/* OOB transfer size: spare area, plus ECC bytes when ECC is enabled */
447 info->oob_size = info->spare_size;
449 info->oob_size += info->ecc_size;
453 * NOTE: it is a must to set ND_RUN first, then write
454 * command buffer, otherwise, it does not work.
455 * We enable all the interrupt at the same time, and
456 * let pxa3xx_nand_irq to handle all logic.
458 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
462 ndcr = info->reg_ndcr;
/* NDECCCTRL toggled 1/0 here — presumably selects BCH vs Hamming; confirm */
467 nand_writel(info, NDECCCTRL, 0x1)
469 ndcr &= ~NDCR_ECC_EN;
471 nand_writel(info, NDECCCTRL, 0x0);
/* PIO-only driver: DMA stays disabled */
474 ndcr &= ~NDCR_DMA_EN;
477 ndcr |= NDCR_SPARE_EN;
479 ndcr &= ~NDCR_SPARE_EN;
483 /* clear status bits and run */
484 nand_writel(info, NDCR, 0);
485 nand_writel(info, NDSR, NDSR_MASK);
486 nand_writel(info, NDCR, ndcr);
/*
 * Mask (disable) the interrupt sources given in int_mask: NDCR's low
 * bits are mask bits, so OR-ing them in suppresses those events.
 */
489 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
493 ndcr = nand_readl(info, NDCR);
494 nand_writel(info, NDCR, ndcr | int_mask);
/* Read 'len' 32-bit words from the controller data FIFO (NDDB) into 'data' */
497 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
503 * According to the datasheet, when reading from NDDB
504 * with BCH enabled, after each 32 bytes reads, we
505 * have to make sure that the NDSR.RDDREQ bit is set.
507 * Drain the FIFO, eight 32-bit reads at a time, and skip
508 * the polling on the last read.
511 readsl(info->mmio_base + NDDB, data, 8);
514 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
515 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
516 dev_err(&info->pdev->dev,
517 "Timeout on RDDREQ while draining the FIFO\n");
/* Remaining (non-BCH or tail) words are read in one go */
527 readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO transfer of one chunk: push (write) or pull (read) the chunk's
 * data and any OOB bytes through NDDB, then advance the buffer cursors.
 */
530 static void handle_data_pio(struct pxa3xx_nand_info *info)
532 unsigned int do_bytes = min(info->data_size, info->chunk_size);
534 switch (info->state) {
535 case STATE_PIO_WRITING:
536 writesl(info->mmio_base + NDDB,
537 info->data_buff + info->data_buff_pos,
538 DIV_ROUND_UP(do_bytes, 4));
540 if (info->oob_size > 0)
541 writesl(info->mmio_base + NDDB,
542 info->oob_buff + info->oob_buff_pos,
543 DIV_ROUND_UP(info->oob_size, 4));
545 case STATE_PIO_READING:
547 info->data_buff + info->data_buff_pos,
548 DIV_ROUND_UP(do_bytes, 4));
550 if (info->oob_size > 0)
552 info->oob_buff + info->oob_buff_pos,
553 DIV_ROUND_UP(info->oob_size, 4));
556 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
561 /* Update buffer pointers for multi-page read/write */
562 info->data_buff_pos += do_bytes;
563 info->oob_buff_pos += info->oob_size;
564 info->data_size -= do_bytes;
/* Polled stand-in for the Linux threaded IRQ handler: do the PIO work */
567 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
569 handle_data_pio(info);
571 info->state = STATE_CMD_DONE;
/* Ack the data-request bits we just serviced */
572 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled "interrupt" handler: decode NDSR, record ECC results, run PIO
 * transfers, load the command buffers on WRCMDREQ, then ack the status.
 */
575 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
577 unsigned int status, is_completed = 0, is_ready = 0;
578 unsigned int ready, cmd_done;
579 irqreturn_t ret = IRQ_HANDLED;
/* Pick the per-chip-select ready/command-done status bits */
582 ready = NDSR_FLASH_RDY;
583 cmd_done = NDSR_CS0_CMDD;
586 cmd_done = NDSR_CS1_CMDD;
589 status = nand_readl(info, NDSR);
591 if (status & NDSR_UNCORERR)
592 info->retcode = ERR_UNCORERR;
593 if (status & NDSR_CORERR) {
594 info->retcode = ERR_CORERR;
595 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
597 info->ecc_err_cnt = NDSR_ERR_CNT(status);
/* Variants without an error counter report a single corrected bitflip */
599 info->ecc_err_cnt = 1;
602 * Each chunk composing a page is corrected independently,
603 * and we need to store maximum number of corrected bitflips
604 * to return it to the MTD layer in ecc.read_page().
606 info->max_bitflips = max_t(unsigned int,
610 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
611 info->state = (status & NDSR_RDDREQ) ?
612 STATE_PIO_READING : STATE_PIO_WRITING;
613 /* Call the IRQ thread in U-Boot directly */
614 pxa3xx_nand_irq_thread(info);
617 if (status & cmd_done) {
618 info->state = STATE_CMD_DONE;
621 if (status & ready) {
622 info->state = STATE_READY;
626 if (status & NDSR_WRCMDREQ) {
627 nand_writel(info, NDSR, NDSR_WRCMDREQ);
628 status &= ~NDSR_WRCMDREQ;
629 info->state = STATE_CMD_HANDLE;
632 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
633 * must be loaded by writing directly either 12 or 16
634 * bytes directly to NDCB0, four bytes at a time.
636 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
637 * but each NDCBx register can be read.
639 nand_writel(info, NDCB0, info->ndcb0);
640 nand_writel(info, NDCB0, info->ndcb1);
641 nand_writel(info, NDCB0, info->ndcb2);
643 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
644 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
645 nand_writel(info, NDCB0, info->ndcb3);
648 /* clear NDSR to let the controller exit the IRQ */
649 nand_writel(info, NDSR, status);
651 info->cmd_complete = 1;
/* Return whether the buffer holds only 0xff bytes, i.e. erased content */
658 static inline int is_buf_blank(uint8_t *buf, size_t len)
660 for (; len > 0; len--)
/*
 * Encode column/page address cycles into NDCB1/NDCB2: small-page devices
 * pack everything into NDCB1; large-page devices split the page address
 * across NDCB1 and (for page numbers above 0xFFFF) NDCB2.
 */
666 static void set_command_address(struct pxa3xx_nand_info *info,
667 unsigned int page_size, uint16_t column, int page_addr)
669 /* small page addr setting */
670 if (page_size < PAGE_CHUNK_SIZE) {
671 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
676 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
679 if (page_addr & 0xFF0000)
680 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command state (buffer cursors, error bookkeeping, transfer
 * sizes) before 'command' is encoded and issued.
 */
686 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
688 struct pxa3xx_nand_host *host = info->host[info->cs];
689 struct mtd_info *mtd = host->mtd;
691 /* reset data and oob column point to handle data */
695 info->data_buff_pos = 0;
696 info->oob_buff_pos = 0;
699 info->retcode = ERR_NONE;
700 info->ecc_err_cnt = 0;
/* Data-moving commands need data/OOB sizes derived from the ECC setup */
706 case NAND_CMD_PAGEPROG:
708 case NAND_CMD_READOOB:
709 pxa3xx_set_datasize(info, mtd);
721 * If we are about to issue a read command, or about to set
722 * the write address, then clean the data buffer.
724 if (command == NAND_CMD_READ0 ||
725 command == NAND_CMD_READOOB ||
726 command == NAND_CMD_SEQIN) {
727 info->buf_count = mtd->writesize + mtd->oobsize;
728 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate an MTD command into the NDCB0-3 command-buffer values.
 * Returns (via exec_cmd) whether the controller must actually run a
 * command, or whether the request is satisfied from the staged buffer.
 */
732 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
733 int ext_cmd_type, uint16_t column, int page_addr)
735 int addr_cycle, exec_cmd;
736 struct pxa3xx_nand_host *host;
737 struct mtd_info *mtd;
739 host = info->host[info->cs];
745 info->ndcb0 = NDCB0_CSEL;
749 if (command == NAND_CMD_SEQIN)
752 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
753 + host->col_addr_cycles);
756 case NAND_CMD_READOOB:
758 info->buf_start = column;
759 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* READOOB is served as a full-page read starting at the OOB offset */
763 if (command == NAND_CMD_READOOB)
764 info->buf_start += mtd->writesize;
767 * Multiple page read needs an 'extended command type' field,
768 * which is either naked-read or last-read according to the
771 if (mtd->writesize == PAGE_CHUNK_SIZE) {
772 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
773 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
774 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
776 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
777 info->ndcb3 = info->chunk_size +
781 set_command_address(info, mtd->writesize, column, page_addr);
786 info->buf_start = column;
787 set_command_address(info, mtd->writesize, 0, page_addr);
790 * Multiple page programming needs to execute the initial
791 * SEQIN command that sets the page address.
793 if (mtd->writesize > PAGE_CHUNK_SIZE) {
794 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
795 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
798 /* No data transfer in this case */
804 case NAND_CMD_PAGEPROG:
/* Skip programming entirely when the staged page is still blank */
805 if (is_buf_blank(info->data_buff,
806 (mtd->writesize + mtd->oobsize))) {
811 /* Second command setting for large pages */
812 if (mtd->writesize > PAGE_CHUNK_SIZE) {
814 * Multiple page write uses the 'extended command'
815 * field. This can be used to issue a command dispatch
816 * or a naked-write depending on the current stage.
818 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
820 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
821 info->ndcb3 = info->chunk_size +
825 * This is the command dispatch that completes a chunked
826 * page program operation.
828 if (info->data_size == 0) {
829 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
830 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
/* Small-page path: classic SEQIN + PAGEPROG pair in one command */
837 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
841 | (NAND_CMD_PAGEPROG << 8)
/* PARAM read: fixed-size transfer into the init buffer */
848 info->buf_count = INIT_BUFFER_SIZE;
849 info->ndcb0 |= NDCB0_CMD_TYPE(0)
853 info->ndcb1 = (column & 0xFF);
854 info->ndcb3 = INIT_BUFFER_SIZE;
855 info->data_size = INIT_BUFFER_SIZE;
858 case NAND_CMD_READID:
859 info->buf_count = host->read_id_bytes;
860 info->ndcb0 |= NDCB0_CMD_TYPE(3)
863 info->ndcb1 = (column & 0xFF);
867 case NAND_CMD_STATUS:
869 info->ndcb0 |= NDCB0_CMD_TYPE(4)
876 case NAND_CMD_ERASE1:
877 info->ndcb0 |= NDCB0_CMD_TYPE(2)
881 | (NAND_CMD_ERASE2 << 8)
883 info->ndcb1 = page_addr;
/* RESET is issued as a command-dispatch type */
888 info->ndcb0 |= NDCB0_CMD_TYPE(5)
/* ERASE2 is already folded into the ERASE1 double-byte command */
893 case NAND_CMD_ERASE2:
899 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * Default cmdfunc: issue one monolithic controller command and poll
 * NDSR (dispatching to pxa3xx_nand_irq()) until it completes or the
 * CHIP_DELAY_TIMEOUT expires. Used when a page fits one FIFO chunk.
 */
907 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
908 int column, int page_addr)
910 struct nand_chip *chip = mtd_to_nand(mtd);
911 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
912 struct pxa3xx_nand_info *info = host->info_data;
916 * if this is a x16 device, then convert the input
917 * "byte" address into a "word" address appropriate
918 * for indexing a word-oriented device
920 if (info->reg_ndcr & NDCR_DWIDTH_M)
924 * There may be different NAND chip hooked to
925 * different chip select, so check whether
926 * chip select has been changed, if yes, reset the timing
928 if (info->cs != host->cs) {
930 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
931 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
934 prepare_start_command(info, command);
936 info->state = STATE_PREPARED;
937 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
942 info->cmd_complete = 0;
945 pxa3xx_nand_start(info);
/* Busy-poll the status register until the command completes */
951 status = nand_readl(info, NDSR);
953 pxa3xx_nand_irq(info);
955 if (info->cmd_complete)
958 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
959 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
964 info->state = STATE_IDLE;
/*
 * Extended cmdfunc for large-page chips on NFCv2: the page is moved as
 * a sequence of chunked (naked read/write) commands, looping until
 * info->data_size is exhausted.
 */
967 static void nand_cmdfunc_extended(struct mtd_info *mtd,
968 const unsigned command,
969 int column, int page_addr)
971 struct nand_chip *chip = mtd_to_nand(mtd);
972 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
973 struct pxa3xx_nand_info *info = host->info_data;
974 int exec_cmd, ext_cmd_type;
977 * if this is a x16 device then convert the input
978 * "byte" address into a "word" address appropriate
979 * for indexing a word-oriented device
981 if (info->reg_ndcr & NDCR_DWIDTH_M)
985 * There may be different NAND chip hooked to
986 * different chip select, so check whether
987 * chip select has been changed, if yes, reset the timing
989 if (info->cs != host->cs) {
991 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
992 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
995 /* Select the extended command for the first command */
998 case NAND_CMD_READOOB:
999 ext_cmd_type = EXT_CMD_TYPE_MONO;
1001 case NAND_CMD_SEQIN:
1002 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1004 case NAND_CMD_PAGEPROG:
1005 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1012 prepare_start_command(info, command);
1015 * Prepare the "is ready" completion before starting a command
1016 * transaction sequence. If the command is not executed the
1017 * completion will be completed, see below.
1019 * We can do that inside the loop because the command variable
1020 * is invariant and thus so is the exec_cmd.
1022 info->need_wait = 1;
1023 info->dev_ready = 0;
1028 info->state = STATE_PREPARED;
1029 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* Nothing to execute: mark ready immediately for waitfunc */
1032 info->need_wait = 0;
1033 info->dev_ready = 1;
1037 info->cmd_complete = 0;
1038 pxa3xx_nand_start(info);
/* Busy-poll this chunk's command to completion */
1044 status = nand_readl(info, NDSR);
1046 pxa3xx_nand_irq(info);
1048 if (info->cmd_complete)
1051 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1052 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1057 /* Check if the sequence is complete */
1058 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1062 * After a split program command sequence has issued
1063 * the command dispatch, the command sequence is complete.
1065 if (info->data_size == 0 &&
1066 command == NAND_CMD_PAGEPROG &&
1067 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1070 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1071 /* Last read: issue a 'last naked read' */
1072 if (info->data_size == info->chunk_size)
1073 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1075 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1078 * If a split program command has no more data to transfer,
1079 * the command dispatch must be issued to complete.
1081 } else if (command == NAND_CMD_PAGEPROG &&
1082 info->data_size == 0) {
1083 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1087 info->state = STATE_IDLE;
/*
 * Hardware-ECC page write: stage data + OOB into the driver buffer;
 * the actual programming happens on the subsequent PAGEPROG command.
 */
1090 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1091 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1094 chip->write_buf(mtd, buf, mtd->writesize);
1095 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * Hardware-ECC page read: copy the already-transferred data and OOB out
 * of the staging buffer and translate the controller's ECC outcome into
 * MTD ecc_stats — treating an "uncorrectable" blank page as clean.
 */
1100 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1101 struct nand_chip *chip, uint8_t *buf, int oob_required,
1104 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1105 struct pxa3xx_nand_info *info = host->info_data;
1107 chip->read_buf(mtd, buf, mtd->writesize);
1108 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1110 if (info->retcode == ERR_CORERR && info->use_ecc) {
1111 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1113 } else if (info->retcode == ERR_UNCORERR) {
1115 * for blank page (all 0xff), HW will calculate its ECC as
1116 * 0, which is different from the ECC information within
1117 * OOB, ignore such uncorrectable errors
1119 if (is_buf_blank(buf, mtd->writesize))
1120 info->retcode = ERR_NONE;
1122 mtd->ecc_stats.failed++;
1125 return info->max_bitflips;
/* Serve one byte from the staged data buffer, advancing the read cursor */
1128 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1130 struct nand_chip *chip = mtd_to_nand(mtd);
1131 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1132 struct pxa3xx_nand_info *info = host->info_data;
1135 if (info->buf_start < info->buf_count)
1136 /* Has just sent a new command? */
1137 retval = info->data_buff[info->buf_start++];
/*
 * Serve one 16-bit word (native endianness, direct pointer cast) from
 * the staged buffer; only valid on even offsets, 0xFFFF otherwise or
 * when the buffer is exhausted.
 */
1142 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1144 struct nand_chip *chip = mtd_to_nand(mtd);
1145 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1146 struct pxa3xx_nand_info *info = host->info_data;
1147 u16 retval = 0xFFFF;
1149 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1150 retval = *((u16 *)(info->data_buff+info->buf_start));
1151 info->buf_start += 2;
/* Copy up to 'len' staged bytes out to 'buf', clamped to what remains */
1156 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1158 struct nand_chip *chip = mtd_to_nand(mtd);
1159 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1160 struct pxa3xx_nand_info *info = host->info_data;
1161 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1163 memcpy(buf, info->data_buff + info->buf_start, real_len);
1164 info->buf_start += real_len;
/* Stage up to 'len' bytes from 'buf' for the next program, clamped to space left */
1167 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1168 const uint8_t *buf, int len)
1170 struct nand_chip *chip = mtd_to_nand(mtd);
1171 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1172 struct pxa3xx_nand_info *info = host->info_data;
1173 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1175 memcpy(info->data_buff + info->buf_start, buf, real_len);
1176 info->buf_start += real_len;
/*
 * nand_chip.select_chip hook. Chip selection is handled per-command via
 * info->cs / NDCB0_CSEL in cmdfunc, so this appears to be a no-op —
 * confirm against the full source.
 */
1179 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * nand_chip.waitfunc: poll for device-ready when a prior extended
 * command left need_wait set, then report program/erase success.
 */
1184 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1186 struct nand_chip *chip = mtd_to_nand(mtd);
1187 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1188 struct pxa3xx_nand_info *info = host->info_data;
1190 if (info->need_wait) {
1193 info->need_wait = 0;
/* Busy-poll NDSR until the ready bit is observed or we time out */
1199 status = nand_readl(info, NDSR);
1201 pxa3xx_nand_irq(info);
1203 if (info->dev_ready)
1206 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1207 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1208 return NAND_STATUS_FAIL;
1213 /* pxa3xx_nand_send_command has waited for command complete */
1214 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1215 if (info->retcode == ERR_NONE)
1218 return NAND_STATUS_FAIL;
1221 return NAND_STATUS_READY;
/*
 * Fold the detected chip geometry (row-address start, 64 pages per
 * block, 2KB page size) into the cached NDCR value.
 */
1224 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1226 struct pxa3xx_nand_host *host = info->host[info->cs];
1227 struct mtd_info *mtd = host->mtd;
1228 struct nand_chip *chip = mtd_to_nand(mtd);
1230 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1231 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1232 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
/* keep_config path: adopt the NDCR/NDTR settings left by earlier firmware */
1237 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1240 * We set 0 by hard coding here, for we don't support keep_config
1241 * when there is more than one chip attached to the controller
1243 struct pxa3xx_nand_host *host = info->host[0];
1244 uint32_t ndcr = nand_readl(info, NDCR);
1246 if (ndcr & NDCR_PAGE_SZ) {
1247 /* Controller's FIFO size */
1248 info->chunk_size = 2048;
1249 host->read_id_bytes = 4;
1251 info->chunk_size = 512;
1252 host->read_id_bytes = 2;
1255 /* Set an initial chunk size */
/* Keep the hardware's NDCR but leave all interrupt sources unmasked */
1256 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1257 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1258 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/* Allocate the staging buffer used for all PIO data transfers */
1262 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1264 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1265 if (info->data_buff == NULL)
/*
 * Probe for a chip on the current chip select: program conservative
 * defaults and ONFI mode-0 timings, then issue RESET and check that
 * the chip answers (waitfunc not reporting failure).
 */
1270 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1272 struct pxa3xx_nand_info *info = host->info_data;
1273 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1274 struct mtd_info *mtd;
1275 struct nand_chip *chip;
1276 const struct nand_sdr_timings *timings;
1279 mtd = info->host[info->cs]->mtd;
1280 chip = mtd_to_nand(mtd);
1282 /* configure default flash values */
1283 info->reg_ndcr = 0x0; /* enable all interrupts */
1284 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1285 info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1286 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1288 /* use the common timing to make a try */
1289 timings = onfi_async_timing_mode_to_sdr_timings(0);
1290 if (IS_ERR(timings))
1291 return PTR_ERR(timings);
1293 pxa3xx_nand_set_sdr_timing(host, timings);
1295 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1296 ret = chip->waitfunc(mtd, chip);
1297 if (ret & NAND_STATUS_FAIL)
/*
 * Select the controller ECC geometry — chunk, spare and ECC byte counts
 * plus the matching OOB layout — for the requested strength, step size
 * and page size; unsupported combinations are rejected with an error.
 */
1303 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1304 struct nand_ecc_ctrl *ecc,
1305 int strength, int ecc_stepsize, int page_size)
1307 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1308 info->chunk_size = 2048;
1309 info->spare_size = 40;
1310 info->ecc_size = 24;
1311 ecc->mode = NAND_ECC_HW;
1315 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1316 info->chunk_size = 512;
1317 info->spare_size = 8;
1319 ecc->mode = NAND_ECC_HW;
1324 * Required ECC: 4-bit correction per 512 bytes
1325 * Select: 16-bit correction per 2048 bytes
1327 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1329 info->chunk_size = 2048;
1330 info->spare_size = 32;
1331 info->ecc_size = 32;
1332 ecc->mode = NAND_ECC_HW;
1333 ecc->size = info->chunk_size;
1334 ecc->layout = &ecc_layout_2KB_bch4bit;
1337 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1339 info->chunk_size = 2048;
1340 info->spare_size = 32;
1341 info->ecc_size = 32;
1342 ecc->mode = NAND_ECC_HW;
1343 ecc->size = info->chunk_size;
1344 ecc->layout = &ecc_layout_4KB_bch4bit;
1348 * Required ECC: 8-bit correction per 512 bytes
1349 * Select: 16-bit correction per 1024 bytes
1351 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
/* 8-bit BCH: ECC consumes the entire spare area (spare_size = 0) */
1353 info->chunk_size = 1024;
1354 info->spare_size = 0;
1355 info->ecc_size = 32;
1356 ecc->mode = NAND_ECC_HW;
1357 ecc->size = info->chunk_size;
1358 ecc->layout = &ecc_layout_4KB_bch8bit;
1361 dev_err(&info->pdev->dev,
1362 "ECC strength %d at page size %d is not supported\n",
1363 strength, page_size);
/*
 * pxa3xx_nand_scan() - detect the flash chip and finish MTD setup.
 *
 * Sequence: optionally keep the bootloader-programmed controller config,
 * sense that a chip is present, run nand_scan_ident() with ECC disabled,
 * program timings and controller config, pick the ECC geometry via
 * pxa_ecc_init(), compute column/row address cycles, swap the small
 * detection buffer for a full page+OOB buffer, and hand off to
 * nand_scan_tail().
 *
 * NOTE(review): the embedded original line numbers show gaps throughout
 * (error-handling branches, closing braces and early returns are missing
 * from this extract); the visible lines are not the complete function.
 */
1370 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1372 struct nand_chip *chip = mtd_to_nand(mtd);
1373 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1374 struct pxa3xx_nand_info *info = host->info_data;
1375 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1377 uint16_t ecc_strength, ecc_step;
/* "keep config" mode: trust the controller setup left by firmware */
1379 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1382 /* Set a default chunk size */
1383 info->chunk_size = 512;
1385 ret = pxa3xx_nand_sensing(host);
1387 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1394 /* Device detection must be done with ECC disabled */
1395 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1396 nand_writel(info, NDECCCTRL, 0x0);
1398 if (nand_scan_ident(mtd, 1, NULL))
1401 if (!pdata->keep_config) {
1402 ret = pxa3xx_nand_init_timings(host);
1404 dev_err(&info->pdev->dev,
1405 "Failed to set timings: %d\n", ret);
1410 ret = pxa3xx_nand_config_flash(info);
1414 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1416 * We'll use a bad block table stored in-flash and don't
1417 * allow writing the bad block marker to the flash.
1419 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1420 chip->bbt_td = &bbt_main_descr;
1421 chip->bbt_md = &bbt_mirror_descr;
1425 * If the page size is bigger than the FIFO size, let's check
1426 * we are given the right variant and then switch to the extended
1427 * (aka split) command handling,
1429 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1430 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1431 chip->cmdfunc = nand_cmdfunc_extended;
1433 dev_err(&info->pdev->dev,
1434 "unsupported page size on this variant\n");
/* Platform data overrides the chip's own (datasheet/ONFI) requirement */
1439 if (pdata->ecc_strength && pdata->ecc_step_size) {
1440 ecc_strength = pdata->ecc_strength;
1441 ecc_step = pdata->ecc_step_size;
1443 ecc_strength = chip->ecc_strength_ds;
1444 ecc_step = chip->ecc_step_ds;
1447 /* Set default ECC strength requirements on non-ONFI devices */
1448 if (ecc_strength < 1 && ecc_step < 1) {
1453 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1454 ecc_step, mtd->writesize);
1458 /* calculate addressing information */
1459 if (mtd->writesize >= 2048)
1460 host->col_addr_cycles = 2;
1462 host->col_addr_cycles = 1;
1464 /* release the initial buffer */
1465 kfree(info->data_buff);
1467 /* allocate the real data + oob buffer */
1468 info->buf_size = mtd->writesize + mtd->oobsize;
1469 ret = pxa3xx_nand_init_buff(info);
/* OOB lives directly after the page data in the shared buffer */
1472 info->oob_buff = info->data_buff + mtd->writesize;
/* >64K pages need a third row-address cycle */
1474 if ((mtd->size >> chip->page_shift) > 65536)
1475 host->row_addr_cycles = 3;
1477 host->row_addr_cycles = 2;
1478 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource() - set up per-chip-select host structures and the
 * initial detection buffer.
 *
 * The host structures live in the memory allocated immediately after
 * *info (see board_nand_init()); each one begins with the nand_chip, so
 * the chip pointer and host pointer alias the same object. All nand_chip
 * callbacks are wired to this driver's implementations, then a small
 * INIT_BUFFER_SIZE buffer is allocated for the ID/STATUS/PARAM detection
 * commands and all controller interrupts are masked.
 *
 * NOTE(review): the embedded original line numbers show gaps (loop
 * closing brace, error labels/returns are missing from this extract).
 */
1481 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1483 struct pxa3xx_nand_platform_data *pdata;
1484 struct pxa3xx_nand_host *host;
1485 struct nand_chip *chip = NULL;
1486 struct mtd_info *mtd;
1489 pdata = info->pdata;
1490 if (pdata->num_cs <= 0)
1493 info->variant = pxa3xx_nand_get_variant();
1494 for (cs = 0; cs < pdata->num_cs; cs++) {
/* host[cs] is carved out of the trailing allocation after *info */
1495 chip = (struct nand_chip *)
1496 ((u8 *)&info[1] + sizeof(*host) * cs);
1497 mtd = nand_to_mtd(chip);
/* host starts with the nand_chip, so the cast is valid */
1498 host = (struct pxa3xx_nand_host *)chip;
1499 info->host[cs] = host;
1502 host->info_data = info;
1503 host->read_id_bytes = 4;
1504 mtd->owner = THIS_MODULE;
1506 nand_set_controller_data(chip, host);
1507 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1508 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1509 chip->controller = &info->controller;
1510 chip->waitfunc = pxa3xx_nand_waitfunc;
1511 chip->select_chip = pxa3xx_nand_select_chip;
1512 chip->read_word = pxa3xx_nand_read_word;
1513 chip->read_byte = pxa3xx_nand_read_byte;
1514 chip->read_buf = pxa3xx_nand_read_buf;
1515 chip->write_buf = pxa3xx_nand_write_buf;
1516 chip->options |= NAND_NO_SUBPAGE_WRITE;
1517 chip->cmdfunc = nand_cmdfunc;
1520 /* Allocate a buffer to allow flash detection */
1521 info->buf_size = INIT_BUFFER_SIZE;
1522 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1523 if (info->data_buff == NULL) {
1525 goto fail_disable_clk;
1528 /* initialize all interrupts to be disabled */
1529 disable_int(info, NDSR_MASK);
/* error path: free the detection buffer allocated above */
1533 kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt() - build platform data from the device tree.
 *
 * Walks the FDT blob for "marvell,mvebu-pxa3xx-nand" compatible nodes,
 * takes the first enabled one, and fills a freshly kzalloc'd
 * pxa3xx_nand_platform_data from its properties (reg, num-cs,
 * nand-enable-arbiter, nand-keep-config, nand-ecc-strength,
 * nand-ecc-step-size). Only a single chip select / single controller is
 * supported.
 *
 * NOTE(review): gaps in the embedded original line numbers indicate
 * missing lines (kzalloc NULL check, node-loop init, base-address
 * assignment target, returns) in this extract.
 */
1538 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1540 struct pxa3xx_nand_platform_data *pdata;
1541 const void *blob = gd->fdt_blob;
1544 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1548 /* Get address decoding nodes from the FDT blob */
1550 node = fdt_node_offset_by_compatible(blob, node,
1551 "marvell,mvebu-pxa3xx-nand");
1555 /* Bypass disabled nodes */
1556 if (!fdtdec_get_is_enabled(blob, node))
1559 /* Get the first enabled NAND controller base address */
1561 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1562 blob, node, "reg", 0, NULL, true);
/* hardware/driver limitation: exactly one chip select */
1564 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1565 if (pdata->num_cs != 1) {
1566 pr_err("pxa3xx driver supports single CS only\n");
1570 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1571 pdata->enable_arbiter = 1;
1573 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1574 pdata->keep_config = 1;
1578 * If these are not set, they will be selected according
1579 * to the detected flash type.
/* 0 means "not specified" -- pxa3xx_nand_scan() falls back to chip data */
1582 pdata->ecc_strength = fdtdec_get_int(blob, node,
1583 "nand-ecc-strength", 0);
1586 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1587 "nand-ecc-step-size", 0);
1589 info->pdata = pdata;
1591 /* Currently support only a single NAND controller */
1594 } while (node >= 0);
/*
 * pxa3xx_nand_probe() - top-level probe: DT parse, resource allocation,
 * then scan and register an MTD device per chip select.
 *
 * The hard-coded mtd name must stay stable because it is the anchor for
 * the kernel's 'mtdparts' partition layout (see comment below).
 *
 * NOTE(review): the embedded original line numbers show gaps (error
 * checks after probe_dt/alloc, loop/function closing braces and the
 * probe_success handling are missing from this extract).
 */
1599 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1601 struct pxa3xx_nand_platform_data *pdata;
1602 int ret, cs, probe_success;
1604 ret = pxa3xx_nand_probe_dt(info);
1608 pdata = info->pdata;
1610 ret = alloc_nand_resource(info);
1612 dev_err(&pdev->dev, "alloc nand resource failed\n");
1617 for (cs = 0; cs < pdata->num_cs; cs++) {
1618 struct mtd_info *mtd = info->host[cs]->mtd;
1621 * The mtd name matches the one used in 'mtdparts' kernel
1622 * parameter. This name cannot be changed or otherwise
1623 * user's mtd partitions configuration would get broken.
1625 mtd->name = "pxa3xx_nand-0";
1627 ret = pxa3xx_nand_scan(mtd);
/* a failed scan on one CS is reported but need not be fatal overall */
1629 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1634 if (nand_register(cs, mtd))
1647 * Main initialization routine
1649 void board_nand_init(void)
1651 struct pxa3xx_nand_info *info;
1652 struct pxa3xx_nand_host *host;
1655 info = kzalloc(sizeof(*info) +
1656 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1661 ret = pxa3xx_nand_probe(info);