1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <linux/errno.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/types.h>
20 #include "pxa3xx_nand.h"
22 DECLARE_GLOBAL_DATA_PTR;
24 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
25 #define CHIP_DELAY_TIMEOUT 200
26 #define NAND_STOP_DELAY 40
27 #define PAGE_CHUNK_SIZE (2048)
30 * Define a buffer size for the initial command that detects the flash device:
31 * STATUS, READID and PARAM.
32 * ONFI param page is 256 bytes, and there are three redundant copies
33 * to be read. JEDEC param page is 512 bytes, and there are also three
34 * redundant copies to be read.
35 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
37 #define INIT_BUFFER_SIZE 2048
39 /* registers and bit definitions */
40 #define NDCR (0x00) /* Control register */
41 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
42 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
43 #define NDSR (0x14) /* Status Register */
44 #define NDPCR (0x18) /* Page Count Register */
45 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
46 #define NDBDR1 (0x20) /* Bad Block Register 1 */
47 #define NDECCCTRL (0x28) /* ECC control */
48 #define NDDB (0x40) /* Data Buffer */
49 #define NDCB0 (0x48) /* Command Buffer0 */
50 #define NDCB1 (0x4C) /* Command Buffer1 */
51 #define NDCB2 (0x50) /* Command Buffer2 */
53 #define NDCR_SPARE_EN (0x1 << 31)
54 #define NDCR_ECC_EN (0x1 << 30)
55 #define NDCR_DMA_EN (0x1 << 29)
56 #define NDCR_ND_RUN (0x1 << 28)
57 #define NDCR_DWIDTH_C (0x1 << 27)
58 #define NDCR_DWIDTH_M (0x1 << 26)
59 #define NDCR_PAGE_SZ (0x1 << 24)
60 #define NDCR_NCSX (0x1 << 23)
61 #define NDCR_ND_MODE (0x3 << 21)
62 #define NDCR_NAND_MODE (0x0)
63 #define NDCR_CLR_PG_CNT (0x1 << 20)
64 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
65 #define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
66 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
67 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
69 #define NDCR_RA_START (0x1 << 15)
70 #define NDCR_PG_PER_BLK (0x1 << 14)
71 #define NDCR_ND_ARB_EN (0x1 << 12)
72 #define NDCR_INT_MASK (0xFFF)
74 #define NDSR_MASK (0xfff)
75 #define NDSR_ERR_CNT_OFF (16)
76 #define NDSR_ERR_CNT_MASK (0x1f)
77 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
78 #define NDSR_RDY (0x1 << 12)
79 #define NDSR_FLASH_RDY (0x1 << 11)
80 #define NDSR_CS0_PAGED (0x1 << 10)
81 #define NDSR_CS1_PAGED (0x1 << 9)
82 #define NDSR_CS0_CMDD (0x1 << 8)
83 #define NDSR_CS1_CMDD (0x1 << 7)
84 #define NDSR_CS0_BBD (0x1 << 6)
85 #define NDSR_CS1_BBD (0x1 << 5)
86 #define NDSR_UNCORERR (0x1 << 4)
87 #define NDSR_CORERR (0x1 << 3)
88 #define NDSR_WRDREQ (0x1 << 2)
89 #define NDSR_RDDREQ (0x1 << 1)
90 #define NDSR_WRCMDREQ (0x1)
92 #define NDCB0_LEN_OVRD (0x1 << 28)
93 #define NDCB0_ST_ROW_EN (0x1 << 26)
94 #define NDCB0_AUTO_RS (0x1 << 25)
95 #define NDCB0_CSEL (0x1 << 24)
96 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
97 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
98 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
99 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
100 #define NDCB0_NC (0x1 << 20)
101 #define NDCB0_DBC (0x1 << 19)
102 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
103 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
104 #define NDCB0_CMD2_MASK (0xff << 8)
105 #define NDCB0_CMD1_MASK (0xff)
106 #define NDCB0_ADDR_CYC_SHIFT (16)
108 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
109 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
110 #define EXT_CMD_TYPE_READ 4 /* Read */
111 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
112 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
113 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
114 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
117 * This should be large enough to read 'ONFI' and 'JEDEC'.
118 * Let's use 7 bytes, which is the maximum ID count supported
119 * by the controller (see NDCR_RD_ID_CNT_MASK).
121 #define READ_ID_BYTES 7
123 /* macros for registers read/write */
124 #define nand_writel(info, off, val) \
125 writel((val), (info)->mmio_base + (off))
127 #define nand_readl(info, off) \
128 readl((info)->mmio_base + (off))
130 /* error code and state */
153 enum pxa3xx_nand_variant {
154 PXA3XX_NAND_VARIANT_PXA,
155 PXA3XX_NAND_VARIANT_ARMADA370,
158 struct pxa3xx_nand_host {
159 struct nand_chip chip;
162 /* page size of attached chip */
166 /* calculated from pxa3xx_nand_flash data */
167 unsigned int col_addr_cycles;
168 unsigned int row_addr_cycles;
171 struct pxa3xx_nand_info {
172 struct nand_hw_control controller;
173 struct pxa3xx_nand_platform_data *pdata;
176 void __iomem *mmio_base;
177 unsigned long mmio_phys;
178 int cmd_complete, dev_ready;
180 unsigned int buf_start;
181 unsigned int buf_count;
182 unsigned int buf_size;
183 unsigned int data_buff_pos;
184 unsigned int oob_buff_pos;
186 unsigned char *data_buff;
187 unsigned char *oob_buff;
189 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
193 * This driver supports NFCv1 (as found in PXA SoC)
194 * and NFCv2 (as found in Armada 370/XP SoC).
196 enum pxa3xx_nand_variant variant;
199 int use_ecc; /* use HW ECC ? */
200 int ecc_bch; /* using BCH ECC? */
201 int use_spare; /* use spare ? */
204 /* Amount of real data per full chunk */
205 unsigned int chunk_size;
207 /* Amount of spare data per full chunk */
208 unsigned int spare_size;
210 /* Number of full chunks (i.e chunk_size + spare_size) */
211 unsigned int nfullchunks;
214 * Total number of chunks. If equal to nfullchunks, then there
215 * are only full chunks. Otherwise, there is one last chunk of
216 * size (last_chunk_size + last_spare_size)
218 unsigned int ntotalchunks;
220 /* Amount of real data in the last chunk */
221 unsigned int last_chunk_size;
223 /* Amount of spare data in the last chunk */
224 unsigned int last_spare_size;
226 unsigned int ecc_size;
227 unsigned int ecc_err_cnt;
228 unsigned int max_bitflips;
232 * Variables only valid during command
233 * execution. step_chunk_size and step_spare_size is the
234 * amount of real data and spare data in the current
235 * chunk. cur_chunk is the current chunk being
238 unsigned int step_chunk_size;
239 unsigned int step_spare_size;
240 unsigned int cur_chunk;
242 /* cached register value */
247 /* generated NDCBx register values */
254 static struct pxa3xx_nand_timing timing[] = {
255 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
256 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
257 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
258 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
261 static struct pxa3xx_nand_flash builtin_flash_types[] = {
262 { 0x46ec, 16, 16, &timing[1] },
263 { 0xdaec, 8, 8, &timing[1] },
264 { 0xd7ec, 8, 8, &timing[1] },
265 { 0xa12c, 8, 8, &timing[2] },
266 { 0xb12c, 16, 16, &timing[2] },
267 { 0xdc2c, 8, 8, &timing[2] },
268 { 0xcc2c, 16, 16, &timing[2] },
269 { 0xba20, 16, 16, &timing[3] },
272 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
273 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
274 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
276 static struct nand_bbt_descr bbt_main_descr = {
277 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
278 | NAND_BBT_2BIT | NAND_BBT_VERSION,
282 .maxblocks = 8, /* Last 8 blocks in each chip */
283 .pattern = bbt_pattern
286 static struct nand_bbt_descr bbt_mirror_descr = {
287 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
288 | NAND_BBT_2BIT | NAND_BBT_VERSION,
292 .maxblocks = 8, /* Last 8 blocks in each chip */
293 .pattern = bbt_mirror_pattern
297 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
300 32, 33, 34, 35, 36, 37, 38, 39,
301 40, 41, 42, 43, 44, 45, 46, 47,
302 48, 49, 50, 51, 52, 53, 54, 55,
303 56, 57, 58, 59, 60, 61, 62, 63},
304 .oobfree = { {2, 30} }
307 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
310 32, 33, 34, 35, 36, 37, 38, 39,
311 40, 41, 42, 43, 44, 45, 46, 47,
312 48, 49, 50, 51, 52, 53, 54, 55,
313 56, 57, 58, 59, 60, 61, 62, 63,
314 96, 97, 98, 99, 100, 101, 102, 103,
315 104, 105, 106, 107, 108, 109, 110, 111,
316 112, 113, 114, 115, 116, 117, 118, 119,
317 120, 121, 122, 123, 124, 125, 126, 127},
318 /* Bootrom looks in bytes 0 & 5 for bad blocks */
319 .oobfree = { {6, 26}, { 64, 32} }
322 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
332 #define NDTR0_tCH(c) (min((c), 7) << 19)
333 #define NDTR0_tCS(c) (min((c), 7) << 16)
334 #define NDTR0_tWH(c) (min((c), 7) << 11)
335 #define NDTR0_tWP(c) (min((c), 7) << 8)
336 #define NDTR0_tRH(c) (min((c), 7) << 3)
337 #define NDTR0_tRP(c) (min((c), 7) << 0)
339 #define NDTR1_tR(c) (min((c), 65535) << 16)
340 #define NDTR1_tWHR(c) (min((c), 15) << 4)
341 #define NDTR1_tAR(c) (min((c), 15) << 0)
343 /* convert nano-seconds to nand flash controller clock cycles */
344 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/*
 * Report which NAND flash controller (NFC) variant this driver is built
 * for. This U-Boot port hard-codes the NFCv2 found on Armada SoCs.
 */
346 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
348 	/* We only support the Armada 370/XP/38x for now */
349 	return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program the CS0 timing registers (NDTR0/NDTR1) from a legacy
 * pxa3xx_nand_timing entry. Each timing value in @t is in nanoseconds
 * and is converted to controller clock cycles via ns2cycle() using the
 * current NAND clock rate. The computed values are also cached in
 * info->ndtr0cs0/ndtr1cs0 so they can be re-applied on chip-select change.
 */
352 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
353 const struct pxa3xx_nand_timing *t)
355 struct pxa3xx_nand_info *info = host->info_data;
356 unsigned long nand_clk = mvebu_get_nand_clock();
357 uint32_t ndtr0, ndtr1;
359 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
360 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
361 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
362 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
363 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
364 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
366 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
367 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
368 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
/* Cache for later restore (see nand_cmdfunc CS-change path), then apply. */
370 info->ndtr0cs0 = ndtr0;
371 info->ndtr1cs0 = ndtr1;
372 nand_writel(info, NDTR0CS0, ndtr0);
373 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings. The nand_sdr_timings
 * fields are in picoseconds; DIV_ROUND_UP(x, 1000) converts them to
 * nanoseconds (rounded up) before the ns->cycle conversion. tWP and tRP
 * are derived as cycle time minus hold time (tWC-tWH, tRC-tREH), since
 * the controller programs pulse widths rather than full cycle times.
 */
376 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
377 const struct nand_sdr_timings *t)
379 struct pxa3xx_nand_info *info = host->info_data;
380 struct nand_chip *chip = &host->chip;
381 unsigned long nand_clk = mvebu_get_nand_clock();
382 uint32_t ndtr0, ndtr1;
384 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
385 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
386 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
387 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
388 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
389 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
/* tR from the chip's ready delay (chip_delay is in us -> ns here). */
390 u32 tR = chip->chip_delay * 1000;
391 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
392 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
394 /* fallback to a default value if tR = 0 */
/* NOTE(review): the fallback assignment itself is not visible in this excerpt. */
398 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
399 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
400 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
401 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
402 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
403 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
405 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
406 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
407 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
/* Cache and program, mirroring pxa3xx_nand_set_timing(). */
409 info->ndtr0cs0 = ndtr0;
410 info->ndtr1cs0 = ndtr1;
411 nand_writel(info, NDTR0CS0, ndtr0);
412 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Initialize controller timings for the attached chip.
 *
 * Preferred path: query the chip's ONFI async timing mode and program
 * SDR timings from it. Fallback path (non-ONFI chips): issue READID,
 * build a 16-bit id from the first two ID bytes, look the chip up in
 * builtin_flash_types[] and use its canned timing entry, also setting
 * the bus-width bits (NDCR_DWIDTH_M/_C) from the table.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
415 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
417 const struct nand_sdr_timings *timings;
418 struct nand_chip *chip = &host->chip;
419 struct pxa3xx_nand_info *info = host->info_data;
420 const struct pxa3xx_nand_flash *f = NULL;
421 struct mtd_info *mtd = nand_to_mtd(&host->chip);
422 int mode, id, ntypes, i;
424 mode = onfi_get_async_timing_mode(chip);
425 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
426 ntypes = ARRAY_SIZE(builtin_flash_types);
/* Non-ONFI chip: identify it by its READID bytes. */
428 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
430 id = chip->read_byte(mtd);
431 id |= chip->read_byte(mtd) << 0x8;
433 for (i = 0; i < ntypes; i++) {
434 f = &builtin_flash_types[i];
436 if (f->chip_id == id)
441 dev_err(&info->pdev->dev, "Error: timings not found\n");
445 pxa3xx_nand_set_timing(host, f->timing);
447 if (f->flash_width == 16) {
448 info->reg_ndcr |= NDCR_DWIDTH_M;
449 chip->options |= NAND_BUSWIDTH_16;
452 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
/* ONFI path: pick the highest supported async mode from the bitmask. */
454 mode = fls(mode) - 1;
458 timings = onfi_async_timing_mode_to_sdr_timings(mode);
460 return PTR_ERR(timings);
462 pxa3xx_nand_set_sdr_timing(host, timings);
469 * NOTE: it is a must to set ND_RUN first, then write
470 * command buffer, otherwise, it does not work.
471 * We enable all the interrupt at the same time, and
472 * let pxa3xx_nand_irq to handle all logic.
474 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
/* Start from the cached NDCR image, then toggle per-command options. */
478 ndcr = info->reg_ndcr;
/* BCH mode is selected via NDECCCTRL (1 = BCH on, 0 = Hamming). */
483 nand_writel(info, NDECCCTRL, 0x1);
485 ndcr &= ~NDCR_ECC_EN;
487 nand_writel(info, NDECCCTRL, 0x0);
/* PIO only in this port: DMA stays disabled. */
490 ndcr &= ~NDCR_DMA_EN;
493 ndcr |= NDCR_SPARE_EN;
495 ndcr &= ~NDCR_SPARE_EN;
499 /* clear status bits and run */
500 nand_writel(info, NDSR, NDSR_MASK);
501 nand_writel(info, NDCR, 0);
502 nand_writel(info, NDCR, ndcr);
/*
 * Mask interrupt sources: on this controller, *setting* a bit in the
 * NDCR interrupt-mask field disables the corresponding interrupt,
 * hence the OR of @int_mask into the register.
 */
505 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
509 ndcr = nand_readl(info, NDCR);
510 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into
 * @data. With BCH enabled, the datasheet requires re-checking
 * NDSR.RDDREQ after every 32 bytes, so the BCH path reads 8 words at a
 * time and polls (with a TIMEOUT_DRAIN_FIFO ms timeout) between bursts;
 * the non-BCH path at the bottom drains everything in one readsl().
 */
513 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
519 * According to the datasheet, when reading from NDDB
520 * with BCH enabled, after each 32 bytes reads, we
521 * have to make sure that the NDSR.RDDREQ bit is set.
523 * Drain the FIFO 8 32 bits reads at a time, and skip
524 * the polling on the last read.
527 readsl(info->mmio_base + NDDB, data, 8);
530 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
531 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
532 dev_err(&info->pdev->dev,
533 "Timeout on RDDREQ while draining the FIFO\n");
/* Non-BCH: a single bulk drain of the whole buffer. */
543 readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO data mover for the current chunk. Depending on info->state it
 * either writes (STATE_PIO_WRITING) or reads (STATE_PIO_READING) the
 * current chunk's data and spare areas through the NDDB FIFO, in
 * 32-bit words (DIV_ROUND_UP handles chunk sizes not divisible by 4).
 * Afterwards it advances the data/oob buffer positions by the step
 * sizes so multi-chunk pages progress through the buffers.
 */
546 static void handle_data_pio(struct pxa3xx_nand_info *info)
548 switch (info->state) {
549 case STATE_PIO_WRITING:
550 if (info->step_chunk_size)
551 writesl(info->mmio_base + NDDB,
552 info->data_buff + info->data_buff_pos,
553 DIV_ROUND_UP(info->step_chunk_size, 4));
555 if (info->step_spare_size)
556 writesl(info->mmio_base + NDDB,
557 info->oob_buff + info->oob_buff_pos,
558 DIV_ROUND_UP(info->step_spare_size, 4));
560 case STATE_PIO_READING:
561 if (info->step_chunk_size)
563 info->data_buff + info->data_buff_pos,
564 DIV_ROUND_UP(info->step_chunk_size, 4));
566 if (info->step_spare_size)
568 info->oob_buff + info->oob_buff_pos,
569 DIV_ROUND_UP(info->step_spare_size, 4))
/* Unexpected state: report and fall through to the common pointer update. */
572 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
577 /* Update buffer pointers for multi-page read/write */
578 info->data_buff_pos += info->step_chunk_size;
579 info->oob_buff_pos += info->step_spare_size;
/*
 * "Threaded" half of the interrupt handler (called synchronously in
 * U-Boot): move the pending PIO data, mark the command done and ack
 * the data-request status bits in NDSR.
 */
582 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
584 handle_data_pio(info);
586 info->state = STATE_CMD_DONE;
587 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Main status handler, polled from the cmdfunc loops (no real IRQ in
 * U-Boot). Decodes NDSR: records ECC results (correctable count via
 * NDSR_ERR_CNT on Armada/BCH, tracked as max_bitflips per page),
 * dispatches PIO data transfers, notes command-done/ready transitions,
 * and on WRCMDREQ loads the prepared NDCB0..3 command words — all four
 * are written through the NDCB0 address, as required by the controller.
 */
590 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
592 unsigned int status, is_completed = 0, is_ready = 0;
593 unsigned int ready, cmd_done;
594 irqreturn_t ret = IRQ_HANDLED;
/* Per-chip-select status bits (CS1 branch partially outside this excerpt). */
597 ready = NDSR_FLASH_RDY;
598 cmd_done = NDSR_CS0_CMDD;
601 cmd_done = NDSR_CS1_CMDD;
604 status = nand_readl(info, NDSR);
606 if (status & NDSR_UNCORERR)
607 info->retcode = ERR_UNCORERR;
608 if (status & NDSR_CORERR) {
609 info->retcode = ERR_CORERR;
610 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
612 info->ecc_err_cnt = NDSR_ERR_CNT(status);
614 info->ecc_err_cnt = 1;
617 * Each chunk composing a page is corrected independently,
618 * and we need to store maximum number of corrected bitflips
619 * to return it to the MTD layer in ecc.read_page().
621 info->max_bitflips = max_t(unsigned int,
625 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
626 info->state = (status & NDSR_RDDREQ) ?
627 STATE_PIO_READING : STATE_PIO_WRITING;
628 /* Call the IRQ thread in U-Boot directly */
629 pxa3xx_nand_irq_thread(info);
632 if (status & cmd_done) {
633 info->state = STATE_CMD_DONE;
636 if (status & ready) {
637 info->state = STATE_READY;
642 * Clear all status bit before issuing the next command, which
643 * can and will alter the status bits and will deserve a new
644 * interrupt on its own. This lets the controller exit the IRQ
646 nand_writel(info, NDSR, status);
648 if (status & NDSR_WRCMDREQ) {
649 status &= ~NDSR_WRCMDREQ;
650 info->state = STATE_CMD_HANDLE;
653 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
654 * must be loaded by writing directly either 12 or 16
655 * bytes directly to NDCB0, four bytes at a time.
657 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
658 * but each NDCBx register can be read.
660 nand_writel(info, NDCB0, info->ndcb0);
661 nand_writel(info, NDCB0, info->ndcb1);
662 nand_writel(info, NDCB0, info->ndcb2);
664 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
665 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
666 nand_writel(info, NDCB0, info->ndcb3);
/* Flag completion for the polling loops in nand_cmdfunc*(). */
670 info->cmd_complete = 1;
/*
 * Return non-zero if the first @len bytes of @buf are all 0xFF
 * (an erased/blank NAND buffer); the body's comparison and returns
 * fall outside this excerpt.
 */
677 static inline int is_buf_blank(uint8_t *buf, size_t len)
679 for (; len > 0; len--)
/*
 * Encode column/page address cycles into NDCB1/NDCB2.
 * Small-page chips (< 2KiB page) pack a 24-bit page address shifted
 * past a 1-byte column; large-page chips use a 2-byte column plus a
 * 16-bit page address in NDCB1, with page bits 16-23 spilling into NDCB2.
 */
685 static void set_command_address(struct pxa3xx_nand_info *info,
686 unsigned int page_size, uint16_t column, int page_addr)
688 /* small page addr setting */
689 if (page_size < PAGE_CHUNK_SIZE) {
690 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
695 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
698 if (page_addr & 0xFF0000)
699 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command bookkeeping before building the NDCBx words:
 * buffer/oob positions, step sizes, error status. For commands that
 * will fill the buffer (READ0/READOOB/SEQIN) the whole data buffer is
 * pre-set to 0xFF so untouched areas read back as erased.
 */
705 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
707 struct pxa3xx_nand_host *host = info->host[info->cs];
708 struct mtd_info *mtd = nand_to_mtd(&host->chip);
710 /* reset data and oob column point to handle data */
713 info->data_buff_pos = 0;
714 info->oob_buff_pos = 0;
715 info->step_chunk_size = 0;
716 info->step_spare_size = 0;
720 info->retcode = ERR_NONE;
721 info->ecc_err_cnt = 0;
727 case NAND_CMD_PAGEPROG:
740 * If we are about to issue a read command, or about to set
741 * the write address, then clean the data buffer.
743 if (command == NAND_CMD_READ0 ||
744 command == NAND_CMD_READOOB ||
745 command == NAND_CMD_SEQIN) {
746 info->buf_count = mtd->writesize + mtd->oobsize;
747 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate a NAND command into the controller's NDCB0..3 command
 * words. Returns whether a controller transaction must actually be
 * executed (exec_cmd) — e.g. programming a blank buffer is skipped.
 *
 * Large-page (> 2KiB) chips use the NFCv2 "extended command" field
 * (@ext_cmd_type) to split reads/writes into naked per-chunk
 * transfers, with NDCB3 carrying the per-step byte length; the
 * current chunk's data/spare step sizes are selected from the
 * full-chunk or last-chunk geometry in @info.
 */
751 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
752 int ext_cmd_type, uint16_t column, int page_addr)
754 int addr_cycle, exec_cmd;
755 struct pxa3xx_nand_host *host;
756 struct mtd_info *mtd;
758 host = info->host[info->cs];
759 mtd = nand_to_mtd(&host->chip);
/* Chip-select bit for CS1 (CS0 leaves it clear); gap hides the cs test. */
764 info->ndcb0 = NDCB0_CSEL;
768 if (command == NAND_CMD_SEQIN)
771 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
772 + host->col_addr_cycles);
775 case NAND_CMD_READOOB:
777 info->buf_start = column;
778 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* OOB reads land after the data area inside the shared buffer. */
782 if (command == NAND_CMD_READOOB)
783 info->buf_start += mtd->writesize;
785 if (info->cur_chunk < info->nfullchunks) {
786 info->step_chunk_size = info->chunk_size;
787 info->step_spare_size = info->spare_size;
789 info->step_chunk_size = info->last_chunk_size;
790 info->step_spare_size = info->last_spare_size;
794 * Multiple page read needs an 'extended command type' field,
795 * which is either naked-read or last-read according to the
798 if (mtd->writesize == PAGE_CHUNK_SIZE) {
799 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
800 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
801 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
803 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
804 info->ndcb3 = info->step_chunk_size +
805 info->step_spare_size;
808 set_command_address(info, mtd->writesize, column, page_addr);
813 info->buf_start = column;
814 set_command_address(info, mtd->writesize, 0, page_addr);
817 * Multiple page programming needs to execute the initial
818 * SEQIN command that sets the page address.
820 if (mtd->writesize > PAGE_CHUNK_SIZE) {
821 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
822 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
829 case NAND_CMD_PAGEPROG:
/* Skip programming a fully-blank (all 0xFF) buffer. */
830 if (is_buf_blank(info->data_buff,
831 (mtd->writesize + mtd->oobsize))) {
836 if (info->cur_chunk < info->nfullchunks) {
837 info->step_chunk_size = info->chunk_size;
838 info->step_spare_size = info->spare_size;
840 info->step_chunk_size = info->last_chunk_size;
841 info->step_spare_size = info->last_spare_size;
844 /* Second command setting for large pages */
845 if (mtd->writesize > PAGE_CHUNK_SIZE) {
847 * Multiple page write uses the 'extended command'
848 * field. This can be used to issue a command dispatch
849 * or a naked-write depending on the current stage.
851 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
853 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
854 info->ndcb3 = info->step_chunk_size +
855 info->step_spare_size;
858 * This is the command dispatch that completes a chunked
859 * page program operation.
861 if (info->cur_chunk == info->ntotalchunks) {
862 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
863 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
/* Small-page program: classic two-byte SEQIN/PAGEPROG command pair. */
870 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
874 | (NAND_CMD_PAGEPROG << 8)
/* PARAM (ONFI/JEDEC parameter page) read into the init buffer. */
881 info->buf_count = INIT_BUFFER_SIZE;
882 info->ndcb0 |= NDCB0_CMD_TYPE(0)
886 info->ndcb1 = (column & 0xFF);
887 info->ndcb3 = INIT_BUFFER_SIZE;
888 info->step_chunk_size = INIT_BUFFER_SIZE;
891 case NAND_CMD_READID:
892 info->buf_count = READ_ID_BYTES;
893 info->ndcb0 |= NDCB0_CMD_TYPE(3)
896 info->ndcb1 = (column & 0xFF);
/* ID/status replies are drained in 8-byte FIFO steps. */
898 info->step_chunk_size = 8;
900 case NAND_CMD_STATUS:
902 info->ndcb0 |= NDCB0_CMD_TYPE(4)
906 info->step_chunk_size = 8;
909 case NAND_CMD_ERASE1:
910 info->ndcb0 |= NDCB0_CMD_TYPE(2)
914 | (NAND_CMD_ERASE2 << 8)
916 info->ndcb1 = page_addr;
/* RESET is issued as a type-5 command. */
921 info->ndcb0 |= NDCB0_CMD_TYPE(5)
926 case NAND_CMD_ERASE2:
932 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * nand_chip->cmdfunc for small-page (single-chunk) chips: prepare the
 * NDCBx words, start the controller, then poll NDSR via
 * pxa3xx_nand_irq() until cmd_complete or CHIP_DELAY_TIMEOUT.
 * NOTE(review): the extended variant updates info->cs on a CS change;
 * the equivalent assignment is not visible in this excerpt — confirm
 * against the full file.
 */
940 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
941 int column, int page_addr)
943 struct nand_chip *chip = mtd_to_nand(mtd);
944 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
945 struct pxa3xx_nand_info *info = host->info_data;
949 * if this is a x16 device ,then convert the input
950 * "byte" address into a "word" address appropriate
951 * for indexing a word-oriented device
953 if (info->reg_ndcr & NDCR_DWIDTH_M)
957 * There may be different NAND chip hooked to
958 * different chip select, so check whether
959 * chip select has been changed, if yes, reset the timing
961 if (info->cs != host->cs) {
963 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
964 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
967 prepare_start_command(info, command);
969 info->state = STATE_PREPARED;
970 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
975 info->cmd_complete = 0;
978 pxa3xx_nand_start(info);
/* Busy-poll the status register; the "irq" handler does the real work. */
984 status = nand_readl(info, NDSR);
986 pxa3xx_nand_irq(info);
988 if (info->cmd_complete)
991 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
992 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
997 info->state = STATE_IDLE;
/*
 * nand_chip->cmdfunc for large-page chips on NFCv2: drives the
 * multi-chunk "extended command" state machine. READ0/READOOB and
 * PAGEPROG loop over chunks, picking the ext_cmd_type for each step
 * (naked read/write, last naked read, final command dispatch), while
 * single-step commands fall out of the loop after one iteration.
 */
1000 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1001 const unsigned command,
1002 int column, int page_addr)
1004 struct nand_chip *chip = mtd_to_nand(mtd);
1005 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1006 struct pxa3xx_nand_info *info = host->info_data;
1007 int exec_cmd, ext_cmd_type;
1010 * if this is a x16 device then convert the input
1011 * "byte" address into a "word" address appropriate
1012 * for indexing a word-oriented device
1014 if (info->reg_ndcr & NDCR_DWIDTH_M)
1018 * There may be different NAND chip hooked to
1019 * different chip select, so check whether
1020 * chip select has been changed, if yes, reset the timing
1022 if (info->cs != host->cs) {
1023 info->cs = host->cs;
1024 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1025 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1028 /* Select the extended command for the first command */
1030 case NAND_CMD_READ0:
1031 case NAND_CMD_READOOB:
1032 ext_cmd_type = EXT_CMD_TYPE_MONO;
1034 case NAND_CMD_SEQIN:
1035 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1037 case NAND_CMD_PAGEPROG:
1038 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1045 prepare_start_command(info, command);
1048 * Prepare the "is ready" completion before starting a command
1049 * transaction sequence. If the command is not executed the
1050 * completion will be completed, see below.
1052 * We can do that inside the loop because the command variable
1053 * is invariant and thus so is the exec_cmd.
1055 info->need_wait = 1;
1056 info->dev_ready = 0;
1061 info->state = STATE_PREPARED;
1062 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* Nothing to execute (e.g. blank program): report ready immediately. */
1065 info->need_wait = 0;
1066 info->dev_ready = 1;
1070 info->cmd_complete = 0;
1071 pxa3xx_nand_start(info);
/* Poll until the step completes or CHIP_DELAY_TIMEOUT expires. */
1077 status = nand_readl(info, NDSR);
1079 pxa3xx_nand_irq(info);
1081 if (info->cmd_complete)
1084 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1085 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1090 /* Only a few commands need several steps */
1091 if (command != NAND_CMD_PAGEPROG &&
1092 command != NAND_CMD_READ0 &&
1093 command != NAND_CMD_READOOB)
1098 /* Check if the sequence is complete */
1099 if (info->cur_chunk == info->ntotalchunks &&
1100 command != NAND_CMD_PAGEPROG)
1104 * After a splitted program command sequence has issued
1105 * the command dispatch, the command sequence is complete.
1107 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1108 command == NAND_CMD_PAGEPROG &&
1109 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1112 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1113 /* Last read: issue a 'last naked read' */
1114 if (info->cur_chunk == info->ntotalchunks - 1)
1115 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1117 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1120 * If a splitted program command has no more data to transfer,
1121 * the command dispatch must be issued to complete.
1123 } else if (command == NAND_CMD_PAGEPROG &&
1124 info->cur_chunk == info->ntotalchunks) {
1125 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1129 info->state = STATE_IDLE;
/*
 * ecc.write_page hook: the controller computes ECC in hardware, so
 * this just stages the full data + OOB into the driver buffer via
 * write_buf; the actual transfer happens on NAND_CMD_PAGEPROG.
 */
1132 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1133 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1136 chip->write_buf(mtd, buf, mtd->writesize);
1137 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page hook: copy the already-transferred page and OOB out of
 * the driver buffer, then fold the HW ECC outcome into mtd->ecc_stats.
 * An "uncorrectable" result on an all-0xFF page is ignored, since the
 * hardware computes ECC 0 for blank pages, which mismatches the OOB.
 * Returns the maximum bitflip count seen across the page's chunks.
 */
1142 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1143 struct nand_chip *chip, uint8_t *buf, int oob_required,
1146 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1147 struct pxa3xx_nand_info *info = host->info_data;
1149 chip->read_buf(mtd, buf, mtd->writesize);
1150 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1152 if (info->retcode == ERR_CORERR && info->use_ecc) {
1153 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1155 } else if (info->retcode == ERR_UNCORERR) {
1157 * for blank page (all 0xff), HW will calculate its ECC as
1158 * 0, which is different from the ECC information within
1159 * OOB, ignore such uncorrectable errors
1161 if (is_buf_blank(buf, mtd->writesize))
1162 info->retcode = ERR_NONE;
1164 mtd->ecc_stats.failed++;
1167 return info->max_bitflips;
/*
 * nand_chip->read_byte: return the next byte from the driver's data
 * buffer, advancing buf_start; out-of-range reads fall through to the
 * default retval (0xFF per the read_word sibling — initializer is
 * outside this excerpt).
 */
1170 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1172 struct nand_chip *chip = mtd_to_nand(mtd);
1173 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1174 struct pxa3xx_nand_info *info = host->info_data;
1177 if (info->buf_start < info->buf_count)
1178 /* Has just send a new command? */
1179 retval = info->data_buff[info->buf_start++];
/*
 * nand_chip->read_word: return the next 16-bit word from the data
 * buffer for x16 devices. Only serves aligned positions within bounds;
 * otherwise returns the erased value 0xFFFF.
 */
1184 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1186 struct nand_chip *chip = mtd_to_nand(mtd);
1187 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1188 struct pxa3xx_nand_info *info = host->info_data;
1189 u16 retval = 0xFFFF;
1191 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1192 retval = *((u16 *)(info->data_buff+info->buf_start));
1193 info->buf_start += 2;
/*
 * nand_chip->read_buf: copy up to @len bytes out of the driver buffer,
 * clamped to the bytes remaining (buf_count - buf_start), and advance
 * the read cursor.
 */
1198 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1200 struct nand_chip *chip = mtd_to_nand(mtd);
1201 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1202 struct pxa3xx_nand_info *info = host->info_data;
1203 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1205 memcpy(buf, info->data_buff + info->buf_start, real_len);
1206 info->buf_start += real_len;
/*
 * nand_chip->write_buf: mirror of read_buf — copy up to @len bytes
 * from @buf into the driver buffer, clamped to the remaining space,
 * and advance the write cursor.
 */
1209 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1210 const uint8_t *buf, int len)
1212 struct nand_chip *chip = mtd_to_nand(mtd);
1213 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1214 struct pxa3xx_nand_info *info = host->info_data;
1215 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1217 memcpy(info->data_buff + info->buf_start, buf, real_len);
1218 info->buf_start += real_len;
/*
 * nand_chip->select_chip hook; its body is not visible in this excerpt
 * (chip selection is handled via the NDCB0_CSEL bit at command time).
 */
1221 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * nand_chip->waitfunc: if a multi-step command armed need_wait, poll
 * NDSR via pxa3xx_nand_irq() until dev_ready or CHIP_DELAY_TIMEOUT
 * (returning NAND_STATUS_FAIL on timeout). For write/erase the result
 * recorded by the status machine decides pass/fail; otherwise the
 * device is reported ready.
 */
1226 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1228 struct nand_chip *chip = mtd_to_nand(mtd);
1229 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1230 struct pxa3xx_nand_info *info = host->info_data;
1232 if (info->need_wait) {
1235 info->need_wait = 0;
/* Busy-poll for readiness, same pattern as the cmdfunc loops. */
1241 status = nand_readl(info, NDSR);
1243 pxa3xx_nand_irq(info);
1245 if (info->dev_ready)
1248 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1249 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1250 return NAND_STATUS_FAIL;
1255 /* pxa3xx_nand_send_command has waited for command complete */
1256 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1257 if (info->retcode == ERR_NONE)
1260 return NAND_STATUS_FAIL;
1263 return NAND_STATUS_READY;
/*
 * Build the default NDCR image used during chip identification:
 * all interrupts enabled (mask bits clear), optional bus arbiter,
 * READ_ID_BYTES-wide ID reads, spare area enabled; chunk size starts
 * at the default 2KiB.
 */
1266 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1268 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1270 /* Configure default flash values */
1271 info->chunk_size = PAGE_CHUNK_SIZE;
1272 info->reg_ndcr = 0x0; /* enable all interrupts */
1273 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1274 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1275 info->reg_ndcr |= NDCR_SPARE_EN;
/*
 * Finalize geometry-dependent NDCR bits once the chip has been
 * identified: row-address start for 2-cycle columns, 64-pages-per-block
 * flag (page_shift == 6 relative to the block), and the 2KiB page-size
 * select bit.
 */
1280 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1282 struct pxa3xx_nand_host *host = info->host[info->cs];
1283 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1284 struct nand_chip *chip = mtd_to_nand(mtd);
1286 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1287 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1288 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
/*
 * Inherit the configuration a previous boot stage (e.g. BootROM) left
 * in the controller: derive chunk size from the NDCR page-size bit,
 * keep NDCR minus interrupt-mask/arbiter bits as the cached image,
 * and snapshot the already-programmed CS0 timing registers.
 */
1291 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1293 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1294 uint32_t ndcr = nand_readl(info, NDCR);
1296 /* Set an initial chunk size */
1297 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1298 info->reg_ndcr = ndcr &
1299 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1300 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1301 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1302 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * Allocate the shared data/OOB staging buffer (info->buf_size bytes).
 * Returns an error on allocation failure (return values fall outside
 * this excerpt).
 */
1305 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1307 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1308 if (info->data_buff == NULL)
/*
 * Probe for chip presence: program safe defaults (same NDCR image as
 * config_ident) plus ONFI timing mode 0 — the slowest, universally
 * supported mode — then issue a RESET and check the wait status.
 * Returns non-zero/failure if the chip does not respond.
 */
1313 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1315 struct pxa3xx_nand_info *info = host->info_data;
1316 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1317 struct mtd_info *mtd;
1318 struct nand_chip *chip;
1319 const struct nand_sdr_timings *timings;
1322 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1323 chip = mtd_to_nand(mtd);
1325 /* configure default flash values */
1326 info->reg_ndcr = 0x0; /* enable all interrupts */
1327 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1328 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1329 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1331 /* use the common timing to make a try */
1332 timings = onfi_async_timing_mode_to_sdr_timings(0);
1333 if (IS_ERR(timings))
1334 return PTR_ERR(timings);
1336 pxa3xx_nand_set_sdr_timing(host, timings);
1338 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1339 ret = chip->waitfunc(mtd, chip);
1340 if (ret & NAND_STATUS_FAIL)
/*
 * pxa_ecc_init() - map a requested (strength, step size, page size) triple
 * onto one of the controller's fixed ECC configurations, filling in the
 * chunk/spare/ECC geometry in @info and the nand_ecc_ctrl in @ecc.
 *
 * A "chunk" is the unit the controller transfers per command; nfullchunks
 * is the number of full-sized chunks per page and ntotalchunks includes a
 * possible trailing partial chunk. Unsupported combinations fall through
 * to the dev_err() at the end.
 *
 * NOTE(review): several lines (ecc->size/strength/layout in the early
 * branches, info->ecc_size in the 512B branch, return statements) are
 * elided from this excerpt — consult the full source.
 */
1346 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1347 struct nand_ecc_ctrl *ecc,
1348 int strength, int ecc_stepsize, int page_size)
/* 1-bit Hamming per 512B on a 2KB page: single 2KB chunk, 40B spare */
1350 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1351 info->nfullchunks = 1;
1352 info->ntotalchunks = 1;
1353 info->chunk_size = 2048;
1354 info->spare_size = 40;
1355 info->ecc_size = 24;
1356 ecc->mode = NAND_ECC_HW;
/* 1-bit Hamming on a small-page (512B) device */
1360 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1361 info->nfullchunks = 1;
1362 info->ntotalchunks = 1;
1363 info->chunk_size = 512;
1364 info->spare_size = 8;
1366 ecc->mode = NAND_ECC_HW;
1371 * Required ECC: 4-bit correction per 512 bytes
1372 * Select: 16-bit correction per 2048 bytes
1374 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1376 info->nfullchunks = 1;
1377 info->ntotalchunks = 1;
1378 info->chunk_size = 2048;
1379 info->spare_size = 32;
1380 info->ecc_size = 32;
1381 ecc->mode = NAND_ECC_HW;
1382 ecc->size = info->chunk_size;
1383 ecc->layout = &ecc_layout_2KB_bch4bit;
/* BCH-4 on a 4KB page: two 2KB chunks */
1386 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1388 info->nfullchunks = 2;
1389 info->ntotalchunks = 2;
1390 info->chunk_size = 2048;
1391 info->spare_size = 32;
1392 info->ecc_size = 32;
1393 ecc->mode = NAND_ECC_HW;
1394 ecc->size = info->chunk_size;
1395 ecc->layout = &ecc_layout_4KB_bch4bit;
1399 * Required ECC: 8-bit correction per 512 bytes
1400 * Select: 16-bit correction per 1024 bytes
1402 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
/* Four full 1KB chunks plus a fifth, spare-only trailing chunk */
1404 info->nfullchunks = 4;
1405 info->ntotalchunks = 5;
1406 info->chunk_size = 1024;
1407 info->spare_size = 0;
1408 info->last_chunk_size = 0;
1409 info->last_spare_size = 64;
1410 info->ecc_size = 32;
1411 ecc->mode = NAND_ECC_HW;
1412 ecc->size = info->chunk_size;
1413 ecc->layout = &ecc_layout_4KB_bch8bit;
/* No matching controller configuration for the requested ECC scheme */
1416 dev_err(&info->pdev->dev,
1417 "ECC strength %d at page size %d is not supported\n",
1418 strength, page_size);
/*
 * pxa3xx_nand_scan() - full device bring-up for one mtd instance:
 * detect (or inherit) the controller config, identify the chip, program
 * timings, select an ECC scheme, size the bounce buffer and finish with
 * nand_scan_tail(). Returns 0 on success or a negative error.
 */
1425 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1427 struct nand_chip *chip = mtd_to_nand(mtd);
1428 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1429 struct pxa3xx_nand_info *info = host->info_data;
1430 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1432 uint16_t ecc_strength, ecc_step;
/* keep_config: trust firmware's register setup; otherwise probe the chip */
1434 if (pdata->keep_config) {
1435 pxa3xx_nand_detect_config(info);
1437 ret = pxa3xx_nand_config_ident(info);
1440 ret = pxa3xx_nand_sensing(host);
1442 dev_info(&info->pdev->dev,
1443 "There is no chip on cs %d!\n",
1449 /* Device detection must be done with ECC disabled */
1450 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1451 nand_writel(info, NDECCCTRL, 0x0);
1453 if (nand_scan_ident(mtd, 1, NULL))
/* With the chip identified, program real timings unless keep_config */
1456 if (!pdata->keep_config) {
1457 ret = pxa3xx_nand_init_timings(host);
1459 dev_err(&info->pdev->dev,
1460 "Failed to set timings: %d\n", ret);
1465 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1467 * We'll use a bad block table stored in-flash and don't
1468 * allow writing the bad block marker to the flash.
1470 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1471 chip->bbt_td = &bbt_main_descr;
1472 chip->bbt_md = &bbt_mirror_descr;
1476 * If the page size is bigger than the FIFO size, let's check
1477 * we are given the right variant and then switch to the extended
1478 * (aka splitted) command handling,
1480 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1481 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1482 chip->cmdfunc = nand_cmdfunc_extended;
1484 dev_err(&info->pdev->dev,
1485 "unsupported page size on this variant\n");
/* ECC requirements: platform data wins, else what the chip advertises */
1490 if (pdata->ecc_strength && pdata->ecc_step_size) {
1491 ecc_strength = pdata->ecc_strength;
1492 ecc_step = pdata->ecc_step_size;
1494 ecc_strength = chip->ecc_strength_ds;
1495 ecc_step = chip->ecc_step_ds;
1498 /* Set default ECC strength requirements on non-ONFI devices */
1499 if (ecc_strength < 1 && ecc_step < 1) {
1504 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1505 ecc_step, mtd->writesize);
1509 /* calculate addressing information */
1510 if (mtd->writesize >= 2048)
1511 host->col_addr_cycles = 2;
1513 host->col_addr_cycles = 1;
1515 /* release the initial buffer */
1516 kfree(info->data_buff);
1518 /* allocate the real data + oob buffer */
1519 info->buf_size = mtd->writesize + mtd->oobsize;
1520 ret = pxa3xx_nand_init_buff(info);
1523 info->oob_buff = info->data_buff + mtd->writesize;
/* More than 64K pages needs a third row-address cycle */
1525 if ((mtd->size >> chip->page_shift) > 65536)
1526 host->row_addr_cycles = 3;
1528 host->row_addr_cycles = 2;
1530 if (!pdata->keep_config)
1531 pxa3xx_nand_config_tail(info);
1533 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource() - set up per-chip-select host structures and the
 * nand_chip callbacks, then allocate the initial detection buffer.
 * On buffer allocation failure jumps to the cleanup path.
 */
1536 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1538 struct pxa3xx_nand_platform_data *pdata;
1539 struct pxa3xx_nand_host *host;
1540 struct nand_chip *chip = NULL;
1541 struct mtd_info *mtd;
1544 pdata = info->pdata;
1545 if (pdata->num_cs <= 0)
1548 info->variant = pxa3xx_nand_get_variant();
1549 for (cs = 0; cs < pdata->num_cs; cs++) {
/*
 * Hosts live in the memory allocated directly after *info
 * (see board_nand_init's kzalloc sizing).
 */
1550 chip = (struct nand_chip *)
1551 ((u8 *)&info[1] + sizeof(*host) * cs);
1552 mtd = nand_to_mtd(chip);
/*
 * NOTE(review): this cast relies on struct pxa3xx_nand_host
 * having its nand_chip as the first member — confirm in
 * pxa3xx_nand.h.
 */
1553 host = (struct pxa3xx_nand_host *)chip;
1554 info->host[cs] = host;
1556 host->info_data = info;
1557 mtd->owner = THIS_MODULE;
/* Wire up the driver's nand_chip operations */
1559 nand_set_controller_data(chip, host);
1560 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1561 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1562 chip->controller = &info->controller;
1563 chip->waitfunc = pxa3xx_nand_waitfunc;
1564 chip->select_chip = pxa3xx_nand_select_chip;
1565 chip->read_word = pxa3xx_nand_read_word;
1566 chip->read_byte = pxa3xx_nand_read_byte;
1567 chip->read_buf = pxa3xx_nand_read_buf;
1568 chip->write_buf = pxa3xx_nand_write_buf;
1569 chip->options |= NAND_NO_SUBPAGE_WRITE;
1570 chip->cmdfunc = nand_cmdfunc;
1573 /* Allocate a buffer to allow flash detection */
1574 info->buf_size = INIT_BUFFER_SIZE;
1575 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1576 if (info->data_buff == NULL) {
1578 goto fail_disable_clk;
1581 /* initialize all interrupts to be disabled */
1582 disable_int(info, NDSR_MASK);
/* error path: free the detection buffer */
1586 kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt() - build platform data from the device tree.
 * Walks "marvell,mvebu-pxa3xx-nand" compatible nodes in the FDT blob,
 * takes the first enabled one, and fills a freshly kzalloc'd
 * pxa3xx_nand_platform_data (base address, num_cs, arbiter/keep-config
 * flags, optional ECC strength/step overrides).
 */
1591 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1593 struct pxa3xx_nand_platform_data *pdata;
1594 const void *blob = gd->fdt_blob;
1597 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1601 /* Get address decoding nodes from the FDT blob */
1603 node = fdt_node_offset_by_compatible(blob, node,
1604 "marvell,mvebu-pxa3xx-nand");
1608 /* Bypass disabled nodes */
1609 if (!fdtdec_get_is_enabled(blob, node))
1612 /* Get the first enabled NAND controller base address */
1614 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1615 blob, node, "reg", 0, NULL, true);
/* Driver limitation: exactly one chip select supported */
1617 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1618 if (pdata->num_cs != 1) {
1619 pr_err("pxa3xx driver supports single CS only\n");
1623 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1624 pdata->enable_arbiter = 1;
1626 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1627 pdata->keep_config = 1;
1631 * If these are not set, they will be selected according
1632 * to the detected flash type.
1635 pdata->ecc_strength = fdtdec_get_int(blob, node,
1636 "nand-ecc-strength", 0);
1639 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1640 "nand-ecc-step-size", 0);
1642 info->pdata = pdata;
1644 /* Currently support only a single NAND controller */
1647 } while (node >= 0);
/*
 * pxa3xx_nand_probe() - top-level probe: parse the DT, allocate the
 * controller resources, then scan and register each chip select's mtd
 * device. (The tail of this function, including the final return, is
 * elided from this excerpt.)
 */
1652 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1654 struct pxa3xx_nand_platform_data *pdata;
1655 int ret, cs, probe_success;
1657 ret = pxa3xx_nand_probe_dt(info);
1661 pdata = info->pdata;
1663 ret = alloc_nand_resource(info);
1665 dev_err(&pdev->dev, "alloc nand resource failed\n");
1670 for (cs = 0; cs < pdata->num_cs; cs++) {
1671 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1674 * The mtd name matches the one used in 'mtdparts' kernel
1675 * parameter. This name cannot be changed or otherwise
1676 * user's mtd partitions configuration would get broken.
1678 mtd->name = "pxa3xx_nand-0";
1680 ret = pxa3xx_nand_scan(mtd);
1682 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
/* Expose the scanned device to the MTD core */
1687 if (nand_register(cs, mtd))
1700 * Main initialization routine
1702 void board_nand_init(void)
1704 struct pxa3xx_nand_info *info;
1705 struct pxa3xx_nand_host *host;
1708 info = kzalloc(sizeof(*info) +
1709 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1714 ret = pxa3xx_nand_probe(info);