1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/raw/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/errno.h>
21 #include <asm/arch/cpu.h>
22 #include <linux/mtd/mtd.h>
23 #include <linux/mtd/rawnand.h>
24 #include <linux/types.h>
27 #include <dm/uclass.h>
30 #include "pxa3xx_nand.h"
32 DECLARE_GLOBAL_DATA_PTR;
34 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
35 #define CHIP_DELAY_TIMEOUT 200
36 #define NAND_STOP_DELAY 40
39 * Define a buffer size for the initial command that detects the flash device:
40 * STATUS, READID and PARAM.
41 * ONFI param page is 256 bytes, and there are three redundant copies
42 * to be read. JEDEC param page is 512 bytes, and there are also three
43 * redundant copies to be read.
44 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
46 #define INIT_BUFFER_SIZE 2048
48 /* registers and bit definitions */
49 #define NDCR (0x00) /* Control register */
50 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
51 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
52 #define NDSR (0x14) /* Status Register */
53 #define NDPCR (0x18) /* Page Count Register */
54 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
55 #define NDBDR1 (0x20) /* Bad Block Register 1 */
56 #define NDECCCTRL (0x28) /* ECC control */
57 #define NDDB (0x40) /* Data Buffer */
58 #define NDCB0 (0x48) /* Command Buffer0 */
59 #define NDCB1 (0x4C) /* Command Buffer1 */
60 #define NDCB2 (0x50) /* Command Buffer2 */
62 #define NDCR_SPARE_EN (0x1 << 31)
63 #define NDCR_ECC_EN (0x1 << 30)
64 #define NDCR_DMA_EN (0x1 << 29)
65 #define NDCR_ND_RUN (0x1 << 28)
66 #define NDCR_DWIDTH_C (0x1 << 27)
67 #define NDCR_DWIDTH_M (0x1 << 26)
68 #define NDCR_PAGE_SZ (0x1 << 24)
69 #define NDCR_NCSX (0x1 << 23)
70 #define NDCR_ND_MODE (0x3 << 21)
71 #define NDCR_NAND_MODE (0x0)
72 #define NDCR_CLR_PG_CNT (0x1 << 20)
73 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
74 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
75 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
77 #define NDCR_RA_START (0x1 << 15)
78 #define NDCR_PG_PER_BLK (0x1 << 14)
79 #define NDCR_ND_ARB_EN (0x1 << 12)
80 #define NDCR_INT_MASK (0xFFF)
82 #define NDSR_MASK (0xfff)
83 #define NDSR_ERR_CNT_OFF (16)
84 #define NDSR_ERR_CNT_MASK (0x1f)
85 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
86 #define NDSR_RDY (0x1 << 12)
87 #define NDSR_FLASH_RDY (0x1 << 11)
88 #define NDSR_CS0_PAGED (0x1 << 10)
89 #define NDSR_CS1_PAGED (0x1 << 9)
90 #define NDSR_CS0_CMDD (0x1 << 8)
91 #define NDSR_CS1_CMDD (0x1 << 7)
92 #define NDSR_CS0_BBD (0x1 << 6)
93 #define NDSR_CS1_BBD (0x1 << 5)
94 #define NDSR_UNCORERR (0x1 << 4)
95 #define NDSR_CORERR (0x1 << 3)
96 #define NDSR_WRDREQ (0x1 << 2)
97 #define NDSR_RDDREQ (0x1 << 1)
98 #define NDSR_WRCMDREQ (0x1)
100 #define NDCB0_LEN_OVRD (0x1 << 28)
101 #define NDCB0_ST_ROW_EN (0x1 << 26)
102 #define NDCB0_AUTO_RS (0x1 << 25)
103 #define NDCB0_CSEL (0x1 << 24)
104 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
105 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
106 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
107 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
108 #define NDCB0_NC (0x1 << 20)
109 #define NDCB0_DBC (0x1 << 19)
110 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
111 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
112 #define NDCB0_CMD2_MASK (0xff << 8)
113 #define NDCB0_CMD1_MASK (0xff)
114 #define NDCB0_ADDR_CYC_SHIFT (16)
116 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
117 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
118 #define EXT_CMD_TYPE_READ 4 /* Read */
119 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
120 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
121 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
122 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
124 /* System control register and bit to enable NAND on some SoCs */
125 #define GENCONF_SOC_DEVICE_MUX 0x208
126 #define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
129 * This should be large enough to read 'ONFI' and 'JEDEC'.
130 * Let's use 7 bytes, which is the maximum ID count supported
131 * by the controller (see NDCR_RD_ID_CNT_MASK).
133 #define READ_ID_BYTES 7
135 /* macros for registers read/write */
136 #define nand_writel(info, off, val) \
137 writel((val), (info)->mmio_base + (off))
139 #define nand_readl(info, off) \
140 readl((info)->mmio_base + (off))
142 /* error code and state */
/* Supported controller variants: NFCv1 (PXA SoCs) and NFCv2 (Armada SoCs). */
165 enum pxa3xx_nand_variant {
166 PXA3XX_NAND_VARIANT_PXA,
167 PXA3XX_NAND_VARIANT_ARMADA370,
168 PXA3XX_NAND_VARIANT_ARMADA_8K,
/* Per-chip-select state wrapping the generic NAND chip structure. */
171 struct pxa3xx_nand_host {
172 struct nand_chip chip;
/* NOTE(review): members between original lines 172 and 180 (page size,
 * back-pointer to pxa3xx_nand_info, chip-select index) are elided in
 * this extraction — confirm against the full source. */
175 /* page size of attached chip */
179 /* calculated from pxa3xx_nand_flash data */
180 unsigned int col_addr_cycles;
181 unsigned int row_addr_cycles;
/*
 * Controller-wide driver state: MMIO mapping, PIO bounce buffers,
 * chunked-page geometry and per-command progress tracking.
 * NOTE(review): several members referenced by the code below (state,
 * retcode, cs, reg_ndcr, ndcb0..ndcb3, need_wait, ...) are elided in
 * this extraction — confirm the layout against the full source.
 */
184 struct pxa3xx_nand_info {
185 struct nand_hw_control controller;
186 struct pxa3xx_nand_platform_data *pdata;
189 void __iomem *mmio_base;
190 unsigned long mmio_phys;
191 int cmd_complete, dev_ready;
/* Read/write cursors into the bounce buffers below */
193 unsigned int buf_start;
194 unsigned int buf_count;
195 unsigned int buf_size;
196 unsigned int data_buff_pos;
197 unsigned int oob_buff_pos;
/* PIO bounce buffers for page data and OOB */
199 unsigned char *data_buff;
200 unsigned char *oob_buff;
202 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
206 * This driver supports NFCv1 (as found in PXA SoC)
207 * and NFCv2 (as found in Armada 370/XP SoC).
209 enum pxa3xx_nand_variant variant;
212 int use_ecc; /* use HW ECC ? */
213 int force_raw; /* prevent use_ecc to be set */
214 int ecc_bch; /* using BCH ECC? */
215 int use_spare; /* use spare ? */
218 /* Amount of real data per full chunk */
219 unsigned int chunk_size;
221 /* Amount of spare data per full chunk */
222 unsigned int spare_size;
224 /* Number of full chunks (i.e chunk_size + spare_size) */
225 unsigned int nfullchunks;
228 * Total number of chunks. If equal to nfullchunks, then there
229 * are only full chunks. Otherwise, there is one last chunk of
230 * size (last_chunk_size + last_spare_size)
232 unsigned int ntotalchunks;
234 /* Amount of real data in the last chunk */
235 unsigned int last_chunk_size;
237 /* Amount of spare data in the last chunk */
238 unsigned int last_spare_size;
240 unsigned int ecc_size;
241 unsigned int ecc_err_cnt;
242 unsigned int max_bitflips;
246 * Variables only valid during command
247 * execution. step_chunk_size and step_spare_size is the
248 * amount of real data and spare data in the current
249 * chunk. cur_chunk is the current chunk being
252 unsigned int step_chunk_size;
253 unsigned int step_spare_size;
254 unsigned int cur_chunk;
256 /* cached register value */
261 /* generated NDCBx register values */
/*
 * Built-in legacy timing sets (all figures in nanoseconds), selected by
 * chip id through builtin_flash_types[] below when ONFI timing data is
 * not available.
 */
268 static struct pxa3xx_nand_timing timing[] = {
270 * tCH Enable signal hold time
271 * tCS Enable signal setup time
272 * tWH ND_nWE high duration
273 * tWP ND_nWE pulse time
274 * tRH ND_nRE high duration
275 * tRP ND_nRE pulse width
276 * tR ND_nWE high to ND_nRE low for read
277 * tWHR ND_nWE high to ND_nRE low for status read
278 * tAR ND_ALE low to ND_nRE low delay
280 /*ch cs wh wp rh rp r whr ar */
281 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
282 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
283 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
284 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
285 { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
/*
 * Map of NAND id (two id bytes) to flash/controller bus widths and a
 * timing[] entry; consulted by pxa3xx_nand_init_timings() when the
 * ONFI timing mode is unknown.
 */
288 static struct pxa3xx_nand_flash builtin_flash_types[] = {
291 * flash_width Width of Flash memory (DWIDTH_M)
292 * dfc_width Width of flash controller(DWIDTH_C)
294 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
296 { 0x46ec, 16, 16, &timing[1] },
297 { 0xdaec, 8, 8, &timing[1] },
298 { 0xd7ec, 8, 8, &timing[1] },
299 { 0xa12c, 8, 8, &timing[2] },
300 { 0xb12c, 16, 16, &timing[2] },
301 { 0xdc2c, 8, 8, &timing[2] },
302 { 0xcc2c, 16, 16, &timing[2] },
303 { 0xba20, 16, 16, &timing[3] },
304 { 0xda98, 8, 8, &timing[4] },
/*
 * On-flash bad block table descriptors: main table in the last blocks
 * of each chip, mirror identified by the reversed marker pattern.
 * NOTE(review): the .offs/.len/.veroffs initializer lines are elided in
 * this extraction — confirm against the full source.
 */
307 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
308 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
309 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
311 static struct nand_bbt_descr bbt_main_descr = {
312 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
313 | NAND_BBT_2BIT | NAND_BBT_VERSION,
317 .maxblocks = 8, /* Last 8 blocks in each chip */
318 .pattern = bbt_pattern
321 static struct nand_bbt_descr bbt_mirror_descr = {
322 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
323 | NAND_BBT_2BIT | NAND_BBT_VERSION,
327 .maxblocks = 8, /* Last 8 blocks in each chip */
328 .pattern = bbt_mirror_pattern
/*
 * OOB layouts for the BCH ECC engine per page size / strength: eccpos
 * lists the ECC byte positions within OOB, oobfree the client-usable
 * OOB ranges.
 * NOTE(review): the .eccbytes and .eccpos opener lines are elided in
 * this extraction — confirm the full initializers against the source.
 */
332 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
335 32, 33, 34, 35, 36, 37, 38, 39,
336 40, 41, 42, 43, 44, 45, 46, 47,
337 48, 49, 50, 51, 52, 53, 54, 55,
338 56, 57, 58, 59, 60, 61, 62, 63},
339 .oobfree = { {2, 30} }
342 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
345 32, 33, 34, 35, 36, 37, 38, 39,
346 40, 41, 42, 43, 44, 45, 46, 47,
347 48, 49, 50, 51, 52, 53, 54, 55,
348 56, 57, 58, 59, 60, 61, 62, 63,
349 64, 65, 66, 67, 68, 69, 70, 71,
350 72, 73, 74, 75, 76, 77, 78, 79,
351 80, 81, 82, 83, 84, 85, 86, 87,
352 88, 89, 90, 91, 92, 93, 94, 95},
353 .oobfree = { {1, 4}, {6, 26} }
356 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
359 32, 33, 34, 35, 36, 37, 38, 39,
360 40, 41, 42, 43, 44, 45, 46, 47,
361 48, 49, 50, 51, 52, 53, 54, 55,
362 56, 57, 58, 59, 60, 61, 62, 63,
363 96, 97, 98, 99, 100, 101, 102, 103,
364 104, 105, 106, 107, 108, 109, 110, 111,
365 112, 113, 114, 115, 116, 117, 118, 119,
366 120, 121, 122, 123, 124, 125, 126, 127},
367 /* Bootrom looks in bytes 0 & 5 for bad blocks */
368 .oobfree = { {6, 26}, { 64, 32} }
371 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
374 32, 33, 34, 35, 36, 37, 38, 39,
375 40, 41, 42, 43, 44, 45, 46, 47,
376 48, 49, 50, 51, 52, 53, 54, 55,
377 56, 57, 58, 59, 60, 61, 62, 63,
379 96, 97, 98, 99, 100, 101, 102, 103,
380 104, 105, 106, 107, 108, 109, 110, 111,
381 112, 113, 114, 115, 116, 117, 118, 119,
382 120, 121, 122, 123, 124, 125, 126, 127,
384 160, 161, 162, 163, 164, 165, 166, 167,
385 168, 169, 170, 171, 172, 173, 174, 175,
386 176, 177, 178, 179, 180, 181, 182, 183,
387 184, 185, 186, 187, 188, 189, 190, 191,
389 224, 225, 226, 227, 228, 229, 230, 231,
390 232, 233, 234, 235, 236, 237, 238, 239,
391 240, 241, 242, 243, 244, 245, 246, 247,
392 248, 249, 250, 251, 252, 253, 254, 255},
394 /* Bootrom looks in bytes 0 & 5 for bad blocks */
395 .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
398 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
401 32, 33, 34, 35, 36, 37, 38, 39,
402 40, 41, 42, 43, 44, 45, 46, 47,
403 48, 49, 50, 51, 52, 53, 54, 55,
404 56, 57, 58, 59, 60, 61, 62, 63},
408 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
411 /* HW ECC handles all ECC data and all spare area is free for OOB */
412 .oobfree = {{0, 160} }
/* NDTR0/NDTR1 field encoders: each cycle count is clamped to its field width */
415 #define NDTR0_tCH(c) (min((c), 7) << 19)
416 #define NDTR0_tCS(c) (min((c), 7) << 16)
417 #define NDTR0_tWH(c) (min((c), 7) << 11)
418 #define NDTR0_tWP(c) (min((c), 7) << 8)
419 #define NDTR0_tRH(c) (min((c), 7) << 3)
420 #define NDTR0_tRP(c) (min((c), 7) << 0)
422 #define NDTR1_tR(c) (min((c), 65535) << 16)
423 #define NDTR1_tWHR(c) (min((c), 15) << 4)
424 #define NDTR1_tAR(c) (min((c), 15) << 0)
426 /* convert nano-seconds to nand flash controller clock cycles */
/* NOTE(review): integer division — both clk/1000000 and the final /1000
 * truncate, so the cycle count is rounded down, not up. */
427 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/* Device-tree match table; .data carries the enum pxa3xx_nand_variant. */
429 static const struct udevice_id pxa3xx_nand_dt_ids[] = {
431 .compatible = "marvell,mvebu-pxa3xx-nand",
432 .data = PXA3XX_NAND_VARIANT_ARMADA370,
435 .compatible = "marvell,armada-8k-nand-controller",
436 .data = PXA3XX_NAND_VARIANT_ARMADA_8K,
441 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(struct udevice *dev)
443 return dev_get_driver_data(dev);
446 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
447 const struct pxa3xx_nand_timing *t)
449 struct pxa3xx_nand_info *info = host->info_data;
450 unsigned long nand_clk = mvebu_get_nand_clock();
451 uint32_t ndtr0, ndtr1;
453 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
454 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
455 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
456 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
457 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
458 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
460 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
461 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
462 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
464 info->ndtr0cs0 = ndtr0;
465 info->ndtr1cs0 = ndtr1;
466 nand_writel(info, NDTR0CS0, ndtr0);
467 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings. ONFI timing fields are in
 * picoseconds, hence the DIV_ROUND_UP(..., 1000) conversions to ns.
 * NOTE(review): the statement(s) between original lines 488 and 492
 * (the tR == 0 fallback mentioned by the comment) are elided in this
 * extraction — confirm against the full source.
 */
470 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
471 const struct nand_sdr_timings *t)
473 struct pxa3xx_nand_info *info = host->info_data;
474 struct nand_chip *chip = &host->chip;
475 unsigned long nand_clk = mvebu_get_nand_clock();
476 uint32_t ndtr0, ndtr1;
478 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
479 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
480 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
/* tWP derived from write-cycle minus write-high time */
481 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
482 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
483 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
/* chip_delay is in microseconds; tR wants nanoseconds */
484 u32 tR = chip->chip_delay * 1000;
485 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
486 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
488 /* fallback to a default value if tR = 0 */
492 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
493 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
494 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
495 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
496 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
497 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
499 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
500 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
501 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
/* Cache so nand_cmdfunc() can restore timings on CS switch */
503 info->ndtr0cs0 = ndtr0;
504 info->ndtr1cs0 = ndtr1;
505 nand_writel(info, NDTR0CS0, ndtr0);
506 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Initialize controller timings for the attached chip: prefer the ONFI
 * async timing mode; if unknown, read the chip id and look it up in
 * builtin_flash_types[] to use a legacy timing[] entry (also setting
 * the 16-bit bus-width flags from that entry).
 * NOTE(review): loop-exit, error-return and closing lines are elided in
 * this extraction — confirm the full control flow against the source.
 */
509 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
511 const struct nand_sdr_timings *timings;
512 struct nand_chip *chip = &host->chip;
513 struct pxa3xx_nand_info *info = host->info_data;
514 const struct pxa3xx_nand_flash *f = NULL;
515 struct mtd_info *mtd = nand_to_mtd(&host->chip);
516 int mode, id, ntypes, i;
518 mode = onfi_get_async_timing_mode(chip);
519 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
520 ntypes = ARRAY_SIZE(builtin_flash_types);
/* Read the first two id bytes to key the lookup table */
522 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
524 id = chip->read_byte(mtd);
525 id |= chip->read_byte(mtd) << 0x8;
527 for (i = 0; i < ntypes; i++) {
528 f = &builtin_flash_types[i];
530 if (f->chip_id == id)
535 dev_err(mtd->dev, "Error: timings not found\n");
539 pxa3xx_nand_set_timing(host, f->timing);
541 if (f->flash_width == 16) {
542 info->reg_ndcr |= NDCR_DWIDTH_M;
543 chip->options |= NAND_BUSWIDTH_16;
546 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
/* ONFI path: pick the highest supported timing mode */
548 mode = fls(mode) - 1;
552 timings = onfi_async_timing_mode_to_sdr_timings(mode);
554 return PTR_ERR(timings);
556 pxa3xx_nand_set_sdr_timing(host, timings);
563 * NOTE: it is a must to set ND_RUN first, then write
564 * command buffer, otherwise, it does not work.
565 * We enable all the interrupt at the same time, and
566 * let pxa3xx_nand_irq to handle all logic.
568 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
/* NOTE(review): the lines that set NDCR_ECC_EN / select BCH based on
 * info->use_ecc and info->ecc_bch are elided in this extraction —
 * confirm against the full source. */
572 ndcr = info->reg_ndcr;
577 nand_writel(info, NDECCCTRL, 0x1)
579 ndcr &= ~NDCR_ECC_EN;
581 nand_writel(info, NDECCCTRL, 0x0);
/* DMA is never used in this U-Boot driver: PIO only */
584 ndcr &= ~NDCR_DMA_EN;
587 ndcr |= NDCR_SPARE_EN;
589 ndcr &= ~NDCR_SPARE_EN;
593 /* clear status bits and run */
594 nand_writel(info, NDSR, NDSR_MASK);
595 nand_writel(info, NDCR, 0);
596 nand_writel(info, NDCR, ndcr);
599 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
603 ndcr = nand_readl(info, NDCR);
604 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into
 * @data. With BCH enabled (non-raw), the datasheet requires polling
 * NDSR.RDDREQ between 32-byte bursts; otherwise a single readsl drains
 * the whole request.
 * NOTE(review): the do/while burst-loop structure and timer setup lines
 * are elided in this extraction — confirm against the full source.
 */
607 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
609 if (info->ecc_bch && !info->force_raw) {
613 * According to the datasheet, when reading from NDDB
614 * with BCH enabled, after each 32 bytes reads, we
615 * have to make sure that the NDSR.RDDREQ bit is set.
617 * Drain the FIFO 8 32 bits reads at a time, and skip
618 * the polling on the last read.
621 readsl(info->mmio_base + NDDB, data, 8);
624 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
625 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
626 dev_err(info->controller.active->mtd.dev,
627 "Timeout on RDDREQ while draining the FIFO\n");
/* Non-BCH / raw path: drain everything in one burst */
637 readsl(info->mmio_base + NDDB, data, len);
/*
 * Move the current chunk between the driver bounce buffers and the
 * controller FIFO over PIO, in the direction given by info->state,
 * then advance data/oob buffer cursors for the next chunk.
 * NOTE(review): case terminators and the reading-path drain_fifo()
 * call lines are elided in this extraction — confirm the full switch
 * against the source.
 */
640 static void handle_data_pio(struct pxa3xx_nand_info *info)
642 int data_len = info->step_chunk_size;
645 * In raw mode, include the spare area and the ECC bytes that are not
646 * consumed by the controller in the data section. Do not reorganize
647 * here, do it in the ->read_page_raw() handler instead.
650 data_len += info->step_spare_size + info->ecc_size;
652 switch (info->state) {
653 case STATE_PIO_WRITING:
654 if (info->step_chunk_size)
655 writesl(info->mmio_base + NDDB,
656 info->data_buff + info->data_buff_pos,
657 DIV_ROUND_UP(data_len, 4));
659 if (info->step_spare_size)
660 writesl(info->mmio_base + NDDB,
661 info->oob_buff + info->oob_buff_pos,
662 DIV_ROUND_UP(info->step_spare_size, 4));
664 case STATE_PIO_READING:
667 info->data_buff + info->data_buff_pos,
668 DIV_ROUND_UP(data_len, 4));
673 if (info->step_spare_size)
675 info->oob_buff + info->oob_buff_pos,
676 DIV_ROUND_UP(info->step_spare_size, 4));
679 dev_err(info->controller.active->mtd.dev,
680 "%s: invalid state %d\n", __func__, info->state);
684 /* Update buffer pointers for multi-page read/write */
685 info->data_buff_pos += data_len;
686 info->oob_buff_pos += info->step_spare_size;
689 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
691 handle_data_pio(info);
693 info->state = STATE_CMD_DONE;
694 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled "interrupt" handler: decode NDSR, record ECC results, run the
 * PIO data transfer when the controller requests data, and feed the
 * command buffers (NDCB0..3) when NDSR.WRCMDREQ is raised.
 * NOTE(review): several lines (CS1 ready bit, delay on write, the
 * is_completed/is_ready bookkeeping and return path) are elided in this
 * extraction — confirm the full handler against the source.
 */
697 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
699 unsigned int status, is_completed = 0, is_ready = 0;
700 unsigned int ready, cmd_done;
701 irqreturn_t ret = IRQ_HANDLED;
704 ready = NDSR_FLASH_RDY;
705 cmd_done = NDSR_CS0_CMDD;
708 cmd_done = NDSR_CS1_CMDD;
711 /* TODO - find out why we need the delay during write operation. */
714 status = nand_readl(info, NDSR);
716 if (status & NDSR_UNCORERR)
717 info->retcode = ERR_UNCORERR;
718 if (status & NDSR_CORERR) {
719 info->retcode = ERR_CORERR;
/* Only NFCv2 (Armada) reports a per-chunk corrected-bit count */
720 if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
721 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
723 info->ecc_err_cnt = NDSR_ERR_CNT(status);
725 info->ecc_err_cnt = 1;
728 * Each chunk composing a page is corrected independently,
729 * and we need to store maximum number of corrected bitflips
730 * to return it to the MTD layer in ecc.read_page().
732 info->max_bitflips = max_t(unsigned int,
736 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
737 info->state = (status & NDSR_RDDREQ) ?
738 STATE_PIO_READING : STATE_PIO_WRITING;
739 /* Call the IRQ thread in U-Boot directly */
740 pxa3xx_nand_irq_thread(info);
743 if (status & cmd_done) {
744 info->state = STATE_CMD_DONE;
747 if (status & ready) {
748 info->state = STATE_READY;
753 * Clear all status bit before issuing the next command, which
754 * can and will alter the status bits and will deserve a new
755 * interrupt on its own. This lets the controller exit the IRQ
757 nand_writel(info, NDSR, status);
759 if (status & NDSR_WRCMDREQ) {
760 status &= ~NDSR_WRCMDREQ;
761 info->state = STATE_CMD_HANDLE;
764 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
765 * must be loaded by writing directly either 12 or 16
766 * bytes directly to NDCB0, four bytes at a time.
768 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
769 * but each NDCBx register can be read.
771 nand_writel(info, NDCB0, info->ndcb0);
772 nand_writel(info, NDCB0, info->ndcb1);
773 nand_writel(info, NDCB0, info->ndcb2);
775 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
776 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
777 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
778 nand_writel(info, NDCB0, info->ndcb3);
782 info->cmd_complete = 1;
/*
 * Return 1 if the first @len bytes of @buf are all 0xFF (i.e. the page
 * is erased/blank), 0 otherwise. Used by prepare_set_command() to skip
 * programming blank pages.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;

	return 1;
}
/*
 * Encode the column/page address into NDCB1/NDCB2 for the next command:
 * small-page layout packs column in the low byte, large-page layout
 * uses a 16-bit column plus up to 3 row-address bytes.
 * NOTE(review): the small-page column line and the else-branch opener
 * are elided in this extraction — confirm against the full source.
 */
797 static void set_command_address(struct pxa3xx_nand_info *info,
798 unsigned int page_size, uint16_t column, int page_addr)
800 /* small page addr setting */
801 if (page_size < info->chunk_size) {
802 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
807 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
/* Third row-address byte goes into NDCB2 when needed */
810 if (page_addr & 0xFF0000)
811 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command progress state before issuing @command, and for
 * read/write-address commands pre-fill the bounce buffer with 0xFF so
 * untouched areas read back as erased.
 * NOTE(review): the switch statement skeleton around lines 839-841
 * (which enables ECC unless force_raw) is partially elided in this
 * extraction — confirm against the full source.
 */
817 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
819 struct pxa3xx_nand_host *host = info->host[info->cs];
820 struct mtd_info *mtd = nand_to_mtd(&host->chip);
822 /* reset data and oob column point to handle data */
825 info->data_buff_pos = 0;
826 info->oob_buff_pos = 0;
827 info->step_chunk_size = 0;
828 info->step_spare_size = 0;
832 info->retcode = ERR_NONE;
833 info->ecc_err_cnt = 0;
839 case NAND_CMD_READOOB:
840 case NAND_CMD_PAGEPROG:
841 if (!info->force_raw)
854 * If we are about to issue a read command, or about to set
855 * the write address, then clean the data buffer.
857 if (command == NAND_CMD_READ0 ||
858 command == NAND_CMD_READOOB ||
859 command == NAND_CMD_SEQIN) {
860 info->buf_count = mtd->writesize + mtd->oobsize;
861 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Build the NDCB0..NDCB3 command-buffer values for @command and decide
 * whether the command must actually be executed (return value). Handles
 * the chunked (multi-step) read/program sequences used for pages larger
 * than one controller chunk, plus READID/STATUS/PARAM/ERASE/RESET.
 * NOTE(review): many structural lines (case/break statements, address
 * cycle ORs, return paths) are elided in this extraction — confirm the
 * full switch against the source before modifying.
 */
865 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
866 int ext_cmd_type, uint16_t column, int page_addr)
868 int addr_cycle, exec_cmd;
869 struct pxa3xx_nand_host *host;
870 struct mtd_info *mtd;
872 host = info->host[info->cs];
873 mtd = nand_to_mtd(&host->chip);
878 info->ndcb0 = NDCB0_CSEL;
882 if (command == NAND_CMD_SEQIN)
885 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
886 + host->col_addr_cycles);
889 case NAND_CMD_READOOB:
891 info->buf_start = column;
892 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* OOB reads use the same page read; just offset into the buffer */
896 if (command == NAND_CMD_READOOB)
897 info->buf_start += mtd->writesize;
899 if (info->cur_chunk < info->nfullchunks) {
900 info->step_chunk_size = info->chunk_size;
901 info->step_spare_size = info->spare_size;
903 info->step_chunk_size = info->last_chunk_size;
904 info->step_spare_size = info->last_spare_size;
908 * Multiple page read needs an 'extended command type' field,
909 * which is either naked-read or last-read according to the
912 if (info->force_raw) {
913 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
915 NDCB0_EXT_CMD_TYPE(ext_cmd_type);
/* Raw mode transfers spare + ECC bytes along with the data */
916 info->ndcb3 = info->step_chunk_size +
917 info->step_spare_size + info->ecc_size;
918 } else if (mtd->writesize == info->chunk_size) {
919 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
920 } else if (mtd->writesize > info->chunk_size) {
921 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
923 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
924 info->ndcb3 = info->step_chunk_size +
925 info->step_spare_size;
928 set_command_address(info, mtd->writesize, column, page_addr);
933 info->buf_start = column;
934 set_command_address(info, mtd->writesize, 0, page_addr);
937 * Multiple page programming needs to execute the initial
938 * SEQIN command that sets the page address.
940 if (mtd->writesize > info->chunk_size) {
941 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
942 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
949 case NAND_CMD_PAGEPROG:
/* Skip programming entirely if the whole buffer is still erased */
950 if (is_buf_blank(info->data_buff,
951 (mtd->writesize + mtd->oobsize))) {
956 if (info->cur_chunk < info->nfullchunks) {
957 info->step_chunk_size = info->chunk_size;
958 info->step_spare_size = info->spare_size;
960 info->step_chunk_size = info->last_chunk_size;
961 info->step_spare_size = info->last_spare_size;
964 /* Second command setting for large pages */
965 if (mtd->writesize > info->chunk_size) {
967 * Multiple page write uses the 'extended command'
968 * field. This can be used to issue a command dispatch
969 * or a naked-write depending on the current stage.
971 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
973 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
974 info->ndcb3 = info->step_chunk_size +
975 info->step_spare_size;
978 * This is the command dispatch that completes a chunked
979 * page program operation.
981 if (info->cur_chunk == info->ntotalchunks) {
982 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
983 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
990 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
994 | (NAND_CMD_PAGEPROG << 8)
1000 case NAND_CMD_PARAM:
1001 info->buf_count = INIT_BUFFER_SIZE;
1002 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1006 info->ndcb1 = (column & 0xFF);
1007 info->ndcb3 = INIT_BUFFER_SIZE;
1008 info->step_chunk_size = INIT_BUFFER_SIZE;
1011 case NAND_CMD_READID:
1012 info->buf_count = READ_ID_BYTES;
1013 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1016 info->ndcb1 = (column & 0xFF);
1018 info->step_chunk_size = 8;
1020 case NAND_CMD_STATUS:
1021 info->buf_count = 1;
1022 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1026 info->step_chunk_size = 8;
1029 case NAND_CMD_ERASE1:
1030 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1034 | (NAND_CMD_ERASE2 << 8)
1036 info->ndcb1 = page_addr;
1040 case NAND_CMD_RESET:
1041 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1046 case NAND_CMD_ERASE2:
1052 dev_err(mtd->dev, "non-supported command %x\n",
/*
 * Non-extended ->cmdfunc() implementation (single-step commands only):
 * prepare the command buffers, kick the controller, then poll NDSR via
 * pxa3xx_nand_irq() until completion or CHIP_DELAY_TIMEOUT.
 * NOTE(review): the column>>=1 conversion, exec_cmd guard and timer
 * setup lines are elided in this extraction — confirm against the full
 * source.
 */
1060 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1061 int column, int page_addr)
1063 struct nand_chip *chip = mtd_to_nand(mtd);
1064 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1065 struct pxa3xx_nand_info *info = host->info_data;
1069 * if this is a x16 device ,then convert the input
1070 * "byte" address into a "word" address appropriate
1071 * for indexing a word-oriented device
1073 if (info->reg_ndcr & NDCR_DWIDTH_M)
1077 * There may be different NAND chip hooked to
1078 * different chip select, so check whether
1079 * chip select has been changed, if yes, reset the timing
1081 if (info->cs != host->cs) {
1082 info->cs = host->cs;
1083 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1084 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1087 prepare_start_command(info, command);
1089 info->state = STATE_PREPARED;
1090 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1095 info->cmd_complete = 0;
1096 info->dev_ready = 0;
1097 info->need_wait = 1;
1098 pxa3xx_nand_start(info);
/* Busy-poll the status register in place of a real interrupt */
1104 status = nand_readl(info, NDSR);
1106 pxa3xx_nand_irq(info);
1108 if (info->cmd_complete)
1111 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1112 dev_err(mtd->dev, "Wait timeout!!!\n");
1117 info->state = STATE_IDLE;
/*
 * Extended ->cmdfunc() used when pages are larger than one controller
 * chunk: drives the multi-step naked read/write sequences, advancing
 * ext_cmd_type between monolithic / naked / last / dispatch stages
 * until all chunks of the page have been transferred.
 * NOTE(review): the column>>=1 conversion, default-case, loop opener
 * and cur_chunk increment lines are elided in this extraction — confirm
 * against the full source.
 */
1120 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1121 const unsigned command,
1122 int column, int page_addr)
1124 struct nand_chip *chip = mtd_to_nand(mtd);
1125 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1126 struct pxa3xx_nand_info *info = host->info_data;
1127 int exec_cmd, ext_cmd_type;
1130 * if this is a x16 device then convert the input
1131 * "byte" address into a "word" address appropriate
1132 * for indexing a word-oriented device
1134 if (info->reg_ndcr & NDCR_DWIDTH_M)
1138 * There may be different NAND chip hooked to
1139 * different chip select, so check whether
1140 * chip select has been changed, if yes, reset the timing
1142 if (info->cs != host->cs) {
1143 info->cs = host->cs;
1144 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1145 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1148 /* Select the extended command for the first command */
1150 case NAND_CMD_READ0:
1151 case NAND_CMD_READOOB:
1152 ext_cmd_type = EXT_CMD_TYPE_MONO;
1154 case NAND_CMD_SEQIN:
1155 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1157 case NAND_CMD_PAGEPROG:
1158 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1165 prepare_start_command(info, command);
1168 * Prepare the "is ready" completion before starting a command
1169 * transaction sequence. If the command is not executed the
1170 * completion will be completed, see below.
1172 * We can do that inside the loop because the command variable
1173 * is invariant and thus so is the exec_cmd.
1175 info->need_wait = 1;
1176 info->dev_ready = 0;
1181 info->state = STATE_PREPARED;
1182 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* Command not needed (e.g. blank page program): report ready at once */
1185 info->need_wait = 0;
1186 info->dev_ready = 1;
1190 info->cmd_complete = 0;
1191 pxa3xx_nand_start(info);
1197 status = nand_readl(info, NDSR);
1199 pxa3xx_nand_irq(info);
1201 if (info->cmd_complete)
1204 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1205 dev_err(mtd->dev, "Wait timeout!!!\n");
1210 /* Only a few commands need several steps */
1211 if (command != NAND_CMD_PAGEPROG &&
1212 command != NAND_CMD_READ0 &&
1213 command != NAND_CMD_READOOB)
1218 /* Check if the sequence is complete */
1219 if (info->cur_chunk == info->ntotalchunks &&
1220 command != NAND_CMD_PAGEPROG)
1224 * After a splitted program command sequence has issued
1225 * the command dispatch, the command sequence is complete.
1227 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1228 command == NAND_CMD_PAGEPROG &&
1229 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1232 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1233 /* Last read: issue a 'last naked read' */
1234 if (info->cur_chunk == info->ntotalchunks - 1)
1235 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1237 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1240 * If a splitted program command has no more data to transfer,
1241 * the command dispatch must be issued to complete.
1243 } else if (command == NAND_CMD_PAGEPROG &&
1244 info->cur_chunk == info->ntotalchunks) {
1245 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1249 info->state = STATE_IDLE;
1252 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1253 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1256 chip->write_buf(mtd, buf, mtd->writesize);
1257 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page hook: copy data and OOB out of the bounce buffer, then
 * translate the controller's ECC verdict into MTD statistics. For BCH,
 * an "uncorrectable" result may just be an erased page, so re-read raw
 * and let nand_check_erased_ecc_chunk() decide.
 * NOTE(review): the declaration of 'bf' and the if/else-if lines around
 * the bitflip count checks are elided in this extraction — confirm
 * against the full source.
 */
1262 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1263 struct nand_chip *chip, uint8_t *buf, int oob_required,
1266 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1267 struct pxa3xx_nand_info *info = host->info_data;
1270 chip->read_buf(mtd, buf, mtd->writesize);
1271 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1273 if (info->retcode == ERR_CORERR && info->use_ecc) {
1274 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1276 } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1278 * Empty pages will trigger uncorrectable errors. Re-read the
1279 * entire page in raw mode and check for bits not being "1".
1280 * If there are more than the supported strength, then it means
1281 * this is an actual uncorrectable error.
1283 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1284 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1285 chip->oob_poi, mtd->oobsize,
1286 NULL, 0, chip->ecc.strength);
1288 mtd->ecc_stats.failed++;
1290 mtd->ecc_stats.corrected += bf;
1291 info->max_bitflips = max_t(unsigned int,
1292 info->max_bitflips, bf);
1293 info->retcode = ERR_CORERR;
1295 info->retcode = ERR_NONE;
1298 } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1299 /* Raw read is not supported with Hamming ECC engine */
1300 if (is_buf_blank(buf, mtd->writesize))
1301 info->retcode = ERR_NONE;
1303 mtd->ecc_stats.failed++;
1306 return info->max_bitflips;
/*
 * ecc.read_page_raw hook: perform a forced-raw page read (ECC engine
 * disabled via info->force_raw), then de-interleave the controller's
 * chunked layout — data chunks into @buf, spare bytes at the start of
 * oob_poi, ECC bytes after all spare bytes (offset ecc_off_buf).
 * NOTE(review): the chip->read_buf()/memcpy call openers inside both
 * loops are elided in this extraction — confirm against the full
 * source.
 */
1309 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1310 struct nand_chip *chip, uint8_t *buf,
1311 int oob_required, int page)
1313 struct pxa3xx_nand_host *host = chip->priv;
1314 struct pxa3xx_nand_info *info = host->info_data;
1315 int chunk, ecc_off_buf;
1321 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1322 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1324 info->force_raw = true;
1325 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1327 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1328 info->last_spare_size;
1329 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1331 buf + (chunk * info->chunk_size),
1335 (chunk * (info->spare_size)),
1338 chip->oob_poi + ecc_off_buf +
1339 (chunk * (info->ecc_size)),
1340 info->ecc_size - 2);
/* Handle a trailing partial chunk, if the geometry has one */
1343 if (info->ntotalchunks > info->nfullchunks) {
1345 buf + (info->nfullchunks * info->chunk_size),
1346 info->last_chunk_size);
1349 (info->nfullchunks * (info->spare_size)),
1350 info->last_spare_size);
1352 chip->oob_poi + ecc_off_buf +
1353 (info->nfullchunks * (info->ecc_size)),
1354 info->ecc_size - 2);
1357 info->force_raw = false;
1362 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1363 struct nand_chip *chip, int page)
1365 /* Invalidate page cache */
1368 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1372 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1374 struct nand_chip *chip = mtd_to_nand(mtd);
1375 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1376 struct pxa3xx_nand_info *info = host->info_data;
1379 if (info->buf_start < info->buf_count)
1380 /* Has just send a new command? */
1381 retval = info->data_buff[info->buf_start++];
1386 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1388 struct nand_chip *chip = mtd_to_nand(mtd);
1389 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1390 struct pxa3xx_nand_info *info = host->info_data;
1391 u16 retval = 0xFFFF;
1393 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1394 retval = *((u16 *)(info->data_buff+info->buf_start));
1395 info->buf_start += 2;
1400 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1402 struct nand_chip *chip = mtd_to_nand(mtd);
1403 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1404 struct pxa3xx_nand_info *info = host->info_data;
1405 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1407 memcpy(buf, info->data_buff + info->buf_start, real_len);
1408 info->buf_start += real_len;
1411 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1412 const uint8_t *buf, int len)
1414 struct nand_chip *chip = mtd_to_nand(mtd);
1415 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1416 struct pxa3xx_nand_info *info = host->info_data;
1417 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1419 memcpy(info->data_buff + info->buf_start, buf, real_len);
1420 info->buf_start += real_len;
/*
 * Chip-select hook: the driver handles CS via per-command setup,
 * so there is nothing to do here.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1428 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1430 struct nand_chip *chip = mtd_to_nand(mtd);
1431 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1432 struct pxa3xx_nand_info *info = host->info_data;
1434 if (info->need_wait) {
1437 info->need_wait = 0;
1443 status = nand_readl(info, NDSR);
1445 pxa3xx_nand_irq(info);
1447 if (info->dev_ready)
1450 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1451 dev_err(mtd->dev, "Ready timeout!!!\n");
1452 return NAND_STATUS_FAIL;
1457 /* pxa3xx_nand_send_command has waited for command complete */
1458 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1459 if (info->retcode == ERR_NONE)
1462 return NAND_STATUS_FAIL;
1465 return NAND_STATUS_READY;
1468 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1470 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1472 /* Configure default flash values */
1473 info->reg_ndcr = 0x0; /* enable all interrupts */
1474 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1475 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1476 info->reg_ndcr |= NDCR_SPARE_EN;
1481 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1483 struct pxa3xx_nand_host *host = info->host[info->cs];
1484 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1485 struct nand_chip *chip = mtd_to_nand(mtd);
1487 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1488 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1489 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1492 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1494 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1495 uint32_t ndcr = nand_readl(info, NDCR);
1497 /* Set an initial chunk size */
1498 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1499 info->reg_ndcr = ndcr &
1500 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1501 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1502 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1503 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1506 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1508 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1509 if (info->data_buff == NULL)
1514 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1516 struct pxa3xx_nand_info *info = host->info_data;
1517 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1518 struct mtd_info *mtd;
1519 struct nand_chip *chip;
1520 const struct nand_sdr_timings *timings;
1523 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1524 chip = mtd_to_nand(mtd);
1526 /* configure default flash values */
1527 info->reg_ndcr = 0x0; /* enable all interrupts */
1528 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1529 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1530 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1532 /* use the common timing to make a try */
1533 timings = onfi_async_timing_mode_to_sdr_timings(0);
1534 if (IS_ERR(timings))
1535 return PTR_ERR(timings);
1537 pxa3xx_nand_set_sdr_timing(host, timings);
1539 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1540 ret = chip->waitfunc(mtd, chip);
1541 if (ret & NAND_STATUS_FAIL)
1547 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1548 struct nand_ecc_ctrl *ecc,
1549 int strength, int ecc_stepsize, int page_size)
1551 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1552 info->nfullchunks = 1;
1553 info->ntotalchunks = 1;
1554 info->chunk_size = 2048;
1555 info->spare_size = 40;
1556 info->ecc_size = 24;
1557 ecc->mode = NAND_ECC_HW;
1561 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1562 info->nfullchunks = 1;
1563 info->ntotalchunks = 1;
1564 info->chunk_size = 512;
1565 info->spare_size = 8;
1567 ecc->mode = NAND_ECC_HW;
1572 * Required ECC: 4-bit correction per 512 bytes
1573 * Select: 16-bit correction per 2048 bytes
1575 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1577 info->nfullchunks = 1;
1578 info->ntotalchunks = 1;
1579 info->chunk_size = 2048;
1580 info->spare_size = 32;
1581 info->ecc_size = 32;
1582 ecc->mode = NAND_ECC_HW;
1583 ecc->size = info->chunk_size;
1584 ecc->layout = &ecc_layout_2KB_bch4bit;
1587 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1589 info->nfullchunks = 2;
1590 info->ntotalchunks = 2;
1591 info->chunk_size = 2048;
1592 info->spare_size = 32;
1593 info->ecc_size = 32;
1594 ecc->mode = NAND_ECC_HW;
1595 ecc->size = info->chunk_size;
1596 ecc->layout = &ecc_layout_4KB_bch4bit;
1599 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1601 info->nfullchunks = 4;
1602 info->ntotalchunks = 4;
1603 info->chunk_size = 2048;
1604 info->spare_size = 32;
1605 info->ecc_size = 32;
1606 ecc->mode = NAND_ECC_HW;
1607 ecc->size = info->chunk_size;
1608 ecc->layout = &ecc_layout_8KB_bch4bit;
1612 * Required ECC: 8-bit correction per 512 bytes
1613 * Select: 16-bit correction per 1024 bytes
1615 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1617 info->nfullchunks = 1;
1618 info->ntotalchunks = 2;
1619 info->chunk_size = 1024;
1620 info->spare_size = 0;
1621 info->last_chunk_size = 1024;
1622 info->last_spare_size = 32;
1623 info->ecc_size = 32;
1624 ecc->mode = NAND_ECC_HW;
1625 ecc->size = info->chunk_size;
1626 ecc->layout = &ecc_layout_2KB_bch8bit;
1629 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1631 info->nfullchunks = 4;
1632 info->ntotalchunks = 5;
1633 info->chunk_size = 1024;
1634 info->spare_size = 0;
1635 info->last_chunk_size = 0;
1636 info->last_spare_size = 64;
1637 info->ecc_size = 32;
1638 ecc->mode = NAND_ECC_HW;
1639 ecc->size = info->chunk_size;
1640 ecc->layout = &ecc_layout_4KB_bch8bit;
1643 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1645 info->nfullchunks = 8;
1646 info->ntotalchunks = 9;
1647 info->chunk_size = 1024;
1648 info->spare_size = 0;
1649 info->last_chunk_size = 0;
1650 info->last_spare_size = 160;
1651 info->ecc_size = 32;
1652 ecc->mode = NAND_ECC_HW;
1653 ecc->size = info->chunk_size;
1654 ecc->layout = &ecc_layout_8KB_bch8bit;
1658 dev_err(info->controller.active->mtd.dev,
1659 "ECC strength %d at page size %d is not supported\n",
1660 strength, page_size);
1667 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1669 struct nand_chip *chip = mtd_to_nand(mtd);
1670 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1671 struct pxa3xx_nand_info *info = host->info_data;
1672 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1674 uint16_t ecc_strength, ecc_step;
1676 if (pdata->keep_config) {
1677 pxa3xx_nand_detect_config(info);
1679 ret = pxa3xx_nand_config_ident(info);
1682 ret = pxa3xx_nand_sensing(host);
1684 dev_info(mtd->dev, "There is no chip on cs %d!\n",
1690 /* Device detection must be done with ECC disabled */
1691 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1692 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
1693 nand_writel(info, NDECCCTRL, 0x0);
1695 if (nand_scan_ident(mtd, 1, NULL))
1698 if (!pdata->keep_config) {
1699 ret = pxa3xx_nand_init_timings(host);
1702 "Failed to set timings: %d\n", ret);
1707 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1709 * We'll use a bad block table stored in-flash and don't
1710 * allow writing the bad block marker to the flash.
1712 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1713 chip->bbt_td = &bbt_main_descr;
1714 chip->bbt_md = &bbt_mirror_descr;
1717 if (pdata->ecc_strength && pdata->ecc_step_size) {
1718 ecc_strength = pdata->ecc_strength;
1719 ecc_step = pdata->ecc_step_size;
1721 ecc_strength = chip->ecc_strength_ds;
1722 ecc_step = chip->ecc_step_ds;
1725 /* Set default ECC strength requirements on non-ONFI devices */
1726 if (ecc_strength < 1 && ecc_step < 1) {
1731 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1732 ecc_step, mtd->writesize);
1737 * If the page size is bigger than the FIFO size, let's check
1738 * we are given the right variant and then switch to the extended
1739 * (aka split) command handling,
1741 if (mtd->writesize > info->chunk_size) {
1742 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1743 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1744 chip->cmdfunc = nand_cmdfunc_extended;
1747 "unsupported page size on this variant\n");
1752 /* calculate addressing information */
1753 if (mtd->writesize >= 2048)
1754 host->col_addr_cycles = 2;
1756 host->col_addr_cycles = 1;
1758 /* release the initial buffer */
1759 kfree(info->data_buff);
1761 /* allocate the real data + oob buffer */
1762 info->buf_size = mtd->writesize + mtd->oobsize;
1763 ret = pxa3xx_nand_init_buff(info);
1766 info->oob_buff = info->data_buff + mtd->writesize;
1768 if ((mtd->size >> chip->page_shift) > 65536)
1769 host->row_addr_cycles = 3;
1771 host->row_addr_cycles = 2;
1773 if (!pdata->keep_config)
1774 pxa3xx_nand_config_tail(info);
1776 return nand_scan_tail(mtd);
1779 static int alloc_nand_resource(struct udevice *dev, struct pxa3xx_nand_info *info)
1781 struct pxa3xx_nand_platform_data *pdata;
1782 struct pxa3xx_nand_host *host;
1783 struct nand_chip *chip = NULL;
1784 struct mtd_info *mtd;
1787 pdata = info->pdata;
1788 if (pdata->num_cs <= 0)
1791 info->variant = pxa3xx_nand_get_variant(dev);
1792 for (cs = 0; cs < pdata->num_cs; cs++) {
1793 chip = (struct nand_chip *)
1794 ((u8 *)&info[1] + sizeof(*host) * cs);
1795 mtd = nand_to_mtd(chip);
1796 host = (struct pxa3xx_nand_host *)chip;
1797 info->host[cs] = host;
1799 host->info_data = info;
1800 mtd->owner = THIS_MODULE;
1802 nand_set_controller_data(chip, host);
1803 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1804 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1805 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1806 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1807 chip->controller = &info->controller;
1808 chip->waitfunc = pxa3xx_nand_waitfunc;
1809 chip->select_chip = pxa3xx_nand_select_chip;
1810 chip->read_word = pxa3xx_nand_read_word;
1811 chip->read_byte = pxa3xx_nand_read_byte;
1812 chip->read_buf = pxa3xx_nand_read_buf;
1813 chip->write_buf = pxa3xx_nand_write_buf;
1814 chip->options |= NAND_NO_SUBPAGE_WRITE;
1815 chip->cmdfunc = nand_cmdfunc;
1818 /* Allocate a buffer to allow flash detection */
1819 info->buf_size = INIT_BUFFER_SIZE;
1820 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1821 if (info->data_buff == NULL)
1824 /* initialize all interrupts to be disabled */
1825 disable_int(info, NDSR_MASK);
1828 * Some SoCs like A7k/A8k need to enable manually the NAND
1829 * controller to avoid being bootloader dependent. This is done
1830 * through the use of a single bit in the System Functions registers.
1832 if (pxa3xx_nand_get_variant(dev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1833 struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
1834 dev, "marvell,system-controller");
1837 if (IS_ERR(sysctrl_base))
1838 return PTR_ERR(sysctrl_base);
1840 regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, ®);
1841 reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
1842 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
1848 static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info)
1850 struct pxa3xx_nand_platform_data *pdata;
1852 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1856 info->mmio_base = dev_read_addr_ptr(dev);
1858 pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
1859 if (pdata->num_cs != 1) {
1860 pr_err("pxa3xx driver supports single CS only\n");
1864 if (dev_read_bool(dev, "nand-enable-arbiter"))
1865 pdata->enable_arbiter = 1;
1867 if (dev_read_bool(dev, "nand-keep-config"))
1868 pdata->keep_config = 1;
1872 * If these are not set, they will be selected according
1873 * to the detected flash type.
1876 pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0);
1879 pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size",
1882 info->pdata = pdata;
1887 static int pxa3xx_nand_probe(struct udevice *dev)
1889 struct pxa3xx_nand_platform_data *pdata;
1890 int ret, cs, probe_success;
1891 struct pxa3xx_nand_info *info = dev_get_priv(dev);
1893 ret = pxa3xx_nand_probe_dt(dev, info);
1897 pdata = info->pdata;
1899 ret = alloc_nand_resource(dev, info);
1901 dev_err(dev, "alloc nand resource failed\n");
1906 for (cs = 0; cs < pdata->num_cs; cs++) {
1907 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1910 * The mtd name matches the one used in 'mtdparts' kernel
1911 * parameter. This name cannot be changed or otherwise
1912 * user's mtd partitions configuration would get broken.
1914 mtd->name = "pxa3xx_nand-0";
1916 ret = pxa3xx_nand_scan(mtd);
1918 dev_info(mtd->dev, "failed to scan nand at cs %d\n",
1923 if (nand_register(cs, mtd))
1935 U_BOOT_DRIVER(pxa3xx_nand) = {
1936 .name = "pxa3xx-nand",
1938 .of_match = pxa3xx_nand_dt_ids,
1939 .probe = pxa3xx_nand_probe,
1940 .priv_auto = sizeof(struct pxa3xx_nand_info) +
1941 sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE,
1944 void board_nand_init(void)
1946 struct udevice *dev;
1949 ret = uclass_get_device_by_driver(UCLASS_MTD,
1950 DM_DRIVER_GET(pxa3xx_nand), &dev);
1951 if (ret && ret != -ENODEV) {
1952 pr_err("Failed to initialize %s. (error %d)\n", dev->name,