1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/raw/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/errno.h>
20 #include <asm/io.h>
21 #include <asm/arch/cpu.h>
22 #include <linux/mtd/mtd.h>
23 #include <linux/mtd/rawnand.h>
24 #include <linux/types.h>
25 #include <syscon.h>
26 #include <regmap.h>
27 #include <dm/uclass.h>
28 #include <dm/read.h>
29
30 #include "pxa3xx_nand.h"
31
32 DECLARE_GLOBAL_DATA_PTR;
33
34 #define TIMEOUT_DRAIN_FIFO      5       /* in ms */
35 #define CHIP_DELAY_TIMEOUT      200
36 #define NAND_STOP_DELAY         40
37
38 /*
39  * Define a buffer size for the initial command that detects the flash device:
40  * STATUS, READID and PARAM.
41  * ONFI param page is 256 bytes, and there are three redundant copies
42  * to be read. JEDEC param page is 512 bytes, and there are also three
43  * redundant copies to be read.
44  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
45  */
46 #define INIT_BUFFER_SIZE        2048
47
48 /* registers and bit definitions */
49 #define NDCR            (0x00) /* Control register */
50 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
51 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
52 #define NDSR            (0x14) /* Status Register */
53 #define NDPCR           (0x18) /* Page Count Register */
54 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
55 #define NDBDR1          (0x20) /* Bad Block Register 1 */
56 #define NDECCCTRL       (0x28) /* ECC control */
57 #define NDDB            (0x40) /* Data Buffer */
58 #define NDCB0           (0x48) /* Command Buffer0 */
59 #define NDCB1           (0x4C) /* Command Buffer1 */
60 #define NDCB2           (0x50) /* Command Buffer2 */
61
62 #define NDCR_SPARE_EN           (0x1 << 31)
63 #define NDCR_ECC_EN             (0x1 << 30)
64 #define NDCR_DMA_EN             (0x1 << 29)
65 #define NDCR_ND_RUN             (0x1 << 28)
66 #define NDCR_DWIDTH_C           (0x1 << 27)
67 #define NDCR_DWIDTH_M           (0x1 << 26)
68 #define NDCR_PAGE_SZ            (0x1 << 24)
69 #define NDCR_NCSX               (0x1 << 23)
70 #define NDCR_ND_MODE            (0x3 << 21)
71 #define NDCR_NAND_MODE          (0x0)
72 #define NDCR_CLR_PG_CNT         (0x1 << 20)
73 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
74 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
75 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
76
77 #define NDCR_RA_START           (0x1 << 15)
78 #define NDCR_PG_PER_BLK         (0x1 << 14)
79 #define NDCR_ND_ARB_EN          (0x1 << 12)
80 #define NDCR_INT_MASK           (0xFFF)
81
82 #define NDSR_MASK               (0xfff)
83 #define NDSR_ERR_CNT_OFF        (16)
84 #define NDSR_ERR_CNT_MASK       (0x1f)
85 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
86 #define NDSR_RDY                (0x1 << 12)
87 #define NDSR_FLASH_RDY          (0x1 << 11)
88 #define NDSR_CS0_PAGED          (0x1 << 10)
89 #define NDSR_CS1_PAGED          (0x1 << 9)
90 #define NDSR_CS0_CMDD           (0x1 << 8)
91 #define NDSR_CS1_CMDD           (0x1 << 7)
92 #define NDSR_CS0_BBD            (0x1 << 6)
93 #define NDSR_CS1_BBD            (0x1 << 5)
94 #define NDSR_UNCORERR           (0x1 << 4)
95 #define NDSR_CORERR             (0x1 << 3)
96 #define NDSR_WRDREQ             (0x1 << 2)
97 #define NDSR_RDDREQ             (0x1 << 1)
98 #define NDSR_WRCMDREQ           (0x1)
99
100 #define NDCB0_LEN_OVRD          (0x1 << 28)
101 #define NDCB0_ST_ROW_EN         (0x1 << 26)
102 #define NDCB0_AUTO_RS           (0x1 << 25)
103 #define NDCB0_CSEL              (0x1 << 24)
104 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
105 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
106 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
107 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
108 #define NDCB0_NC                (0x1 << 20)
109 #define NDCB0_DBC               (0x1 << 19)
110 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
111 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
112 #define NDCB0_CMD2_MASK         (0xff << 8)
113 #define NDCB0_CMD1_MASK         (0xff)
114 #define NDCB0_ADDR_CYC_SHIFT    (16)
115
116 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
117 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
118 #define EXT_CMD_TYPE_READ       4 /* Read */
119 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
120 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
121 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
122 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
123
124 /* System control register and bit to enable NAND on some SoCs */
125 #define GENCONF_SOC_DEVICE_MUX  0x208
126 #define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
127
128 /*
129  * This should be large enough to read 'ONFI' and 'JEDEC'.
130  * Let's use 7 bytes, which is the maximum ID count supported
131  * by the controller (see NDCR_RD_ID_CNT_MASK).
132  */
133 #define READ_ID_BYTES           7
134
135 /* macros for registers read/write */
136 #define nand_writel(info, off, val)     \
137         writel((val), (info)->mmio_base + (off))
138
139 #define nand_readl(info, off)           \
140         readl((info)->mmio_base + (off))
141
142 /* error code and state */
143 enum {
144         ERR_NONE        = 0,
145         ERR_DMABUSERR   = -1,
146         ERR_SENDCMD     = -2,
147         ERR_UNCORERR    = -3,
148         ERR_BBERR       = -4,
149         ERR_CORERR      = -5,
150 };
151
152 enum {
153         STATE_IDLE = 0,
154         STATE_PREPARED,
155         STATE_CMD_HANDLE,
156         STATE_DMA_READING,
157         STATE_DMA_WRITING,
158         STATE_DMA_DONE,
159         STATE_PIO_READING,
160         STATE_PIO_WRITING,
161         STATE_CMD_DONE,
162         STATE_READY,
163 };
164
165 enum pxa3xx_nand_variant {
166         PXA3XX_NAND_VARIANT_PXA,
167         PXA3XX_NAND_VARIANT_ARMADA370,
168         PXA3XX_NAND_VARIANT_ARMADA_8K,
169 };
170
171 struct pxa3xx_nand_host {
172         struct nand_chip        chip;
173         void                    *info_data;
174
175         /* page size of attached chip */
176         int                     use_ecc;
177         int                     cs;
178
179         /* calculated from pxa3xx_nand_flash data */
180         unsigned int            col_addr_cycles;
181         unsigned int            row_addr_cycles;
182 };
183
184 struct pxa3xx_nand_info {
185         struct nand_hw_control  controller;
186         struct pxa3xx_nand_platform_data *pdata;
187
188         struct clk              *clk;
189         void __iomem            *mmio_base;
190         unsigned long           mmio_phys;
191         int                     cmd_complete, dev_ready;
192
193         unsigned int            buf_start;
194         unsigned int            buf_count;
195         unsigned int            buf_size;
196         unsigned int            data_buff_pos;
197         unsigned int            oob_buff_pos;
198
199         unsigned char           *data_buff;
200         unsigned char           *oob_buff;
201
202         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
203         unsigned int            state;
204
205         /*
206          * This driver supports NFCv1 (as found in PXA SoC)
207          * and NFCv2 (as found in Armada 370/XP SoC).
208          */
209         enum pxa3xx_nand_variant variant;
210
211         int                     cs;
212         int                     use_ecc;        /* use HW ECC ? */
213         int                     force_raw;      /* prevent use_ecc from being set */
214         int                     ecc_bch;        /* using BCH ECC? */
215         int                     use_spare;      /* use spare ? */
216         int                     need_wait;
217
218         /* Amount of real data per full chunk */
219         unsigned int            chunk_size;
220
221         /* Amount of spare data per full chunk */
222         unsigned int            spare_size;
223
224         /* Number of full chunks (i.e. of size chunk_size + spare_size) */
225         unsigned int            nfullchunks;
226
227         /*
228          * Total number of chunks. If equal to nfullchunks, then there
229          * are only full chunks. Otherwise, there is one last chunk of
230          * size (last_chunk_size + last_spare_size)
231          */
232         unsigned int            ntotalchunks;
233
234         /* Amount of real data in the last chunk */
235         unsigned int            last_chunk_size;
236
237         /* Amount of spare data in the last chunk */
238         unsigned int            last_spare_size;
239
240         unsigned int            ecc_size;
241         unsigned int            ecc_err_cnt;
242         unsigned int            max_bitflips;
243         int                     retcode;
244
245         /*
246          * Variables only valid during command
247          * execution. step_chunk_size and step_spare_size are the
248          * amounts of real data and spare data in the current
249          * chunk. cur_chunk is the current chunk being
250          * read/programmed.
251          */
252         unsigned int            step_chunk_size;
253         unsigned int            step_spare_size;
254         unsigned int            cur_chunk;
255
256         /* cached register value */
257         uint32_t                reg_ndcr;
258         uint32_t                ndtr0cs0;
259         uint32_t                ndtr1cs0;
260
261         /* generated NDCBx register values */
262         uint32_t                ndcb0;
263         uint32_t                ndcb1;
264         uint32_t                ndcb2;
265         uint32_t                ndcb3;
266 };
267
268 static struct pxa3xx_nand_timing timing[] = {
269         /*
270          * tCH  Enable signal hold time
271          * tCS  Enable signal setup time
272          * tWH  ND_nWE high duration
273          * tWP  ND_nWE pulse time
274          * tRH  ND_nRE high duration
275          * tRP  ND_nRE pulse width
276          * tR   ND_nWE high to ND_nRE low for read
277          * tWHR ND_nWE high to ND_nRE low for status read
278          * tAR  ND_ALE low to ND_nRE low delay
279          */
280         /*ch  cs  wh  wp   rh  rp   r      whr  ar */
281         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
282         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
283         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
284         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
285         {  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
286 };
287
288 static struct pxa3xx_nand_flash builtin_flash_types[] = {
289         /*
290          * chip_id
291          * flash_width  Width of Flash memory (DWIDTH_M)
292          * dfc_width    Width of flash controller (DWIDTH_C)
293          * *timing
294          * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
295          */
296         { 0x46ec, 16, 16, &timing[1] },
297         { 0xdaec,  8,  8, &timing[1] },
298         { 0xd7ec,  8,  8, &timing[1] },
299         { 0xa12c,  8,  8, &timing[2] },
300         { 0xb12c, 16, 16, &timing[2] },
301         { 0xdc2c,  8,  8, &timing[2] },
302         { 0xcc2c, 16, 16, &timing[2] },
303         { 0xba20, 16, 16, &timing[3] },
304         { 0xda98,  8,  8, &timing[4] },
305 };
306
307 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
308 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
309 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
310
311 static struct nand_bbt_descr bbt_main_descr = {
312         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
313                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
314         .offs = 8,
315         .len = 6,
316         .veroffs = 14,
317         .maxblocks = 8,         /* Last 8 blocks in each chip */
318         .pattern = bbt_pattern
319 };
320
321 static struct nand_bbt_descr bbt_mirror_descr = {
322         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
323                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
324         .offs = 8,
325         .len = 6,
326         .veroffs = 14,
327         .maxblocks = 8,         /* Last 8 blocks in each chip */
328         .pattern = bbt_mirror_pattern
329 };
330 #endif
331
332 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
333         .eccbytes = 32,
334         .eccpos = {
335                 32, 33, 34, 35, 36, 37, 38, 39,
336                 40, 41, 42, 43, 44, 45, 46, 47,
337                 48, 49, 50, 51, 52, 53, 54, 55,
338                 56, 57, 58, 59, 60, 61, 62, 63},
339         .oobfree = { {2, 30} }
340 };
341
342 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
343         .eccbytes = 64,
344         .eccpos = {
345                 32, 33, 34, 35, 36, 37, 38, 39,
346                 40, 41, 42, 43, 44, 45, 46, 47,
347                 48, 49, 50, 51, 52, 53, 54, 55,
348                 56, 57, 58, 59, 60, 61, 62, 63,
349                 64, 65, 66, 67, 68, 69, 70, 71,
350                 72, 73, 74, 75, 76, 77, 78, 79,
351                 80, 81, 82, 83, 84, 85, 86, 87,
352                 88, 89, 90, 91, 92, 93, 94, 95},
353         .oobfree = { {1, 4}, {6, 26} }
354 };
355
356 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
357         .eccbytes = 64,
358         .eccpos = {
359                 32,  33,  34,  35,  36,  37,  38,  39,
360                 40,  41,  42,  43,  44,  45,  46,  47,
361                 48,  49,  50,  51,  52,  53,  54,  55,
362                 56,  57,  58,  59,  60,  61,  62,  63,
363                 96,  97,  98,  99,  100, 101, 102, 103,
364                 104, 105, 106, 107, 108, 109, 110, 111,
365                 112, 113, 114, 115, 116, 117, 118, 119,
366                 120, 121, 122, 123, 124, 125, 126, 127},
367         /* Bootrom looks in bytes 0 & 5 for bad blocks */
368         .oobfree = { {6, 26}, { 64, 32} }
369 };
370
371 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
372         .eccbytes = 128,
373         .eccpos = {
374                 32,  33,  34,  35,  36,  37,  38,  39,
375                 40,  41,  42,  43,  44,  45,  46,  47,
376                 48,  49,  50,  51,  52,  53,  54,  55,
377                 56,  57,  58,  59,  60,  61,  62,  63,
378
379                 96,  97,  98,  99,  100, 101, 102, 103,
380                 104, 105, 106, 107, 108, 109, 110, 111,
381                 112, 113, 114, 115, 116, 117, 118, 119,
382                 120, 121, 122, 123, 124, 125, 126, 127,
383
384                 160, 161, 162, 163, 164, 165, 166, 167,
385                 168, 169, 170, 171, 172, 173, 174, 175,
386                 176, 177, 178, 179, 180, 181, 182, 183,
387                 184, 185, 186, 187, 188, 189, 190, 191,
388
389                 224, 225, 226, 227, 228, 229, 230, 231,
390                 232, 233, 234, 235, 236, 237, 238, 239,
391                 240, 241, 242, 243, 244, 245, 246, 247,
392                 248, 249, 250, 251, 252, 253, 254, 255},
393
394         /* Bootrom looks in bytes 0 & 5 for bad blocks */
395         .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
396 };
397
398 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
399         .eccbytes = 128,
400         .eccpos = {
401                 32,  33,  34,  35,  36,  37,  38,  39,
402                 40,  41,  42,  43,  44,  45,  46,  47,
403                 48,  49,  50,  51,  52,  53,  54,  55,
404                 56,  57,  58,  59,  60,  61,  62,  63},
405         .oobfree = { }
406 };
407
408 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
409         .eccbytes = 256,
410         .eccpos = {},
411         /* HW ECC handles all ECC data and all spare area is free for OOB */
412         .oobfree = {{0, 160} }
413 };
414
415 #define NDTR0_tCH(c)    (min((c), 7) << 19)
416 #define NDTR0_tCS(c)    (min((c), 7) << 16)
417 #define NDTR0_tWH(c)    (min((c), 7) << 11)
418 #define NDTR0_tWP(c)    (min((c), 7) << 8)
419 #define NDTR0_tRH(c)    (min((c), 7) << 3)
420 #define NDTR0_tRP(c)    (min((c), 7) << 0)
421
422 #define NDTR1_tR(c)     (min((c), 65535) << 16)
423 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
424 #define NDTR1_tAR(c)    (min((c), 15) << 0)
425
426 /* convert nanoseconds to NAND flash controller clock cycles */
427 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
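/*
 * Worked example (illustrative only, assuming a 250 MHz controller clock
 * as returned by mvebu_get_nand_clock(); the actual rate is board/SoC
 * dependent):
 *   ns2cycle(25, 250000000) = 25 * (250000000 / 1000000) / 1000
 *                           = 25 * 250 / 1000 = 6 cycles
 * Note the integer division: sub-cycle remainders are truncated.
 */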
428
429 static const struct udevice_id pxa3xx_nand_dt_ids[] = {
430         {
431                 .compatible = "marvell,mvebu-pxa3xx-nand",
432                 .data = PXA3XX_NAND_VARIANT_ARMADA370,
433         },
434         {
435                 .compatible = "marvell,armada-8k-nand-controller",
436                 .data = PXA3XX_NAND_VARIANT_ARMADA_8K,
437         },
438         {}
439 };
440
441 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(struct udevice *dev)
442 {
443         return dev_get_driver_data(dev);
444 }
445
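/*
 * Program the CS0 timing registers from a legacy pxa3xx_nand_timing
 * entry (values in ns): convert each parameter to controller clock
 * cycles, cache the resulting NDTR0/NDTR1 values and write them out.
 */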
446 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
447                                    const struct pxa3xx_nand_timing *t)
448 {
449         struct pxa3xx_nand_info *info = host->info_data;
450         unsigned long nand_clk = mvebu_get_nand_clock();
451         uint32_t ndtr0, ndtr1;
452
453         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
454                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
455                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
456                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
457                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
458                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
459
460         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
461                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
462                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
463
464         info->ndtr0cs0 = ndtr0;
465         info->ndtr1cs0 = ndtr1;
466         nand_writel(info, NDTR0CS0, ndtr0);
467         nand_writel(info, NDTR1CS0, ndtr1);
468 }
469
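/*
 * Program the CS0 timing registers from ONFI SDR timings. The SDR
 * values are given in ps and rounded up to ns; tR is derived from
 * chip->chip_delay (in us), falling back to 20 us when it is zero.
 */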
470 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
471                                        const struct nand_sdr_timings *t)
472 {
473         struct pxa3xx_nand_info *info = host->info_data;
474         struct nand_chip *chip = &host->chip;
475         unsigned long nand_clk = mvebu_get_nand_clock();
476         uint32_t ndtr0, ndtr1;
477
478         u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
479         u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
480         u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
481         u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
482         u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
483         u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
484         u32 tR = chip->chip_delay * 1000;
485         u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
486         u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
487
488         /* fallback to a default value if tR = 0 */
489         if (!tR)
490                 tR = 20000;
491
492         ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
493                 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
494                 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
495                 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
496                 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
497                 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
498
499         ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
500                 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
501                 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
502
503         info->ndtr0cs0 = ndtr0;
504         info->ndtr1cs0 = ndtr1;
505         nand_writel(info, NDTR0CS0, ndtr0);
506         nand_writel(info, NDTR1CS0, ndtr1);
507 }
508
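/*
 * Select and apply NAND timings: if the chip does not report an ONFI
 * async timing mode, read its ID and look it up in the builtin flash
 * table (also configuring the 8/16-bit bus width from that entry);
 * otherwise use the fastest advertised ONFI mode and its SDR timings.
 */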
509 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
510 {
511         const struct nand_sdr_timings *timings;
512         struct nand_chip *chip = &host->chip;
513         struct pxa3xx_nand_info *info = host->info_data;
514         const struct pxa3xx_nand_flash *f = NULL;
515         struct mtd_info *mtd = nand_to_mtd(&host->chip);
516         int mode, id, ntypes, i;
517
518         mode = onfi_get_async_timing_mode(chip);
519         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
520                 ntypes = ARRAY_SIZE(builtin_flash_types);
521
522                 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
523
524                 id = chip->read_byte(mtd);
525                 id |= chip->read_byte(mtd) << 0x8;
526
527                 for (i = 0; i < ntypes; i++) {
528                         f = &builtin_flash_types[i];
529
530                         if (f->chip_id == id)
531                                 break;
532                 }
533
534                 if (i == ntypes) {
535                         dev_err(mtd->dev, "Error: timings not found\n");
536                         return -EINVAL;
537                 }
538
539                 pxa3xx_nand_set_timing(host, f->timing);
540
541                 if (f->flash_width == 16) {
542                         info->reg_ndcr |= NDCR_DWIDTH_M;
543                         chip->options |= NAND_BUSWIDTH_16;
544                 }
545
546                 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
547         } else {
548                 mode = fls(mode) - 1;
549                 if (mode < 0)
550                         mode = 0;
551
552                 timings = onfi_async_timing_mode_to_sdr_timings(mode);
553                 if (IS_ERR(timings))
554                         return PTR_ERR(timings);
555
556                 pxa3xx_nand_set_sdr_timing(host, timings);
557         }
558
559         return 0;
560 }
561
562 /*
563  * NOTE: ND_RUN must be set first and the command buffer written
564  * afterwards, otherwise it does not work.
565  * We enable all the interrupts at the same time, and
566  * let pxa3xx_nand_irq() handle all the logic.
567  */
568 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
569 {
570         uint32_t ndcr;
571
572         ndcr = info->reg_ndcr;
573
574         if (info->use_ecc) {
575                 ndcr |= NDCR_ECC_EN;
576                 if (info->ecc_bch)
577                         nand_writel(info, NDECCCTRL, 0x1);
578         } else {
579                 ndcr &= ~NDCR_ECC_EN;
580                 if (info->ecc_bch)
581                         nand_writel(info, NDECCCTRL, 0x0);
582         }
583
584         ndcr &= ~NDCR_DMA_EN;
585
586         if (info->use_spare)
587                 ndcr |= NDCR_SPARE_EN;
588         else
589                 ndcr &= ~NDCR_SPARE_EN;
590
591         ndcr |= NDCR_ND_RUN;
592
593         /* clear status bits and run */
594         nand_writel(info, NDSR, NDSR_MASK);
595         nand_writel(info, NDCR, 0);
596         nand_writel(info, NDCR, ndcr);
597 }
598
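/*
 * The low NDCR bits are interrupt mask bits (1 = masked), so ORing
 * int_mask into NDCR disables the corresponding interrupt sources.
 */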
599 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
600 {
601         uint32_t ndcr;
602
603         ndcr = nand_readl(info, NDCR);
604         nand_writel(info, NDCR, ndcr | int_mask);
605 }
606
607 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
608 {
609         if (info->ecc_bch && !info->force_raw) {
610                 u32 ts;
611
612                 /*
613                  * According to the datasheet, when reading from NDDB
614                  * with BCH enabled, after every 32 bytes read we have
615                  * to make sure that the NDSR.RDDREQ bit is set.
616                  *
617                  * Drain the FIFO eight 32-bit reads at a time, and skip
618                  * the polling on the last read.
619                  */
620                 while (len > 8) {
621                         readsl(info->mmio_base + NDDB, data, 8);
622
623                         ts = get_timer(0);
624                         while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
625                                 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
626                                         dev_err(info->controller.active->mtd.dev,
627                                                 "Timeout on RDDREQ while draining the FIFO\n");
628                                         return;
629                                 }
630                         }
631
632                         data += 32;
633                         len -= 8;
634                 }
635         }
636
637         readsl(info->mmio_base + NDDB, data, len);
638 }
639
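/*
 * PIO transfer of the current chunk: copy step_chunk_size data bytes
 * (plus spare and ECC bytes in raw mode) and step_spare_size OOB bytes
 * between the buffers and NDDB, in 32-bit words rounded up, then
 * advance the data/OOB buffer positions.
 */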
640 static void handle_data_pio(struct pxa3xx_nand_info *info)
641 {
642         int data_len = info->step_chunk_size;
643
644         /*
645          * In raw mode, include the spare area and the ECC bytes that are not
646          * consumed by the controller in the data section. Do not reorganize
647          * here, do it in the ->read_page_raw() handler instead.
648          */
649         if (info->force_raw)
650                 data_len += info->step_spare_size + info->ecc_size;
651
652         switch (info->state) {
653         case STATE_PIO_WRITING:
654                 if (info->step_chunk_size)
655                         writesl(info->mmio_base + NDDB,
656                                 info->data_buff + info->data_buff_pos,
657                                 DIV_ROUND_UP(data_len, 4));
658
659                 if (info->step_spare_size)
660                         writesl(info->mmio_base + NDDB,
661                                 info->oob_buff + info->oob_buff_pos,
662                                 DIV_ROUND_UP(info->step_spare_size, 4));
663                 break;
664         case STATE_PIO_READING:
665                 if (data_len)
666                         drain_fifo(info,
667                                    info->data_buff + info->data_buff_pos,
668                                    DIV_ROUND_UP(data_len, 4));
669
670                 if (info->force_raw)
671                         break;
672
673                 if (info->step_spare_size)
674                         drain_fifo(info,
675                                    info->oob_buff + info->oob_buff_pos,
676                                    DIV_ROUND_UP(info->step_spare_size, 4));
677                 break;
678         default:
679                 dev_err(info->controller.active->mtd.dev,
680                         "%s: invalid state %d\n", __func__, info->state);
681                 BUG();
682         }
683
684         /* Update buffer pointers for multi-page read/write */
685         info->data_buff_pos += data_len;
686         info->oob_buff_pos += info->step_spare_size;
687 }
688
689 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
690 {
691         handle_data_pio(info);
692
693         info->state = STATE_CMD_DONE;
694         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
695 }
696
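/*
 * Status handler, polled from the command loops whenever NDSR is
 * non-zero: record ECC results, service PIO data requests, note
 * command-done/ready events, clear the handled bits and, on WRCMDREQ,
 * load the prepared NDCB0..2 (and NDCB3 on NFCv2) command buffer.
 */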
697 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
698 {
699         unsigned int status, is_completed = 0, is_ready = 0;
700         unsigned int ready, cmd_done;
701         irqreturn_t ret = IRQ_HANDLED;
702
703         if (info->cs == 0) {
704                 ready           = NDSR_FLASH_RDY;
705                 cmd_done        = NDSR_CS0_CMDD;
706         } else {
707                 ready           = NDSR_RDY;
708                 cmd_done        = NDSR_CS1_CMDD;
709         }
710
711         /* TODO - find out why we need the delay during write operation. */
712         ndelay(1);
713
714         status = nand_readl(info, NDSR);
715
716         if (status & NDSR_UNCORERR)
717                 info->retcode = ERR_UNCORERR;
718         if (status & NDSR_CORERR) {
719                 info->retcode = ERR_CORERR;
720                 if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
721                          info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
722                     info->ecc_bch)
723                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
724                 else
725                         info->ecc_err_cnt = 1;
726
727                 /*
728                  * Each chunk composing a page is corrected independently,
729                  * and we need to store the maximum number of corrected bitflips
730                  * to return it to the MTD layer in ecc.read_page().
731                  */
732                 info->max_bitflips = max_t(unsigned int,
733                                            info->max_bitflips,
734                                            info->ecc_err_cnt);
735         }
736         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
737                 info->state = (status & NDSR_RDDREQ) ?
738                         STATE_PIO_READING : STATE_PIO_WRITING;
739                 /* Call the IRQ thread in U-Boot directly */
740                 pxa3xx_nand_irq_thread(info);
741                 return 0;
742         }
743         if (status & cmd_done) {
744                 info->state = STATE_CMD_DONE;
745                 is_completed = 1;
746         }
747         if (status & ready) {
748                 info->state = STATE_READY;
749                 is_ready = 1;
750         }
751
752         /*
753          * Clear all status bits before issuing the next command, which
754          * can and will alter the status bits and deserves a new
755          * interrupt on its own. This lets the controller exit the IRQ.
756          */
757         nand_writel(info, NDSR, status);
758
759         if (status & NDSR_WRCMDREQ) {
760                 status &= ~NDSR_WRCMDREQ;
761                 info->state = STATE_CMD_HANDLE;
762
763                 /*
764                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
765                  * must be loaded by writing either 12 or 16 bytes
766                  * directly to NDCB0, four bytes at a time.
767                  *
768                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
769                  * but each NDCBx register can be read.
770                  */
771                 nand_writel(info, NDCB0, info->ndcb0);
772                 nand_writel(info, NDCB0, info->ndcb1);
773                 nand_writel(info, NDCB0, info->ndcb2);
774
775                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
776                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
777                         info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
778                         nand_writel(info, NDCB0, info->ndcb3);
779         }
780
781         if (is_completed)
782                 info->cmd_complete = 1;
783         if (is_ready)
784                 info->dev_ready = 1;
785
786         return ret;
787 }
788
789 static inline int is_buf_blank(uint8_t *buf, size_t len)
790 {
791         for (; len > 0; len--)
792                 if (*buf++ != 0xff)
793                         return 0;
794         return 1;
795 }
796
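/*
 * Split the column/page address into NDCB1/NDCB2: small-page devices
 * take a 1-byte column and the row address in NDCB1, large-page
 * devices a 2-byte column plus a row address spread over NDCB1/NDCB2.
 */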
797 static void set_command_address(struct pxa3xx_nand_info *info,
798                 unsigned int page_size, uint16_t column, int page_addr)
799 {
800         /* small page addr setting */
801         if (page_size < info->chunk_size) {
802                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
803                                 | (column & 0xFF);
804
805                 info->ndcb2 = 0;
806         } else {
807                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
808                                 | (column & 0xFFFF);
809
810                 if (page_addr & 0xFF0000)
811                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
812                 else
813                         info->ndcb2 = 0;
814         }
815 }
816
817 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
818 {
819         struct pxa3xx_nand_host *host = info->host[info->cs];
820         struct mtd_info *mtd = nand_to_mtd(&host->chip);
821
822         /* reset the data and oob buffer positions used to handle data */
823         info->buf_start         = 0;
824         info->buf_count         = 0;
825         info->data_buff_pos     = 0;
826         info->oob_buff_pos      = 0;
827         info->step_chunk_size   = 0;
828         info->step_spare_size   = 0;
829         info->cur_chunk         = 0;
830         info->use_ecc           = 0;
831         info->use_spare         = 1;
832         info->retcode           = ERR_NONE;
833         info->ecc_err_cnt       = 0;
834         info->ndcb3             = 0;
835         info->need_wait         = 0;
836
837         switch (command) {
838         case NAND_CMD_READ0:
839         case NAND_CMD_READOOB:
840         case NAND_CMD_PAGEPROG:
841                 if (!info->force_raw)
842                         info->use_ecc = 1;
843                 break;
844         case NAND_CMD_PARAM:
845                 info->use_spare = 0;
846                 break;
847         default:
848                 info->ndcb1 = 0;
849                 info->ndcb2 = 0;
850                 break;
851         }
852
853         /*
854          * If we are about to issue a read command, or about to set
855          * the write address, then clean the data buffer.
856          */
857         if (command == NAND_CMD_READ0 ||
858             command == NAND_CMD_READOOB ||
859             command == NAND_CMD_SEQIN) {
860                 info->buf_count = mtd->writesize + mtd->oobsize;
861                 memset(info->data_buff, 0xFF, info->buf_count);
862         }
863 }
864
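/*
 * Build the NDCB0..NDCB3 command buffer values for the given command
 * and the current chunk. Returns non-zero when a controller command
 * actually has to be issued, 0 when nothing needs to be sent (e.g.
 * ERASE2 or a blank-page PAGEPROG).
 */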
865 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
866                 int ext_cmd_type, uint16_t column, int page_addr)
867 {
868         int addr_cycle, exec_cmd;
869         struct pxa3xx_nand_host *host;
870         struct mtd_info *mtd;
871
872         host = info->host[info->cs];
873         mtd = nand_to_mtd(&host->chip);
874         addr_cycle = 0;
875         exec_cmd = 1;
876
877         if (info->cs != 0)
878                 info->ndcb0 = NDCB0_CSEL;
879         else
880                 info->ndcb0 = 0;
881
882         if (command == NAND_CMD_SEQIN)
883                 exec_cmd = 0;
884
885         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
886                                     + host->col_addr_cycles);
887
888         switch (command) {
889         case NAND_CMD_READOOB:
890         case NAND_CMD_READ0:
891                 info->buf_start = column;
892                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
893                                 | addr_cycle
894                                 | NAND_CMD_READ0;
895
896                 if (command == NAND_CMD_READOOB)
897                         info->buf_start += mtd->writesize;
898
899                 if (info->cur_chunk < info->nfullchunks) {
900                         info->step_chunk_size = info->chunk_size;
901                         info->step_spare_size = info->spare_size;
902                 } else {
903                         info->step_chunk_size = info->last_chunk_size;
904                         info->step_spare_size = info->last_spare_size;
905                 }
906
907                 /*
908                  * Multiple page read needs an 'extended command type' field,
909                  * which is either naked-read or last-read according to the
910                  * state.
911                  */
912                 if (info->force_raw) {
913                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
914                                        NDCB0_LEN_OVRD |
915                                        NDCB0_EXT_CMD_TYPE(ext_cmd_type);
916                         info->ndcb3 = info->step_chunk_size +
917                                       info->step_spare_size + info->ecc_size;
918                 } else if (mtd->writesize == info->chunk_size) {
919                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
920                 } else if (mtd->writesize > info->chunk_size) {
921                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
922                                         | NDCB0_LEN_OVRD
923                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
924                         info->ndcb3 = info->step_chunk_size +
925                                 info->step_spare_size;
926                 }
927
928                 set_command_address(info, mtd->writesize, column, page_addr);
929                 break;
930
931         case NAND_CMD_SEQIN:
932
933                 info->buf_start = column;
934                 set_command_address(info, mtd->writesize, 0, page_addr);
935
936                 /*
937                  * Multiple page programming needs to execute the initial
938                  * SEQIN command that sets the page address.
939                  */
940                 if (mtd->writesize > info->chunk_size) {
941                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
942                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
943                                 | addr_cycle
944                                 | command;
945                         exec_cmd = 1;
946                 }
947                 break;
948
949         case NAND_CMD_PAGEPROG:
950                 if (is_buf_blank(info->data_buff,
951                                  (mtd->writesize + mtd->oobsize))) {
952                         exec_cmd = 0;
953                         break;
954                 }
955
956                 if (info->cur_chunk < info->nfullchunks) {
957                         info->step_chunk_size = info->chunk_size;
958                         info->step_spare_size = info->spare_size;
959                 } else {
960                         info->step_chunk_size = info->last_chunk_size;
961                         info->step_spare_size = info->last_spare_size;
962                 }
963
964                 /* Second command setting for large pages */
965                 if (mtd->writesize > info->chunk_size) {
966                         /*
967                          * Multiple page write uses the 'extended command'
968                          * field. This can be used to issue a command dispatch
969                          * or a naked-write depending on the current stage.
970                          */
971                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
972                                         | NDCB0_LEN_OVRD
973                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
974                         info->ndcb3 = info->step_chunk_size +
975                                       info->step_spare_size;
976
977                         /*
978                          * This is the command dispatch that completes a chunked
979                          * page program operation.
980                          */
981                         if (info->cur_chunk == info->ntotalchunks) {
982                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
983                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
984                                         | command;
985                                 info->ndcb1 = 0;
986                                 info->ndcb2 = 0;
987                                 info->ndcb3 = 0;
988                         }
989                 } else {
990                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
991                                         | NDCB0_AUTO_RS
992                                         | NDCB0_ST_ROW_EN
993                                         | NDCB0_DBC
994                                         | (NAND_CMD_PAGEPROG << 8)
995                                         | NAND_CMD_SEQIN
996                                         | addr_cycle;
997                 }
998                 break;
999
1000         case NAND_CMD_PARAM:
1001                 info->buf_count = INIT_BUFFER_SIZE;
1002                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1003                                 | NDCB0_ADDR_CYC(1)
1004                                 | NDCB0_LEN_OVRD
1005                                 | command;
1006                 info->ndcb1 = (column & 0xFF);
1007                 info->ndcb3 = INIT_BUFFER_SIZE;
1008                 info->step_chunk_size = INIT_BUFFER_SIZE;
1009                 break;
1010
1011         case NAND_CMD_READID:
1012                 info->buf_count = READ_ID_BYTES;
1013                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1014                                 | NDCB0_ADDR_CYC(1)
1015                                 | command;
1016                 info->ndcb1 = (column & 0xFF);
1017
1018                 info->step_chunk_size = 8;
1019                 break;
1020         case NAND_CMD_STATUS:
1021                 info->buf_count = 1;
1022                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1023                                 | NDCB0_ADDR_CYC(1)
1024                                 | command;
1025
1026                 info->step_chunk_size = 8;
1027                 break;
1028
1029         case NAND_CMD_ERASE1:
1030                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1031                                 | NDCB0_AUTO_RS
1032                                 | NDCB0_ADDR_CYC(3)
1033                                 | NDCB0_DBC
1034                                 | (NAND_CMD_ERASE2 << 8)
1035                                 | NAND_CMD_ERASE1;
1036                 info->ndcb1 = page_addr;
1037                 info->ndcb2 = 0;
1038
1039                 break;
1040         case NAND_CMD_RESET:
1041                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1042                                 | command;
1043
1044                 break;
1045
1046         case NAND_CMD_ERASE2:
1047                 exec_cmd = 0;
1048                 break;
1049
1050         default:
1051                 exec_cmd = 0;
1052                 dev_err(mtd->dev, "non-supported command %x\n",
1053                         command);
1054                 break;
1055         }
1056
1057         return exec_cmd;
1058 }
1059
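/*
 * Single-step command path: prepare the command, start the controller
 * and poll NDSR (feeding pxa3xx_nand_irq()) until the command
 * completes or CHIP_DELAY_TIMEOUT expires.
 */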
1060 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1061                          int column, int page_addr)
1062 {
1063         struct nand_chip *chip = mtd_to_nand(mtd);
1064         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1065         struct pxa3xx_nand_info *info = host->info_data;
1066         int exec_cmd;
1067
1068         /*
1069          * if this is a x16 device, then convert the input
1070          * "byte" address into a "word" address appropriate
1071          * for indexing a word-oriented device
1072          */
1073         if (info->reg_ndcr & NDCR_DWIDTH_M)
1074                 column /= 2;
1075
1076         /*
1077          * Different NAND chips may be hooked to different chip
1078          * selects, so check whether the chip select has changed;
1079          * if so, reload the timing registers.
1080          */
1081         if (info->cs != host->cs) {
1082                 info->cs = host->cs;
1083                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1084                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1085         }
1086
1087         prepare_start_command(info, command);
1088
1089         info->state = STATE_PREPARED;
1090         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1091
1092         if (exec_cmd) {
1093                 u32 ts;
1094
1095                 info->cmd_complete = 0;
1096                 info->dev_ready = 0;
1097                 info->need_wait = 1;
1098                 pxa3xx_nand_start(info);
1099
1100                 ts = get_timer(0);
1101                 while (1) {
1102                         u32 status;
1103
1104                         status = nand_readl(info, NDSR);
1105                         if (status)
1106                                 pxa3xx_nand_irq(info);
1107
1108                         if (info->cmd_complete)
1109                                 break;
1110
1111                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1112                                 dev_err(mtd->dev, "Wait timeout!!!\n");
1113                                 return;
1114                         }
1115                 }
1116         }
1117         info->state = STATE_IDLE;
1118 }
1119
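/*
 * Chunked command path for pages larger than one controller chunk:
 * iterate over the chunks, issuing naked/last-naked reads or writes
 * via the extended command type field, plus the final command
 * dispatch that closes a chunked program sequence.
 */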
1120 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1121                                   const unsigned command,
1122                                   int column, int page_addr)
1123 {
1124         struct nand_chip *chip = mtd_to_nand(mtd);
1125         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1126         struct pxa3xx_nand_info *info = host->info_data;
1127         int exec_cmd, ext_cmd_type;
1128
1129         /*
1130          * if this is a x16 device, then convert the input
1131          * "byte" address into a "word" address appropriate
1132          * for indexing a word-oriented device
1133          */
1134         if (info->reg_ndcr & NDCR_DWIDTH_M)
1135                 column /= 2;
1136
1137         /*
1138          * Different NAND chips may be hooked to different chip
1139          * selects, so check whether the chip select has changed;
1140          * if so, reload the timing registers.
1141          */
1142         if (info->cs != host->cs) {
1143                 info->cs = host->cs;
1144                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1145                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1146         }
1147
1148         /* Select the extended command for the first command */
1149         switch (command) {
1150         case NAND_CMD_READ0:
1151         case NAND_CMD_READOOB:
1152                 ext_cmd_type = EXT_CMD_TYPE_MONO;
1153                 break;
1154         case NAND_CMD_SEQIN:
1155                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1156                 break;
1157         case NAND_CMD_PAGEPROG:
1158                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1159                 break;
1160         default:
1161                 ext_cmd_type = 0;
1162                 break;
1163         }
1164
1165         prepare_start_command(info, command);
1166
1167         /*
1168          * Prepare the "is ready" completion before starting a command
1169          * transaction sequence. If the command is not executed, the
1170          * completion is marked as done immediately, see below.
1171          *
1172          * We can do that outside the chunk loop because the command
1173          * variable is invariant and thus so is exec_cmd.
1174          */
1175         info->need_wait = 1;
1176         info->dev_ready = 0;
1177
1178         do {
1179                 u32 ts;
1180
1181                 info->state = STATE_PREPARED;
1182                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1183                                                column, page_addr);
1184                 if (!exec_cmd) {
1185                         info->need_wait = 0;
1186                         info->dev_ready = 1;
1187                         break;
1188                 }
1189
1190                 info->cmd_complete = 0;
1191                 pxa3xx_nand_start(info);
1192
1193                 ts = get_timer(0);
1194                 while (1) {
1195                         u32 status;
1196
1197                         status = nand_readl(info, NDSR);
1198                         if (status)
1199                                 pxa3xx_nand_irq(info);
1200
1201                         if (info->cmd_complete)
1202                                 break;
1203
1204                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1205                                 dev_err(mtd->dev, "Wait timeout!!!\n");
1206                                 return;
1207                         }
1208                 }
1209
1210                 /* Only a few commands need several steps */
1211                 if (command != NAND_CMD_PAGEPROG &&
1212                     command != NAND_CMD_READ0    &&
1213                     command != NAND_CMD_READOOB)
1214                         break;
1215
1216                 info->cur_chunk++;
1217
1218                 /* Check if the sequence is complete */
1219                 if (info->cur_chunk == info->ntotalchunks &&
1220                     command != NAND_CMD_PAGEPROG)
1221                         break;
1222
1223                 /*
1224                  * After a split program command sequence has issued
1225                  * the command dispatch, the command sequence is complete.
1226                  */
1227                 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1228                     command == NAND_CMD_PAGEPROG &&
1229                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1230                         break;
1231
1232                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1233                         /* Last read: issue a 'last naked read' */
1234                         if (info->cur_chunk == info->ntotalchunks - 1)
1235                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1236                         else
1237                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1238
1239                 /*
1240                  * If a split program command has no more data to transfer,
1241                  * the command dispatch must be issued to complete it.
1242                  */
1243                 } else if (command == NAND_CMD_PAGEPROG &&
1244                            info->cur_chunk == info->ntotalchunks) {
1245                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1246                 }
1247         } while (1);
1248
1249         info->state = STATE_IDLE;
1250 }
1251
1252 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1253                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1254                 int page)
1255 {
1256         chip->write_buf(mtd, buf, mtd->writesize);
1257         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1258
1259         return 0;
1260 }
1261
1262 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1263                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1264                 int page)
1265 {
1266         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1267         struct pxa3xx_nand_info *info = host->info_data;
1268         int bf;
1269
1270         chip->read_buf(mtd, buf, mtd->writesize);
1271         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1272
1273         if (info->retcode == ERR_CORERR && info->use_ecc) {
1274                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1275
1276         } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1277                 /*
1278                  * Empty pages will trigger uncorrectable errors. Re-read the
1279                  * entire page in raw mode and check for bits not being "1".
1280                  * If there are more bitflips than the ECC strength can
1281                  * correct, then it is an actual uncorrectable error.
1282                  */
1283                 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1284                 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1285                                                  chip->oob_poi, mtd->oobsize,
1286                                                  NULL, 0, chip->ecc.strength);
1287                 if (bf < 0) {
1288                         mtd->ecc_stats.failed++;
1289                 } else if (bf) {
1290                         mtd->ecc_stats.corrected += bf;
1291                         info->max_bitflips = max_t(unsigned int,
1292                                                    info->max_bitflips, bf);
1293                         info->retcode = ERR_CORERR;
1294                 } else {
1295                         info->retcode = ERR_NONE;
1296                 }
1297
1298         } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1299                 /* Raw read is not supported with Hamming ECC engine */
1300                 if (is_buf_blank(buf, mtd->writesize))
1301                         info->retcode = ERR_NONE;
1302                 else
1303                         mtd->ecc_stats.failed++;
1304         }
1305
1306         return info->max_bitflips;
1307 }
1308
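/*
 * Raw (uncorrected) page read, BCH only: set force_raw so the next
 * READ0 bypasses the ECC engine, then de-interleave each chunk into
 * its data, spare and ECC areas of buf/oob_poi (ecc_size - 2 ECC bytes
 * are read back per chunk).
 */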
1309 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1310                                      struct nand_chip *chip, uint8_t *buf,
1311                                      int oob_required, int page)
1312 {
1313         struct pxa3xx_nand_host *host = chip->priv;
1314         struct pxa3xx_nand_info *info = host->info_data;
1315         int chunk, ecc_off_buf;
1316
1317         if (!info->ecc_bch)
1318                 return -ENOTSUPP;
1319
1320         /*
1321          * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1322          * pxa3xx_nand_start(), which will actually disable the ECC engine.
1323          */
1324         info->force_raw = true;
1325         chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1326
1327         ecc_off_buf = (info->nfullchunks * info->spare_size) +
1328                       info->last_spare_size;
1329         for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1330                 chip->read_buf(mtd,
1331                                buf + (chunk * info->chunk_size),
1332                                info->chunk_size);
1333                 chip->read_buf(mtd,
1334                                chip->oob_poi +
1335                                (chunk * (info->spare_size)),
1336                                info->spare_size);
1337                 chip->read_buf(mtd,
1338                                chip->oob_poi + ecc_off_buf +
1339                                (chunk * (info->ecc_size)),
1340                                info->ecc_size - 2);
1341         }
1342
1343         if (info->ntotalchunks > info->nfullchunks) {
1344                 chip->read_buf(mtd,
1345                                buf + (info->nfullchunks * info->chunk_size),
1346                                info->last_chunk_size);
1347                 chip->read_buf(mtd,
1348                                chip->oob_poi +
1349                                (info->nfullchunks * (info->spare_size)),
1350                                info->last_spare_size);
1351                 chip->read_buf(mtd,
1352                                chip->oob_poi + ecc_off_buf +
1353                                (info->nfullchunks * (info->ecc_size)),
1354                                info->ecc_size - 2);
1355         }
1356
1357         info->force_raw = false;
1358
1359         return 0;
1360 }
1361
1362 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1363                                     struct nand_chip *chip, int page)
1364 {
1365         /* Invalidate page cache */
1366         chip->pagebuf = -1;
1367
1368         return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1369                                        page);
1370 }
1371
1372 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1373 {
1374         struct nand_chip *chip = mtd_to_nand(mtd);
1375         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1376         struct pxa3xx_nand_info *info = host->info_data;
1377         char retval = 0xFF;
1378
1379         if (info->buf_start < info->buf_count)
1380                 /* Has a new command just been sent? */
1381                 retval = info->data_buff[info->buf_start++];
1382
1383         return retval;
1384 }
1385
1386 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1387 {
1388         struct nand_chip *chip = mtd_to_nand(mtd);
1389         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1390         struct pxa3xx_nand_info *info = host->info_data;
1391         u16 retval = 0xFFFF;
1392
1393         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1394                 retval = *((u16 *)(info->data_buff+info->buf_start));
1395                 info->buf_start += 2;
1396         }
1397         return retval;
1398 }
1399
1400 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1401 {
1402         struct nand_chip *chip = mtd_to_nand(mtd);
1403         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1404         struct pxa3xx_nand_info *info = host->info_data;
1405         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1406
1407         memcpy(buf, info->data_buff + info->buf_start, real_len);
1408         info->buf_start += real_len;
1409 }
1410
1411 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1412                 const uint8_t *buf, int len)
1413 {
1414         struct nand_chip *chip = mtd_to_nand(mtd);
1415         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1416         struct pxa3xx_nand_info *info = host->info_data;
1417         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1418
1419         memcpy(info->data_buff + info->buf_start, buf, real_len);
1420         info->buf_start += real_len;
1421 }
1422
1423 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1424 {
1425         /* Nothing to do: only a single, fixed chip select is supported */
1426 }
1427
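/*
 * Wait for the device to become ready by polling NDSR and feeding any
 * pending events to pxa3xx_nand_irq(), bounded by CHIP_DELAY_TIMEOUT (ms).
 * For program/erase operations the controller's error status is reported
 * back through the return value.
 */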
1428 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1429 {
1430         struct nand_chip *chip = mtd_to_nand(mtd);
1431         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1432         struct pxa3xx_nand_info *info = host->info_data;
1433
1434         if (info->need_wait) {
1435                 u32 ts;
1436
1437                 info->need_wait = 0;
1438
1439                 ts = get_timer(0);
1440                 while (1) {
1441                         u32 status;
1442
1443                         status = nand_readl(info, NDSR);
1444                         if (status)
1445                                 pxa3xx_nand_irq(info);
1446
1447                         if (info->dev_ready)
1448                                 break;
1449
1450                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1451                                 dev_err(mtd->dev, "Ready timeout!!!\n");
1452                                 return NAND_STATUS_FAIL;
1453                         }
1454                 }
1455         }
1456
1457         /* pxa3xx_nand_send_command has already waited for command completion */
1458         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1459                 if (info->retcode == ERR_NONE)
1460                         return 0;
1461                 else
1462                         return NAND_STATUS_FAIL;
1463         }
1464
1465         return NAND_STATUS_READY;
1466 }
1467
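/*
 * Program a minimal NDCR for device identification: all interrupts
 * unmasked, optional bus arbiter, READ_ID byte count and spare area
 * access enabled.
 */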
1468 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1469 {
1470         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1471
1472         /* Configure default flash values */
1473         info->reg_ndcr = 0x0; /* enable all interrupts */
1474         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1475         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1476         info->reg_ndcr |= NDCR_SPARE_EN;
1477
1478         return 0;
1479 }
1480
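/*
 * Complete NDCR once the chip geometry is known: row address start,
 * pages-per-block and page size bits.
 */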
1481 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1482 {
1483         struct pxa3xx_nand_host *host = info->host[info->cs];
1484         struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1485         struct nand_chip *chip = mtd_to_nand(mtd);
1486
1487         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1488         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1489         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1490 }
1491
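/*
 * 'nand-keep-config' path: inherit NDCR and the CS0 timing registers as
 * left by the bootloader instead of reprogramming them.
 */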
1492 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1493 {
1494         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1495         uint32_t ndcr = nand_readl(info, NDCR);
1496
1497         /* Set an initial chunk size */
1498         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1499         info->reg_ndcr = ndcr &
1500                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1501         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1502         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1503         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1504 }
1505
1506 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1507 {
1508         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1509         if (info->data_buff == NULL)
1510                 return -ENOMEM;
1511         return 0;
1512 }
1513
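/*
 * Check that a chip answers on the current chip select: apply the
 * conservative ONFI mode 0 timings, issue a RESET and look at the
 * resulting status.
 */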
1514 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1515 {
1516         struct pxa3xx_nand_info *info = host->info_data;
1517         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1518         struct mtd_info *mtd;
1519         struct nand_chip *chip;
1520         const struct nand_sdr_timings *timings;
1521         int ret;
1522
1523         mtd = nand_to_mtd(&info->host[info->cs]->chip);
1524         chip = mtd_to_nand(mtd);
1525
1526         /* configure default flash values */
1527         info->reg_ndcr = 0x0; /* enable all interrupts */
1528         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1529         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1530         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1531
1532         /* use the common (ONFI mode 0) timings for a first try */
1533         timings = onfi_async_timing_mode_to_sdr_timings(0);
1534         if (IS_ERR(timings))
1535                 return PTR_ERR(timings);
1536
1537         pxa3xx_nand_set_sdr_timing(host, timings);
1538
1539         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1540         ret = chip->waitfunc(mtd, chip);
1541         if (ret & NAND_STATUS_FAIL)
1542                 return -ENODEV;
1543
1544         return 0;
1545 }
1546
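/*
 * Map the requested ECC strength/step size and the page size onto one of
 * the controller's fixed chunk layouts. The controller may over-provision,
 * e.g. 16-bit BCH per 2048 bytes is selected to satisfy a 4-bit/512 B
 * requirement (see the comments on the individual cases below).
 */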
1547 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1548                         struct nand_ecc_ctrl *ecc,
1549                         int strength, int ecc_stepsize, int page_size)
1550 {
1551         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1552                 info->nfullchunks = 1;
1553                 info->ntotalchunks = 1;
1554                 info->chunk_size = 2048;
1555                 info->spare_size = 40;
1556                 info->ecc_size = 24;
1557                 ecc->mode = NAND_ECC_HW;
1558                 ecc->size = 512;
1559                 ecc->strength = 1;
1560
1561         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1562                 info->nfullchunks = 1;
1563                 info->ntotalchunks = 1;
1564                 info->chunk_size = 512;
1565                 info->spare_size = 8;
1566                 info->ecc_size = 8;
1567                 ecc->mode = NAND_ECC_HW;
1568                 ecc->size = 512;
1569                 ecc->strength = 1;
1570
1571         /*
1572          * Required ECC: 4-bit correction per 512 bytes
1573          * Select: 16-bit correction per 2048 bytes
1574          */
1575         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1576                 info->ecc_bch = 1;
1577                 info->nfullchunks = 1;
1578                 info->ntotalchunks = 1;
1579                 info->chunk_size = 2048;
1580                 info->spare_size = 32;
1581                 info->ecc_size = 32;
1582                 ecc->mode = NAND_ECC_HW;
1583                 ecc->size = info->chunk_size;
1584                 ecc->layout = &ecc_layout_2KB_bch4bit;
1585                 ecc->strength = 16;
1586
1587         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1588                 info->ecc_bch = 1;
1589                 info->nfullchunks = 2;
1590                 info->ntotalchunks = 2;
1591                 info->chunk_size = 2048;
1592                 info->spare_size = 32;
1593                 info->ecc_size = 32;
1594                 ecc->mode = NAND_ECC_HW;
1595                 ecc->size = info->chunk_size;
1596                 ecc->layout = &ecc_layout_4KB_bch4bit;
1597                 ecc->strength = 16;
1598
1599         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1600                 info->ecc_bch = 1;
1601                 info->nfullchunks = 4;
1602                 info->ntotalchunks = 4;
1603                 info->chunk_size = 2048;
1604                 info->spare_size = 32;
1605                 info->ecc_size = 32;
1606                 ecc->mode = NAND_ECC_HW;
1607                 ecc->size = info->chunk_size;
1608                 ecc->layout = &ecc_layout_8KB_bch4bit;
1609                 ecc->strength = 16;
1610
1611         /*
1612          * Required ECC: 8-bit correction per 512 bytes
1613          * Select: 16-bit correction per 1024 bytes
1614          */
1615         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1616                 info->ecc_bch = 1;
1617                 info->nfullchunks = 1;
1618                 info->ntotalchunks = 2;
1619                 info->chunk_size = 1024;
1620                 info->spare_size = 0;
1621                 info->last_chunk_size = 1024;
1622                 info->last_spare_size = 32;
1623                 info->ecc_size = 32;
1624                 ecc->mode = NAND_ECC_HW;
1625                 ecc->size = info->chunk_size;
1626                 ecc->layout = &ecc_layout_2KB_bch8bit;
1627                 ecc->strength = 16;
1628
1629         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1630                 info->ecc_bch = 1;
1631                 info->nfullchunks = 4;
1632                 info->ntotalchunks = 5;
1633                 info->chunk_size = 1024;
1634                 info->spare_size = 0;
1635                 info->last_chunk_size = 0;
1636                 info->last_spare_size = 64;
1637                 info->ecc_size = 32;
1638                 ecc->mode = NAND_ECC_HW;
1639                 ecc->size = info->chunk_size;
1640                 ecc->layout = &ecc_layout_4KB_bch8bit;
1641                 ecc->strength = 16;
1642
1643         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1644                 info->ecc_bch = 1;
1645                 info->nfullchunks = 8;
1646                 info->ntotalchunks = 9;
1647                 info->chunk_size = 1024;
1648                 info->spare_size = 0;
1649                 info->last_chunk_size = 0;
1650                 info->last_spare_size = 160;
1651                 info->ecc_size = 32;
1652                 ecc->mode = NAND_ECC_HW;
1653                 ecc->size = info->chunk_size;
1654                 ecc->layout = &ecc_layout_8KB_bch8bit;
1655                 ecc->strength = 16;
1656
1657         } else {
1658                 dev_err(info->controller.active->mtd.dev,
1659                         "ECC strength %d at page size %d is not supported\n",
1660                         strength, page_size);
1661                 return -ENODEV;
1662         }
1663
1664         return 0;
1665 }
1666
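/*
 * Identify the chip on the current chip select, configure timings and the
 * ECC layout, size the bounce buffer for page + OOB and finish the raw
 * NAND scan.
 */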
1667 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1668 {
1669         struct nand_chip *chip = mtd_to_nand(mtd);
1670         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1671         struct pxa3xx_nand_info *info = host->info_data;
1672         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1673         int ret;
1674         uint16_t ecc_strength, ecc_step;
1675
1676         if (pdata->keep_config) {
1677                 pxa3xx_nand_detect_config(info);
1678         } else {
1679                 ret = pxa3xx_nand_config_ident(info);
1680                 if (ret)
1681                         return ret;
1682                 ret = pxa3xx_nand_sensing(host);
1683                 if (ret) {
1684                         dev_info(mtd->dev, "There is no chip on cs %d!\n",
1685                                  info->cs);
1686                         return ret;
1687                 }
1688         }
1689
1690         /* Device detection must be done with ECC disabled */
1691         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1692                 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
1693                 nand_writel(info, NDECCCTRL, 0x0);
1694
1695         if (nand_scan_ident(mtd, 1, NULL))
1696                 return -ENODEV;
1697
1698         if (!pdata->keep_config) {
1699                 ret = pxa3xx_nand_init_timings(host);
1700                 if (ret) {
1701                         dev_err(mtd->dev,
1702                                 "Failed to set timings: %d\n", ret);
1703                         return ret;
1704                 }
1705         }
1706
1707 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1708         /*
1709          * We'll use a bad block table stored in flash and won't
1710          * allow writing the bad block marker to the flash.
1711          */
1712         chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1713         chip->bbt_td = &bbt_main_descr;
1714         chip->bbt_md = &bbt_mirror_descr;
1715 #endif
1716
1717         if (pdata->ecc_strength && pdata->ecc_step_size) {
1718                 ecc_strength = pdata->ecc_strength;
1719                 ecc_step = pdata->ecc_step_size;
1720         } else {
1721                 ecc_strength = chip->ecc_strength_ds;
1722                 ecc_step = chip->ecc_step_ds;
1723         }
1724
1725         /* Set default ECC strength requirements on non-ONFI devices */
1726         if (ecc_strength < 1 && ecc_step < 1) {
1727                 ecc_strength = 1;
1728                 ecc_step = 512;
1729         }
1730
1731         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1732                            ecc_step, mtd->writesize);
1733         if (ret)
1734                 return ret;
1735
1736         /*
1737          * If the page size is bigger than the FIFO size, check that we
1738          * are given the right variant and then switch to the extended
1739          * (aka split) command handling.
1740          */
1741         if (mtd->writesize > info->chunk_size) {
1742                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1743                         info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1744                         chip->cmdfunc = nand_cmdfunc_extended;
1745                 } else {
1746                         dev_err(mtd->dev,
1747                                 "unsupported page size on this variant\n");
1748                         return -ENODEV;
1749                 }
1750         }
1751
1752         /* calculate addressing information */
1753         if (mtd->writesize >= 2048)
1754                 host->col_addr_cycles = 2;
1755         else
1756                 host->col_addr_cycles = 1;
1757
1758         /* release the initial buffer */
1759         kfree(info->data_buff);
1760
1761         /* allocate the real data + oob buffer */
1762         info->buf_size = mtd->writesize + mtd->oobsize;
1763         ret = pxa3xx_nand_init_buff(info);
1764         if (ret)
1765                 return ret;
1766         info->oob_buff = info->data_buff + mtd->writesize;
1767
1768         if ((mtd->size >> chip->page_shift) > 65536)
1769                 host->row_addr_cycles = 3;
1770         else
1771                 host->row_addr_cycles = 2;
1772
1773         if (!pdata->keep_config)
1774                 pxa3xx_nand_config_tail(info);
1775
1776         return nand_scan_tail(mtd);
1777 }
1778
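/*
 * Set up the per-CS host structures carved out of the driver private data,
 * hook up the nand_chip callbacks, allocate the initial identification
 * buffer and, on Armada 8K, enable the NAND controller through the system
 * controller's GENCONF_SOC_DEVICE_MUX register.
 */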
1779 static int alloc_nand_resource(struct udevice *dev, struct pxa3xx_nand_info *info)
1780 {
1781         struct pxa3xx_nand_platform_data *pdata;
1782         struct pxa3xx_nand_host *host;
1783         struct nand_chip *chip = NULL;
1784         struct mtd_info *mtd;
1785         int cs;
1786
1787         pdata = info->pdata;
1788         if (pdata->num_cs <= 0)
1789                 return -ENODEV;
1790
1791         info->variant = pxa3xx_nand_get_variant(dev);
1792         for (cs = 0; cs < pdata->num_cs; cs++) {
1793                 chip = (struct nand_chip *)
1794                         ((u8 *)&info[1] + sizeof(*host) * cs);
1795                 mtd = nand_to_mtd(chip);
1796                 host = (struct pxa3xx_nand_host *)chip;
1797                 info->host[cs] = host;
1798                 host->cs = cs;
1799                 host->info_data = info;
1800                 mtd->owner = THIS_MODULE;
1801
1802                 nand_set_controller_data(chip, host);
1803                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1804                 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1805                 chip->ecc.read_oob_raw  = pxa3xx_nand_read_oob_raw;
1806                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1807                 chip->controller        = &info->controller;
1808                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1809                 chip->select_chip       = pxa3xx_nand_select_chip;
1810                 chip->read_word         = pxa3xx_nand_read_word;
1811                 chip->read_byte         = pxa3xx_nand_read_byte;
1812                 chip->read_buf          = pxa3xx_nand_read_buf;
1813                 chip->write_buf         = pxa3xx_nand_write_buf;
1814                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1815                 chip->cmdfunc           = nand_cmdfunc;
1816         }
1817
1818         /* Allocate a buffer to allow flash detection */
1819         info->buf_size = INIT_BUFFER_SIZE;
1820         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1821         if (info->data_buff == NULL)
1822                 return -ENOMEM;
1823
1824         /* initialize all interrupts to be disabled */
1825         disable_int(info, NDSR_MASK);
1826
1827         /*
1828          * Some SoCs like A7k/A8k need the NAND controller to be enabled
1829          * manually so the driver does not depend on the bootloader. This
1830          * is done through a single bit in the System Functions registers.
1831          */
1832         if (pxa3xx_nand_get_variant(dev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1833                 struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
1834                                 dev, "marvell,system-controller");
1835                 u32 reg;
1836
1837                 if (IS_ERR(sysctrl_base))
1838                         return PTR_ERR(sysctrl_base);
1839
1840                 regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
1841                 reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
1842                 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
1843         }
1844
1845         return 0;
1846 }
1847
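/*
 * Parse the controller node: register base, number of chip selects (only
 * one is supported), the arbiter/keep-config flags and the optional ECC
 * strength and step size.
 */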
1848 static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info)
1849 {
1850         struct pxa3xx_nand_platform_data *pdata;
1851
1852         pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1853         if (!pdata)
1854                 return -ENOMEM;
1855
1856         info->mmio_base = dev_read_addr_ptr(dev);
1857
1858         pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
1859         if (pdata->num_cs != 1) {
1860                 pr_err("pxa3xx driver supports single CS only\n");
                     kfree(pdata);   /* don't leak pdata on this error path */
1861                 return -EINVAL;
1862         }
1863
1864         if (dev_read_bool(dev, "nand-enable-arbiter"))
1865                 pdata->enable_arbiter = 1;
1866
1867         if (dev_read_bool(dev, "nand-keep-config"))
1868                 pdata->keep_config = 1;
1869
1870         /*
1871          * ECC parameters.
1872          * If these are not set, they will be selected according
1873          * to the detected flash type.
1874          */
1875         /* ECC strength */
1876         pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0);
1877
1878         /* ECC step size */
1879         pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size",
1880                         0);
1881
1882         info->pdata = pdata;
1883
1884         return 0;
1885 }
1886
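/*
 * Driver model probe: parse the device tree, allocate controller
 * resources, then scan and register an MTD device for each configured
 * chip select.
 */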
1887 static int pxa3xx_nand_probe(struct udevice *dev)
1888 {
1889         struct pxa3xx_nand_platform_data *pdata;
1890         int ret, cs, probe_success;
1891         struct pxa3xx_nand_info *info = dev_get_priv(dev);
1892
1893         ret = pxa3xx_nand_probe_dt(dev, info);
1894         if (ret)
1895                 return ret;
1896
1897         pdata = info->pdata;
1898
1899         ret = alloc_nand_resource(dev, info);
1900         if (ret) {
1901                 dev_err(dev, "alloc nand resource failed\n");
1902                 return ret;
1903         }
1904
1905         probe_success = 0;
1906         for (cs = 0; cs < pdata->num_cs; cs++) {
1907                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1908
1909                 /*
1910                  * The mtd name matches the one used in the 'mtdparts'
1911                  * kernel parameter. It must not be changed, otherwise the
1912                  * user's mtd partition configuration would break.
1913                  */
1914                 mtd->name = "pxa3xx_nand-0";
1915                 info->cs = cs;
1916                 ret = pxa3xx_nand_scan(mtd);
1917                 if (ret) {
1918                         dev_info(mtd->dev, "failed to scan nand at cs %d\n",
1919                                  cs);
1920                         continue;
1921                 }
1922
1923                 if (nand_register(cs, mtd))
1924                         continue;
1925
1926                 probe_success = 1;
1927         }
1928
1929         if (!probe_success)
1930                 return -ENODEV;
1931
1932         return 0;
1933 }
1934
1935 U_BOOT_DRIVER(pxa3xx_nand) = {
1936         .name = "pxa3xx-nand",
1937         .id = UCLASS_MTD,
1938         .of_match = pxa3xx_nand_dt_ids,
1939         .probe = pxa3xx_nand_probe,
1940         .priv_auto      = sizeof(struct pxa3xx_nand_info) +
1941                 sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE,
1942 };
1943
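/*
 * Board hook: probe the controller through driver model; MTD registration
 * itself happens in pxa3xx_nand_probe().
 */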
1944 void board_nand_init(void)
1945 {
1946         struct udevice *dev;
1947         int ret;
1948
1949         ret = uclass_get_device_by_driver(UCLASS_MTD,
1950                         DM_DRIVER_GET(pxa3xx_nand), &dev);
1951         if (ret && ret != -ENODEV) {
1952                 pr_err("Failed to initialize pxa3xx NAND controller. (error %d)\n",
1953                        ret);
1954         }
1955 }