5cc75de6efbc1a7a0baf84c4c60eb86713dd8c19
[platform/kernel/u-boot.git] / drivers / mtd / nand / pxa3xx_nand.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <linux/errno.h>
14 #include <asm/io.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/types.h>
19
20 #include "pxa3xx_nand.h"
21
22 DECLARE_GLOBAL_DATA_PTR;
23
#define TIMEOUT_DRAIN_FIFO      5       /* in ms */
#define CHIP_DELAY_TIMEOUT      200     /* command completion timeout; presumably ms — TODO confirm at use site */
#define NAND_STOP_DELAY         40      /* wait count for the controller to stop; units unclear — TODO confirm */
#define PAGE_CHUNK_SIZE         (2048)  /* controller transfers pages in 2 KiB chunks (see writesize checks) */

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE        2048
38
/* Controller register offsets (relative to info->mmio_base) */
#define NDCR            (0x00) /* Control register */
#define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR            (0x14) /* Status Register */
#define NDPCR           (0x18) /* Page Count Register */
#define NDBDR0          (0x1C) /* Bad Block Register 0 */
#define NDBDR1          (0x20) /* Bad Block Register 1 */
#define NDECCCTRL       (0x28) /* ECC control (BCH engine enable) */
#define NDDB            (0x40) /* Data Buffer (FIFO window) */
#define NDCB0           (0x48) /* Command Buffer0 (NDCB1/2/3 are loaded through it) */
#define NDCB1           (0x4C) /* Command Buffer1 (read-only mirror) */
#define NDCB2           (0x50) /* Command Buffer2 (read-only mirror) */
52
/*
 * NDCR (control register) bit definitions.
 *
 * Unsigned constants are used for the shifted masks: a plain
 * (0x1 << 31) left-shifts into the sign bit of a signed int, which
 * is undefined behavior in C (C11 6.5.7).
 */
#define NDCR_SPARE_EN           (0x1u << 31)
#define NDCR_ECC_EN             (0x1u << 30)
#define NDCR_DMA_EN             (0x1u << 29)
#define NDCR_ND_RUN             (0x1u << 28)
#define NDCR_DWIDTH_C           (0x1u << 27)
#define NDCR_DWIDTH_M           (0x1u << 26)
#define NDCR_PAGE_SZ            (0x1u << 24)
#define NDCR_NCSX               (0x1u << 23)
#define NDCR_ND_MODE            (0x3u << 21)
#define NDCR_NAND_MODE          (0x0)
#define NDCR_CLR_PG_CNT         (0x1u << 20)
#define NFCV1_NDCR_ARB_CNTL     (0x1u << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR        (0x1u << 19)
#define NDCR_RD_ID_CNT_MASK     (0x7u << 16)
#define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START           (0x1u << 15)
#define NDCR_PG_PER_BLK         (0x1u << 14)
#define NDCR_ND_ARB_EN          (0x1u << 12)
#define NDCR_INT_MASK           (0xFFF)
/* NDSR (status register) bits and the BCH corrected-error-count field */
#define NDSR_MASK               (0xfff)
#define NDSR_ERR_CNT_OFF        (16)
#define NDSR_ERR_CNT_MASK       (0x1f)
/*
 * Corrected-bitflip count reported by the BCH engine.  The argument
 * is fully parenthesized so that expression arguments (e.g. "a | b")
 * are not torn apart by operator precedence in the expansion.
 */
#define NDSR_ERR_CNT(sr)        (((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY                (0x1 << 12)     /* ready, used for the CS1 path */
#define NDSR_FLASH_RDY          (0x1 << 11)     /* ready, used for the CS0 path */
#define NDSR_CS0_PAGED          (0x1 << 10)
#define NDSR_CS1_PAGED          (0x1 << 9)
#define NDSR_CS0_CMDD           (0x1 << 8)      /* CS0 command done */
#define NDSR_CS1_CMDD           (0x1 << 7)      /* CS1 command done */
#define NDSR_CS0_BBD            (0x1 << 6)
#define NDSR_CS1_BBD            (0x1 << 5)
#define NDSR_UNCORERR           (0x1 << 4)      /* uncorrectable ECC error */
#define NDSR_CORERR             (0x1 << 3)      /* corrected ECC error */
#define NDSR_WRDREQ             (0x1 << 2)      /* write data request (fill FIFO) */
#define NDSR_RDDREQ             (0x1 << 1)      /* read data request (drain FIFO) */
#define NDSR_WRCMDREQ           (0x1)           /* write command request (load NDCBx) */
91
/* NDCB0 (command buffer word 0) field definitions */
#define NDCB0_LEN_OVRD          (0x1 << 28)     /* data length taken from NDCB3 */
#define NDCB0_ST_ROW_EN         (0x1 << 26)
#define NDCB0_AUTO_RS           (0x1 << 25)
#define NDCB0_CSEL              (0x1 << 24)     /* address chip-select 1 instead of 0 */
/*
 * The extended command type occupies bits 31:29; use unsigned
 * arithmetic so the shift into bit 31 is well-defined — a signed
 * "0x7 << 29" overflows int, which is undefined behavior in C.
 */
#define NDCB0_EXT_CMD_TYPE_MASK (0x7u << 29)
#define NDCB0_EXT_CMD_TYPE(x)   (((unsigned int)(x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
#define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC                (0x1 << 20)
#define NDCB0_DBC               (0x1 << 19)     /* double-byte command: CMD2 field is valid */
#define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
#define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK         (0xff << 8)
#define NDCB0_CMD1_MASK         (0xff)
#define NDCB0_ADDR_CYC_SHIFT    (16)
107
/* Extended command types, written into NDCB0 via NDCB0_EXT_CMD_TYPE() */
#define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ       4 /* Read */
#define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL      3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES           7
122
/* macros for registers read/write, relative to (info)->mmio_base */
#define nand_writel(info, off, val)     \
        writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)           \
        readl((info)->mmio_base + (off))
129
/* Error codes stored in info->retcode after a command completes */
enum {
        ERR_NONE        = 0,
        ERR_DMABUSERR   = -1,   /* DMA bus error */
        ERR_SENDCMD     = -2,   /* failed to issue the command */
        ERR_UNCORERR    = -3,   /* uncorrectable ECC error */
        ERR_BBERR       = -4,   /* bad block detected */
        ERR_CORERR      = -5,   /* corrected ECC error */
};

/* States of the command execution machine, kept in info->state */
enum {
        STATE_IDLE = 0,
        STATE_PREPARED,         /* NDCBx words built, command not yet issued */
        STATE_CMD_HANDLE,       /* command bytes being written to NDCB0 */
        STATE_DMA_READING,
        STATE_DMA_WRITING,
        STATE_DMA_DONE,
        STATE_PIO_READING,      /* draining the data FIFO */
        STATE_PIO_WRITING,      /* filling the data FIFO */
        STATE_CMD_DONE,
        STATE_READY,
};

/*
 * Controller generation: NFCv1 (PXA SoCs) vs. NFCv2 (Armada 370/XP).
 * This U-Boot port only ever reports the Armada variant (see
 * pxa3xx_nand_get_variant()).
 */
enum pxa3xx_nand_variant {
        PXA3XX_NAND_VARIANT_PXA,
        PXA3XX_NAND_VARIANT_ARMADA370,
};
157
/* Per-chip-select state for one attached NAND chip */
struct pxa3xx_nand_host {
        struct nand_chip        chip;
        void                    *info_data;     /* back-pointer to struct pxa3xx_nand_info */

        int                     use_ecc;        /* use HW ECC for this chip? */
        int                     cs;             /* chip-select this chip sits on */

        /* calculated from pxa3xx_nand_flash data */
        unsigned int            col_addr_cycles;        /* column address cycles */
        unsigned int            row_addr_cycles;        /* row address cycles */
};
170
/*
 * Controller-wide driver state, shared by all chip selects.  Per-chip
 * state lives in struct pxa3xx_nand_host.
 */
struct pxa3xx_nand_info {
        struct nand_hw_control  controller;
        struct pxa3xx_nand_platform_data *pdata;

        struct clk              *clk;
        void __iomem            *mmio_base;     /* controller register window */
        unsigned long           mmio_phys;
        /* completion flags polled by callers, set from the IRQ path */
        int                     cmd_complete, dev_ready;

        unsigned int            buf_start;      /* read cursor handed to the MTD layer */
        unsigned int            buf_count;      /* bytes valid in data_buff for this command */
        unsigned int            buf_size;       /* allocated size of data_buff */
        unsigned int            data_buff_pos;  /* PIO position within data_buff */
        unsigned int            oob_buff_pos;   /* PIO position within oob_buff */

        unsigned char           *data_buff;     /* bounce buffer for page data */
        unsigned char           *oob_buff;      /* bounce buffer for spare/OOB data */

        struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
        unsigned int            state;          /* STATE_* machine state */

        /*
         * This driver supports NFCv1 (as found in PXA SoC)
         * and NFCv2 (as found in Armada 370/XP SoC).
         */
        enum pxa3xx_nand_variant variant;

        int                     cs;             /* currently selected chip select */
        int                     use_ecc;        /* use HW ECC ? */
        int                     ecc_bch;        /* using BCH ECC? */
        int                     use_spare;      /* use spare ? */
        int                     need_wait;

        /* Amount of real data per full chunk */
        unsigned int            chunk_size;

        /* Amount of spare data per full chunk */
        unsigned int            spare_size;

        /* Number of full chunks (i.e chunk_size + spare_size) */
        unsigned int            nfullchunks;

        /*
         * Total number of chunks. If equal to nfullchunks, then there
         * are only full chunks. Otherwise, there is one last chunk of
         * size (last_chunk_size + last_spare_size)
         */
        unsigned int            ntotalchunks;

        /* Amount of real data in the last chunk */
        unsigned int            last_chunk_size;

        /* Amount of spare data in the last chunk */
        unsigned int            last_spare_size;

        unsigned int            ecc_size;
        unsigned int            ecc_err_cnt;    /* bitflips in the last corrected chunk */
        unsigned int            max_bitflips;   /* max bitflips over all chunks of a page */
        int                     retcode;        /* ERR_* result of the last command */

        /*
         * Variables only valid during command
         * execution. step_chunk_size and step_spare_size is the
         * amount of real data and spare data in the current
         * chunk. cur_chunk is the current chunk being
         * read/programmed.
         */
        unsigned int            step_chunk_size;
        unsigned int            step_spare_size;
        unsigned int            cur_chunk;

        /* cached register value */
        uint32_t                reg_ndcr;
        uint32_t                ndtr0cs0;
        uint32_t                ndtr1cs0;

        /* generated NDCBx register values */
        uint32_t                ndcb0;
        uint32_t                ndcb1;
        uint32_t                ndcb2;
        uint32_t                ndcb3;
};
253
/*
 * Fallback timings used when the chip reports no ONFI timing mode.
 * Field order follows struct pxa3xx_nand_timing in pxa3xx_nand.h;
 * presumably tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR in ns —
 * TODO confirm against the header.
 */
static struct pxa3xx_nand_timing timing[] = {
        { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
        { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
        { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
        { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

/*
 * Known chips for the non-ONFI fallback path:
 * { chip_id, flash_width, dfc_width, timing }.  chip_id is built from
 * the first two READID bytes as (byte1 << 8) | byte0; the widths are
 * flash and controller bus widths in bits.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
        { 0x46ec, 16, 16, &timing[1] },
        { 0xdaec,  8,  8, &timing[1] },
        { 0xd7ec,  8,  8, &timing[1] },
        { 0xa12c,  8,  8, &timing[2] },
        { 0xb12c, 16, 16, &timing[2] },
        { 0xdc2c,  8,  8, &timing[2] },
        { 0xcc2c, 16, 16, &timing[2] },
        { 0xba20, 16, 16, &timing[3] },
};
271
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/*
 * On-flash bad block table support: the BBT and its mirror are located
 * by the "MVBbt0" / "1tbBVM" patterns stored in the OOB area of the
 * last blocks of each chip.
 */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main BBT: 6-byte pattern at OOB offset 8, version byte at offset 14 */
static struct nand_bbt_descr bbt_main_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs = 8,
        .len = 6,
        .veroffs = 14,
        .maxblocks = 8,         /* Last 8 blocks in each chip */
        .pattern = bbt_pattern
};

/* Mirror BBT: same layout, reversed pattern */
static struct nand_bbt_descr bbt_mirror_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs = 8,
        .len = 6,
        .veroffs = 14,
        .maxblocks = 8,         /* Last 8 blocks in each chip */
        .pattern = bbt_mirror_pattern
};
#endif
296
/* BCH-4bit layout for 2 KiB pages: ECC in the upper half of the 64-byte OOB */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
        .eccbytes = 32,
        .eccpos = {
                32, 33, 34, 35, 36, 37, 38, 39,
                40, 41, 42, 43, 44, 45, 46, 47,
                48, 49, 50, 51, 52, 53, 54, 55,
                56, 57, 58, 59, 60, 61, 62, 63},
        /* bytes 0-1 skipped, presumably for the bad block marker */
        .oobfree = { {2, 30} }
};

/* BCH-4bit layout for 4 KiB pages: ECC split across both 64-byte halves */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
        .eccbytes = 64,
        .eccpos = {
                32,  33,  34,  35,  36,  37,  38,  39,
                40,  41,  42,  43,  44,  45,  46,  47,
                48,  49,  50,  51,  52,  53,  54,  55,
                56,  57,  58,  59,  60,  61,  62,  63,
                96,  97,  98,  99,  100, 101, 102, 103,
                104, 105, 106, 107, 108, 109, 110, 111,
                112, 113, 114, 115, 116, 117, 118, 119,
                120, 121, 122, 123, 124, 125, 126, 127},
        /* Bootrom looks in bytes 0 & 5 for bad blocks */
        .oobfree = { {6, 26}, { 64, 32} }
};

/*
 * BCH-8bit layout for 4 KiB pages.
 * NOTE(review): eccbytes is 128 but only 32 positions are listed and no
 * oobfree region is declared — presumably the remaining ECC bytes are
 * handled outside this table; confirm against the upstream driver
 * before relying on this layout.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
        .eccbytes = 128,
        .eccpos = {
                32,  33,  34,  35,  36,  37,  38,  39,
                40,  41,  42,  43,  44,  45,  46,  47,
                48,  49,  50,  51,  52,  53,  54,  55,
                56,  57,  58,  59,  60,  61,  62,  63},
        .oobfree = { }
};
331
/* Pack a cycle count into its NDTR0 field, clamped to the field maximum */
#define NDTR0_tCH(c)    (min((c), 7) << 19)
#define NDTR0_tCS(c)    (min((c), 7) << 16)
#define NDTR0_tWH(c)    (min((c), 7) << 11)
#define NDTR0_tWP(c)    (min((c), 7) << 8)
#define NDTR0_tRH(c)    (min((c), 7) << 3)
#define NDTR0_tRP(c)    (min((c), 7) << 0)

/* Pack a cycle count into its NDTR1 field, clamped to the field maximum */
#define NDTR1_tR(c)     (min((c), 65535) << 16)
#define NDTR1_tWHR(c)   (min((c), 15) << 4)
#define NDTR1_tAR(c)    (min((c), 15) << 0)

/*
 * convert nano-seconds to nand flash controller clock cycles.
 * Both arguments are fully parenthesized so that expression arguments
 * (e.g. "a + b" passed as clk) are not torn apart by operator
 * precedence in the expansion.
 */
#define ns2cycle(ns, clk)       (int)((ns) * ((clk) / 1000000) / 1000)
345
/*
 * Report which controller generation we drive.  This U-Boot port is
 * only used on Armada 370/XP/38x, so the NFCv2 variant is hard-coded.
 */
static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
        /* We only support the Armada 370/XP/38x for now */
        return PXA3XX_NAND_VARIANT_ARMADA370;
}
351
352 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
353                                    const struct pxa3xx_nand_timing *t)
354 {
355         struct pxa3xx_nand_info *info = host->info_data;
356         unsigned long nand_clk = mvebu_get_nand_clock();
357         uint32_t ndtr0, ndtr1;
358
359         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
360                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
361                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
362                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
363                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
364                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
365
366         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
367                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
368                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
369
370         info->ndtr0cs0 = ndtr0;
371         info->ndtr1cs0 = ndtr1;
372         nand_writel(info, NDTR0CS0, ndtr0);
373         nand_writel(info, NDTR1CS0, ndtr1);
374 }
375
/*
 * Program the CS0 timing registers from ONFI SDR timings.  The
 * nand_sdr_timings minima are presumably in picoseconds (hence the
 * DIV_ROUND_UP(..., 1000) conversions to ns) and chip->chip_delay
 * presumably in microseconds (hence * 1000 for tR in ns) — TODO
 * confirm against rawnand.h.
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
                                       const struct nand_sdr_timings *t)
{
        struct pxa3xx_nand_info *info = host->info_data;
        struct nand_chip *chip = &host->chip;
        unsigned long nand_clk = mvebu_get_nand_clock();
        uint32_t ndtr0, ndtr1;

        u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
        u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
        u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
        /* write pulse width = write cycle minus write high time */
        u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
        u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
        /* read pulse width = read cycle minus read high time */
        u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
        u32 tR = chip->chip_delay * 1000;
        u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
        u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

        /* fallback to a default value if tR = 0 */
        if (!tR)
                tR = 20000;

        ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
                NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
                NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
                NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
                NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
                NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

        ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
                NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
                NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

        /* cache the programmed values and write them to the hardware */
        info->ndtr0cs0 = ndtr0;
        info->ndtr1cs0 = ndtr1;
        nand_writel(info, NDTR0CS0, ndtr0);
        nand_writel(info, NDTR1CS0, ndtr1);
}
414
/*
 * Program the controller timings for the attached chip.
 *
 * If the chip reports an ONFI async timing mode, the fastest supported
 * mode is converted to SDR timings and used.  Otherwise we fall back
 * to the builtin_flash_types[] table keyed by the first two READID
 * bytes, and also set up the NDCR bus-width bits from the table entry.
 *
 * Returns 0 on success, -EINVAL if the chip is not in the fallback
 * table, or a PTR_ERR() from the SDR timing lookup.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
        const struct nand_sdr_timings *timings;
        struct nand_chip *chip = &host->chip;
        struct pxa3xx_nand_info *info = host->info_data;
        const struct pxa3xx_nand_flash *f = NULL;
        struct mtd_info *mtd = nand_to_mtd(&host->chip);
        int mode, id, ntypes, i;

        mode = onfi_get_async_timing_mode(chip);
        if (mode == ONFI_TIMING_MODE_UNKNOWN) {
                ntypes = ARRAY_SIZE(builtin_flash_types);

                /* Identify the chip by its first two READID bytes */
                chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

                id = chip->read_byte(mtd);
                id |= chip->read_byte(mtd) << 0x8;

                for (i = 0; i < ntypes; i++) {
                        f = &builtin_flash_types[i];

                        if (f->chip_id == id)
                                break;
                }

                if (i == ntypes) {
                        /*
                         * NOTE(review): struct pxa3xx_nand_info has no pdev
                         * member in this file — presumably dev_err() ignores
                         * its dev argument in this build; TODO confirm.
                         */
                        dev_err(&info->pdev->dev, "Error: timings not found\n");
                        return -EINVAL;
                }

                pxa3xx_nand_set_timing(host, f->timing);

                if (f->flash_width == 16) {
                        info->reg_ndcr |= NDCR_DWIDTH_M;
                        chip->options |= NAND_BUSWIDTH_16;
                }

                info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
        } else {
                /* Use the highest timing mode advertised in the bitmask */
                mode = fls(mode) - 1;
                if (mode < 0)
                        mode = 0;

                timings = onfi_async_timing_mode_to_sdr_timings(mode);
                if (IS_ERR(timings))
                        return PTR_ERR(timings);

                pxa3xx_nand_set_sdr_timing(host, timings);
        }

        return 0;
}
467
468 /**
469  * NOTE: it is a must to set ND_RUN first, then write
470  * command buffer, otherwise, it does not work.
471  * We enable all the interrupt at the same time, and
472  * let pxa3xx_nand_irq to handle all logic.
473  */
474 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
475 {
476         uint32_t ndcr;
477
478         ndcr = info->reg_ndcr;
479
480         if (info->use_ecc) {
481                 ndcr |= NDCR_ECC_EN;
482                 if (info->ecc_bch)
483                         nand_writel(info, NDECCCTRL, 0x1);
484         } else {
485                 ndcr &= ~NDCR_ECC_EN;
486                 if (info->ecc_bch)
487                         nand_writel(info, NDECCCTRL, 0x0);
488         }
489
490         ndcr &= ~NDCR_DMA_EN;
491
492         if (info->use_spare)
493                 ndcr |= NDCR_SPARE_EN;
494         else
495                 ndcr &= ~NDCR_SPARE_EN;
496
497         ndcr |= NDCR_ND_RUN;
498
499         /* clear status bits and run */
500         nand_writel(info, NDSR, NDSR_MASK);
501         nand_writel(info, NDCR, 0);
502         nand_writel(info, NDCR, ndcr);
503 }
504
505 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
506 {
507         uint32_t ndcr;
508
509         ndcr = nand_readl(info, NDCR);
510         nand_writel(info, NDCR, ndcr | int_mask);
511 }
512
/*
 * Read 'len' 32-bit words from the controller data buffer (NDDB) into
 * 'data'.  With BCH enabled, the datasheet requires NDSR.RDDREQ to be
 * polled after every 32 bytes read from NDDB, so the bulk of the
 * transfer is done 8 words at a time; on a poll timeout we bail out,
 * leaving the remaining words unread.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
        if (info->ecc_bch) {
                u32 ts;

                /*
                 * According to the datasheet, when reading from NDDB
                 * with BCH enabled, after each 32 bytes reads, we
                 * have to make sure that the NDSR.RDDREQ bit is set.
                 *
                 * Drain the FIFO 8 32 bits reads at a time, and skip
                 * the polling on the last read.
                 */
                while (len > 8) {
                        readsl(info->mmio_base + NDDB, data, 8);

                        ts = get_timer(0);
                        while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
                                if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
                                        dev_err(&info->pdev->dev,
                                                "Timeout on RDDREQ while draining the FIFO\n");
                                        return;
                                }
                        }

                        /*
                         * 8 words == 32 bytes consumed.  NOTE: arithmetic
                         * on void * is a GCC extension (byte-sized).
                         */
                        data += 32;
                        len -= 8;
                }
        }

        /* Final (or only) transfer: whatever is left, no polling needed */
        readsl(info->mmio_base + NDDB, data, len);
}
545
/*
 * Perform the PIO data phase of the current command step: move
 * step_chunk_size data bytes and step_spare_size spare bytes between
 * the bounce buffers and the controller FIFO (NDDB), rounded up to
 * whole 32-bit words, then advance the buffer cursors.  Must only be
 * called in STATE_PIO_READING or STATE_PIO_WRITING.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
        switch (info->state) {
        case STATE_PIO_WRITING:
                /* Fill the FIFO: data area first, then spare area */
                if (info->step_chunk_size)
                        writesl(info->mmio_base + NDDB,
                                info->data_buff + info->data_buff_pos,
                                DIV_ROUND_UP(info->step_chunk_size, 4));

                if (info->step_spare_size)
                        writesl(info->mmio_base + NDDB,
                                info->oob_buff + info->oob_buff_pos,
                                DIV_ROUND_UP(info->step_spare_size, 4));
                break;
        case STATE_PIO_READING:
                /* Drain the FIFO, honoring the BCH RDDREQ polling rule */
                if (info->step_chunk_size)
                        drain_fifo(info,
                                   info->data_buff + info->data_buff_pos,
                                   DIV_ROUND_UP(info->step_chunk_size, 4));

                if (info->step_spare_size)
                        drain_fifo(info,
                                   info->oob_buff + info->oob_buff_pos,
                                   DIV_ROUND_UP(info->step_spare_size, 4));
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
                                info->state);
                BUG();
        }

        /* Update buffer pointers for multi-page read/write */
        info->data_buff_pos += info->step_chunk_size;
        info->oob_buff_pos += info->step_spare_size;
}
581
/*
 * In Linux this is the threaded half of the IRQ handler; in U-Boot it
 * is simply called synchronously from pxa3xx_nand_irq().  It moves the
 * PIO data for the current step, then acknowledges the data-request
 * status bits.
 */
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
        handle_data_pio(info);

        info->state = STATE_CMD_DONE;
        /* ack the read/write data-request interrupts */
        nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
589
/*
 * Controller event handler.  U-Boot polls the controller rather than
 * taking a real interrupt, so this is called directly.  It decodes
 * NDSR, records ECC results, runs the PIO data phase on RDDREQ/WRDREQ,
 * loads the prepared NDCBx words when the controller raises WRCMDREQ,
 * and latches completion into info->cmd_complete / info->dev_ready for
 * callers to poll.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
        unsigned int status, is_completed = 0, is_ready = 0;
        unsigned int ready, cmd_done;
        irqreturn_t ret = IRQ_HANDLED;

        /* CS0 and CS1 report ready/command-done on different NDSR bits */
        if (info->cs == 0) {
                ready           = NDSR_FLASH_RDY;
                cmd_done        = NDSR_CS0_CMDD;
        } else {
                ready           = NDSR_RDY;
                cmd_done        = NDSR_CS1_CMDD;
        }

        status = nand_readl(info, NDSR);

        if (status & NDSR_UNCORERR)
                info->retcode = ERR_UNCORERR;
        if (status & NDSR_CORERR) {
                info->retcode = ERR_CORERR;
                /* Only NFCv2 with BCH reports a real error count in NDSR */
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
                    info->ecc_bch)
                        info->ecc_err_cnt = NDSR_ERR_CNT(status);
                else
                        info->ecc_err_cnt = 1;

                /*
                 * Each chunk composing a page is corrected independently,
                 * and we need to store maximum number of corrected bitflips
                 * to return it to the MTD layer in ecc.read_page().
                 */
                info->max_bitflips = max_t(unsigned int,
                                           info->max_bitflips,
                                           info->ecc_err_cnt);
        }
        if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
                info->state = (status & NDSR_RDDREQ) ?
                        STATE_PIO_READING : STATE_PIO_WRITING;
                /* Call the IRQ thread in U-Boot directly */
                pxa3xx_nand_irq_thread(info);
                /*
                 * NOTE(review): returns 0 (IRQ_NONE) here but IRQ_HANDLED
                 * elsewhere — presumably the caller ignores the return
                 * value; TODO confirm.
                 */
                return 0;
        }
        if (status & cmd_done) {
                info->state = STATE_CMD_DONE;
                is_completed = 1;
        }
        if (status & ready) {
                info->state = STATE_READY;
                is_ready = 1;
        }

        /*
         * Clear all status bit before issuing the next command, which
         * can and will alter the status bits and will deserve a new
         * interrupt on its own. This lets the controller exit the IRQ
         */
        nand_writel(info, NDSR, status);

        if (status & NDSR_WRCMDREQ) {
                status &= ~NDSR_WRCMDREQ;
                info->state = STATE_CMD_HANDLE;

                /*
                 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
                 * must be loaded by writing directly either 12 or 16
                 * bytes directly to NDCB0, four bytes at a time.
                 *
                 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
                 * but each NDCBx register can be read.
                 */
                nand_writel(info, NDCB0, info->ndcb0);
                nand_writel(info, NDCB0, info->ndcb1);
                nand_writel(info, NDCB0, info->ndcb2);

                /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
                        nand_writel(info, NDCB0, info->ndcb3);
        }

        /* Latch completion flags for the polling caller */
        if (is_completed)
                info->cmd_complete = 1;
        if (is_ready)
                info->dev_ready = 1;

        return ret;
}
676
/* Return 1 if every byte of buf[0..len) is 0xff (erased), else 0 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                if (buf[i] != 0xff)
                        return 0;
        }

        return 1;
}
684
685 static void set_command_address(struct pxa3xx_nand_info *info,
686                 unsigned int page_size, uint16_t column, int page_addr)
687 {
688         /* small page addr setting */
689         if (page_size < PAGE_CHUNK_SIZE) {
690                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
691                                 | (column & 0xFF);
692
693                 info->ndcb2 = 0;
694         } else {
695                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
696                                 | (column & 0xFFFF);
697
698                 if (page_addr & 0xFF0000)
699                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
700                 else
701                         info->ndcb2 = 0;
702         }
703 }
704
/*
 * Reset the per-command state in 'info' and apply command-specific
 * defaults before prepare_set_command() builds the NDCBx words.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
        struct pxa3xx_nand_host *host = info->host[info->cs];
        struct mtd_info *mtd = nand_to_mtd(&host->chip);

        /* reset data and oob column point to handle data */
        info->buf_start         = 0;
        info->buf_count         = 0;
        info->data_buff_pos     = 0;
        info->oob_buff_pos      = 0;
        info->step_chunk_size   = 0;
        info->step_spare_size   = 0;
        info->cur_chunk         = 0;
        info->use_ecc           = 0;
        info->use_spare         = 1;
        info->retcode           = ERR_NONE;
        info->ecc_err_cnt       = 0;
        info->ndcb3             = 0;
        info->need_wait         = 0;

        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_PAGEPROG:
                /* only page read/program go through the ECC engine */
                info->use_ecc = 1;
                break;
        case NAND_CMD_PARAM:
                /* PARAM data is raw; don't transfer the spare area */
                info->use_spare = 0;
                break;
        default:
                /*
                 * READ0/PAGEPROG/PARAM keep NDCB1/NDCB2 so that
                 * set_command_address() can fill them; every other
                 * command starts from cleared address words.
                 */
                info->ndcb1 = 0;
                info->ndcb2 = 0;
                break;
        }

        /*
         * If we are about to issue a read command, or about to set
         * the write address, then clean the data buffer.
         */
        if (command == NAND_CMD_READ0 ||
            command == NAND_CMD_READOOB ||
            command == NAND_CMD_SEQIN) {
                info->buf_count = mtd->writesize + mtd->oobsize;
                memset(info->data_buff, 0xFF, info->buf_count);
        }
}
750
751 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
752                 int ext_cmd_type, uint16_t column, int page_addr)
753 {
754         int addr_cycle, exec_cmd;
755         struct pxa3xx_nand_host *host;
756         struct mtd_info *mtd;
757
758         host = info->host[info->cs];
759         mtd = nand_to_mtd(&host->chip);
760         addr_cycle = 0;
761         exec_cmd = 1;
762
763         if (info->cs != 0)
764                 info->ndcb0 = NDCB0_CSEL;
765         else
766                 info->ndcb0 = 0;
767
768         if (command == NAND_CMD_SEQIN)
769                 exec_cmd = 0;
770
771         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
772                                     + host->col_addr_cycles);
773
774         switch (command) {
775         case NAND_CMD_READOOB:
776         case NAND_CMD_READ0:
777                 info->buf_start = column;
778                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
779                                 | addr_cycle
780                                 | NAND_CMD_READ0;
781
782                 if (command == NAND_CMD_READOOB)
783                         info->buf_start += mtd->writesize;
784
785                 if (info->cur_chunk < info->nfullchunks) {
786                         info->step_chunk_size = info->chunk_size;
787                         info->step_spare_size = info->spare_size;
788                 } else {
789                         info->step_chunk_size = info->last_chunk_size;
790                         info->step_spare_size = info->last_spare_size;
791                 }
792
793                 /*
794                  * Multiple page read needs an 'extended command type' field,
795                  * which is either naked-read or last-read according to the
796                  * state.
797                  */
798                 if (mtd->writesize == PAGE_CHUNK_SIZE) {
799                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
800                 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
801                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
802                                         | NDCB0_LEN_OVRD
803                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
804                         info->ndcb3 = info->step_chunk_size +
805                                 info->step_spare_size;
806                 }
807
808                 set_command_address(info, mtd->writesize, column, page_addr);
809                 break;
810
811         case NAND_CMD_SEQIN:
812
813                 info->buf_start = column;
814                 set_command_address(info, mtd->writesize, 0, page_addr);
815
816                 /*
817                  * Multiple page programming needs to execute the initial
818                  * SEQIN command that sets the page address.
819                  */
820                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
821                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
822                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
823                                 | addr_cycle
824                                 | command;
825                         exec_cmd = 1;
826                 }
827                 break;
828
829         case NAND_CMD_PAGEPROG:
830                 if (is_buf_blank(info->data_buff,
831                                  (mtd->writesize + mtd->oobsize))) {
832                         exec_cmd = 0;
833                         break;
834                 }
835
836                 if (info->cur_chunk < info->nfullchunks) {
837                         info->step_chunk_size = info->chunk_size;
838                         info->step_spare_size = info->spare_size;
839                 } else {
840                         info->step_chunk_size = info->last_chunk_size;
841                         info->step_spare_size = info->last_spare_size;
842                 }
843
844                 /* Second command setting for large pages */
845                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
846                         /*
847                          * Multiple page write uses the 'extended command'
848                          * field. This can be used to issue a command dispatch
849                          * or a naked-write depending on the current stage.
850                          */
851                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
852                                         | NDCB0_LEN_OVRD
853                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
854                         info->ndcb3 = info->step_chunk_size +
855                                       info->step_spare_size;
856
857                         /*
858                          * This is the command dispatch that completes a chunked
859                          * page program operation.
860                          */
861                         if (info->cur_chunk == info->ntotalchunks) {
862                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
863                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
864                                         | command;
865                                 info->ndcb1 = 0;
866                                 info->ndcb2 = 0;
867                                 info->ndcb3 = 0;
868                         }
869                 } else {
870                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
871                                         | NDCB0_AUTO_RS
872                                         | NDCB0_ST_ROW_EN
873                                         | NDCB0_DBC
874                                         | (NAND_CMD_PAGEPROG << 8)
875                                         | NAND_CMD_SEQIN
876                                         | addr_cycle;
877                 }
878                 break;
879
880         case NAND_CMD_PARAM:
881                 info->buf_count = INIT_BUFFER_SIZE;
882                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
883                                 | NDCB0_ADDR_CYC(1)
884                                 | NDCB0_LEN_OVRD
885                                 | command;
886                 info->ndcb1 = (column & 0xFF);
887                 info->ndcb3 = INIT_BUFFER_SIZE;
888                 info->step_chunk_size = INIT_BUFFER_SIZE;
889                 break;
890
891         case NAND_CMD_READID:
892                 info->buf_count = READ_ID_BYTES;
893                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
894                                 | NDCB0_ADDR_CYC(1)
895                                 | command;
896                 info->ndcb1 = (column & 0xFF);
897
898                 info->step_chunk_size = 8;
899                 break;
900         case NAND_CMD_STATUS:
901                 info->buf_count = 1;
902                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
903                                 | NDCB0_ADDR_CYC(1)
904                                 | command;
905
906                 info->step_chunk_size = 8;
907                 break;
908
909         case NAND_CMD_ERASE1:
910                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
911                                 | NDCB0_AUTO_RS
912                                 | NDCB0_ADDR_CYC(3)
913                                 | NDCB0_DBC
914                                 | (NAND_CMD_ERASE2 << 8)
915                                 | NAND_CMD_ERASE1;
916                 info->ndcb1 = page_addr;
917                 info->ndcb2 = 0;
918
919                 break;
920         case NAND_CMD_RESET:
921                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
922                                 | command;
923
924                 break;
925
926         case NAND_CMD_ERASE2:
927                 exec_cmd = 0;
928                 break;
929
930         default:
931                 exec_cmd = 0;
932                 dev_err(&info->pdev->dev, "non-supported command %x\n",
933                         command);
934                 break;
935         }
936
937         return exec_cmd;
938 }
939
/*
 * nand_cmdfunc - mtd cmdfunc hook for devices whose page fits in one
 * controller FIFO chunk (writesize <= PAGE_CHUNK_SIZE).
 *
 * Builds the NDCB command words via prepare_set_command() and, when a
 * controller transaction is actually required, starts it and polls
 * NDSR (servicing events through pxa3xx_nand_irq()) until the
 * command-complete flag is set or CHIP_DELAY_TIMEOUT expires.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is a x16 device, convert the input "byte" address
	 * into a "word" address appropriate for indexing a
	 * word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * yes, reload the per-chip timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;	/* waitfunc must poll for dev_ready */
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			/* Interrupts are disabled: poll and dispatch by hand */
			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				/*
				 * NOTE(review): on timeout we return with
				 * info->state left at STATE_PREPARED — confirm
				 * callers tolerate the skipped STATE_IDLE.
				 */
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}
999
/*
 * nand_cmdfunc_extended - mtd cmdfunc hook for large pages
 * (writesize > PAGE_CHUNK_SIZE); installed only for the Armada 370/XP
 * variant (see pxa3xx_nand_scan()).
 *
 * A large page does not fit the controller FIFO, so page reads and
 * programs are split into chunks issued with the "extended command"
 * encodings (naked read/write, last-read, command dispatch).  Each
 * iteration of the outer loop issues one chunk transaction and
 * advances info->cur_chunk until the whole sequence completes.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
                                  const unsigned command,
                                  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is a x16 device, convert the input "byte" address
	 * into a "word" address appropriate for indexing a
	 * word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * yes, reload the per-chip timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: mark ready so waitfunc won't block */
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			/* Interrupts are disabled: poll and dispatch by hand */
			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0    &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1131
1132 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1133                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1134                 int page)
1135 {
1136         chip->write_buf(mtd, buf, mtd->writesize);
1137         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1138
1139         return 0;
1140 }
1141
1142 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1143                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1144                 int page)
1145 {
1146         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1147         struct pxa3xx_nand_info *info = host->info_data;
1148
1149         chip->read_buf(mtd, buf, mtd->writesize);
1150         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1151
1152         if (info->retcode == ERR_CORERR && info->use_ecc) {
1153                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1154
1155         } else if (info->retcode == ERR_UNCORERR) {
1156                 /*
1157                  * for blank page (all 0xff), HW will calculate its ECC as
1158                  * 0, which is different from the ECC information within
1159                  * OOB, ignore such uncorrectable errors
1160                  */
1161                 if (is_buf_blank(buf, mtd->writesize))
1162                         info->retcode = ERR_NONE;
1163                 else
1164                         mtd->ecc_stats.failed++;
1165         }
1166
1167         return info->max_bitflips;
1168 }
1169
1170 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1171 {
1172         struct nand_chip *chip = mtd_to_nand(mtd);
1173         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1174         struct pxa3xx_nand_info *info = host->info_data;
1175         char retval = 0xFF;
1176
1177         if (info->buf_start < info->buf_count)
1178                 /* Has just send a new command? */
1179                 retval = info->data_buff[info->buf_start++];
1180
1181         return retval;
1182 }
1183
1184 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1185 {
1186         struct nand_chip *chip = mtd_to_nand(mtd);
1187         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1188         struct pxa3xx_nand_info *info = host->info_data;
1189         u16 retval = 0xFFFF;
1190
1191         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1192                 retval = *((u16 *)(info->data_buff+info->buf_start));
1193                 info->buf_start += 2;
1194         }
1195         return retval;
1196 }
1197
1198 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1199 {
1200         struct nand_chip *chip = mtd_to_nand(mtd);
1201         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1202         struct pxa3xx_nand_info *info = host->info_data;
1203         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1204
1205         memcpy(buf, info->data_buff + info->buf_start, real_len);
1206         info->buf_start += real_len;
1207 }
1208
1209 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1210                 const uint8_t *buf, int len)
1211 {
1212         struct nand_chip *chip = mtd_to_nand(mtd);
1213         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1214         struct pxa3xx_nand_info *info = host->info_data;
1215         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1216
1217         memcpy(info->data_buff + info->buf_start, buf, real_len);
1218         info->buf_start += real_len;
1219 }
1220
/*
 * mtd select_chip hook: intentionally empty — chip select handling is
 * done per command via host->cs (see nand_cmdfunc()).
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1225
/*
 * pxa3xx_nand_waitfunc - mtd waitfunc hook.
 *
 * If the preceding cmdfunc flagged need_wait, poll NDSR (dispatching
 * events through pxa3xx_nand_irq()) until the device-ready flag is
 * set or CHIP_DELAY_TIMEOUT expires.  For write/erase operations the
 * recorded controller result is translated into 0 on success or
 * NAND_STATUS_FAIL; all other operations report NAND_STATUS_READY.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			/* Interrupts are disabled: poll and dispatch by hand */
			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1265
1266 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1267 {
1268         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1269
1270         /* Configure default flash values */
1271         info->chunk_size = PAGE_CHUNK_SIZE;
1272         info->reg_ndcr = 0x0; /* enable all interrupts */
1273         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1274         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1275         info->reg_ndcr |= NDCR_SPARE_EN;
1276
1277         return 0;
1278 }
1279
1280 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1281 {
1282         struct pxa3xx_nand_host *host = info->host[info->cs];
1283         struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1284         struct nand_chip *chip = mtd_to_nand(mtd);
1285
1286         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1287         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1288         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1289 }
1290
1291 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1292 {
1293         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1294         uint32_t ndcr = nand_readl(info, NDCR);
1295
1296         /* Set an initial chunk size */
1297         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1298         info->reg_ndcr = ndcr &
1299                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1300         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1301         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1302         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1303 }
1304
1305 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1306 {
1307         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1308         if (info->data_buff == NULL)
1309                 return -ENOMEM;
1310         return 0;
1311 }
1312
1313 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1314 {
1315         struct pxa3xx_nand_info *info = host->info_data;
1316         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1317         struct mtd_info *mtd;
1318         struct nand_chip *chip;
1319         const struct nand_sdr_timings *timings;
1320         int ret;
1321
1322         mtd = nand_to_mtd(&info->host[info->cs]->chip);
1323         chip = mtd_to_nand(mtd);
1324
1325         /* configure default flash values */
1326         info->reg_ndcr = 0x0; /* enable all interrupts */
1327         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1328         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1329         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1330
1331         /* use the common timing to make a try */
1332         timings = onfi_async_timing_mode_to_sdr_timings(0);
1333         if (IS_ERR(timings))
1334                 return PTR_ERR(timings);
1335
1336         pxa3xx_nand_set_sdr_timing(host, timings);
1337
1338         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1339         ret = chip->waitfunc(mtd, chip);
1340         if (ret & NAND_STATUS_FAIL)
1341                 return -ENODEV;
1342
1343         return 0;
1344 }
1345
/*
 * pxa_ecc_init - map a (strength, step size, page size) requirement
 * onto one of the controller's fixed ECC geometries.
 *
 * Fills in the chunking layout (nfullchunks/ntotalchunks/chunk_size/
 * spare_size/ecc_size) used by the command path and the nand_ecc_ctrl
 * parameters used by the mtd layer.  Returns 0 on success or -ENODEV
 * when no supported geometry matches.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* 1-bit correction per 512 bytes on a 2 KiB page: one chunk */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* 1-bit correction per 512 bytes on a small (512 B) page */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes (BCH), one chunk
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	/* As above but a 4 KiB page needs two 2 KiB chunks */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes (BCH).
	 * Four full 1 KiB data chunks plus a final data-less chunk
	 * carrying only the 64 spare bytes.
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}
1424
/*
 * pxa3xx_nand_scan - full device bring-up for one mtd instance.
 *
 * Sequence: configure (or read back) the controller, sense/identify
 * the chip with ECC disabled, apply the detected timings, pick the
 * command path and ECC geometry, size the addressing cycles and the
 * real data buffer, then finish with nand_scan_tail().
 * Returns 0 on success or a negative error code.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Either trust the earlier boot stage or probe from scratch */
	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(&info->pdev->dev,
				 "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	/* Replace the probe-time mode-0 timings with the detected ones */
	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform-provided ECC requirements win over the chip's own */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/*
	 * Set default ECC strength requirements on non-ONFI devices.
	 * NOTE(review): the '&&' means defaults apply only when BOTH
	 * values are unset — confirm a chip reporting only one of the
	 * two is intended to fall through as-is.
	 */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial (identification-sized) buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* More than 65536 pages needs a third row-address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1535
1536 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1537 {
1538         struct pxa3xx_nand_platform_data *pdata;
1539         struct pxa3xx_nand_host *host;
1540         struct nand_chip *chip = NULL;
1541         struct mtd_info *mtd;
1542         int ret, cs;
1543
1544         pdata = info->pdata;
1545         if (pdata->num_cs <= 0)
1546                 return -ENODEV;
1547
1548         info->variant = pxa3xx_nand_get_variant();
1549         for (cs = 0; cs < pdata->num_cs; cs++) {
1550                 chip = (struct nand_chip *)
1551                         ((u8 *)&info[1] + sizeof(*host) * cs);
1552                 mtd = nand_to_mtd(chip);
1553                 host = (struct pxa3xx_nand_host *)chip;
1554                 info->host[cs] = host;
1555                 host->cs = cs;
1556                 host->info_data = info;
1557                 mtd->owner = THIS_MODULE;
1558
1559                 nand_set_controller_data(chip, host);
1560                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1561                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1562                 chip->controller        = &info->controller;
1563                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1564                 chip->select_chip       = pxa3xx_nand_select_chip;
1565                 chip->read_word         = pxa3xx_nand_read_word;
1566                 chip->read_byte         = pxa3xx_nand_read_byte;
1567                 chip->read_buf          = pxa3xx_nand_read_buf;
1568                 chip->write_buf         = pxa3xx_nand_write_buf;
1569                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1570                 chip->cmdfunc           = nand_cmdfunc;
1571         }
1572
1573         /* Allocate a buffer to allow flash detection */
1574         info->buf_size = INIT_BUFFER_SIZE;
1575         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1576         if (info->data_buff == NULL) {
1577                 ret = -ENOMEM;
1578                 goto fail_disable_clk;
1579         }
1580
1581         /* initialize all interrupts to be disabled */
1582         disable_int(info, NDSR_MASK);
1583
1584         return 0;
1585
1586         kfree(info->data_buff);
1587 fail_disable_clk:
1588         return ret;
1589 }
1590
1591 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1592 {
1593         struct pxa3xx_nand_platform_data *pdata;
1594         const void *blob = gd->fdt_blob;
1595         int node = -1;
1596
1597         pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1598         if (!pdata)
1599                 return -ENOMEM;
1600
1601         /* Get address decoding nodes from the FDT blob */
1602         do {
1603                 node = fdt_node_offset_by_compatible(blob, node,
1604                                                      "marvell,mvebu-pxa3xx-nand");
1605                 if (node < 0)
1606                         break;
1607
1608                 /* Bypass disabeld nodes */
1609                 if (!fdtdec_get_is_enabled(blob, node))
1610                         continue;
1611
1612                 /* Get the first enabled NAND controler base address */
1613                 info->mmio_base =
1614                         (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1615                                         blob, node, "reg", 0, NULL, true);
1616
1617                 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1618                 if (pdata->num_cs != 1) {
1619                         pr_err("pxa3xx driver supports single CS only\n");
1620                         break;
1621                 }
1622
1623                 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1624                         pdata->enable_arbiter = 1;
1625
1626                 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1627                         pdata->keep_config = 1;
1628
1629                 /*
1630                  * ECC parameters.
1631                  * If these are not set, they will be selected according
1632                  * to the detected flash type.
1633                  */
1634                 /* ECC strength */
1635                 pdata->ecc_strength = fdtdec_get_int(blob, node,
1636                                                      "nand-ecc-strength", 0);
1637
1638                 /* ECC step size */
1639                 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1640                                                       "nand-ecc-step-size", 0);
1641
1642                 info->pdata = pdata;
1643
1644                 /* Currently support only a single NAND controller */
1645                 return 0;
1646
1647         } while (node >= 0);
1648
1649         return -EINVAL;
1650 }
1651
1652 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1653 {
1654         struct pxa3xx_nand_platform_data *pdata;
1655         int ret, cs, probe_success;
1656
1657         ret = pxa3xx_nand_probe_dt(info);
1658         if (ret)
1659                 return ret;
1660
1661         pdata = info->pdata;
1662
1663         ret = alloc_nand_resource(info);
1664         if (ret) {
1665                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1666                 return ret;
1667         }
1668
1669         probe_success = 0;
1670         for (cs = 0; cs < pdata->num_cs; cs++) {
1671                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1672
1673                 /*
1674                  * The mtd name matches the one used in 'mtdparts' kernel
1675                  * parameter. This name cannot be changed or otherwise
1676                  * user's mtd partitions configuration would get broken.
1677                  */
1678                 mtd->name = "pxa3xx_nand-0";
1679                 info->cs = cs;
1680                 ret = pxa3xx_nand_scan(mtd);
1681                 if (ret) {
1682                         dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1683                                  cs);
1684                         continue;
1685                 }
1686
1687                 if (nand_register(cs, mtd))
1688                         continue;
1689
1690                 probe_success = 1;
1691         }
1692
1693         if (!probe_success)
1694                 return -ENODEV;
1695
1696         return 0;
1697 }
1698
1699 /*
1700  * Main initialization routine
1701  */
1702 void board_nand_init(void)
1703 {
1704         struct pxa3xx_nand_info *info;
1705         struct pxa3xx_nand_host *host;
1706         int ret;
1707
1708         info = kzalloc(sizeof(*info) +
1709                        sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1710                        GFP_KERNEL);
1711         if (!info)
1712                 return;
1713
1714         ret = pxa3xx_nand_probe(info);
1715         if (ret)
1716                 return;
1717 }