mtd: nand: pxa3xx: Fix READOOB implementation
[platform/kernel/u-boot.git] / drivers/mtd/nand/pxa3xx_nand.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <linux/errno.h>
14 #include <asm/io.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/types.h>
19
20 #include "pxa3xx_nand.h"
21
22 DECLARE_GLOBAL_DATA_PTR;
23
24 #define TIMEOUT_DRAIN_FIFO      5       /* in ms */
25 #define CHIP_DELAY_TIMEOUT      200
26 #define NAND_STOP_DELAY         40
27 #define PAGE_CHUNK_SIZE         (2048)
28
29 /*
30  * Define a buffer size for the initial command that detects the flash device:
31  * STATUS, READID and PARAM.
32  * ONFI param page is 256 bytes, and there are three redundant copies
33  * to be read. JEDEC param page is 512 bytes, and there are also three
34  * redundant copies to be read.
35  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
36  */
37 #define INIT_BUFFER_SIZE        2048
38
39 /* registers and bit definitions */
40 #define NDCR            (0x00) /* Control register */
41 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
42 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
43 #define NDSR            (0x14) /* Status Register */
44 #define NDPCR           (0x18) /* Page Count Register */
45 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
46 #define NDBDR1          (0x20) /* Bad Block Register 1 */
47 #define NDECCCTRL       (0x28) /* ECC control */
48 #define NDDB            (0x40) /* Data Buffer */
49 #define NDCB0           (0x48) /* Command Buffer0 */
50 #define NDCB1           (0x4C) /* Command Buffer1 */
51 #define NDCB2           (0x50) /* Command Buffer2 */
52
53 #define NDCR_SPARE_EN           (0x1 << 31)
54 #define NDCR_ECC_EN             (0x1 << 30)
55 #define NDCR_DMA_EN             (0x1 << 29)
56 #define NDCR_ND_RUN             (0x1 << 28)
57 #define NDCR_DWIDTH_C           (0x1 << 27)
58 #define NDCR_DWIDTH_M           (0x1 << 26)
59 #define NDCR_PAGE_SZ            (0x1 << 24)
60 #define NDCR_NCSX               (0x1 << 23)
61 #define NDCR_ND_MODE            (0x3 << 21)
62 #define NDCR_NAND_MODE          (0x0)
63 #define NDCR_CLR_PG_CNT         (0x1 << 20)
64 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
65 #define NFCV2_NDCR_STOP_ON_UNCOR        (0x1 << 19)
66 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
67 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
68
69 #define NDCR_RA_START           (0x1 << 15)
70 #define NDCR_PG_PER_BLK         (0x1 << 14)
71 #define NDCR_ND_ARB_EN          (0x1 << 12)
72 #define NDCR_INT_MASK           (0xFFF)
73
74 #define NDSR_MASK               (0xfff)
75 #define NDSR_ERR_CNT_OFF        (16)
76 #define NDSR_ERR_CNT_MASK       (0x1f)
77 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
78 #define NDSR_RDY                (0x1 << 12)
79 #define NDSR_FLASH_RDY          (0x1 << 11)
80 #define NDSR_CS0_PAGED          (0x1 << 10)
81 #define NDSR_CS1_PAGED          (0x1 << 9)
82 #define NDSR_CS0_CMDD           (0x1 << 8)
83 #define NDSR_CS1_CMDD           (0x1 << 7)
84 #define NDSR_CS0_BBD            (0x1 << 6)
85 #define NDSR_CS1_BBD            (0x1 << 5)
86 #define NDSR_UNCORERR           (0x1 << 4)
87 #define NDSR_CORERR             (0x1 << 3)
88 #define NDSR_WRDREQ             (0x1 << 2)
89 #define NDSR_RDDREQ             (0x1 << 1)
90 #define NDSR_WRCMDREQ           (0x1)
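/*
 * Example (illustrative status value): for a status word of 0x00050008,
 * NDSR_ERR_CNT() yields (0x00050008 >> 16) & 0x1f = 5 corrected bitflips,
 * and bit 3 (NDSR_CORERR) flags the correctable error.
 */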
91
92 #define NDCB0_LEN_OVRD          (0x1 << 28)
93 #define NDCB0_ST_ROW_EN         (0x1 << 26)
94 #define NDCB0_AUTO_RS           (0x1 << 25)
95 #define NDCB0_CSEL              (0x1 << 24)
96 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
97 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
98 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
99 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
100 #define NDCB0_NC                (0x1 << 20)
101 #define NDCB0_DBC               (0x1 << 19)
102 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
103 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
104 #define NDCB0_CMD2_MASK         (0xff << 8)
105 #define NDCB0_CMD1_MASK         (0xff)
106 #define NDCB0_ADDR_CYC_SHIFT    (16)
107
108 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
109 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
110 #define EXT_CMD_TYPE_READ       4 /* Read */
111 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
112 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
113 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
114 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
115
116 /*
117  * This should be large enough to read 'ONFI' and 'JEDEC'.
118  * Let's use 7 bytes, which is the maximum ID count supported
119  * by the controller (see NDCR_RD_ID_CNT_MASK).
120  */
121 #define READ_ID_BYTES           7
122
123 /* macros for registers read/write */
124 #define nand_writel(info, off, val)     \
125         writel((val), (info)->mmio_base + (off))
126
127 #define nand_readl(info, off)           \
128         readl((info)->mmio_base + (off))
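/*
 * Typical usage, as seen later in this driver:
 *   nand_writel(info, NDSR, NDSR_MASK);    clear all status bits
 *   status = nand_readl(info, NDSR);       poll the status register
 */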
129
130 /* error code and state */
131 enum {
132         ERR_NONE        = 0,
133         ERR_DMABUSERR   = -1,
134         ERR_SENDCMD     = -2,
135         ERR_UNCORERR    = -3,
136         ERR_BBERR       = -4,
137         ERR_CORERR      = -5,
138 };
139
140 enum {
141         STATE_IDLE = 0,
142         STATE_PREPARED,
143         STATE_CMD_HANDLE,
144         STATE_DMA_READING,
145         STATE_DMA_WRITING,
146         STATE_DMA_DONE,
147         STATE_PIO_READING,
148         STATE_PIO_WRITING,
149         STATE_CMD_DONE,
150         STATE_READY,
151 };
152
153 enum pxa3xx_nand_variant {
154         PXA3XX_NAND_VARIANT_PXA,
155         PXA3XX_NAND_VARIANT_ARMADA370,
156 };
157
158 struct pxa3xx_nand_host {
159         struct nand_chip        chip;
160         void                    *info_data;
161
162         /* page size of attached chip */
163         int                     use_ecc;
164         int                     cs;
165
166         /* calculated from pxa3xx_nand_flash data */
167         unsigned int            col_addr_cycles;
168         unsigned int            row_addr_cycles;
169 };
170
171 struct pxa3xx_nand_info {
172         struct nand_hw_control  controller;
173         struct pxa3xx_nand_platform_data *pdata;
174
175         struct clk              *clk;
176         void __iomem            *mmio_base;
177         unsigned long           mmio_phys;
178         int                     cmd_complete, dev_ready;
179
180         unsigned int            buf_start;
181         unsigned int            buf_count;
182         unsigned int            buf_size;
183         unsigned int            data_buff_pos;
184         unsigned int            oob_buff_pos;
185
186         unsigned char           *data_buff;
187         unsigned char           *oob_buff;
188
189         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
190         unsigned int            state;
191
192         /*
193          * This driver supports NFCv1 (as found in PXA SoC)
194          * and NFCv2 (as found in Armada 370/XP SoC).
195          */
196         enum pxa3xx_nand_variant variant;
197
198         int                     cs;
199         int                     use_ecc;        /* use HW ECC ? */
200         int                     ecc_bch;        /* using BCH ECC? */
201         int                     use_spare;      /* use spare ? */
202         int                     need_wait;
203
204         /* Amount of real data per full chunk */
205         unsigned int            chunk_size;
206
207         /* Amount of spare data per full chunk */
208         unsigned int            spare_size;
209
210         /* Number of full chunks (i.e. of size chunk_size + spare_size) */
211         unsigned int            nfullchunks;
212
213         /*
214          * Total number of chunks. If equal to nfullchunks, then there
215          * are only full chunks. Otherwise, there is one last chunk of
216          * size (last_chunk_size + last_spare_size)
217          */
218         unsigned int            ntotalchunks;
219
220         /* Amount of real data in the last chunk */
221         unsigned int            last_chunk_size;
222
223         /* Amount of spare data in the last chunk */
224         unsigned int            last_spare_size;
225
226         unsigned int            ecc_size;
227         unsigned int            ecc_err_cnt;
228         unsigned int            max_bitflips;
229         int                     retcode;
230
231         /*
232          * Variables only valid during command
233          * execution. step_chunk_size and step_spare_size are the
234          * amount of real data and spare data in the current
235          * chunk. cur_chunk is the current chunk being
236          * read/programmed.
237          */
238         unsigned int            step_chunk_size;
239         unsigned int            step_spare_size;
240         unsigned int            cur_chunk;
241
242         /* cached register value */
243         uint32_t                reg_ndcr;
244         uint32_t                ndtr0cs0;
245         uint32_t                ndtr1cs0;
246
247         /* generated NDCBx register values */
248         uint32_t                ndcb0;
249         uint32_t                ndcb1;
250         uint32_t                ndcb2;
251         uint32_t                ndcb3;
252 };
253
254 static struct pxa3xx_nand_timing timing[] = {
255         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
256         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
257         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
258         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
259 };
260
261 static struct pxa3xx_nand_flash builtin_flash_types[] = {
262         { 0x46ec, 16, 16, &timing[1] },
263         { 0xdaec,  8,  8, &timing[1] },
264         { 0xd7ec,  8,  8, &timing[1] },
265         { 0xa12c,  8,  8, &timing[2] },
266         { 0xb12c, 16, 16, &timing[2] },
267         { 0xdc2c,  8,  8, &timing[2] },
268         { 0xcc2c, 16, 16, &timing[2] },
269         { 0xba20, 16, 16, &timing[3] },
270 };
271
272 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
273 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
274 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
275
276 static struct nand_bbt_descr bbt_main_descr = {
277         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
278                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
279         .offs = 8,
280         .len = 6,
281         .veroffs = 14,
282         .maxblocks = 8,         /* Last 8 blocks in each chip */
283         .pattern = bbt_pattern
284 };
285
286 static struct nand_bbt_descr bbt_mirror_descr = {
287         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
288                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
289         .offs = 8,
290         .len = 6,
291         .veroffs = 14,
292         .maxblocks = 8,         /* Last 8 blocks in each chip */
293         .pattern = bbt_mirror_pattern
294 };
295 #endif
296
297 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
298         .eccbytes = 32,
299         .eccpos = {
300                 32, 33, 34, 35, 36, 37, 38, 39,
301                 40, 41, 42, 43, 44, 45, 46, 47,
302                 48, 49, 50, 51, 52, 53, 54, 55,
303                 56, 57, 58, 59, 60, 61, 62, 63},
304         .oobfree = { {2, 30} }
305 };
306
307 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
308         .eccbytes = 64,
309         .eccpos = {
310                 32,  33,  34,  35,  36,  37,  38,  39,
311                 40,  41,  42,  43,  44,  45,  46,  47,
312                 48,  49,  50,  51,  52,  53,  54,  55,
313                 56,  57,  58,  59,  60,  61,  62,  63,
314                 96,  97,  98,  99,  100, 101, 102, 103,
315                 104, 105, 106, 107, 108, 109, 110, 111,
316                 112, 113, 114, 115, 116, 117, 118, 119,
317                 120, 121, 122, 123, 124, 125, 126, 127},
318         /* Bootrom looks in bytes 0 & 5 for bad blocks */
319         .oobfree = { {6, 26}, { 64, 32} }
320 };
321
322 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
323         .eccbytes = 128,
324         .eccpos = {
325                 32,  33,  34,  35,  36,  37,  38,  39,
326                 40,  41,  42,  43,  44,  45,  46,  47,
327                 48,  49,  50,  51,  52,  53,  54,  55,
328                 56,  57,  58,  59,  60,  61,  62,  63},
329         .oobfree = { }
330 };
331
332 #define NDTR0_tCH(c)    (min((c), 7) << 19)
333 #define NDTR0_tCS(c)    (min((c), 7) << 16)
334 #define NDTR0_tWH(c)    (min((c), 7) << 11)
335 #define NDTR0_tWP(c)    (min((c), 7) << 8)
336 #define NDTR0_tRH(c)    (min((c), 7) << 3)
337 #define NDTR0_tRP(c)    (min((c), 7) << 0)
338
339 #define NDTR1_tR(c)     (min((c), 65535) << 16)
340 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
341 #define NDTR1_tAR(c)    (min((c), 15) << 0)
342
343 /* convert nanoseconds to NAND flash controller clock cycles */
344 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
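/*
 * For example, assuming a 250 MHz NAND controller clock (illustrative
 * value), ns2cycle(25, 250000000) = 25 * 250 / 1000 = 6 clock cycles.
 */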
345
346 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
347 {
348         /* We only support the Armada 370/XP/38x for now */
349         return PXA3XX_NAND_VARIANT_ARMADA370;
350 }
351
352 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
353                                    const struct pxa3xx_nand_timing *t)
354 {
355         struct pxa3xx_nand_info *info = host->info_data;
356         unsigned long nand_clk = mvebu_get_nand_clock();
357         uint32_t ndtr0, ndtr1;
358
359         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
360                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
361                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
362                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
363                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
364                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
365
366         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
367                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
368                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
369
370         info->ndtr0cs0 = ndtr0;
371         info->ndtr1cs0 = ndtr1;
372         nand_writel(info, NDTR0CS0, ndtr0);
373         nand_writel(info, NDTR1CS0, ndtr1);
374 }
375
376 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
377                                        const struct nand_sdr_timings *t)
378 {
379         struct pxa3xx_nand_info *info = host->info_data;
380         struct nand_chip *chip = &host->chip;
381         unsigned long nand_clk = mvebu_get_nand_clock();
382         uint32_t ndtr0, ndtr1;
383
384         u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
385         u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
386         u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
387         u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
388         u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
389         u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
390         u32 tR = chip->chip_delay * 1000;
391         u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
392         u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
393
394         /* fall back to a default value if tR = 0 */
395         if (!tR)
396                 tR = 20000;
397
398         ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
399                 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
400                 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
401                 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
402                 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
403                 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
404
405         ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
406                 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
407                 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
408
409         info->ndtr0cs0 = ndtr0;
410         info->ndtr1cs0 = ndtr1;
411         nand_writel(info, NDTR0CS0, ndtr0);
412         nand_writel(info, NDTR1CS0, ndtr1);
413 }
414
415 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
416 {
417         const struct nand_sdr_timings *timings;
418         struct nand_chip *chip = &host->chip;
419         struct pxa3xx_nand_info *info = host->info_data;
420         const struct pxa3xx_nand_flash *f = NULL;
421         struct mtd_info *mtd = nand_to_mtd(&host->chip);
422         int mode, id, ntypes, i;
423
424         mode = onfi_get_async_timing_mode(chip);
425         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
426                 ntypes = ARRAY_SIZE(builtin_flash_types);
427
428                 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
429
430                 id = chip->read_byte(mtd);
431                 id |= chip->read_byte(mtd) << 0x8;
432
433                 for (i = 0; i < ntypes; i++) {
434                         f = &builtin_flash_types[i];
435
436                         if (f->chip_id == id)
437                                 break;
438                 }
439
440                 if (i == ntypes) {
441                         dev_err(&info->pdev->dev, "Error: timings not found\n");
442                         return -EINVAL;
443                 }
444
445                 pxa3xx_nand_set_timing(host, f->timing);
446
447                 if (f->flash_width == 16) {
448                         info->reg_ndcr |= NDCR_DWIDTH_M;
449                         chip->options |= NAND_BUSWIDTH_16;
450                 }
451
452                 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
453         } else {
454                 mode = fls(mode) - 1;
455                 if (mode < 0)
456                         mode = 0;
457
458                 timings = onfi_async_timing_mode_to_sdr_timings(mode);
459                 if (IS_ERR(timings))
460                         return PTR_ERR(timings);
461
462                 pxa3xx_nand_set_sdr_timing(host, timings);
463         }
464
465         return 0;
466 }
467
468 /**
469  * NOTE: ND_RUN must be set first, then the command buffer
470  * written; otherwise it does not work.
471  * We enable all the interrupts at the same time and
472  * let pxa3xx_nand_irq() handle all the logic.
473  */
474 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
475 {
476         uint32_t ndcr;
477
478         ndcr = info->reg_ndcr;
479
480         if (info->use_ecc) {
481                 ndcr |= NDCR_ECC_EN;
482                 if (info->ecc_bch)
483                         nand_writel(info, NDECCCTRL, 0x1);
484         } else {
485                 ndcr &= ~NDCR_ECC_EN;
486                 if (info->ecc_bch)
487                         nand_writel(info, NDECCCTRL, 0x0);
488         }
489
490         ndcr &= ~NDCR_DMA_EN;
491
492         if (info->use_spare)
493                 ndcr |= NDCR_SPARE_EN;
494         else
495                 ndcr &= ~NDCR_SPARE_EN;
496
497         ndcr |= NDCR_ND_RUN;
498
499         /* clear status bits and run */
500         nand_writel(info, NDSR, NDSR_MASK);
501         nand_writel(info, NDCR, 0);
502         nand_writel(info, NDCR, ndcr);
503 }
504
505 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
506 {
507         uint32_t ndcr;
508
509         ndcr = nand_readl(info, NDCR);
510         nand_writel(info, NDCR, ndcr | int_mask);
511 }
512
513 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
514 {
515         if (info->ecc_bch) {
516                 u32 ts;
517
518                 /*
519                  * According to the datasheet, when reading from NDDB
520                  * with BCH enabled, after each 32 bytes reads, we
521                  * have to make sure that the NDSR.RDDREQ bit is set.
522                  *
523                  * Drain the FIFO 8 32 bits reads at a time, and skip
524                  * the polling on the last read.
525                  */
526                 while (len > 8) {
527                         readsl(info->mmio_base + NDDB, data, 8);
528
529                         ts = get_timer(0);
530                         while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
531                                 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
532                                         dev_err(&info->pdev->dev,
533                                                 "Timeout on RDDREQ while draining the FIFO\n");
534                                         return;
535                                 }
536                         }
537
538                         data += 32;
539                         len -= 8;
540                 }
541         }
542
543         readsl(info->mmio_base + NDDB, data, len);
544 }
545
546 static void handle_data_pio(struct pxa3xx_nand_info *info)
547 {
548         switch (info->state) {
549         case STATE_PIO_WRITING:
550                 if (info->step_chunk_size)
551                         writesl(info->mmio_base + NDDB,
552                                 info->data_buff + info->data_buff_pos,
553                                 DIV_ROUND_UP(info->step_chunk_size, 4));
554
555                 if (info->step_spare_size)
556                         writesl(info->mmio_base + NDDB,
557                                 info->oob_buff + info->oob_buff_pos,
558                                 DIV_ROUND_UP(info->step_spare_size, 4));
559                 break;
560         case STATE_PIO_READING:
561                 if (info->step_chunk_size)
562                         drain_fifo(info,
563                                    info->data_buff + info->data_buff_pos,
564                                    DIV_ROUND_UP(info->step_chunk_size, 4));
565
566                 if (info->step_spare_size)
567                         drain_fifo(info,
568                                    info->oob_buff + info->oob_buff_pos,
569                                    DIV_ROUND_UP(info->step_spare_size, 4));
570                 break;
571         default:
572                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
573                                 info->state);
574                 BUG();
575         }
576
577         /* Update buffer pointers for multi-page read/write */
578         info->data_buff_pos += info->step_chunk_size;
579         info->oob_buff_pos += info->step_spare_size;
580 }
581
582 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
583 {
584         handle_data_pio(info);
585
586         info->state = STATE_CMD_DONE;
587         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
588 }
589
590 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
591 {
592         unsigned int status, is_completed = 0, is_ready = 0;
593         unsigned int ready, cmd_done;
594         irqreturn_t ret = IRQ_HANDLED;
595
596         if (info->cs == 0) {
597                 ready           = NDSR_FLASH_RDY;
598                 cmd_done        = NDSR_CS0_CMDD;
599         } else {
600                 ready           = NDSR_RDY;
601                 cmd_done        = NDSR_CS1_CMDD;
602         }
603
604         status = nand_readl(info, NDSR);
605
606         if (status & NDSR_UNCORERR)
607                 info->retcode = ERR_UNCORERR;
608         if (status & NDSR_CORERR) {
609                 info->retcode = ERR_CORERR;
610                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
611                     info->ecc_bch)
612                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
613                 else
614                         info->ecc_err_cnt = 1;
615
616                 /*
617                  * Each chunk composing a page is corrected independently,
618  * and we need to store the maximum number of corrected bitflips
619                  * to return it to the MTD layer in ecc.read_page().
620                  */
621                 info->max_bitflips = max_t(unsigned int,
622                                            info->max_bitflips,
623                                            info->ecc_err_cnt);
624         }
625         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
626                 info->state = (status & NDSR_RDDREQ) ?
627                         STATE_PIO_READING : STATE_PIO_WRITING;
628                 /* Call the IRQ thread in U-Boot directly */
629                 pxa3xx_nand_irq_thread(info);
630                 return 0;
631         }
632         if (status & cmd_done) {
633                 info->state = STATE_CMD_DONE;
634                 is_completed = 1;
635         }
636         if (status & ready) {
637                 info->state = STATE_READY;
638                 is_ready = 1;
639         }
640
641         /*
642          * Clear all status bits before issuing the next command, which
643          * can and will alter the status bits and will raise a new
644          * interrupt of its own. This lets the controller exit the IRQ.
645          */
646         nand_writel(info, NDSR, status);
647
648         if (status & NDSR_WRCMDREQ) {
649                 status &= ~NDSR_WRCMDREQ;
650                 info->state = STATE_CMD_HANDLE;
651
652                 /*
653                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
654                  * must be loaded by writing directly either 12 or 16
655                  * bytes directly to NDCB0, four bytes at a time.
656                  *
657                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
658                  * but each NDCBx register can be read.
659                  */
660                 nand_writel(info, NDCB0, info->ndcb0);
661                 nand_writel(info, NDCB0, info->ndcb1);
662                 nand_writel(info, NDCB0, info->ndcb2);
663
664                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
665                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
666                         nand_writel(info, NDCB0, info->ndcb3);
667         }
668
669         if (is_completed)
670                 info->cmd_complete = 1;
671         if (is_ready)
672                 info->dev_ready = 1;
673
674         return ret;
675 }
676
677 static inline int is_buf_blank(uint8_t *buf, size_t len)
678 {
679         for (; len > 0; len--)
680                 if (*buf++ != 0xff)
681                         return 0;
682         return 1;
683 }
684
685 static void set_command_address(struct pxa3xx_nand_info *info,
686                 unsigned int page_size, uint16_t column, int page_addr)
687 {
688         /* small page addr setting */
689         if (page_size < PAGE_CHUNK_SIZE) {
690                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
691                                 | (column & 0xFF);
692
693                 info->ndcb2 = 0;
694         } else {
695                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
696                                 | (column & 0xFFFF);
697
698                 if (page_addr & 0xFF0000)
699                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
700                 else
701                         info->ndcb2 = 0;
702         }
703 }
704
705 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
706 {
707         struct pxa3xx_nand_host *host = info->host[info->cs];
708         struct mtd_info *mtd = nand_to_mtd(&host->chip);
709
710         /* reset the data and oob column pointers used to handle data */
711         info->buf_start         = 0;
712         info->buf_count         = 0;
713         info->data_buff_pos     = 0;
714         info->oob_buff_pos      = 0;
715         info->step_chunk_size   = 0;
716         info->step_spare_size   = 0;
717         info->cur_chunk         = 0;
718         info->use_ecc           = 0;
719         info->use_spare         = 1;
720         info->retcode           = ERR_NONE;
721         info->ecc_err_cnt       = 0;
722         info->ndcb3             = 0;
723         info->need_wait         = 0;
724
725         switch (command) {
726         case NAND_CMD_READ0:
727         case NAND_CMD_READOOB:
728         case NAND_CMD_PAGEPROG:
729                 info->use_ecc = 1;
730                 break;
731         case NAND_CMD_PARAM:
732                 info->use_spare = 0;
733                 break;
734         default:
735                 info->ndcb1 = 0;
736                 info->ndcb2 = 0;
737                 break;
738         }
739
740         /*
741          * If we are about to issue a read command, or about to set
742          * the write address, then clean the data buffer.
743          */
744         if (command == NAND_CMD_READ0 ||
745             command == NAND_CMD_READOOB ||
746             command == NAND_CMD_SEQIN) {
747                 info->buf_count = mtd->writesize + mtd->oobsize;
748                 memset(info->data_buff, 0xFF, info->buf_count);
749         }
750 }
751
752 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
753                 int ext_cmd_type, uint16_t column, int page_addr)
754 {
755         int addr_cycle, exec_cmd;
756         struct pxa3xx_nand_host *host;
757         struct mtd_info *mtd;
758
759         host = info->host[info->cs];
760         mtd = nand_to_mtd(&host->chip);
761         addr_cycle = 0;
762         exec_cmd = 1;
763
764         if (info->cs != 0)
765                 info->ndcb0 = NDCB0_CSEL;
766         else
767                 info->ndcb0 = 0;
768
769         if (command == NAND_CMD_SEQIN)
770                 exec_cmd = 0;
771
772         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
773                                     + host->col_addr_cycles);
774
775         switch (command) {
776         case NAND_CMD_READOOB:
777         case NAND_CMD_READ0:
778                 info->buf_start = column;
779                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
780                                 | addr_cycle
781                                 | NAND_CMD_READ0;
782
783                 if (command == NAND_CMD_READOOB)
784                         info->buf_start += mtd->writesize;
785
786                 if (info->cur_chunk < info->nfullchunks) {
787                         info->step_chunk_size = info->chunk_size;
788                         info->step_spare_size = info->spare_size;
789                 } else {
790                         info->step_chunk_size = info->last_chunk_size;
791                         info->step_spare_size = info->last_spare_size;
792                 }
793
794                 /*
795                  * Multiple page read needs an 'extended command type' field,
796                  * which is either naked-read or last-read according to the
797                  * state.
798                  */
799                 if (mtd->writesize == PAGE_CHUNK_SIZE) {
800                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
801                 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
802                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
803                                         | NDCB0_LEN_OVRD
804                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
805                         info->ndcb3 = info->step_chunk_size +
806                                 info->step_spare_size;
807                 }
808
809                 set_command_address(info, mtd->writesize, column, page_addr);
810                 break;
811
812         case NAND_CMD_SEQIN:
813
814                 info->buf_start = column;
815                 set_command_address(info, mtd->writesize, 0, page_addr);
816
817                 /*
818                  * Multiple page programming needs to execute the initial
819                  * SEQIN command that sets the page address.
820                  */
821                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
822                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
823                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
824                                 | addr_cycle
825                                 | command;
826                         exec_cmd = 1;
827                 }
828                 break;
829
830         case NAND_CMD_PAGEPROG:
831                 if (is_buf_blank(info->data_buff,
832                                  (mtd->writesize + mtd->oobsize))) {
833                         exec_cmd = 0;
834                         break;
835                 }
836
837                 if (info->cur_chunk < info->nfullchunks) {
838                         info->step_chunk_size = info->chunk_size;
839                         info->step_spare_size = info->spare_size;
840                 } else {
841                         info->step_chunk_size = info->last_chunk_size;
842                         info->step_spare_size = info->last_spare_size;
843                 }
844
845                 /* Second command setting for large pages */
846                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
847                         /*
848                          * Multiple page write uses the 'extended command'
849                          * field. This can be used to issue a command dispatch
850                          * or a naked-write depending on the current stage.
851                          */
852                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
853                                         | NDCB0_LEN_OVRD
854                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
855                         info->ndcb3 = info->step_chunk_size +
856                                       info->step_spare_size;
857
858                         /*
859                          * This is the command dispatch that completes a chunked
860                          * page program operation.
861                          */
862                         if (info->cur_chunk == info->ntotalchunks) {
863                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
864                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
865                                         | command;
866                                 info->ndcb1 = 0;
867                                 info->ndcb2 = 0;
868                                 info->ndcb3 = 0;
869                         }
870                 } else {
871                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
872                                         | NDCB0_AUTO_RS
873                                         | NDCB0_ST_ROW_EN
874                                         | NDCB0_DBC
875                                         | (NAND_CMD_PAGEPROG << 8)
876                                         | NAND_CMD_SEQIN
877                                         | addr_cycle;
878                 }
879                 break;
880
881         case NAND_CMD_PARAM:
882                 info->buf_count = INIT_BUFFER_SIZE;
883                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
884                                 | NDCB0_ADDR_CYC(1)
885                                 | NDCB0_LEN_OVRD
886                                 | command;
887                 info->ndcb1 = (column & 0xFF);
888                 info->ndcb3 = INIT_BUFFER_SIZE;
889                 info->step_chunk_size = INIT_BUFFER_SIZE;
890                 break;
891
892         case NAND_CMD_READID:
893                 info->buf_count = READ_ID_BYTES;
894                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
895                                 | NDCB0_ADDR_CYC(1)
896                                 | command;
897                 info->ndcb1 = (column & 0xFF);
898
899                 info->step_chunk_size = 8;
900                 break;
901         case NAND_CMD_STATUS:
902                 info->buf_count = 1;
903                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
904                                 | NDCB0_ADDR_CYC(1)
905                                 | command;
906
907                 info->step_chunk_size = 8;
908                 break;
909
910         case NAND_CMD_ERASE1:
911                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
912                                 | NDCB0_AUTO_RS
913                                 | NDCB0_ADDR_CYC(3)
914                                 | NDCB0_DBC
915                                 | (NAND_CMD_ERASE2 << 8)
916                                 | NAND_CMD_ERASE1;
917                 info->ndcb1 = page_addr;
918                 info->ndcb2 = 0;
919
920                 break;
921         case NAND_CMD_RESET:
922                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
923                                 | command;
924
925                 break;
926
927         case NAND_CMD_ERASE2:
928                 exec_cmd = 0;
929                 break;
930
931         default:
932                 exec_cmd = 0;
933                 dev_err(&info->pdev->dev, "non-supported command %x\n",
934                         command);
935                 break;
936         }
937
938         return exec_cmd;
939 }
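/*
 * For example (illustrative values), a READ0 on CS0 of a 2048-byte page
 * device using 5 address cycles ends up with
 * ndcb0 = NDCB0_DBC | NDCB0_ADDR_CYC(5) | (NAND_CMD_READSTART << 8)
 *         | NAND_CMD_READ0 = 0x000d3000.
 */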
940
941 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
942                          int column, int page_addr)
943 {
944         struct nand_chip *chip = mtd_to_nand(mtd);
945         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
946         struct pxa3xx_nand_info *info = host->info_data;
947         int exec_cmd;
948
949         /*
950          * if this is a x16 device, then convert the input
951          * "byte" address into a "word" address appropriate
952          * for indexing a word-oriented device
953          */
954         if (info->reg_ndcr & NDCR_DWIDTH_M)
955                 column /= 2;
956
957         /*
958          * There may be a different NAND chip hooked to each
959          * chip select, so check whether the chip select has
960          * changed; if so, reset the timing.
961          */
962         if (info->cs != host->cs) {
963                 info->cs = host->cs;
964                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
965                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
966         }
967
968         prepare_start_command(info, command);
969
970         info->state = STATE_PREPARED;
971         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
972
973         if (exec_cmd) {
974                 u32 ts;
975
976                 info->cmd_complete = 0;
977                 info->dev_ready = 0;
978                 info->need_wait = 1;
979                 pxa3xx_nand_start(info);
980
981                 ts = get_timer(0);
982                 while (1) {
983                         u32 status;
984
985                         status = nand_readl(info, NDSR);
986                         if (status)
987                                 pxa3xx_nand_irq(info);
988
989                         if (info->cmd_complete)
990                                 break;
991
992                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
993                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
994                                 return;
995                         }
996                 }
997         }
998         info->state = STATE_IDLE;
999 }
1000
1001 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1002                                   const unsigned command,
1003                                   int column, int page_addr)
1004 {
1005         struct nand_chip *chip = mtd_to_nand(mtd);
1006         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1007         struct pxa3xx_nand_info *info = host->info_data;
1008         int exec_cmd, ext_cmd_type;
1009
1010         /*
1011          * if this is a x16 device, then convert the input
1012          * "byte" address into a "word" address appropriate
1013          * for indexing a word-oriented device
1014          */
1015         if (info->reg_ndcr & NDCR_DWIDTH_M)
1016                 column /= 2;
1017
1018         /*
1019          * There may be a different NAND chip hooked to each
1020          * chip select, so check whether the chip select has
1021          * changed; if so, reset the timing.
1022          */
1023         if (info->cs != host->cs) {
1024                 info->cs = host->cs;
1025                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1026                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1027         }
1028
1029         /* Select the extended command for the first command */
1030         switch (command) {
1031         case NAND_CMD_READ0:
1032         case NAND_CMD_READOOB:
1033                 ext_cmd_type = EXT_CMD_TYPE_MONO;
1034                 break;
1035         case NAND_CMD_SEQIN:
1036                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1037                 break;
1038         case NAND_CMD_PAGEPROG:
1039                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1040                 break;
1041         default:
1042                 ext_cmd_type = 0;
1043                 break;
1044         }
1045
1046         prepare_start_command(info, command);
1047
1048         /*
1049          * Prepare the "is ready" completion before starting a command
1050          * transaction sequence. If the command is not executed, the
1051          * ready flags are set right away; see below.
1052          *
1053          * We can do that inside the loop because the command variable
1054          * is invariant and thus so is the exec_cmd.
1055          */
1056         info->need_wait = 1;
1057         info->dev_ready = 0;
1058
1059         do {
1060                 u32 ts;
1061
1062                 info->state = STATE_PREPARED;
1063                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1064                                                column, page_addr);
1065                 if (!exec_cmd) {
1066                         info->need_wait = 0;
1067                         info->dev_ready = 1;
1068                         break;
1069                 }
1070
1071                 info->cmd_complete = 0;
1072                 pxa3xx_nand_start(info);
1073
1074                 ts = get_timer(0);
1075                 while (1) {
1076                         u32 status;
1077
1078                         status = nand_readl(info, NDSR);
1079                         if (status)
1080                                 pxa3xx_nand_irq(info);
1081
1082                         if (info->cmd_complete)
1083                                 break;
1084
1085                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1086                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1087                                 return;
1088                         }
1089                 }
1090
1091                 /* Only a few commands need several steps */
1092                 if (command != NAND_CMD_PAGEPROG &&
1093                     command != NAND_CMD_READ0    &&
1094                     command != NAND_CMD_READOOB)
1095                         break;
1096
1097                 info->cur_chunk++;
1098
1099                 /* Check if the sequence is complete */
1100                 if (info->cur_chunk == info->ntotalchunks &&
1101                     command != NAND_CMD_PAGEPROG)
1102                         break;
1103
1104                 /*
1105                  * After a split program command sequence has issued
1106                  * the command dispatch, the command sequence is complete.
1107                  */
1108                 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1109                     command == NAND_CMD_PAGEPROG &&
1110                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1111                         break;
1112
1113                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1114                         /* Last read: issue a 'last naked read' */
1115                         if (info->cur_chunk == info->ntotalchunks - 1)
1116                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1117                         else
1118                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1119
1120                 /*
1121                  * If a split program command has no more data to transfer,
1122                  * the command dispatch must be issued to complete it.
1123                  */
1124                 } else if (command == NAND_CMD_PAGEPROG &&
1125                            info->cur_chunk == info->ntotalchunks) {
1126                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1127                 }
1128         } while (1);
1129
1130         info->state = STATE_IDLE;
1131 }
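/*
 * For example, with the 8-bit BCH layout configured below in
 * pxa_ecc_init() (4 full chunks plus one spare-only chunk,
 * ntotalchunks = 5), a page read issues the extended command types in
 * the order MONO, NAKED_RW, NAKED_RW, NAKED_RW, LAST_RW, one per chunk.
 */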
1132
1133 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1134                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1135                 int page)
1136 {
1137         chip->write_buf(mtd, buf, mtd->writesize);
1138         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1139
1140         return 0;
1141 }
1142
1143 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1144                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1145                 int page)
1146 {
1147         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1148         struct pxa3xx_nand_info *info = host->info_data;
1149
1150         chip->read_buf(mtd, buf, mtd->writesize);
1151         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1152
1153         if (info->retcode == ERR_CORERR && info->use_ecc) {
1154                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1155
1156         } else if (info->retcode == ERR_UNCORERR) {
1157                 /*
1158                  * For a blank page (all 0xff), the HW will calculate its ECC
1159                  * as 0, which is different from the ECC information within
1160                  * the OOB; ignore such uncorrectable errors.
1161                  */
1162                 if (is_buf_blank(buf, mtd->writesize))
1163                         info->retcode = ERR_NONE;
1164                 else
1165                         mtd->ecc_stats.failed++;
1166         }
1167
1168         return info->max_bitflips;
1169 }
1170
1171 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1172 {
1173         struct nand_chip *chip = mtd_to_nand(mtd);
1174         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1175         struct pxa3xx_nand_info *info = host->info_data;
1176         char retval = 0xFF;
1177
1178         if (info->buf_start < info->buf_count)
1179                 /* Has a new command just been sent? */
1180                 retval = info->data_buff[info->buf_start++];
1181
1182         return retval;
1183 }
1184
1185 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1186 {
1187         struct nand_chip *chip = mtd_to_nand(mtd);
1188         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1189         struct pxa3xx_nand_info *info = host->info_data;
1190         u16 retval = 0xFFFF;
1191
1192         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1193                 retval = *((u16 *)(info->data_buff+info->buf_start));
1194                 info->buf_start += 2;
1195         }
1196         return retval;
1197 }
1198
1199 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1200 {
1201         struct nand_chip *chip = mtd_to_nand(mtd);
1202         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1203         struct pxa3xx_nand_info *info = host->info_data;
1204         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1205
1206         memcpy(buf, info->data_buff + info->buf_start, real_len);
1207         info->buf_start += real_len;
1208 }
1209
1210 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1211                 const uint8_t *buf, int len)
1212 {
1213         struct nand_chip *chip = mtd_to_nand(mtd);
1214         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1215         struct pxa3xx_nand_info *info = host->info_data;
1216         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1217
1218         memcpy(info->data_buff + info->buf_start, buf, real_len);
1219         info->buf_start += real_len;
1220 }
1221
1222 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1223 {
1224         return;
1225 }
1226
1227 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1228 {
1229         struct nand_chip *chip = mtd_to_nand(mtd);
1230         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1231         struct pxa3xx_nand_info *info = host->info_data;
1232
1233         if (info->need_wait) {
1234                 u32 ts;
1235
1236                 info->need_wait = 0;
1237
1238                 ts = get_timer(0);
1239                 while (1) {
1240                         u32 status;
1241
1242                         status = nand_readl(info, NDSR);
1243                         if (status)
1244                                 pxa3xx_nand_irq(info);
1245
1246                         if (info->dev_ready)
1247                                 break;
1248
1249                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1250                                 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1251                                 return NAND_STATUS_FAIL;
1252                         }
1253                 }
1254         }
1255
1256         /* the cmdfunc has already waited for the command to complete */
1257         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1258                 if (info->retcode == ERR_NONE)
1259                         return 0;
1260                 else
1261                         return NAND_STATUS_FAIL;
1262         }
1263
1264         return NAND_STATUS_READY;
1265 }
1266
1267 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1268 {
1269         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1270
1271         /* Configure default flash values */
1272         info->chunk_size = PAGE_CHUNK_SIZE;
1273         info->reg_ndcr = 0x0; /* enable all interrupts */
1274         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1275         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1276         info->reg_ndcr |= NDCR_SPARE_EN;
1277
1278         return 0;
1279 }
1280
1281 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1282 {
1283         struct pxa3xx_nand_host *host = info->host[info->cs];
1284         struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1285         struct nand_chip *chip = mtd_to_nand(mtd);
1286
1287         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1288         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1289         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1290 }
1291
1292 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1293 {
1294         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1295         uint32_t ndcr = nand_readl(info, NDCR);
1296
1297         /* Set an initial chunk size */
1298         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1299         info->reg_ndcr = ndcr &
1300                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1301         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1302         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1303         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1304 }
1305
1306 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1307 {
1308         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1309         if (info->data_buff == NULL)
1310                 return -ENOMEM;
1311         return 0;
1312 }
1313
1314 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1315 {
1316         struct pxa3xx_nand_info *info = host->info_data;
1317         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1318         struct mtd_info *mtd;
1319         struct nand_chip *chip;
1320         const struct nand_sdr_timings *timings;
1321         int ret;
1322
1323         mtd = nand_to_mtd(&info->host[info->cs]->chip);
1324         chip = mtd_to_nand(mtd);
1325
1326         /* configure default flash values */
1327         info->reg_ndcr = 0x0; /* enable all interrupts */
1328         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1329         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1330         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1331
1332         /* use the common timings for a first attempt */
1333         timings = onfi_async_timing_mode_to_sdr_timings(0);
1334         if (IS_ERR(timings))
1335                 return PTR_ERR(timings);
1336
1337         pxa3xx_nand_set_sdr_timing(host, timings);
1338
1339         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1340         ret = chip->waitfunc(mtd, chip);
1341         if (ret & NAND_STATUS_FAIL)
1342                 return -ENODEV;
1343
1344         return 0;
1345 }
1346
1347 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1348                         struct nand_ecc_ctrl *ecc,
1349                         int strength, int ecc_stepsize, int page_size)
1350 {
1351         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1352                 info->nfullchunks = 1;
1353                 info->ntotalchunks = 1;
1354                 info->chunk_size = 2048;
1355                 info->spare_size = 40;
1356                 info->ecc_size = 24;
1357                 ecc->mode = NAND_ECC_HW;
1358                 ecc->size = 512;
1359                 ecc->strength = 1;
1360
1361         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1362                 info->nfullchunks = 1;
1363                 info->ntotalchunks = 1;
1364                 info->chunk_size = 512;
1365                 info->spare_size = 8;
1366                 info->ecc_size = 8;
1367                 ecc->mode = NAND_ECC_HW;
1368                 ecc->size = 512;
1369                 ecc->strength = 1;
1370
1371         /*
1372          * Required ECC: 4-bit correction per 512 bytes
1373          * Select: 16-bit correction per 2048 bytes
1374          */
1375         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1376                 info->ecc_bch = 1;
1377                 info->nfullchunks = 1;
1378                 info->ntotalchunks = 1;
1379                 info->chunk_size = 2048;
1380                 info->spare_size = 32;
1381                 info->ecc_size = 32;
1382                 ecc->mode = NAND_ECC_HW;
1383                 ecc->size = info->chunk_size;
1384                 ecc->layout = &ecc_layout_2KB_bch4bit;
1385                 ecc->strength = 16;
1386
1387         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1388                 info->ecc_bch = 1;
1389                 info->nfullchunks = 2;
1390                 info->ntotalchunks = 2;
1391                 info->chunk_size = 2048;
1392                 info->spare_size = 32;
1393                 info->ecc_size = 32;
1394                 ecc->mode = NAND_ECC_HW;
1395                 ecc->size = info->chunk_size;
1396                 ecc->layout = &ecc_layout_4KB_bch4bit;
1397                 ecc->strength = 16;
1398
1399         /*
1400          * Required ECC: 8-bit correction per 512 bytes
1401          * Select: 16-bit correction per 1024 bytes
1402          */
1403         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1404                 info->ecc_bch = 1;
1405                 info->nfullchunks = 4;
1406                 info->ntotalchunks = 5;
1407                 info->chunk_size = 1024;
1408                 info->spare_size = 0;
1409                 info->last_chunk_size = 0;
1410                 info->last_spare_size = 64;
1411                 info->ecc_size = 32;
1412                 ecc->mode = NAND_ECC_HW;
1413                 ecc->size = info->chunk_size;
1414                 ecc->layout = &ecc_layout_4KB_bch8bit;
1415                 ecc->strength = 16;
1416         } else {
1417                 dev_err(&info->pdev->dev,
1418                         "ECC strength %d at page size %d is not supported\n",
1419                         strength, page_size);
1420                 return -ENODEV;
1421         }
1422
1423         return 0;
1424 }
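/*
 * In the 8-bit BCH case above, a 4096-byte page is split into 4 full
 * chunks of 1024 data bytes (no per-chunk spare) plus one final chunk
 * carrying only the 64 spare bytes, hence nfullchunks = 4 and
 * ntotalchunks = 5.
 */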
1425
1426 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1427 {
1428         struct nand_chip *chip = mtd_to_nand(mtd);
1429         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1430         struct pxa3xx_nand_info *info = host->info_data;
1431         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1432         int ret;
1433         uint16_t ecc_strength, ecc_step;
1434
1435         if (pdata->keep_config) {
1436                 pxa3xx_nand_detect_config(info);
1437         } else {
1438                 ret = pxa3xx_nand_config_ident(info);
1439                 if (ret)
1440                         return ret;
1441                 ret = pxa3xx_nand_sensing(host);
1442                 if (ret) {
1443                         dev_info(&info->pdev->dev,
1444                                  "There is no chip on cs %d!\n",
1445                                  info->cs);
1446                         return ret;
1447                 }
1448         }
1449
1450         /* Device detection must be done with ECC disabled */
1451         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1452                 nand_writel(info, NDECCCTRL, 0x0);
1453
1454         if (nand_scan_ident(mtd, 1, NULL))
1455                 return -ENODEV;
1456
1457         if (!pdata->keep_config) {
1458                 ret = pxa3xx_nand_init_timings(host);
1459                 if (ret) {
1460                         dev_err(&info->pdev->dev,
1461                                 "Failed to set timings: %d\n", ret);
1462                         return ret;
1463                 }
1464         }
1465
1466 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1467         /*
1468          * We'll use a bad block table stored in flash and won't allow
1469          * the bad block marker to be written to the flash.
1470          */
1471         chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1472         chip->bbt_td = &bbt_main_descr;
1473         chip->bbt_md = &bbt_mirror_descr;
1474 #endif
1475
1476         /*
1477          * If the page size is bigger than the FIFO size, check that we
1478          * were given the right variant and then switch to the extended
1479          * (aka split) command handling.
1480          */
1481         if (mtd->writesize > PAGE_CHUNK_SIZE) {
1482                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1483                         chip->cmdfunc = nand_cmdfunc_extended;
1484                 } else {
1485                         dev_err(&info->pdev->dev,
1486                                 "unsupported page size on this variant\n");
1487                         return -ENODEV;
1488                 }
1489         }
1490
1491         if (pdata->ecc_strength && pdata->ecc_step_size) {
1492                 ecc_strength = pdata->ecc_strength;
1493                 ecc_step = pdata->ecc_step_size;
1494         } else {
1495                 ecc_strength = chip->ecc_strength_ds;
1496                 ecc_step = chip->ecc_step_ds;
1497         }
1498
1499         /* Set default ECC strength requirements on non-ONFI devices */
1500         if (ecc_strength < 1 && ecc_step < 1) {
1501                 ecc_strength = 1;
1502                 ecc_step = 512;
1503         }
1504
1505         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1506                            ecc_step, mtd->writesize);
1507         if (ret)
1508                 return ret;
1509
1510         /* calculate addressing information */
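        /* Large-page devices (2 KiB pages and up) need two column address cycles. */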
1511         if (mtd->writesize >= 2048)
1512                 host->col_addr_cycles = 2;
1513         else
1514                 host->col_addr_cycles = 1;
1515
1516         /* release the initial buffer */
1517         kfree(info->data_buff);
1518
1519         /* allocate the real data + oob buffer */
1520         info->buf_size = mtd->writesize + mtd->oobsize;
1521         ret = pxa3xx_nand_init_buff(info);
1522         if (ret)
1523                 return ret;
1524         info->oob_buff = info->data_buff + mtd->writesize;
1525
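        /* Chips with more than 65536 pages need a third row address cycle. */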
1526         if ((mtd->size >> chip->page_shift) > 65536)
1527                 host->row_addr_cycles = 3;
1528         else
1529                 host->row_addr_cycles = 2;
1530
1531         if (!pdata->keep_config)
1532                 pxa3xx_nand_config_tail(info);
1533
1534         return nand_scan_tail(mtd);
1535 }
1536
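/*
 * Set up one host/chip structure per chip select in the memory that was
 * allocated right behind the pxa3xx_nand_info (see board_nand_init()),
 * wire up the nand_chip callbacks and allocate the small buffer used for
 * the initial flash detection commands.
 */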
1537 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1538 {
1539         struct pxa3xx_nand_platform_data *pdata;
1540         struct pxa3xx_nand_host *host;
1541         struct nand_chip *chip = NULL;
1542         struct mtd_info *mtd;
1543         int ret, cs;
1544
1545         pdata = info->pdata;
1546         if (pdata->num_cs <= 0)
1547                 return -ENODEV;
1548
1549         info->variant = pxa3xx_nand_get_variant();
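        /*
         * The host structures live immediately after the info structure in
         * the same allocation; each host is assumed to embed its nand_chip
         * as the first member, which is why the same address can be used
         * for both 'chip' and 'host' below.
         */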
1550         for (cs = 0; cs < pdata->num_cs; cs++) {
1551                 chip = (struct nand_chip *)
1552                         ((u8 *)&info[1] + sizeof(*host) * cs);
1553                 mtd = nand_to_mtd(chip);
1554                 host = (struct pxa3xx_nand_host *)chip;
1555                 info->host[cs] = host;
1556                 host->cs = cs;
1557                 host->info_data = info;
1558                 mtd->owner = THIS_MODULE;
1559
1560                 nand_set_controller_data(chip, host);
1561                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1562                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1563                 chip->controller        = &info->controller;
1564                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1565                 chip->select_chip       = pxa3xx_nand_select_chip;
1566                 chip->read_word         = pxa3xx_nand_read_word;
1567                 chip->read_byte         = pxa3xx_nand_read_byte;
1568                 chip->read_buf          = pxa3xx_nand_read_buf;
1569                 chip->write_buf         = pxa3xx_nand_write_buf;
1570                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1571                 chip->cmdfunc           = nand_cmdfunc;
1572         }
1573
1574         /* Allocate a buffer to allow flash detection */
1575         info->buf_size = INIT_BUFFER_SIZE;
1576         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1577         if (info->data_buff == NULL) {
1578                 ret = -ENOMEM;
1579                 goto fail_disable_clk;
1580         }
1581
1582         /* initialize all interrupts to be disabled */
1583         disable_int(info, NDSR_MASK);
1584
1585         return 0;
1586
1587         kfree(info->data_buff);
1588 fail_disable_clk:
1589         return ret;
1590 }
1591
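/*
 * Fill the platform data from the first enabled
 * "marvell,mvebu-pxa3xx-nand" node in the device tree. A minimal, purely
 * illustrative node (register addresses are placeholders) providing the
 * properties parsed below could look like:
 *
 *      nand-controller@PLACEHOLDER {
 *              compatible = "marvell,mvebu-pxa3xx-nand";
 *              reg = <PLACEHOLDER_BASE PLACEHOLDER_SIZE>;
 *              num-cs = <1>;
 *              nand-enable-arbiter;
 *              nand-ecc-strength = <4>;
 *              nand-ecc-step-size = <512>;
 *      };
 */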
1592 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1593 {
1594         struct pxa3xx_nand_platform_data *pdata;
1595         const void *blob = gd->fdt_blob;
1596         int node = -1;
1597
1598         pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1599         if (!pdata)
1600                 return -ENOMEM;
1601
1602         /* Find the first enabled NAND controller node in the FDT blob */
1603         do {
1604                 node = fdt_node_offset_by_compatible(blob, node,
1605                                                      "marvell,mvebu-pxa3xx-nand");
1606                 if (node < 0)
1607                         break;
1608
1609                 /* Skip disabled nodes */
1610                 if (!fdtdec_get_is_enabled(blob, node))
1611                         continue;
1612
1613                 /* Get the first enabled NAND controller base address */
1614                 info->mmio_base =
1615                         (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1616                                         blob, node, "reg", 0, NULL, true);
1617
1618                 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1619                 if (pdata->num_cs != 1) {
1620                         pr_err("pxa3xx driver supports single CS only\n");
1621                         break;
1622                 }
1623
1624                 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1625                         pdata->enable_arbiter = 1;
1626
1627                 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1628                         pdata->keep_config = 1;
1629
1630                 /*
1631                  * ECC parameters.
1632                  * If these are not set, they will be selected according
1633                  * to the detected flash type.
1634                  */
1635                 /* ECC strength */
1636                 pdata->ecc_strength = fdtdec_get_int(blob, node,
1637                                                      "nand-ecc-strength", 0);
1638
1639                 /* ECC step size */
1640                 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1641                                                       "nand-ecc-step-size", 0);
1642
1643                 info->pdata = pdata;
1644
1645                 /* Currently support only a single NAND controller */
1646                 return 0;
1647
1648         } while (node >= 0);
1649
1650         return -EINVAL;
1651 }
1652
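/*
 * Top-level probe: parse the device tree, allocate the controller
 * resources and then scan and register an MTD device for every chip
 * select. The probe succeeds as long as at least one chip select comes
 * up with a usable flash chip.
 */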
1653 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1654 {
1655         struct pxa3xx_nand_platform_data *pdata;
1656         int ret, cs, probe_success;
1657
1658         ret = pxa3xx_nand_probe_dt(info);
1659         if (ret)
1660                 return ret;
1661
1662         pdata = info->pdata;
1663
1664         ret = alloc_nand_resource(info);
1665         if (ret) {
1666                 dev_err(&info->pdev->dev, "alloc nand resource failed\n");
1667                 return ret;
1668         }
1669
1670         probe_success = 0;
1671         for (cs = 0; cs < pdata->num_cs; cs++) {
1672                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1673
1674                 /*
1675                  * The mtd name matches the one used in the 'mtdparts' kernel
1676                  * parameter. This name must not be changed, otherwise the
1677                  * user's mtd partition configuration would break.
1678                  */
1679                 mtd->name = "pxa3xx_nand-0";
1680                 info->cs = cs;
1681                 ret = pxa3xx_nand_scan(mtd);
1682                 if (ret) {
1683                         dev_info(&info->pdev->dev, "failed to scan nand at cs %d\n",
1684                                  cs);
1685                         continue;
1686                 }
1687
1688                 if (nand_register(cs, mtd))
1689                         continue;
1690
1691                 probe_success = 1;
1692         }
1693
1694         if (!probe_success)
1695                 return -ENODEV;
1696
1697         return 0;
1698 }
1699
1700 /*
1701  * Main initialization routine
1702  */
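/*
 * board_nand_init() is the hook the U-Boot NAND core invokes at boot to
 * let this driver register itself. It cannot return an error code, so a
 * failure here simply leaves the controller unregistered.
 */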
1703 void board_nand_init(void)
1704 {
1705         struct pxa3xx_nand_info *info;
1706         struct pxa3xx_nand_host *host;
1707         int ret;
1708
1709         info = kzalloc(sizeof(*info) +
1710                        sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1711                        GFP_KERNEL);
1712         if (!info)
1713                 return;
1714
1715         ret = pxa3xx_nand_probe(info);
1716         if (ret)
1717                 kfree(info);
1718 }