drivers/mtd/nand/sh_flctl.c
1 /*
2  * SuperH FLCTL nand controller
3  *
4  * Copyright (c) 2008 Renesas Solutions Corp.
5  * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
6  *
7  * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/completion.h>
27 #include <linux/delay.h>
28 #include <linux/dmaengine.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/interrupt.h>
31 #include <linux/io.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/of_mtd.h>
35 #include <linux/platform_device.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/sh_dma.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
40
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/nand.h>
43 #include <linux/mtd/partitions.h>
44 #include <linux/mtd/sh_flctl.h>
45
46 static struct nand_ecclayout flctl_4secc_oob_16 = {
47         .eccbytes = 10,
48         .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
49         .oobfree = {
50                 {.offset = 12,
51                 .length = 4} },
52 };
53
54 static struct nand_ecclayout flctl_4secc_oob_64 = {
55         .eccbytes = 4 * 10,
56         .eccpos = {
57                  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
58                 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
59                 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
60                 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
61         .oobfree = {
62                 {.offset =  2, .length = 4},
63                 {.offset = 16, .length = 6},
64                 {.offset = 32, .length = 6},
65                 {.offset = 48, .length = 6} },
66 };
67
68 static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
69
70 static struct nand_bbt_descr flctl_4secc_smallpage = {
71         .options = NAND_BBT_SCAN2NDPAGE,
72         .offs = 11,
73         .len = 1,
74         .pattern = scan_ff_pattern,
75 };
76
77 static struct nand_bbt_descr flctl_4secc_largepage = {
78         .options = NAND_BBT_SCAN2NDPAGE,
79         .offs = 0,
80         .len = 2,
81         .pattern = scan_ff_pattern,
82 };
83
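/*
 * Drain the FLCTL FIFOs by pulsing AC1CLR/AC0CLR in FLINTDMACR and then
 * restoring the base interrupt/DMA control value.  (Going by the bit names,
 * AC0CLR/AC1CLR clear the two access FIFOs; the exact hardware behaviour is
 * inferred from this usage, not from a datasheet.)
 */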
84 static void empty_fifo(struct sh_flctl *flctl)
85 {
86         writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
87         writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
88 }
89
90 static void start_translation(struct sh_flctl *flctl)
91 {
92         writeb(TRSTRT, FLTRCR(flctl));
93 }
94
95 static void timeout_error(struct sh_flctl *flctl, const char *str)
96 {
97         dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
98 }
99
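/*
 * Poll FLTRCR until TREND ("transfer end", going by the bit name) is set,
 * then clear the register for the next transfer.  Gives up after roughly
 * LOOP_TIMEOUT_MAX microseconds and logs a timeout error.
 */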
100 static void wait_completion(struct sh_flctl *flctl)
101 {
102         uint32_t timeout = LOOP_TIMEOUT_MAX;
103
104         while (timeout--) {
105                 if (readb(FLTRCR(flctl)) & TREND) {
106                         writeb(0x0, FLTRCR(flctl));
107                         return;
108                 }
109                 udelay(1);
110         }
111
112         timeout_error(flctl, __func__);
113         writeb(0x0, FLTRCR(flctl));
114 }
115
116 static void flctl_dma_complete(void *param)
117 {
118         struct sh_flctl *flctl = param;
119
120         complete(&flctl->dma_complete);
121 }
122
123 static void flctl_release_dma(struct sh_flctl *flctl)
124 {
125         if (flctl->chan_fifo0_rx) {
126                 dma_release_channel(flctl->chan_fifo0_rx);
127                 flctl->chan_fifo0_rx = NULL;
128         }
129         if (flctl->chan_fifo0_tx) {
130                 dma_release_channel(flctl->chan_fifo0_tx);
131                 flctl->chan_fifo0_tx = NULL;
132         }
133 }
134
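/*
 * Request one shdma Tx and one Rx slave channel for FIFO0, using the slave
 * IDs from the platform data.  DMA is all-or-nothing: if either channel
 * request or slave config fails, both channels are released and the driver
 * silently falls back to PIO transfers.
 */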
135 static void flctl_setup_dma(struct sh_flctl *flctl)
136 {
137         dma_cap_mask_t mask;
138         struct dma_slave_config cfg;
139         struct platform_device *pdev = flctl->pdev;
140         struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
141         int ret;
142
143         if (!pdata)
144                 return;
145
146         if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
147                 return;
148
149         /* Either use DMA for both Tx and Rx, or don't use it at all */
150         dma_cap_zero(mask);
151         dma_cap_set(DMA_SLAVE, mask);
152
153         flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
154                                             (void *)pdata->slave_id_fifo0_tx);
155         dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
156                 flctl->chan_fifo0_tx);
157
158         if (!flctl->chan_fifo0_tx)
159                 return;
160
161         memset(&cfg, 0, sizeof(cfg));
162         cfg.slave_id = pdata->slave_id_fifo0_tx;
163         cfg.direction = DMA_MEM_TO_DEV;
164         cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
165         cfg.src_addr = 0;
166         ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
167         if (ret < 0)
168                 goto err;
169
170         flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
171                                             (void *)pdata->slave_id_fifo0_rx);
172         dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
173                 flctl->chan_fifo0_rx);
174
175         if (!flctl->chan_fifo0_rx)
176                 goto err;
177
178         cfg.slave_id = pdata->slave_id_fifo0_rx;
179         cfg.direction = DMA_DEV_TO_MEM;
180         cfg.dst_addr = 0;
181         cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
182         ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
183         if (ret < 0)
184                 goto err;
185
186         init_completion(&flctl->dma_complete);
187
188         return;
189
190 err:
191         flctl_release_dma(flctl);
192 }
193
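/*
 * Pack the column/page address into FLADR (plus FLADR2 when a fifth address
 * cycle is needed).  The layout below is inferred from this code, not from a
 * datasheet:
 *
 *   large (2048-byte) page: FLADR = col[11:0] | page[7:0] << 16 | page[15:8] << 24,
 *                           FLADR2 = page[23:16] (only when rw_ADRCNT is ADRCNT2_E)
 *   512-byte page:          FLADR = column | page[7:0] << 8 | page[15:8] << 16 |
 *                                   page[23:16] << 24
 *
 * column == -1 writes the raw page address (ERASE1); with column >= 0 and
 * page_addr == -1, FLADR is simply cleared to 0.
 */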
194 static void set_addr(struct mtd_info *mtd, int column, int page_addr)
195 {
196         struct sh_flctl *flctl = mtd_to_flctl(mtd);
197         uint32_t addr = 0;
198
199         if (column == -1) {
200                 addr = page_addr;       /* ERASE1 */
201         } else if (page_addr != -1) {
202                 /* SEQIN, READ0, etc.. */
203                 if (flctl->chip.options & NAND_BUSWIDTH_16)
204                         column >>= 1;
205                 if (flctl->page_size) {
206                         addr = column & 0x0FFF;
207                         addr |= (page_addr & 0xff) << 16;
208                         addr |= ((page_addr >> 8) & 0xff) << 24;
209                         /* bigger than 128MB */
210                         if (flctl->rw_ADRCNT == ADRCNT2_E) {
211                                 uint32_t        addr2;
212                                 addr2 = (page_addr >> 16) & 0xff;
213                                 writel(addr2, FLADR2(flctl));
214                         }
215                 } else {
216                         addr = column;
217                         addr |= (page_addr & 0xff) << 8;
218                         addr |= ((page_addr >> 8) & 0xff) << 16;
219                         addr |= ((page_addr >> 16) & 0xff) << 24;
220                 }
221         }
222         writel(addr, FLADR(flctl));
223 }
224
225 static void wait_rfifo_ready(struct sh_flctl *flctl)
226 {
227         uint32_t timeout = LOOP_TIMEOUT_MAX;
228
229         while (timeout--) {
230                 uint32_t val;
231                 /* check FIFO */
232                 val = readl(FLDTCNTR(flctl)) >> 16;
233                 if (val & 0xFF)
234                         return;
235                 udelay(1);
236         }
237         timeout_error(flctl, __func__);
238 }
239
240 static void wait_wfifo_ready(struct sh_flctl *flctl)
241 {
242         uint32_t len, timeout = LOOP_TIMEOUT_MAX;
243
244         while (timeout--) {
245                 /* check FIFO */
246                 len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
247                 if (len >= 4)
248                         return;
249                 udelay(1);
250         }
251         timeout_error(flctl, __func__);
252 }
253
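/*
 * Wait for the ECC FIFO of one 512-byte sector and evaluate the 4-symbol ECC
 * result.  Returns FL_SUCCESS when the OOB data is ready without corrections,
 * FL_REPAIRABLE when error patterns from FL4ECCRESULTx were XORed into
 * done_buff, FL_ERROR for an uncorrectable non-empty sector and FL_TIMEOUT
 * when the controller never became ready.
 */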
254 static enum flctl_ecc_res_t wait_recfifo_ready
255                 (struct sh_flctl *flctl, int sector_number)
256 {
257         uint32_t timeout = LOOP_TIMEOUT_MAX;
258         void __iomem *ecc_reg[4];
259         int i;
260         int state = FL_SUCCESS;
261         uint32_t data, size;
262
263         /*
264          * First this loop checks in FLDTCNTR if we are ready to read out the
265          * oob data. This is the case if either all went fine without errors or
266          * if the bottom part of the loop corrected the errors or marked them as
267          * uncorrectable and the controller is given time to push the data into
268          * the FIFO.
269          */
270         while (timeout--) {
271                 /* check if all is ok and we can read out the OOB */
272                 size = readl(FLDTCNTR(flctl)) >> 24;
273                 if ((size & 0xFF) == 4)
274                         return state;
275
276                 /* check if a correction code has been calculated */
277                 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
278                         /*
279                          * either we wait for the fifo to be filled or a
280                          * correction pattern is being generated
281                          */
282                         udelay(1);
283                         continue;
284                 }
285
286                 /* check for an uncorrectable error */
287                 if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
288                         /* check if we face a non-empty page */
289                         for (i = 0; i < 512; i++) {
290                                 if (flctl->done_buff[i] != 0xff) {
291                                         state = FL_ERROR; /* can't correct */
292                                         break;
293                                 }
294                         }
295
296                         if (state == FL_SUCCESS)
297                                 dev_dbg(&flctl->pdev->dev,
298                                 "reading empty sector %d, ecc error ignored\n",
299                                 sector_number);
300
301                         writel(0, FL4ECCCR(flctl));
302                         continue;
303                 }
304
305                 /* start error correction */
306                 ecc_reg[0] = FL4ECCRESULT0(flctl);
307                 ecc_reg[1] = FL4ECCRESULT1(flctl);
308                 ecc_reg[2] = FL4ECCRESULT2(flctl);
309                 ecc_reg[3] = FL4ECCRESULT3(flctl);
310
311                 for (i = 0; i < 3; i++) {
312                         uint8_t org;
313                         unsigned int index;
314
315                         data = readl(ecc_reg[i]);
316
317                         if (flctl->page_size)
318                                 index = (512 * sector_number) +
319                                         (data >> 16);
320                         else
321                                 index = data >> 16;
322
323                         org = flctl->done_buff[index];
324                         flctl->done_buff[index] = org ^ (data & 0xFF);
325                 }
326                 state = FL_REPAIRABLE;
327                 writel(0, FL4ECCCR(flctl));
328         }
329
330         timeout_error(flctl, __func__);
331         return FL_TIMEOUT;      /* timeout */
332 }
333
334 static void wait_wecfifo_ready(struct sh_flctl *flctl)
335 {
336         uint32_t timeout = LOOP_TIMEOUT_MAX;
337         uint32_t len;
338
339         while (timeout--) {
340                 /* check FLECFIFO */
341                 len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
342                 if (len >= 4)
343                         return;
344                 udelay(1);
345         }
346         timeout_error(flctl, __func__);
347 }
348
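/*
 * Perform one FIFO0 transfer via dmaengine: map the buffer, enable the DMA
 * request (DREQ0EN, which by its name enables the FIFO0 request), submit the
 * descriptor and wait up to 3 seconds for the completion callback.  Returns
 * > 0 on success.  If no descriptor can be prepared, the channels are
 * released so later transfers fall back to PIO for good.
 */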
349 static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
350                                         int len, enum dma_data_direction dir)
351 {
352         struct dma_async_tx_descriptor *desc = NULL;
353         struct dma_chan *chan;
354         enum dma_transfer_direction tr_dir;
355         dma_addr_t dma_addr;
356         dma_cookie_t cookie = -EINVAL;
357         uint32_t reg;
358         int ret;
359
360         if (dir == DMA_FROM_DEVICE) {
361                 chan = flctl->chan_fifo0_rx;
362                 tr_dir = DMA_DEV_TO_MEM;
363         } else {
364                 chan = flctl->chan_fifo0_tx;
365                 tr_dir = DMA_MEM_TO_DEV;
366         }
367
368         dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
369
370         if (dma_addr)
371                 desc = dmaengine_prep_slave_single(chan, dma_addr, len,
372                         tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
373
374         if (desc) {
375                 reg = readl(FLINTDMACR(flctl));
376                 reg |= DREQ0EN;
377                 writel(reg, FLINTDMACR(flctl));
378
379                 desc->callback = flctl_dma_complete;
380                 desc->callback_param = flctl;
381                 cookie = dmaengine_submit(desc);
382
383                 dma_async_issue_pending(chan);
384         } else {
385                 /* DMA failed, fall back to PIO */
386                 flctl_release_dma(flctl);
387                 dev_warn(&flctl->pdev->dev,
388                          "DMA failed, falling back to PIO\n");
389                 ret = -EIO;
390                 goto out;
391         }
392
393         ret = wait_for_completion_timeout(
394                                 &flctl->dma_complete,
395                                 msecs_to_jiffies(3000));
396
397         if (ret <= 0) {
398                 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
399                 dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
400         }
401
402 out:
403         reg = readl(FLINTDMACR(flctl));
404         reg &= ~DREQ0EN;
405         writel(reg, FLINTDMACR(flctl));
406
407         dma_unmap_single(chan->device->dev, dma_addr, len, dir);
408
409         /* ret > 0 is success */
410         return ret;
411 }
412
413 static void read_datareg(struct sh_flctl *flctl, int offset)
414 {
415         unsigned long data;
416         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
417
418         wait_completion(flctl);
419
420         data = readl(FLDATAR(flctl));
421         *buf = le32_to_cpu(data);
422 }
423
424 static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
425 {
426         int i, len_4align;
427         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
428
429         len_4align = (rlen + 3) / 4;
430
431         /* initiate DMA transfer */
432         if (flctl->chan_fifo0_rx && rlen >= 32 &&
433                 flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
434                         goto convert;   /* DMA success */
435
436         /* do polling transfer */
437         for (i = 0; i < len_4align; i++) {
438                 wait_rfifo_ready(flctl);
439                 buf[i] = readl(FLDTFIFO(flctl));
440         }
441
442 convert:
443         for (i = 0; i < len_4align; i++)
444                 buf[i] = be32_to_cpu(buf[i]);
445 }
446
447 static enum flctl_ecc_res_t read_ecfiforeg
448                 (struct sh_flctl *flctl, uint8_t *buff, int sector)
449 {
450         int i;
451         enum flctl_ecc_res_t res;
452         unsigned long *ecc_buf = (unsigned long *)buff;
453
454         res = wait_recfifo_ready(flctl, sector);
455
456         if (res != FL_ERROR) {
457                 for (i = 0; i < 4; i++) {
458                         ecc_buf[i] = readl(FLECFIFO(flctl));
459                         ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
460                 }
461         }
462
463         return res;
464 }
465
466 static void write_fiforeg(struct sh_flctl *flctl, int rlen,
467                                                 unsigned int offset)
468 {
469         int i, len_4align;
470         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
471
472         len_4align = (rlen + 3) / 4;
473         for (i = 0; i < len_4align; i++) {
474                 wait_wfifo_ready(flctl);
475                 writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
476         }
477 }
478
479 static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
480                                                 unsigned int offset)
481 {
482         int i, len_4align;
483         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
484
485         len_4align = (rlen + 3) / 4;
486
487         for (i = 0; i < len_4align; i++)
488                 buf[i] = cpu_to_be32(buf[i]);
489
490         /* initiate DMA transfer */
491         if (flctl->chan_fifo0_tx && rlen >= 32 &&
492                 flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
493                         return; /* DMA success */
494
495         /* do polling transfer */
496         for (i = 0; i < len_4align; i++) {
497                 wait_wecfifo_ready(flctl);
498                 writel(buf[i], FLECFIFO(flctl));
499         }
500 }
501
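/*
 * Program FLCMNCR/FLCMDCR/FLCMCDR for a single NAND command: flcmcdr_val
 * carries the raw command byte(s), FLCMDCR is built from the number of
 * address cycles plus the DOCMD/CDSRC/SELRW flags each command needs, and
 * FLCMNCR toggles SNAND_E (2048-byte page mode) and SEL_16BIT (16-bit bus
 * data transfers), as used throughout this driver.
 */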
502 static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
503 {
504         struct sh_flctl *flctl = mtd_to_flctl(mtd);
505         uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
506         uint32_t flcmdcr_val, addr_len_bytes = 0;
507
508         /* Set SNAND bit if page size is 2048 bytes */
509         if (flctl->page_size)
510                 flcmncr_val |= SNAND_E;
511         else
512                 flcmncr_val &= ~SNAND_E;
513
514         /* default FLCMDCR val */
515         flcmdcr_val = DOCMD1_E | DOADR_E;
516
517         /* Set for FLCMDCR */
518         switch (cmd) {
519         case NAND_CMD_ERASE1:
520                 addr_len_bytes = flctl->erase_ADRCNT;
521                 flcmdcr_val |= DOCMD2_E;
522                 break;
523         case NAND_CMD_READ0:
524         case NAND_CMD_READOOB:
525         case NAND_CMD_RNDOUT:
526                 addr_len_bytes = flctl->rw_ADRCNT;
527                 flcmdcr_val |= CDSRC_E;
528                 if (flctl->chip.options & NAND_BUSWIDTH_16)
529                         flcmncr_val |= SEL_16BIT;
530                 break;
531         case NAND_CMD_SEQIN:
532                 /* the command sent is READ0, READ1 or READOOB (see SEQIN) */
533                 flcmdcr_val &= ~DOADR_E;        /* ONLY execute 1st cmd */
534                 break;
535         case NAND_CMD_PAGEPROG:
536                 addr_len_bytes = flctl->rw_ADRCNT;
537                 flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
538                 if (flctl->chip.options & NAND_BUSWIDTH_16)
539                         flcmncr_val |= SEL_16BIT;
540                 break;
541         case NAND_CMD_READID:
542                 flcmncr_val &= ~SNAND_E;
543                 flcmdcr_val |= CDSRC_E;
544                 addr_len_bytes = ADRCNT_1;
545                 break;
546         case NAND_CMD_STATUS:
547         case NAND_CMD_RESET:
548                 flcmncr_val &= ~SNAND_E;
549                 flcmdcr_val &= ~(DOADR_E | DOSR_E);
550                 break;
551         default:
552                 break;
553         }
554
555         /* Set address bytes parameter */
556         flcmdcr_val |= addr_len_bytes;
557
558         /* Now actually write */
559         writel(flcmncr_val, FLCMNCR(flctl));
560         writel(flcmdcr_val, FLCMDCR(flctl));
561         writel(flcmcdr_val, FLCMCDR(flctl));
562 }
563
564 static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
565                                 uint8_t *buf, int oob_required, int page)
566 {
567         chip->read_buf(mtd, buf, mtd->writesize);
568         if (oob_required)
569                 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
570         return 0;
571 }
572
573 static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
574                                    const uint8_t *buf, int oob_required)
575 {
576         chip->write_buf(mtd, buf, mtd->writesize);
577         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
578         return 0;
579 }
580
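/*
 * Page read with 4-symbol hardware ECC: the page is transferred in 512-byte
 * sectors, each followed by 16 bytes of ECC/OOB data, and the MTD ecc_stats
 * counters are updated from the per-sector ECC result.
 */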
581 static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
582 {
583         struct sh_flctl *flctl = mtd_to_flctl(mtd);
584         int sector, page_sectors;
585         enum flctl_ecc_res_t ecc_result;
586
587         page_sectors = flctl->page_size ? 4 : 1;
588
589         set_cmd_regs(mtd, NAND_CMD_READ0,
590                 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
591
592         writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
593                  FLCMNCR(flctl));
594         writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
595         writel(page_addr << 2, FLADR(flctl));
596
597         empty_fifo(flctl);
598         start_translation(flctl);
599
600         for (sector = 0; sector < page_sectors; sector++) {
601                 read_fiforeg(flctl, 512, 512 * sector);
602
603                 ecc_result = read_ecfiforeg(flctl,
604                         &flctl->done_buff[mtd->writesize + 16 * sector],
605                         sector);
606
607                 switch (ecc_result) {
608                 case FL_REPAIRABLE:
609                         dev_info(&flctl->pdev->dev,
610                                 "applied ecc on page 0x%x\n", page_addr);
611                         flctl->mtd.ecc_stats.corrected++;
612                         break;
613                 case FL_ERROR:
614                         dev_warn(&flctl->pdev->dev,
615                                 "page 0x%x contains corrupted data\n",
616                                 page_addr);
617                         flctl->mtd.ecc_stats.failed++;
618                         break;
619                 default:
620                         ;
621                 }
622         }
623
624         wait_completion(flctl);
625
626         writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
627                         FLCMNCR(flctl));
628 }
629
630 static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
631 {
632         struct sh_flctl *flctl = mtd_to_flctl(mtd);
633         int page_sectors = flctl->page_size ? 4 : 1;
634         int i;
635
636         set_cmd_regs(mtd, NAND_CMD_READ0,
637                 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
638
639         empty_fifo(flctl);
640
641         for (i = 0; i < page_sectors; i++) {
642                 set_addr(mtd, (512 + 16) * i + 512, page_addr);
643                 writel(16, FLDTCNTR(flctl));
644
645                 start_translation(flctl);
646                 read_fiforeg(flctl, 16, 16 * i);
647                 wait_completion(flctl);
648         }
649 }
650
651 static void execmd_write_page_sector(struct mtd_info *mtd)
652 {
653         struct sh_flctl *flctl = mtd_to_flctl(mtd);
654         int page_addr = flctl->seqin_page_addr;
655         int sector, page_sectors;
656
657         page_sectors = flctl->page_size ? 4 : 1;
658
659         set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
660                         (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
661
662         empty_fifo(flctl);
663         writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
664         writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
665         writel(page_addr << 2, FLADR(flctl));
666         start_translation(flctl);
667
668         for (sector = 0; sector < page_sectors; sector++) {
669                 write_fiforeg(flctl, 512, 512 * sector);
670                 write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
671         }
672
673         wait_completion(flctl);
674         writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
675 }
676
677 static void execmd_write_oob(struct mtd_info *mtd)
678 {
679         struct sh_flctl *flctl = mtd_to_flctl(mtd);
680         int page_addr = flctl->seqin_page_addr;
681         int sector, page_sectors;
682
683         page_sectors = flctl->page_size ? 4 : 1;
684
685         set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
686                         (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
687
688         for (sector = 0; sector < page_sectors; sector++) {
689                 empty_fifo(flctl);
690                 set_addr(mtd, sector * 528 + 512, page_addr);
691                 writel(16, FLDTCNTR(flctl));    /* set read size */
692
693                 start_translation(flctl);
694                 write_fiforeg(flctl, 16, 16 * sector);
695                 wait_completion(flctl);
696         }
697 }
698
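/*
 * cmdfunc hook of the NAND core.  Commands are mapped onto the FLCTL
 * "translation" engine: read-type commands execute immediately and leave
 * their result in done_buff (read_byte/read_buf only copy from there),
 * while SEQIN just records column/page so the following PAGEPROG can
 * program the whole page in one transaction.
 */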
699 static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
700                         int column, int page_addr)
701 {
702         struct sh_flctl *flctl = mtd_to_flctl(mtd);
703         uint32_t read_cmd = 0;
704
705         pm_runtime_get_sync(&flctl->pdev->dev);
706
707         flctl->read_bytes = 0;
708         if (command != NAND_CMD_PAGEPROG)
709                 flctl->index = 0;
710
711         switch (command) {
712         case NAND_CMD_READ1:
713         case NAND_CMD_READ0:
714                 if (flctl->hwecc) {
715                         /* read page with hwecc */
716                         execmd_read_page_sector(mtd, page_addr);
717                         break;
718                 }
719                 if (flctl->page_size)
720                         set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
721                                 | command);
722                 else
723                         set_cmd_regs(mtd, command, command);
724
725                 set_addr(mtd, 0, page_addr);
726
727                 flctl->read_bytes = mtd->writesize + mtd->oobsize;
728                 if (flctl->chip.options & NAND_BUSWIDTH_16)
729                         column >>= 1;
730                 flctl->index += column;
731                 goto read_normal_exit;
732
733         case NAND_CMD_READOOB:
734                 if (flctl->hwecc) {
735                         /* read page with hwecc */
736                         execmd_read_oob(mtd, page_addr);
737                         break;
738                 }
739
740                 if (flctl->page_size) {
741                         set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
742                                 | NAND_CMD_READ0);
743                         set_addr(mtd, mtd->writesize, page_addr);
744                 } else {
745                         set_cmd_regs(mtd, command, command);
746                         set_addr(mtd, 0, page_addr);
747                 }
748                 flctl->read_bytes = mtd->oobsize;
749                 goto read_normal_exit;
750
751         case NAND_CMD_RNDOUT:
752                 if (flctl->hwecc)
753                         break;
754
755                 if (flctl->page_size)
756                         set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
757                                 | command);
758                 else
759                         set_cmd_regs(mtd, command, command);
760
761                 set_addr(mtd, column, 0);
762
763                 flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
764                 goto read_normal_exit;
765
766         case NAND_CMD_READID:
767                 set_cmd_regs(mtd, command, command);
768
769                 /* READID is always performed using an 8-bit bus */
770                 if (flctl->chip.options & NAND_BUSWIDTH_16)
771                         column <<= 1;
772                 set_addr(mtd, column, 0);
773
774                 flctl->read_bytes = 8;
775                 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
776                 empty_fifo(flctl);
777                 start_translation(flctl);
778                 read_fiforeg(flctl, flctl->read_bytes, 0);
779                 wait_completion(flctl);
780                 break;
781
782         case NAND_CMD_ERASE1:
783                 flctl->erase1_page_addr = page_addr;
784                 break;
785
786         case NAND_CMD_ERASE2:
787                 set_cmd_regs(mtd, NAND_CMD_ERASE1,
788                         (command << 8) | NAND_CMD_ERASE1);
789                 set_addr(mtd, -1, flctl->erase1_page_addr);
790                 start_translation(flctl);
791                 wait_completion(flctl);
792                 break;
793
794         case NAND_CMD_SEQIN:
795                 if (!flctl->page_size) {
796                         /* output read command */
797                         if (column >= mtd->writesize) {
798                                 column -= mtd->writesize;
799                                 read_cmd = NAND_CMD_READOOB;
800                         } else if (column < 256) {
801                                 read_cmd = NAND_CMD_READ0;
802                         } else {
803                                 column -= 256;
804                                 read_cmd = NAND_CMD_READ1;
805                         }
806                 }
807                 flctl->seqin_column = column;
808                 flctl->seqin_page_addr = page_addr;
809                 flctl->seqin_read_cmd = read_cmd;
810                 break;
811
812         case NAND_CMD_PAGEPROG:
813                 empty_fifo(flctl);
814                 if (!flctl->page_size) {
815                         set_cmd_regs(mtd, NAND_CMD_SEQIN,
816                                         flctl->seqin_read_cmd);
817                         set_addr(mtd, -1, -1);
818                         writel(0, FLDTCNTR(flctl));     /* set 0 size */
819                         start_translation(flctl);
820                         wait_completion(flctl);
821                 }
822                 if (flctl->hwecc) {
823                         /* write page with hwecc */
824                         if (flctl->seqin_column == mtd->writesize)
825                                 execmd_write_oob(mtd);
826                         else if (!flctl->seqin_column)
827                                 execmd_write_page_sector(mtd);
828                         else
829                                 dev_err(&flctl->pdev->dev, "Invalid address\n");
830                         break;
831                 }
832                 set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
833                 set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
834                 writel(flctl->index, FLDTCNTR(flctl));  /* set write size */
835                 start_translation(flctl);
836                 write_fiforeg(flctl, flctl->index, 0);
837                 wait_completion(flctl);
838                 break;
839
840         case NAND_CMD_STATUS:
841                 set_cmd_regs(mtd, command, command);
842                 set_addr(mtd, -1, -1);
843
844                 flctl->read_bytes = 1;
845                 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
846                 start_translation(flctl);
847                 read_datareg(flctl, 0); /* read and end */
848                 break;
849
850         case NAND_CMD_RESET:
851                 set_cmd_regs(mtd, command, command);
852                 set_addr(mtd, -1, -1);
853
854                 writel(0, FLDTCNTR(flctl));     /* set 0 size */
855                 start_translation(flctl);
856                 wait_completion(flctl);
857                 break;
858
859         default:
860                 break;
861         }
862         goto runtime_exit;
863
864 read_normal_exit:
865         writel(flctl->read_bytes, FLDTCNTR(flctl));     /* set read size */
866         empty_fifo(flctl);
867         start_translation(flctl);
868         read_fiforeg(flctl, flctl->read_bytes, 0);
869         wait_completion(flctl);
870 runtime_exit:
871         pm_runtime_put_sync(&flctl->pdev->dev);
872         return;
873 }
874
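/*
 * Only chip select 0 is supported.  Selecting it adds a device PM QoS
 * latency request (value 100, presumably to keep the SoC out of deep
 * power-saving states during NAND access) and optionally asserts HOLDEN;
 * deselecting removes the QoS request again.
 */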
875 static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
876 {
877         struct sh_flctl *flctl = mtd_to_flctl(mtd);
878         int ret;
879
880         switch (chipnr) {
881         case -1:
882                 flctl->flcmncr_base &= ~CE0_ENABLE;
883
884                 pm_runtime_get_sync(&flctl->pdev->dev);
885                 writel(flctl->flcmncr_base, FLCMNCR(flctl));
886
887                 if (flctl->qos_request) {
888                         dev_pm_qos_remove_request(&flctl->pm_qos);
889                         flctl->qos_request = 0;
890                 }
891
892                 pm_runtime_put_sync(&flctl->pdev->dev);
893                 break;
894         case 0:
895                 flctl->flcmncr_base |= CE0_ENABLE;
896
897                 if (!flctl->qos_request) {
898                         ret = dev_pm_qos_add_request(&flctl->pdev->dev,
899                                                         &flctl->pm_qos,
900                                                         DEV_PM_QOS_LATENCY,
901                                                         100);
902                         if (ret < 0)
903                                 dev_err(&flctl->pdev->dev,
904                                         "PM QoS request failed: %d\n", ret);
905                         flctl->qos_request = 1;
906                 }
907
908                 if (flctl->holden) {
909                         pm_runtime_get_sync(&flctl->pdev->dev);
910                         writel(HOLDEN, FLHOLDCR(flctl));
911                         pm_runtime_put_sync(&flctl->pdev->dev);
912                 }
913                 break;
914         default:
915                 BUG();
916         }
917 }
918
919 static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
920 {
921         struct sh_flctl *flctl = mtd_to_flctl(mtd);
922
923         memcpy(&flctl->done_buff[flctl->index], buf, len);
924         flctl->index += len;
925 }
926
927 static uint8_t flctl_read_byte(struct mtd_info *mtd)
928 {
929         struct sh_flctl *flctl = mtd_to_flctl(mtd);
930         uint8_t data;
931
932         data = flctl->done_buff[flctl->index];
933         flctl->index++;
934         return data;
935 }
936
937 static uint16_t flctl_read_word(struct mtd_info *mtd)
938 {
939         struct sh_flctl *flctl = mtd_to_flctl(mtd);
940         uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
941
942         flctl->index += 2;
943         return *buf;
944 }
945
946 static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
947 {
948         struct sh_flctl *flctl = mtd_to_flctl(mtd);
949
950         memcpy(buf, &flctl->done_buff[flctl->index], len);
951         flctl->index += len;
952 }
953
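/*
 * Runs between nand_scan_ident() and nand_scan_tail(): choose the read/write
 * and erase address-cycle counts from the detected page and chip size, then
 * wire up either the 4-symbol hardware ECC (10 ECC bytes per 512-byte step,
 * strength 4) or software ECC.
 */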
954 static int flctl_chip_init_tail(struct mtd_info *mtd)
955 {
956         struct sh_flctl *flctl = mtd_to_flctl(mtd);
957         struct nand_chip *chip = &flctl->chip;
958
959         if (mtd->writesize == 512) {
960                 flctl->page_size = 0;
961                 if (chip->chipsize > (32 << 20)) {
962                         /* bigger than 32MB */
963                         flctl->rw_ADRCNT = ADRCNT_4;
964                         flctl->erase_ADRCNT = ADRCNT_3;
965                 } else if (chip->chipsize > (2 << 16)) {
966                         /* bigger than 128KB */
967                         flctl->rw_ADRCNT = ADRCNT_3;
968                         flctl->erase_ADRCNT = ADRCNT_2;
969                 } else {
970                         flctl->rw_ADRCNT = ADRCNT_2;
971                         flctl->erase_ADRCNT = ADRCNT_1;
972                 }
973         } else {
974                 flctl->page_size = 1;
975                 if (chip->chipsize > (128 << 20)) {
976                         /* bigger than 128MB */
977                         flctl->rw_ADRCNT = ADRCNT2_E;
978                         flctl->erase_ADRCNT = ADRCNT_3;
979                 } else if (chip->chipsize > (8 << 16)) {
980                         /* bigger than 512KB */
981                         flctl->rw_ADRCNT = ADRCNT_4;
982                         flctl->erase_ADRCNT = ADRCNT_2;
983                 } else {
984                         flctl->rw_ADRCNT = ADRCNT_3;
985                         flctl->erase_ADRCNT = ADRCNT_1;
986                 }
987         }
988
989         if (flctl->hwecc) {
990                 if (mtd->writesize == 512) {
991                         chip->ecc.layout = &flctl_4secc_oob_16;
992                         chip->badblock_pattern = &flctl_4secc_smallpage;
993                 } else {
994                         chip->ecc.layout = &flctl_4secc_oob_64;
995                         chip->badblock_pattern = &flctl_4secc_largepage;
996                 }
997
998                 chip->ecc.size = 512;
999                 chip->ecc.bytes = 10;
1000                 chip->ecc.strength = 4;
1001                 chip->ecc.read_page = flctl_read_page_hwecc;
1002                 chip->ecc.write_page = flctl_write_page_hwecc;
1003                 chip->ecc.mode = NAND_ECC_HW;
1004
1005                 /* 4 symbols ECC enabled */
1006                 flctl->flcmncr_base |= _4ECCEN;
1007         } else {
1008                 chip->ecc.mode = NAND_ECC_SOFT;
1009         }
1010
1011         return 0;
1012 }
1013
1014 static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
1015 {
1016         struct sh_flctl *flctl = dev_id;
1017
1018         dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
1019         writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
1020
1021         return IRQ_HANDLED;
1022 }
1023
1024 #ifdef CONFIG_OF
1025 struct flctl_soc_config {
1026         unsigned long flcmncr_val;
1027         unsigned has_hwecc:1;
1028         unsigned use_holden:1;
1029 };
1030
1031 static struct flctl_soc_config flctl_sh7372_config = {
1032         .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
1033         .has_hwecc = 1,
1034         .use_holden = 1,
1035 };
1036
1037 static const struct of_device_id of_flctl_match[] = {
1038         { .compatible = "renesas,shmobile-flctl-sh7372",
1039                                 .data = &flctl_sh7372_config },
1040         {},
1041 };
1042 MODULE_DEVICE_TABLE(of, of_flctl_match);
1043
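/*
 * Build platform data from the device tree: the SoC-specific FLCMNCR value
 * and feature flags come from the matched of_device_id, while only the NAND
 * bus width is taken from generic DT properties.
 */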
1044 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1045 {
1046         const struct of_device_id *match;
1047         struct flctl_soc_config *config;
1048         struct sh_flctl_platform_data *pdata;
1049         struct device_node *dn = dev->of_node;
1050         int ret;
1051
1052         match = of_match_device(of_flctl_match, dev);
1053         if (match) {
1054                 config = (struct flctl_soc_config *)match->data;
1055         } else {
1056                 dev_err(dev, "%s: no OF configuration attached\n", __func__);
1057                 return NULL;
1058         }
1059
1060         pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
1061                                                                 GFP_KERNEL);
1062         if (!pdata) {
1063                 dev_err(dev, "%s: failed to allocate config data\n", __func__);
1064                 return NULL;
1065         }
1066
1067         /* set SoC specific options */
1068         pdata->flcmncr_val = config->flcmncr_val;
1069         pdata->has_hwecc = config->has_hwecc;
1070         pdata->use_holden = config->use_holden;
1071
1072         /* parse user defined options */
1073         ret = of_get_nand_bus_width(dn);
1074         if (ret == 16) {
1075                 pdata->flcmncr_val |= SEL_16BIT;
1076         } else if (ret != 8) {
1077                 dev_err(dev, "%s: invalid bus width\n", __func__);
1078                 return NULL;
1079         }
1080
1081         return pdata;
1082 }
1083 #else /* CONFIG_OF */
1084 #define of_flctl_match NULL
1085 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1086 {
1087         return NULL;
1088 }
1089 #endif /* CONFIG_OF */
1090
1091 static int flctl_probe(struct platform_device *pdev)
1092 {
1093         struct resource *res;
1094         struct sh_flctl *flctl;
1095         struct mtd_info *flctl_mtd;
1096         struct nand_chip *nand;
1097         struct sh_flctl_platform_data *pdata;
1098         int ret = -ENXIO;
1099         int irq;
1100         struct mtd_part_parser_data ppdata = {};
1101
1102         flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
1103         if (!flctl) {
1104                 dev_err(&pdev->dev, "failed to allocate driver data\n");
1105                 return -ENOMEM;
1106         }
1107
1108         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1109         if (!res) {
1110                 dev_err(&pdev->dev, "failed to get I/O memory\n");
1111                 goto err_iomap;
1112         }
1113
1114         flctl->reg = ioremap(res->start, resource_size(res));
1115         if (flctl->reg == NULL) {
1116                 dev_err(&pdev->dev, "failed to remap I/O memory\n");
1117                 goto err_iomap;
1118         }
1119
1120         irq = platform_get_irq(pdev, 0);
1121         if (irq < 0) {
1122                 dev_err(&pdev->dev, "failed to get flste irq data\n");
1123                 goto err_flste;
1124         }
1125
1126         ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
1127         if (ret) {
1128                 dev_err(&pdev->dev, "request interrupt failed.\n");
1129                 goto err_flste;
1130         }
1131
1132         if (pdev->dev.of_node)
1133                 pdata = flctl_parse_dt(&pdev->dev);
1134         else
1135                 pdata = pdev->dev.platform_data;
1136
1137         if (!pdata) {
1138                 dev_err(&pdev->dev, "no setup data defined\n");
1139                 ret = -EINVAL;
1140                 goto err_pdata;
1141         }
1142
1143         platform_set_drvdata(pdev, flctl);
1144         flctl_mtd = &flctl->mtd;
1145         nand = &flctl->chip;
1146         flctl_mtd->priv = nand;
1147         flctl->pdev = pdev;
1148         flctl->hwecc = pdata->has_hwecc;
1149         flctl->holden = pdata->use_holden;
1150         flctl->flcmncr_base = pdata->flcmncr_val;
1151         flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
1152
1153         /* Set address of hardware control function */
1154         /* 20 us command delay time */
1155         nand->chip_delay = 20;
1156
1157         nand->read_byte = flctl_read_byte;
1158         nand->write_buf = flctl_write_buf;
1159         nand->read_buf = flctl_read_buf;
1160         nand->select_chip = flctl_select_chip;
1161         nand->cmdfunc = flctl_cmdfunc;
1162
1163         if (pdata->flcmncr_val & SEL_16BIT) {
1164                 nand->options |= NAND_BUSWIDTH_16;
1165                 nand->read_word = flctl_read_word;
1166         }
1167
1168         pm_runtime_enable(&pdev->dev);
1169         pm_runtime_resume(&pdev->dev);
1170
1171         flctl_setup_dma(flctl);
1172
1173         ret = nand_scan_ident(flctl_mtd, 1, NULL);
1174         if (ret)
1175                 goto err_chip;
1176
1177         ret = flctl_chip_init_tail(flctl_mtd);
1178         if (ret)
1179                 goto err_chip;
1180
1181         ret = nand_scan_tail(flctl_mtd);
1182         if (ret)
1183                 goto err_chip;
1184
1185         ppdata.of_node = pdev->dev.of_node;
1186         ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
1187                         pdata->nr_parts);
1188
1189         return 0;
1190
1191 err_chip:
1192         flctl_release_dma(flctl);
1193         pm_runtime_disable(&pdev->dev);
1194 err_pdata:
1195         free_irq(irq, flctl);
1196 err_flste:
1197         iounmap(flctl->reg);
1198 err_iomap:
1199         kfree(flctl);
1200         return ret;
1201 }
1202
1203 static int flctl_remove(struct platform_device *pdev)
1204 {
1205         struct sh_flctl *flctl = platform_get_drvdata(pdev);
1206
1207         flctl_release_dma(flctl);
1208         nand_release(&flctl->mtd);
1209         pm_runtime_disable(&pdev->dev);
1210         free_irq(platform_get_irq(pdev, 0), flctl);
1211         iounmap(flctl->reg);
1212         kfree(flctl);
1213
1214         return 0;
1215 }
1216
1217 static struct platform_driver flctl_driver = {
1218         .remove         = flctl_remove,
1219         .driver = {
1220                 .name   = "sh_flctl",
1221                 .owner  = THIS_MODULE,
1222                 .of_match_table = of_flctl_match,
1223         },
1224 };
1225
1226 static int __init flctl_nand_init(void)
1227 {
1228         return platform_driver_probe(&flctl_driver, flctl_probe);
1229 }
1230
1231 static void __exit flctl_nand_cleanup(void)
1232 {
1233         platform_driver_unregister(&flctl_driver);
1234 }
1235
1236 module_init(flctl_nand_init);
1237 module_exit(flctl_nand_cleanup);
1238
1239 MODULE_LICENSE("GPL");
1240 MODULE_AUTHOR("Yoshihiro Shimoda");
1241 MODULE_DESCRIPTION("SuperH FLCTL driver");
1242 MODULE_ALIAS("platform:sh_flctl");