// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>

/* NANDc reg offsets */
#define NAND_FLASH_CMD                  0x00
#define NAND_ADDR0                      0x04
#define NAND_ADDR1                      0x08
#define NAND_FLASH_CHIP_SELECT          0x0c
#define NAND_EXEC_CMD                   0x10
#define NAND_FLASH_STATUS               0x14
#define NAND_BUFFER_STATUS              0x18
#define NAND_DEV0_CFG0                  0x20
#define NAND_DEV0_CFG1                  0x24
#define NAND_DEV0_ECC_CFG               0x28
#define NAND_AUTO_STATUS_EN             0x2c
#define NAND_DEV1_CFG0                  0x30
#define NAND_DEV1_CFG1                  0x34
#define NAND_READ_ID                    0x40
#define NAND_READ_STATUS                0x44
#define NAND_DEV_CMD0                   0xa0
#define NAND_DEV_CMD1                   0xa4
#define NAND_DEV_CMD2                   0xa8
#define NAND_DEV_CMD_VLD                0xac
#define SFLASHC_BURST_CFG               0xe0
#define NAND_ERASED_CW_DETECT_CFG       0xe8
#define NAND_ERASED_CW_DETECT_STATUS    0xec
#define NAND_EBI2_ECC_BUF_CFG           0xf0
#define FLASH_BUF_ACC                   0x100

#define NAND_CTRL                       0xf00
#define NAND_VERSION                    0xf08
#define NAND_READ_LOCATION_0            0xf20
#define NAND_READ_LOCATION_1            0xf24
#define NAND_READ_LOCATION_2            0xf28
#define NAND_READ_LOCATION_3            0xf2c
#define NAND_READ_LOCATION_LAST_CW_0    0xf40
#define NAND_READ_LOCATION_LAST_CW_1    0xf44
#define NAND_READ_LOCATION_LAST_CW_2    0xf48
#define NAND_READ_LOCATION_LAST_CW_3    0xf4c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE           0xdead
#define NAND_DEV_CMD_VLD_RESTORE        0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC                        BIT(4)
#define LAST_PAGE                       BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL                    0
#define DM_EN                           BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR                       BIT(4)
#define FS_READY_BSY_N                  BIT(5)
#define FS_MPU_ERR                      BIT(8)
#define FS_DEVICE_STS_ERR               BIT(16)
#define FS_DEVICE_WP                    BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT            BIT(8)
#define BS_CORRECTABLE_ERR_MSK          0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE      4
#define CW_PER_PAGE                     6
#define UD_SIZE_BYTES                   9
#define UD_SIZE_BYTES_MASK              GENMASK(18, 9)
#define ECC_PARITY_SIZE_BYTES_RS        19
#define SPARE_SIZE_BYTES                23
#define SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
#define NUM_ADDR_CYCLES                 27
#define STATUS_BFR_READ                 30
#define SET_RD_MODE_AFTER_STATUS        31

/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE           0
#define WIDE_FLASH                      1
#define NAND_RECOVERY_CYCLES            2
#define CS_ACTIVE_BSY                   5
#define BAD_BLOCK_BYTE_NUM              6
#define BAD_BLOCK_IN_SPARE_AREA         16
#define WR_RD_BSY_GAP                   17
#define ENABLE_BCH_ECC                  27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE             0
#define ECC_SW_RESET                    1
#define ECC_MODE                        4
#define ECC_PARITY_SIZE_BYTES_BCH       8
#define ECC_NUM_DATA_BYTES              16
#define ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
#define ECC_FORCE_CLK_OPEN              30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR                       0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD                  BIT(0)
#define READ_STOP_VLD                   BIT(1)
#define WRITE_START_VLD                 BIT(2)
#define ERASE_START_VLD                 BIT(3)
#define SEQ_READ_START_VLD              BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS                       0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK              1
#define AUTO_DETECT_RES                 0
#define MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED                 BIT(7)
#define CODEWORD_ALL_ERASED             BIT(6)
#define PAGE_ERASED                     BIT(5)
#define CODEWORD_ERASED                 BIT(4)
#define ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET            0
#define READ_LOCATION_SIZE              16
#define READ_LOCATION_LAST              31

/* Version Mask */
#define NAND_VERSION_MAJOR_MASK         0xf0000000
#define NAND_VERSION_MAJOR_SHIFT        28
#define NAND_VERSION_MINOR_MASK         0x0fff0000
#define NAND_VERSION_MINOR_SHIFT        16

/* NAND OP_CMDs */
#define OP_PAGE_READ                    0x2
#define OP_PAGE_READ_WITH_ECC           0x3
#define OP_PAGE_READ_WITH_ECC_SPARE     0x4
#define OP_PAGE_READ_ONFI_READ          0x5
#define OP_PROGRAM_PAGE                 0x6
#define OP_PAGE_PROGRAM_WITH_ECC        0x7
#define OP_PROGRAM_PAGE_SPARE           0x9
#define OP_BLOCK_ERASE                  0xa
#define OP_CHECK_STATUS                 0xc
#define OP_FETCH_ID                     0xb
#define OP_RESET_DEVICE                 0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL            (READ_START_VLD | WRITE_START_VLD | \
                                         ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN                     BIT(0)
/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks
 * (512 data bytes plus the 4 spare bytes covered by the ECC engine). the
 * driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE                 512

/*
 * the largest page size we support is 8K; such a page has 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD                      (3 * MAX_NUM_STEPS)
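
/*
 * Worked example (illustrative): an 8K page has SZ_8K / NANDC_STEP_SIZE = 16
 * codewords, so a full-page scan reads back at most 3 * 16 = 48 status
 * registers into reg_read_buf.
 */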

/* ECC modes supported by the controller */
#define ECC_NONE        BIT(0)
#define ECC_RS_4BIT     BIT(1)
#define ECC_BCH_4BIT    BIT(2)
#define ECC_BCH_8BIT    BIT(3)

#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)     \
nandc_set_reg(chip, reg,                        \
              ((cw_offset) << READ_LOCATION_OFFSET) |           \
              ((read_size) << READ_LOCATION_SIZE) |                     \
              ((is_last_read_loc) << READ_LOCATION_LAST))

#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)      \
nandc_set_reg(chip, reg,                        \
              ((cw_offset) << READ_LOCATION_OFFSET) |           \
              ((read_size) << READ_LOCATION_SIZE) |                     \
              ((is_last_read_loc) << READ_LOCATION_LAST))
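
/*
 * Illustrative packing (field widths assumed, derived only from the shift
 * definitions above): the codeword offset sits in the low bits, the read
 * size starts at bit 16 and the last-location flag is bit 31. E.g. a
 * location covering 516 bytes at offset 0, marked as the last one, encodes
 * as (516 << READ_LOCATION_SIZE) | (1 << READ_LOCATION_LAST).
 */
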
/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
        ((chip)->reg_read_dma + \
        ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS        32
#define QPIC_PER_CW_CMD_SGL             32
#define QPIC_PER_CW_DATA_SGL            8

#define QPIC_NAND_COMPLETION_TIMEOUT    msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT                 BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD                    BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL               BIT(2)
/*
 * Erased codeword status is used twice in a single transfer, so this flag
 * determines the current value of the erased codeword status register
 */
#define NAND_ERASED_CW_SET              BIT(4)

#define MAX_ADDRESS_CYCLE               5

/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 * @txn_done - completion for NAND transfer.
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the first command
 *                 element of the current sgl. It is used for the size
 *                 calculation of the current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for second DMA desc completion before making
 *                           the NAND transfer completion.
 */
struct bam_transaction {
        struct bam_cmd_element *bam_ce;
        struct scatterlist *cmd_sgl;
        struct scatterlist *data_sgl;
        struct dma_async_tx_descriptor *last_data_desc;
        struct dma_async_tx_descriptor *last_cmd_desc;
        struct completion txn_done;
        u32 bam_ce_pos;
        u32 bam_ce_start;
        u32 cmd_sgl_pos;
        u32 cmd_sgl_start;
        u32 tx_sgl_pos;
        u32 tx_sgl_start;
        u32 rx_sgl_pos;
        u32 rx_sgl_start;
        bool wait_second_completion;
};

/*
 * This data type corresponds to the nand dma descriptor
 * @dma_desc - low level DMA engine descriptor
 * @list - list for desc_info
 *
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *            ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dir - DMA transfer direction
 */
struct desc_info {
        struct dma_async_tx_descriptor *dma_desc;
        struct list_head node;

        union {
                struct scatterlist adm_sgl;
                struct {
                        struct scatterlist *bam_sgl;
                        int sgl_cnt;
                };
        };
        enum dma_data_direction dir;
};

/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
        __le32 cmd;
        __le32 addr0;
        __le32 addr1;
        __le32 chip_sel;
        __le32 exec;

        __le32 cfg0;
        __le32 cfg1;
        __le32 ecc_bch_cfg;

        __le32 clrflashstatus;
        __le32 clrreadstatus;

        __le32 cmd1;
        __le32 vld;

        __le32 orig_cmd1;
        __le32 orig_vld;

        __le32 ecc_buf_cfg;
        __le32 read_location0;
        __le32 read_location1;
        __le32 read_location2;
        __le32 read_location3;
        __le32 read_location_last0;
        __le32 read_location_last1;
        __le32 read_location_last2;
        __le32 read_location_last3;

        __le32 erased_cw_detect_cfg_clr;
        __le32 erased_cw_detect_cfg_set;
};

/*
 * NAND controller data struct
 *
 * @dev:                        parent device
 *
 * @base:                       MMIO base
 *
 * @core_clk:                   controller clock
 * @aon_clk:                    another controller clock
 *
 * @regs:                       a contiguous chunk of memory for DMA register
 *                              writes. contains the register values to be
 *                              written to controller
 *
 * @props:                      properties of current NAND controller,
 *                              initialized via DT match data
 *
 * @controller:                 base controller structure
 * @host_list:                  list containing all the chips attached to the
 *                              controller
 *
 * @chan:                       dma channel
 * @cmd_crci:                   ADM DMA CRCI for command flow control
 * @data_crci:                  ADM DMA CRCI for data flow control
 *
 * @desc_list:                  DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:                our local DMA buffer for page read/writes,
 *                              used when we can't use the buffer provided
 *                              by upper layers directly
 * @reg_read_buf:               local buffer for reading back registers via DMA
 *
 * @base_phys:                  physical base address of controller registers
 * @base_dma:                   dma base address of controller registers
 * @reg_read_dma:               contains dma address for register read buffer
 *
 * @buf_size/count/start:       markers for chip->legacy.read_buf/write_buf
 *                              functions
 * @max_cwperpage:              maximum QPIC codewords required. calculated
 *                              from all connected NAND devices pagesize
 *
 * @reg_read_pos:               marker for data read in reg_read_buf
 *
 * @cmd1/vld:                   some fixed controller register values
 *
 * @exec_opwrite:               flag to select correct number of code word
 *                              while reading status
 */
struct qcom_nand_controller {
        struct device *dev;

        void __iomem *base;

        struct clk *core_clk;
        struct clk *aon_clk;

        struct nandc_regs *regs;
        struct bam_transaction *bam_txn;

        const struct qcom_nandc_props *props;

        struct nand_controller controller;
        struct list_head host_list;

        union {
                /* will be used only by QPIC for BAM DMA */
                struct {
                        struct dma_chan *tx_chan;
                        struct dma_chan *rx_chan;
                        struct dma_chan *cmd_chan;
                };

                /* will be used only by EBI2 for ADM DMA */
                struct {
                        struct dma_chan *chan;
                        unsigned int cmd_crci;
                        unsigned int data_crci;
                };
        };

        struct list_head desc_list;

        u8              *data_buffer;
        __le32          *reg_read_buf;

        phys_addr_t base_phys;
        dma_addr_t base_dma;
        dma_addr_t reg_read_dma;

        int             buf_size;
        int             buf_count;
        int             buf_start;
        unsigned int    max_cwperpage;

        int reg_read_pos;

        u32 cmd1, vld;
        bool exec_opwrite;
};

/*
 * NAND special boot partitions
 *
 * @page_offset:                offset of the partition where spare data is not protected
 *                              by ECC (value in pages)
 * @page_size:                  size of the partition where spare data is not protected
 *                              by ECC (value in pages)
 */
struct qcom_nand_boot_partition {
        u32 page_offset;
        u32 page_size;
};

/*
 * Qcom op for each exec_op transfer
 *
 * @data_instr:                 data instruction pointer
 * @data_instr_idx:             data instruction index
 * @rdy_timeout_ms:             wait ready timeout in ms
 * @rdy_delay_ns:               Additional delay in ns
 * @addr1_reg:                  Address1 register value
 * @addr2_reg:                  Address2 register value
 * @cmd_reg:                    CMD register value
 * @flag:                       flag for misc instruction
 */
struct qcom_op {
        const struct nand_op_instr *data_instr;
        unsigned int data_instr_idx;
        unsigned int rdy_timeout_ms;
        unsigned int rdy_delay_ns;
        u32 addr1_reg;
        u32 addr2_reg;
        u32 cmd_reg;
        u8 flag;
};

/*
 * NAND chip structure
 *
 * @boot_partitions:            array of boot partitions where offset and size of the
 *                              boot partitions are stored
 *
 * @chip:                       base NAND chip structure
 * @node:                       list node to add itself to host_list in
 *                              qcom_nand_controller
 *
 * @nr_boot_partitions:         count of the boot partitions where spare data is not
 *                              protected by ECC
 *
 * @cs:                         chip select value for this chip
 * @cw_size:                    the number of bytes in a single step/codeword
 *                              of a page, consisting of all data, ecc, spare
 *                              and reserved bytes
 * @cw_data:                    the number of bytes within a codeword protected
 *                              by ECC
 * @ecc_bytes_hw:               ECC bytes used by controller hardware for this
 *                              chip
 *
 * @last_command:               keeps track of last command on this chip. used
 *                              for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:     NANDc register configurations needed for
 *                              ecc/non-ecc mode for the current nand flash
 *                              device
 *
 * @status:                     value to be returned if NAND_CMD_STATUS command
 *                              is executed
 * @codeword_fixup:             keep track of the current layout used by
 *                              the driver for read/write operation.
 * @use_ecc:                    request the controller to use ECC for the
 *                              upcoming read/write
 * @bch_enabled:                flag to tell whether BCH ECC mode is used
 */
struct qcom_nand_host {
        struct qcom_nand_boot_partition *boot_partitions;

        struct nand_chip chip;
        struct list_head node;

        int nr_boot_partitions;

        int cs;
        int cw_size;
        int cw_data;
        int ecc_bytes_hw;
        int spare_bytes;
        int bbm_size;

        int last_command;

        u32 cfg0, cfg1;
        u32 cfg0_raw, cfg1_raw;
        u32 ecc_buf_cfg;
        u32 ecc_bch_cfg;
        u32 clrflashstatus;
        u32 clrreadstatus;

        u8 status;
        bool codeword_fixup;
        bool use_ecc;
        bool bch_enabled;
};

/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 * @is_bam - whether NAND controller is using BAM
 * @is_qpic - whether NAND CTRL is part of qpic IP
 * @qpic_v2 - flag to indicate QPIC IP version 2
 * @use_codeword_fixup - whether NAND has different layout for boot partitions
 */
struct qcom_nandc_props {
        u32 ecc_modes;
        u32 dev_cmd_reg_start;
        bool is_bam;
        bool is_qpic;
        bool qpic_v2;
        bool use_codeword_fixup;
};

/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn;
        size_t bam_txn_size;
        unsigned int num_cw = nandc->max_cwperpage;
        void *bam_txn_buf;

        bam_txn_size =
                sizeof(*bam_txn) + num_cw *
                ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
                (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
                (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

        bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
        if (!bam_txn_buf)
                return NULL;

        bam_txn = bam_txn_buf;
        bam_txn_buf += sizeof(*bam_txn);

        bam_txn->bam_ce = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

        bam_txn->cmd_sgl = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

        bam_txn->data_sgl = bam_txn_buf;

        init_completion(&bam_txn->txn_done);

        return bam_txn;
}
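
/*
 * Illustrative layout of the single allocation above, assuming
 * max_cwperpage = 2: the bam_transaction header is followed, in order, by
 * 2 * QPIC_PER_CW_CMD_ELEMENTS command elements, 2 * QPIC_PER_CW_CMD_SGL
 * command sgl entries and 2 * QPIC_PER_CW_DATA_SGL data sgl entries.
 */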

/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (!nandc->props->is_bam)
                return;

        bam_txn->bam_ce_pos = 0;
        bam_txn->bam_ce_start = 0;
        bam_txn->cmd_sgl_pos = 0;
        bam_txn->cmd_sgl_start = 0;
        bam_txn->tx_sgl_pos = 0;
        bam_txn->tx_sgl_start = 0;
        bam_txn->rx_sgl_pos = 0;
        bam_txn->rx_sgl_start = 0;
        bam_txn->last_data_desc = NULL;
        bam_txn->wait_second_completion = false;

        sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_CMD_SGL);
        sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_DATA_SGL);

        reinit_completion(&bam_txn->txn_done);
}

/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
        struct bam_transaction *bam_txn = data;

        /*
         * In case of data transfer with NAND, 2 callbacks will be generated.
         * One for command channel and another one for data channel.
         * If current transaction has data descriptors
         * (i.e. wait_second_completion is true), then set this to false
         * and wait for second DMA descriptor completion.
         */
        if (bam_txn->wait_second_completion)
                bam_txn->wait_second_completion = false;
        else
                complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
        return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
        return container_of(chip->controller, struct qcom_nand_controller,
                            controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
        return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
                               u32 val)
{
        iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
                                          bool is_cpu)
{
        if (!nandc->props->is_bam)
                return;

        if (is_cpu)
                dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
                                        MAX_REG_RD *
                                        sizeof(*nandc->reg_read_buf),
                                        DMA_FROM_DEVICE);
        else
                dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
                                           MAX_REG_RD *
                                           sizeof(*nandc->reg_read_buf),
                                           DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
        switch (offset) {
        case NAND_FLASH_CMD:
                return &regs->cmd;
        case NAND_ADDR0:
                return &regs->addr0;
        case NAND_ADDR1:
                return &regs->addr1;
        case NAND_FLASH_CHIP_SELECT:
                return &regs->chip_sel;
        case NAND_EXEC_CMD:
                return &regs->exec;
        case NAND_FLASH_STATUS:
                return &regs->clrflashstatus;
        case NAND_DEV0_CFG0:
                return &regs->cfg0;
        case NAND_DEV0_CFG1:
                return &regs->cfg1;
        case NAND_DEV0_ECC_CFG:
                return &regs->ecc_bch_cfg;
        case NAND_READ_STATUS:
                return &regs->clrreadstatus;
        case NAND_DEV_CMD1:
                return &regs->cmd1;
        case NAND_DEV_CMD1_RESTORE:
                return &regs->orig_cmd1;
        case NAND_DEV_CMD_VLD:
                return &regs->vld;
        case NAND_DEV_CMD_VLD_RESTORE:
                return &regs->orig_vld;
        case NAND_EBI2_ECC_BUF_CFG:
                return &regs->ecc_buf_cfg;
        case NAND_READ_LOCATION_0:
                return &regs->read_location0;
        case NAND_READ_LOCATION_1:
                return &regs->read_location1;
        case NAND_READ_LOCATION_2:
                return &regs->read_location2;
        case NAND_READ_LOCATION_3:
                return &regs->read_location3;
        case NAND_READ_LOCATION_LAST_CW_0:
                return &regs->read_location_last0;
        case NAND_READ_LOCATION_LAST_CW_1:
                return &regs->read_location_last1;
        case NAND_READ_LOCATION_LAST_CW_2:
                return &regs->read_location_last2;
        case NAND_READ_LOCATION_LAST_CW_3:
                return &regs->read_location_last3;
        default:
                return NULL;
        }
}

static void nandc_set_reg(struct nand_chip *chip, int offset,
                          u32 val)
{
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        struct nandc_regs *regs = nandc->regs;
        __le32 *reg;

        reg = offset_to_nandc_reg(regs, offset);

        if (reg)
                *reg = cpu_to_le32(val);
}

/* Helper to check whether a given codeword is the last one in the page */
static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
        return cw == (ecc->steps - 1);
}

/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
                               int cw_offset, int read_size, int is_last_read_loc)
{
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        int reg_base = NAND_READ_LOCATION_0;

        if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
                reg_base = NAND_READ_LOCATION_LAST_CW_0;

        reg_base += reg * 4;

        if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
                return nandc_set_read_loc_last(chip, reg_base, cw_offset,
                                read_size, is_last_read_loc);
        else
                return nandc_set_read_loc_first(chip, reg_base, cw_offset,
                                read_size, is_last_read_loc);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
        struct nand_chip *chip = &host->chip;

        if (chip->options & NAND_BUSWIDTH_16)
                column >>= 1;

        nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
        nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
}
/*
 * update_rw_regs:      set up read/write register values; these will be
 *                      written to the NAND controller registers via DMA
 *
 * @num_cw:             number of steps for the read/write operation
 * @read:               read or write operation
 * @cw:                 which code word
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
        struct nand_chip *chip = &host->chip;
        u32 cmd, cfg0, cfg1, ecc_bch_cfg;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        if (read) {
                if (host->use_ecc)
                        cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
                else
                        cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
        } else {
                cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
        }

        if (host->use_ecc) {
                cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1;
                ecc_bch_cfg = host->ecc_bch_cfg;
        } else {
                cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1_raw;
                ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
        }

        nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
        nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
        nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
        nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
        if (!nandc->props->qpic_v2)
                nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
        nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
        nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
        nandc_set_reg(chip, NAND_EXEC_CMD, 1);

        if (read)
                nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
                                   host->cw_data : host->cw_size, 1);
}
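
/*
 * Illustrative example: for a 4-codeword read with ECC, the CW_PER_PAGE
 * field (bits 8:6 of CFG0) is rewritten with num_cw - 1 = 3 while the rest
 * of the cached value is kept: cfg0 = (host->cfg0 & ~(7U << 6)) | (3 << 6).
 */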

/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
                                  struct dma_chan *chan,
                                  unsigned long flags)
{
        struct desc_info *desc;
        struct scatterlist *sgl;
        unsigned int sgl_cnt;
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        enum dma_transfer_direction dir_eng;
        struct dma_async_tx_descriptor *dma_desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        if (chan == nandc->cmd_chan) {
                sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
                sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
                bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else if (chan == nandc->tx_chan) {
                sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
                sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
                bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else {
                sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
                sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
                bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        }

        sg_mark_end(sgl + sgl_cnt - 1);
        ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
        if (ret == 0) {
                dev_err(nandc->dev, "failure in mapping desc\n");
                kfree(desc);
                return -ENOMEM;
        }

        desc->sgl_cnt = sgl_cnt;
        desc->bam_sgl = sgl;

        dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
                                           flags);

        if (!dma_desc) {
                dev_err(nandc->dev, "failure in prep desc\n");
                dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
                kfree(desc);
                return -EINVAL;
        }

        desc->dma_desc = dma_desc;

        /* update last data/command descriptor */
        if (chan == nandc->cmd_chan)
                bam_txn->last_cmd_desc = dma_desc;
        else
                bam_txn->last_data_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
}

/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from bam transaction ce array and fills the same with required
 * data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
                                 int reg_off, const void *vaddr,
                                 int size, unsigned int flags)
{
        int bam_ce_size;
        int i, ret;
        struct bam_cmd_element *bam_ce_buffer;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

        /* fill the command desc */
        for (i = 0; i < size; i++) {
                if (read)
                        bam_prep_ce(&bam_ce_buffer[i],
                                    nandc_reg_phys(nandc, reg_off + 4 * i),
                                    BAM_READ_COMMAND,
                                    reg_buf_dma_addr(nandc,
                                                     (__le32 *)vaddr + i));
                else
                        bam_prep_ce_le32(&bam_ce_buffer[i],
                                         nandc_reg_phys(nandc, reg_off + 4 * i),
                                         BAM_WRITE_COMMAND,
                                         *((__le32 *)vaddr + i));
        }

        bam_txn->bam_ce_pos += size;

        /* use the separate sgl after this command */
        if (flags & NAND_BAM_NEXT_SGL) {
                bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
                bam_ce_size = (bam_txn->bam_ce_pos -
                                bam_txn->bam_ce_start) *
                                sizeof(struct bam_cmd_element);
                sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
                           bam_ce_buffer, bam_ce_size);
                bam_txn->cmd_sgl_pos++;
                bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

                if (flags & NAND_BAM_NWD) {
                        ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                     DMA_PREP_FENCE |
                                                     DMA_PREP_CMD);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
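
/*
 * Illustrative example: a caller such as write_reg_dma() writing two
 * consecutive registers reaches this function as
 *   prep_bam_dma_desc_cmd(nandc, false, NAND_ADDR0, vaddr, 2, flags);
 * which fills two command elements; if flags contains NAND_BAM_NEXT_SGL,
 * those elements are additionally bound into one command sgl entry.
 */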

/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
                                  const void *vaddr,
                                  int size, unsigned int flags)
{
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (read) {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
                           vaddr, size);
                bam_txn->rx_sgl_pos++;
        } else {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
                           vaddr, size);
                bam_txn->tx_sgl_pos++;

                /*
                 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
                 * is not set, form the DMA descriptor
                 */
                if (!(flags & NAND_BAM_NO_EOT)) {
                        ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                     DMA_PREP_INTERRUPT);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
                             int reg_off, const void *vaddr, int size,
                             bool flow_control)
{
        struct desc_info *desc;
        struct dma_async_tx_descriptor *dma_desc;
        struct scatterlist *sgl;
        struct dma_slave_config slave_conf;
        struct qcom_adm_peripheral_config periph_conf = {};
        enum dma_transfer_direction dir_eng;
        int ret;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        sgl = &desc->adm_sgl;

        sg_init_one(sgl, vaddr, size);

        if (read) {
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        } else {
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        }

        ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
        if (ret == 0) {
                ret = -ENOMEM;
                goto err;
        }

        memset(&slave_conf, 0x00, sizeof(slave_conf));

        slave_conf.device_fc = flow_control;
        if (read) {
                slave_conf.src_maxburst = 16;
                slave_conf.src_addr = nandc->base_dma + reg_off;
                if (nandc->data_crci) {
                        periph_conf.crci = nandc->data_crci;
                        slave_conf.peripheral_config = &periph_conf;
                        slave_conf.peripheral_size = sizeof(periph_conf);
                }
        } else {
                slave_conf.dst_maxburst = 16;
                slave_conf.dst_addr = nandc->base_dma + reg_off;
                if (nandc->cmd_crci) {
                        periph_conf.crci = nandc->cmd_crci;
                        slave_conf.peripheral_config = &periph_conf;
                        slave_conf.peripheral_size = sizeof(periph_conf);
                }
        }

        ret = dmaengine_slave_config(nandc->chan, &slave_conf);
        if (ret) {
                dev_err(nandc->dev, "failed to configure dma channel\n");
                goto err;
        }

        dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
        if (!dma_desc) {
                dev_err(nandc->dev, "failed to prepare desc\n");
                ret = -EINVAL;
                goto err;
        }

        desc->dma_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
err:
        kfree(desc);

        return ret;
}

/*
 * read_reg_dma:        prepares a descriptor to read a given number of
 *                      contiguous registers to the reg_read_buf pointer
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to read
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
                        int num_regs, unsigned int flags)
{
        bool flow_control = false;
        void *vaddr;

        vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
        nandc->reg_read_pos += num_regs;

        if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, first);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
                flow_control = true;

        return prep_adm_dma_desc(nandc, true, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}

/*
 * write_reg_dma:       prepares a descriptor to write a given number of
 *                      contiguous registers
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to write
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
                         int num_regs, unsigned int flags)
{
        bool flow_control = false;
        struct nandc_regs *regs = nandc->regs;
        void *vaddr;

        vaddr = offset_to_nandc_reg(regs, first);

        if (first == NAND_ERASED_CW_DETECT_CFG) {
                if (flags & NAND_ERASED_CW_SET)
                        vaddr = &regs->erased_cw_detect_cfg_set;
                else
                        vaddr = &regs->erased_cw_detect_cfg_clr;
        }

        if (first == NAND_EXEC_CMD)
                flags |= NAND_BAM_NWD;

        if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

        if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_FLASH_CMD)
                flow_control = true;

        return prep_adm_dma_desc(nandc, false, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}

/*
 * read_data_dma:       prepares a DMA descriptor to transfer data from the
 *                      controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to write to
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                         const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:      prepares a DMA descriptor to transfer data from
 *                      'vaddr' to the controller's internal buffer
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to read from
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                          const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct nand_chip *chip)
{
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        if (!nandc->props->qpic_v2)
                write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
                      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in a NAND page.
 */
static void
config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
{
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        struct nand_ecc_ctrl *ecc = &chip->ecc;

        int reg = NAND_READ_LOCATION_0;

        if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
                reg = NAND_READ_LOCATION_LAST_CW_0;

        if (nandc->props->is_bam)
                write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        if (use_ecc) {
                read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
                read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
                             NAND_BAM_NEXT_SGL);
        } else {
                read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
        }
}

/*
 * Helper to prepare dma descriptors to configure registers needed for reading a
 * single codeword in a page
 */
static void
config_nand_single_cw_page_read(struct nand_chip *chip,
                                bool use_ecc, int cw)
{
        config_nand_page_read(chip);
        config_nand_cw_read(chip, use_ecc, cw);
}
/*
 * Helper to prepare DMA descriptors used to configure registers
 * before writing a NAND page.
 */
static void config_nand_page_write(struct nand_chip *chip)
{
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        if (!nandc->props->qpic_v2)
                write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
                              NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in a NAND page.
 */
static void config_nand_cw_write(struct nand_chip *chip)
{
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
        write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc;
        dma_cookie_t cookie = 0;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        int r;

        if (nandc->props->is_bam) {
                if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
                        if (r)
                                return r;
                }

                if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                   DMA_PREP_INTERRUPT);
                        if (r)
                                return r;
                }

                if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                   DMA_PREP_CMD);
                        if (r)
                                return r;
                }
        }

        list_for_each_entry(desc, &nandc->desc_list, node)
                cookie = dmaengine_submit(desc->dma_desc);

        if (nandc->props->is_bam) {
                bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
                bam_txn->last_cmd_desc->callback_param = bam_txn;
                if (bam_txn->last_data_desc) {
                        bam_txn->last_data_desc->callback = qpic_bam_dma_done;
                        bam_txn->last_data_desc->callback_param = bam_txn;
                        bam_txn->wait_second_completion = true;
                }

                dma_async_issue_pending(nandc->tx_chan);
                dma_async_issue_pending(nandc->rx_chan);
                dma_async_issue_pending(nandc->cmd_chan);

                if (!wait_for_completion_timeout(&bam_txn->txn_done,
                                                 QPIC_NAND_COMPLETION_TIMEOUT))
                        return -ETIMEDOUT;
        } else {
                if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
                        return -ETIMEDOUT;
        }

        return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc, *n;

        list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
                list_del(&desc->node);

                if (nandc->props->is_bam)
                        dma_unmap_sg(nandc->dev, desc->bam_sgl,
                                     desc->sgl_cnt, desc->dir);
                else
                        dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
                                     desc->dir);

                kfree(desc);
        }
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
        nandc->reg_read_pos = 0;
        nandc_read_buffer_sync(nandc, false);
}

/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it reads
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
        u8 empty1, empty2;

        /*
         * an erased page flags an error in NAND_FLASH_STATUS, check if the page
         * is erased by looking for 0x54s at offsets 3 and 175 from the
         * beginning of each codeword
         */

        empty1 = data_buf[3];
        empty2 = data_buf[175];

        /*
         * if the erased codeword markers exist, override them with
         * 0xffs
         */
        if ((empty1 == 0x54 && empty2 == 0xff) ||
            (empty1 == 0xff && empty2 == 0x54)) {
                data_buf[3] = 0xff;
                data_buf[175] = 0xff;
        }

        /*
         * check if the entire chunk contains 0xffs or not. if it doesn't, then
         * restore the original values at the special offsets
         */
        if (memchr_inv(data_buf, 0xff, data_len)) {
                data_buf[3] = empty1;
                data_buf[175] = empty2;

                return false;
        }

        return true;
}
1435
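/*
 * per-codeword status snapshot, in register read-back order: for each
 * codeword the controller DMAs NAND_FLASH_STATUS, NAND_BUFFER_STATUS and
 * NAND_ERASED_CW_DETECT_STATUS into reg_read_buf, and parse_read_errors()
 * walks that buffer through this view
 */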
1436 struct read_stats {
1437         __le32 flash;
1438         __le32 buffer;
1439         __le32 erased_cw;
1440 };
1441
/* reads back the per-codeword FLASH_STATUS values set by the controller */
1443 static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
1444 {
1445         struct nand_chip *chip = &host->chip;
1446         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1447         int i;
1448
1449         nandc_read_buffer_sync(nandc, true);
1450
1451         for (i = 0; i < cw_cnt; i++) {
1452                 u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
1453
1454                 if (flash & (FS_OP_ERR | FS_MPU_ERR))
1455                         return -EIO;
1456         }
1457
1458         return 0;
1459 }
1460
1461 /* performs raw read for one codeword */
1462 static int
1463 qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
1464                        u8 *data_buf, u8 *oob_buf, int page, int cw)
1465 {
1466         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1467         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1468         struct nand_ecc_ctrl *ecc = &chip->ecc;
1469         int data_size1, data_size2, oob_size1, oob_size2;
1470         int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
1471         int raw_cw = cw;
1472
1473         nand_read_page_op(chip, page, 0, NULL, 0);
1474         host->use_ecc = false;
1475
1476         if (nandc->props->qpic_v2)
1477                 raw_cw = ecc->steps - 1;
1478
1479         clear_bam_transaction(nandc);
1480         set_address(host, host->cw_size * cw, page);
1481         update_rw_regs(host, 1, true, raw_cw);
1482         config_nand_page_read(chip);
1483
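        /*
         * split each codeword around the BBM byte(s): e.g. for a 2K page
         * with 4-bit BCH ECC on an 8-bit bus (cw_size 528, cw_data 516,
         * 4 codewords), data_size1 = 2048 - 3 * 528 = 464, and for the last
         * codeword data_size2 = 512 - 464 - 12 = 36 and
         * oob_size2 = 16 + 7 + 4 = 27, so 464 + 1 + 36 + 27 = 528 = cw_size
         */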
1484         data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1485         oob_size1 = host->bbm_size;
1486
1487         if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
1488                 data_size2 = ecc->size - data_size1 -
1489                              ((ecc->steps - 1) * 4);
1490                 oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
1491                             host->spare_bytes;
1492         } else {
1493                 data_size2 = host->cw_data - data_size1;
1494                 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1495         }
1496
1497         if (nandc->props->is_bam) {
1498                 nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
1499                 read_loc += data_size1;
1500
1501                 nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0);
1502                 read_loc += oob_size1;
1503
1504                 nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0);
1505                 read_loc += data_size2;
1506
1507                 nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
1508         }
1509
1510         config_nand_cw_read(chip, false, raw_cw);
1511
1512         read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
1513         reg_off += data_size1;
1514
1515         read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
1516         reg_off += oob_size1;
1517
1518         read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
1519         reg_off += data_size2;
1520
1521         read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
1522
1523         ret = submit_descs(nandc);
1524         free_descs(nandc);
1525         if (ret) {
1526                 dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
1527                 return ret;
1528         }
1529
1530         return check_flash_errors(host, 1);
1531 }
1532
/*
 * Bitflips can happen in erased codewords as well, so this function counts
 * the number of 0 bits in each CW for which the ECC engine returned an
 * uncorrectable error. The page is assumed to be erased if this count is
 * less than or equal to ecc->strength for every CW.
 *
 * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
 *    top-level API can be called with only a data buf or only an OOB buf,
 *    so use chip->data_buf if the data buf is null and chip->oob_poi if
 *    the oob buf is null for copying the raw bytes.
 * 2. Perform a raw read for all the CWs which have uncorrectable errors.
 * 3. For each CW, check the number of 0 bits in cw_data and the usable
 *    OOB bytes. Bitflips in the BBM and spare bytes don't affect the ECC,
 *    so don't count bitflips in those areas.
 */
1548 static int
1549 check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
1550                       u8 *oob_buf, unsigned long uncorrectable_cws,
1551                       int page, unsigned int max_bitflips)
1552 {
1553         struct nand_chip *chip = &host->chip;
1554         struct mtd_info *mtd = nand_to_mtd(chip);
1555         struct nand_ecc_ctrl *ecc = &chip->ecc;
1556         u8 *cw_data_buf, *cw_oob_buf;
1557         int cw, data_size, oob_size, ret = 0;
1558
1559         if (!data_buf)
1560                 data_buf = nand_get_data_buf(chip);
1561
1562         if (!oob_buf) {
1563                 nand_get_data_buf(chip);
1564                 oob_buf = chip->oob_poi;
1565         }
1566
1567         for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
1568                 if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
1569                         data_size = ecc->size - ((ecc->steps - 1) * 4);
1570                         oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
1571                 } else {
1572                         data_size = host->cw_data;
1573                         oob_size = host->ecc_bytes_hw;
1574                 }
1575
1576                 /* determine starting buffer address for current CW */
1577                 cw_data_buf = data_buf + (cw * host->cw_data);
1578                 cw_oob_buf = oob_buf + (cw * ecc->bytes);
1579
1580                 ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
1581                                              cw_oob_buf, page, cw);
1582                 if (ret)
1583                         return ret;
1584
1585                 /*
1586                  * make sure it isn't an erased page reported
1587                  * as not-erased by HW because of a few bitflips
1588                  */
1589                 ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
1590                                                   cw_oob_buf + host->bbm_size,
1591                                                   oob_size, NULL,
1592                                                   0, ecc->strength);
1593                 if (ret < 0) {
1594                         mtd->ecc_stats.failed++;
1595                 } else {
1596                         mtd->ecc_stats.corrected += ret;
1597                         max_bitflips = max_t(unsigned int, max_bitflips, ret);
1598                 }
1599         }
1600
1601         return max_bitflips;
1602 }
1603
1604 /*
1605  * reads back status registers set by the controller to notify page read
1606  * errors. this is equivalent to what 'ecc->correct()' would do.
1607  */
1608 static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1609                              u8 *oob_buf, int page)
1610 {
1611         struct nand_chip *chip = &host->chip;
1612         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1613         struct mtd_info *mtd = nand_to_mtd(chip);
1614         struct nand_ecc_ctrl *ecc = &chip->ecc;
1615         unsigned int max_bitflips = 0, uncorrectable_cws = 0;
1616         struct read_stats *buf;
1617         bool flash_op_err = false, erased;
1618         int i;
1619         u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1620
1621         buf = (struct read_stats *)nandc->reg_read_buf;
1622         nandc_read_buffer_sync(nandc, true);
1623
1624         for (i = 0; i < ecc->steps; i++, buf++) {
1625                 u32 flash, buffer, erased_cw;
1626                 int data_len, oob_len;
1627
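                /*
                 * e.g. with 4 codewords per page, the last CW carries
                 * data_len = 512 - 12 = 500 data bytes plus oob_len = 16
                 * usable oob bytes; the other CWs carry host->cw_data bytes
                 */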
1628                 if (qcom_nandc_is_last_cw(ecc, i)) {
1629                         data_len = ecc->size - ((ecc->steps - 1) << 2);
1630                         oob_len = ecc->steps << 2;
1631                 } else {
1632                         data_len = host->cw_data;
1633                         oob_len = 0;
1634                 }
1635
1636                 flash = le32_to_cpu(buf->flash);
1637                 buffer = le32_to_cpu(buf->buffer);
1638                 erased_cw = le32_to_cpu(buf->erased_cw);
1639
1640                 /*
1641                  * Check ECC failure for each codeword. ECC failure can
1642                  * happen in either of the following conditions
1643                  * 1. If number of bitflips are greater than ECC engine
1644                  *    capability.
1645                  * 2. If this codeword contains all 0xff for which erased
1646                  *    codeword detection check will be done.
1647                  */
1648                 if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
1649                         /*
1650                          * For BCH ECC, ignore erased codeword errors, if
1651                          * ERASED_CW bits are set.
1652                          */
1653                         if (host->bch_enabled) {
1654                                 erased = (erased_cw & ERASED_CW) == ERASED_CW;
1655                         /*
1656                          * For RS ECC, HW reports the erased CW by placing
1657                          * special characters at certain offsets in the buffer.
1658                          * These special characters will be valid only if
1659                          * complete page is read i.e. data_buf is not NULL.
1660                          */
1661                         } else if (data_buf) {
1662                                 erased = erased_chunk_check_and_fixup(data_buf,
1663                                                                       data_len);
1664                         } else {
1665                                 erased = false;
1666                         }
1667
1668                         if (!erased)
1669                                 uncorrectable_cws |= BIT(i);
1670                 /*
1671                  * Check if MPU or any other operational error (timeout,
1672                  * device failure, etc.) happened for this codeword and
1673                  * make flash_op_err true. If flash_op_err is set, then
1674                  * EIO will be returned for page read.
1675                  */
1676                 } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1677                         flash_op_err = true;
1678                 /*
1679                  * No ECC or operational errors happened. Check the number of
1680                  * bits corrected and update the ecc_stats.corrected.
1681                  */
1682                 } else {
1683                         unsigned int stat;
1684
1685                         stat = buffer & BS_CORRECTABLE_ERR_MSK;
1686                         mtd->ecc_stats.corrected += stat;
1687                         max_bitflips = max(max_bitflips, stat);
1688                 }
1689
1690                 if (data_buf)
1691                         data_buf += data_len;
1692                 if (oob_buf)
1693                         oob_buf += oob_len + ecc->bytes;
1694         }
1695
1696         if (flash_op_err)
1697                 return -EIO;
1698
1699         if (!uncorrectable_cws)
1700                 return max_bitflips;
1701
1702         return check_for_erased_page(host, data_buf_start, oob_buf_start,
1703                                      uncorrectable_cws, page,
1704                                      max_bitflips);
1705 }
1706
1707 /*
1708  * helper to perform the actual page read operation, used by ecc->read_page(),
1709  * ecc->read_oob()
1710  */
1711 static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1712                          u8 *oob_buf, int page)
1713 {
1714         struct nand_chip *chip = &host->chip;
1715         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1716         struct nand_ecc_ctrl *ecc = &chip->ecc;
1717         u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1718         int i, ret;
1719
1720         config_nand_page_read(chip);
1721
1722         /* queue cmd descs for each codeword */
1723         for (i = 0; i < ecc->steps; i++) {
1724                 int data_size, oob_size;
1725
1726                 if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
1727                         data_size = ecc->size - ((ecc->steps - 1) << 2);
1728                         oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1729                                    host->spare_bytes;
1730                 } else {
1731                         data_size = host->cw_data;
1732                         oob_size = host->ecc_bytes_hw + host->spare_bytes;
1733                 }
1734
1735                 if (nandc->props->is_bam) {
1736                         if (data_buf && oob_buf) {
1737                                 nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
1738                                 nandc_set_read_loc(chip, i, 1, data_size,
1739                                                    oob_size, 1);
1740                         } else if (data_buf) {
1741                                 nandc_set_read_loc(chip, i, 0, 0, data_size, 1);
1742                         } else {
1743                                 nandc_set_read_loc(chip, i, 0, data_size,
1744                                                    oob_size, 1);
1745                         }
1746                 }
1747
1748                 config_nand_cw_read(chip, true, i);
1749
1750                 if (data_buf)
1751                         read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
1752                                       data_size, 0);
1753
1754                 /*
1755                  * when ecc is enabled, the controller doesn't read the real
1756                  * or dummy bad block markers in each chunk. To maintain a
1757                  * consistent layout across RAW and ECC reads, we just
1758                  * leave the real/dummy BBM offsets empty (i.e, filled with
                 * leave the real/dummy BBM offsets empty (i.e., filled with
1760                  */
1761                 if (oob_buf) {
1762                         int j;
1763
1764                         for (j = 0; j < host->bbm_size; j++)
1765                                 *oob_buf++ = 0xff;
1766
1767                         read_data_dma(nandc, FLASH_BUF_ACC + data_size,
1768                                       oob_buf, oob_size, 0);
1769                 }
1770
1771                 if (data_buf)
1772                         data_buf += data_size;
1773                 if (oob_buf)
1774                         oob_buf += oob_size;
1775         }
1776
1777         ret = submit_descs(nandc);
1778         free_descs(nandc);
1779
1780         if (ret) {
1781                 dev_err(nandc->dev, "failure to read page/oob\n");
1782                 return ret;
1783         }
1784
1785         return parse_read_errors(host, data_buf_start, oob_buf_start, page);
1786 }
1787
1788 /*
1789  * a helper that copies the last step/codeword of a page (containing free oob)
1790  * into our local buffer
1791  */
1792 static int copy_last_cw(struct qcom_nand_host *host, int page)
1793 {
1794         struct nand_chip *chip = &host->chip;
1795         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1796         struct nand_ecc_ctrl *ecc = &chip->ecc;
1797         int size;
1798         int ret;
1799
1800         clear_read_regs(nandc);
1801
1802         size = host->use_ecc ? host->cw_data : host->cw_size;
1803
1804         /* prepare a clean read buffer */
1805         memset(nandc->data_buffer, 0xff, size);
1806
1807         set_address(host, host->cw_size * (ecc->steps - 1), page);
1808         update_rw_regs(host, 1, true, ecc->steps - 1);
1809
1810         config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
1811
1812         read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
1813
1814         ret = submit_descs(nandc);
1815         if (ret)
1816                 dev_err(nandc->dev, "failed to copy last codeword\n");
1817
1818         free_descs(nandc);
1819
1820         return ret;
1821 }
1822
1823 static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
1824 {
1825         struct qcom_nand_boot_partition *boot_partition;
1826         u32 start, end;
1827         int i;
1828
1829         /*
         * Since the most frequent accesses will be to non-boot partitions like rootfs,
1831          * optimize the page check by:
1832          *
1833          * 1. Checking if the page lies after the last boot partition.
1834          * 2. Checking from the boot partition end.
1835          */
1836
1837         /* First check the last boot partition */
1838         boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
1839         start = boot_partition->page_offset;
1840         end = start + boot_partition->page_size;
1841
1842         /* Page is after the last boot partition end. This is NOT a boot partition */
1843         if (page > end)
1844                 return false;
1845
1846         /* Actually check if it's a boot partition */
1847         if (page < end && page >= start)
1848                 return true;
1849
1850         /* Check the other boot partitions starting from the second-last partition */
1851         for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
1852                 boot_partition = &host->boot_partitions[i];
1853                 start = boot_partition->page_offset;
1854                 end = start + boot_partition->page_size;
1855
1856                 if (page < end && page >= start)
1857                         return true;
1858         }
1859
1860         return false;
1861 }
1862
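/*
 * boot partition pages are laid out with 512 bytes of user data per codeword
 * (the spare bytes are not ECC protected), while the rest of the flash uses
 * the regular 516-byte layout. reprogram the user-data/spare sizes in CFG0
 * and the ECC engine configuration whenever an access crosses between the
 * two regions.
 */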
1863 static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
1864 {
1865         bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
1866
1867         /* Skip conf write if we are already in the correct mode */
1868         if (codeword_fixup == host->codeword_fixup)
1869                 return;
1870
1871         host->codeword_fixup = codeword_fixup;
1872
1873         host->cw_data = codeword_fixup ? 512 : 516;
1874         host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
1875                             host->bbm_size - host->cw_data;
1876
1877         host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
1878         host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
1879                       host->cw_data << UD_SIZE_BYTES;
1880
1881         host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
1882         host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
1883         host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
1884 }
1885
1886 /* implements ecc->read_page() */
1887 static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
1888                                 int oob_required, int page)
1889 {
1890         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1891         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1892         struct nand_ecc_ctrl *ecc = &chip->ecc;
1893         u8 *data_buf, *oob_buf = NULL;
1894
1895         if (host->nr_boot_partitions)
1896                 qcom_nandc_codeword_fixup(host, page);
1897
1898         nand_read_page_op(chip, page, 0, NULL, 0);
1899         nandc->buf_count = 0;
1900         nandc->buf_start = 0;
1901         host->use_ecc = true;
1902         clear_read_regs(nandc);
1903         set_address(host, 0, page);
1904         update_rw_regs(host, ecc->steps, true, 0);
1905
1906         data_buf = buf;
1907         oob_buf = oob_required ? chip->oob_poi : NULL;
1908
1909         clear_bam_transaction(nandc);
1910
1911         return read_page_ecc(host, data_buf, oob_buf, page);
1912 }
1913
1914 /* implements ecc->read_page_raw() */
1915 static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1916                                     int oob_required, int page)
1917 {
1918         struct mtd_info *mtd = nand_to_mtd(chip);
1919         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1920         struct nand_ecc_ctrl *ecc = &chip->ecc;
1921         int cw, ret;
1922         u8 *data_buf = buf, *oob_buf = chip->oob_poi;
1923
1924         if (host->nr_boot_partitions)
1925                 qcom_nandc_codeword_fixup(host, page);
1926
1927         for (cw = 0; cw < ecc->steps; cw++) {
1928                 ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
1929                                              page, cw);
1930                 if (ret)
1931                         return ret;
1932
1933                 data_buf += host->cw_data;
1934                 oob_buf += ecc->bytes;
1935         }
1936
1937         return 0;
1938 }
1939
1940 /* implements ecc->read_oob() */
1941 static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
1942 {
1943         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1944         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1945         struct nand_ecc_ctrl *ecc = &chip->ecc;
1946
1947         if (host->nr_boot_partitions)
1948                 qcom_nandc_codeword_fixup(host, page);
1949
1950         clear_read_regs(nandc);
1951         clear_bam_transaction(nandc);
1952
1953         host->use_ecc = true;
1954         set_address(host, 0, page);
1955         update_rw_regs(host, ecc->steps, true, 0);
1956
1957         return read_page_ecc(host, NULL, chip->oob_poi, page);
1958 }
1959
1960 /* implements ecc->write_page() */
1961 static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
1962                                  int oob_required, int page)
1963 {
1964         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1965         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1966         struct nand_ecc_ctrl *ecc = &chip->ecc;
1967         u8 *data_buf, *oob_buf;
1968         int i, ret;
1969
1970         if (host->nr_boot_partitions)
1971                 qcom_nandc_codeword_fixup(host, page);
1972
1973         nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1974
1975         set_address(host, 0, page);
1976         nandc->buf_count = 0;
1977         nandc->buf_start = 0;
1978         clear_read_regs(nandc);
1979         clear_bam_transaction(nandc);
1980
1981         data_buf = (u8 *)buf;
1982         oob_buf = chip->oob_poi;
1983
1984         host->use_ecc = true;
1985         update_rw_regs(host, ecc->steps, false, 0);
1986         config_nand_page_write(chip);
1987
1988         for (i = 0; i < ecc->steps; i++) {
1989                 int data_size, oob_size;
1990
1991                 if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
1992                         data_size = ecc->size - ((ecc->steps - 1) << 2);
1993                         oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1994                                    host->spare_bytes;
1995                 } else {
1996                         data_size = host->cw_data;
1997                         oob_size = ecc->bytes;
1998                 }
1999
2001                 write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
2002                                i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
2003
2004                 /*
2005                  * when ECC is enabled, we don't really need to write anything
2006                  * to oob for the first n - 1 codewords since these oob regions
                 * just contain ECC bytes that are written by the controller
2008                  * itself. For the last codeword, we skip the bbm positions and
2009                  * write to the free oob area.
2010                  */
2011                 if (qcom_nandc_is_last_cw(ecc, i)) {
2012                         oob_buf += host->bbm_size;
2013
2014                         write_data_dma(nandc, FLASH_BUF_ACC + data_size,
2015                                        oob_buf, oob_size, 0);
2016                 }
2017
2018                 config_nand_cw_write(chip);
2019
2020                 data_buf += data_size;
2021                 oob_buf += oob_size;
2022         }
2023
2024         ret = submit_descs(nandc);
2025         if (ret)
2026                 dev_err(nandc->dev, "failure to write page\n");
2027
2028         free_descs(nandc);
2029
2030         if (!ret)
2031                 ret = nand_prog_page_end_op(chip);
2032
2033         return ret;
2034 }
2035
2036 /* implements ecc->write_page_raw() */
2037 static int qcom_nandc_write_page_raw(struct nand_chip *chip,
2038                                      const uint8_t *buf, int oob_required,
2039                                      int page)
2040 {
2041         struct mtd_info *mtd = nand_to_mtd(chip);
2042         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2043         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2044         struct nand_ecc_ctrl *ecc = &chip->ecc;
2045         u8 *data_buf, *oob_buf;
2046         int i, ret;
2047
2048         if (host->nr_boot_partitions)
2049                 qcom_nandc_codeword_fixup(host, page);
2050
2051         nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2052         clear_read_regs(nandc);
2053         clear_bam_transaction(nandc);
2054
2055         data_buf = (u8 *)buf;
2056         oob_buf = chip->oob_poi;
2057
2058         host->use_ecc = false;
2059         update_rw_regs(host, ecc->steps, false, 0);
2060         config_nand_page_write(chip);
2061
2062         for (i = 0; i < ecc->steps; i++) {
2063                 int data_size1, data_size2, oob_size1, oob_size2;
2064                 int reg_off = FLASH_BUF_ACC;
2065
2066                 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
2067                 oob_size1 = host->bbm_size;
2068
2069                 if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
2070                         data_size2 = ecc->size - data_size1 -
2071                                      ((ecc->steps - 1) << 2);
2072                         oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
2073                                     host->spare_bytes;
2074                 } else {
2075                         data_size2 = host->cw_data - data_size1;
2076                         oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
2077                 }
2078
2079                 write_data_dma(nandc, reg_off, data_buf, data_size1,
2080                                NAND_BAM_NO_EOT);
2081                 reg_off += data_size1;
2082                 data_buf += data_size1;
2083
2084                 write_data_dma(nandc, reg_off, oob_buf, oob_size1,
2085                                NAND_BAM_NO_EOT);
2086                 reg_off += oob_size1;
2087                 oob_buf += oob_size1;
2088
2089                 write_data_dma(nandc, reg_off, data_buf, data_size2,
2090                                NAND_BAM_NO_EOT);
2091                 reg_off += data_size2;
2092                 data_buf += data_size2;
2093
2094                 write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
2095                 oob_buf += oob_size2;
2096
2097                 config_nand_cw_write(chip);
2098         }
2099
2100         ret = submit_descs(nandc);
2101         if (ret)
2102                 dev_err(nandc->dev, "failure to write raw page\n");
2103
2104         free_descs(nandc);
2105
2106         if (!ret)
2107                 ret = nand_prog_page_end_op(chip);
2108
2109         return ret;
2110 }
2111
2112 /*
2113  * implements ecc->write_oob()
2114  *
2115  * the NAND controller cannot write only data or only OOB within a codeword
2116  * since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xFF before writing.
2118  */
2119 static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
2120 {
2121         struct mtd_info *mtd = nand_to_mtd(chip);
2122         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2123         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2124         struct nand_ecc_ctrl *ecc = &chip->ecc;
2125         u8 *oob = chip->oob_poi;
2126         int data_size, oob_size;
2127         int ret;
2128
2129         if (host->nr_boot_partitions)
2130                 qcom_nandc_codeword_fixup(host, page);
2131
2132         host->use_ecc = true;
2133         clear_bam_transaction(nandc);
2134
2135         /* calculate the data and oob size for the last codeword/step */
2136         data_size = ecc->size - ((ecc->steps - 1) << 2);
2137         oob_size = mtd->oobavail;
2138
2139         memset(nandc->data_buffer, 0xff, host->cw_data);
        /* write the new oob content into the last codeword */
2141         mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2142                                     0, mtd->oobavail);
2143
2144         set_address(host, host->cw_size * (ecc->steps - 1), page);
2145         update_rw_regs(host, 1, false, 0);
2146
2147         config_nand_page_write(chip);
2148         write_data_dma(nandc, FLASH_BUF_ACC,
2149                        nandc->data_buffer, data_size + oob_size, 0);
2150         config_nand_cw_write(chip);
2151
2152         ret = submit_descs(nandc);
2153
2154         free_descs(nandc);
2155
2156         if (ret) {
2157                 dev_err(nandc->dev, "failure to write oob\n");
2158                 return -EIO;
2159         }
2160
2161         return nand_prog_page_end_op(chip);
2162 }
2163
2164 static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
2165 {
2166         struct mtd_info *mtd = nand_to_mtd(chip);
2167         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2168         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2169         struct nand_ecc_ctrl *ecc = &chip->ecc;
2170         int page, ret, bbpos, bad = 0;
2171
2172         page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2173
        /*
         * configure registers for a raw sub page read; the address is set to
         * the beginning of the last codeword. we don't care about reading the
         * ecc portion of the oob, we just want the first few bytes of this
         * codeword that contain the BBM
         */
2180         host->use_ecc = false;
2181
2182         clear_bam_transaction(nandc);
2183         ret = copy_last_cw(host, page);
2184         if (ret)
2185                 goto err;
2186
2187         if (check_flash_errors(host, 1)) {
2188                 dev_warn(nandc->dev, "error when trying to read BBM\n");
2189                 goto err;
2190         }
2191
2192         bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2193
2194         bad = nandc->data_buffer[bbpos] != 0xff;
2195
2196         if (chip->options & NAND_BUSWIDTH_16)
2197                 bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2198 err:
2199         return bad;
2200 }
2201
2202 static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
2203 {
2204         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2205         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2206         struct nand_ecc_ctrl *ecc = &chip->ecc;
2207         int page, ret;
2208
2209         clear_read_regs(nandc);
2210         clear_bam_transaction(nandc);
2211
2212         /*
         * to mark the block as bad, we flash the entire last codeword with 0s.
2214          * we don't care about the rest of the content in the codeword since
2215          * we aren't going to use this block again
2216          */
2217         memset(nandc->data_buffer, 0x00, host->cw_size);
2218
2219         page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2220
2221         /* prepare write */
2222         host->use_ecc = false;
2223         set_address(host, host->cw_size * (ecc->steps - 1), page);
2224         update_rw_regs(host, 1, false, ecc->steps - 1);
2225
2226         config_nand_page_write(chip);
2227         write_data_dma(nandc, FLASH_BUF_ACC,
2228                        nandc->data_buffer, host->cw_size, 0);
2229         config_nand_cw_write(chip);
2230
2231         ret = submit_descs(nandc);
2232
2233         free_descs(nandc);
2234
2235         if (ret) {
2236                 dev_err(nandc->dev, "failure to update BBM\n");
2237                 return -EIO;
2238         }
2239
2240         return nand_prog_page_end_op(chip);
2241 }
2242
2243 /*
2244  * NAND controller page layout info
2245  *
2246  * Layout with ECC enabled:
2247  *
2248  * |----------------------|  |---------------------------------|
2249  * |           xx.......yy|  |             *********xx.......yy|
2250  * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
2251  * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
2252  * |           xx.......yy|  |             *********xx.......yy|
2253  * |----------------------|  |---------------------------------|
2254  *     codeword 1,2..n-1                  codeword n
2255  *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
2256  *
2257  * n = Number of codewords in the page
2258  * . = ECC bytes
2259  * * = Spare/free bytes
2260  * x = Unused byte(s)
2261  * y = Reserved byte(s)
2262  *
2263  * 2K page: n = 4, spare = 16 bytes
2264  * 4K page: n = 8, spare = 32 bytes
2265  * 8K page: n = 16, spare = 64 bytes
2266  *
2267  * the qcom nand controller operates at a sub page/codeword level. each
2268  * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
 * the number of ECC bytes varies based on the ECC strength and the bus width.
2270  *
 * the first n - 1 codewords contain 516 bytes of user data; the remaining
2272  * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2273  * both user data and spare(oobavail) bytes that sum up to 516 bytes.
2274  *
 * When we access a page with ECC enabled, the reserved byte(s) are not
2276  * accessible at all. When reading, we fill up these unreadable positions
2277  * with 0xffs. When writing, the controller skips writing the inaccessible
2278  * bytes.
2279  *
2280  * Layout with ECC disabled:
2281  *
2282  * |------------------------------|  |---------------------------------------|
2283  * |         yy          xx.......|  |         bb          *********xx.......|
2284  * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
2285  * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
2286  * |         yy          xx.......|  |         bb          *********xx.......|
2287  * |------------------------------|  |---------------------------------------|
2288  *         codeword 1,2..n-1                        codeword n
2289  *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
2290  *
2291  * n = Number of codewords in the page
2292  * . = ECC bytes
2293  * * = Spare/free bytes
2294  * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
2296  * b = Real Bad Block byte(s)
2297  * size1/size2 = function of codeword size and 'n'
2298  *
2299  * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2300  * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
 * Block Markers. In the last codeword, this position contains the real BBM.
2302  *
2303  * In order to have a consistent layout between RAW and ECC modes, we assume
2304  * the following OOB layout arrangement:
2305  *
2306  * |-----------|  |--------------------|
2307  * |yyxx.......|  |bb*********xx.......|
2308  * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
2309  * |yyxx.......|  |bb*********xx.......|
2310  * |yyxx.......|  |bb*********xx.......|
2311  * |-----------|  |--------------------|
2312  *  first n - 1       nth OOB region
2313  *  OOB regions
2314  *
2315  * n = Number of codewords in the page
2316  * . = ECC bytes
2317  * * = FREE OOB bytes
2318  * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2319  * x = Unused byte(s)
2320  * b = Real bad block byte(s) (inaccessible when ECC enabled)
2321  *
2322  * This layout is read as is when ECC is disabled. When ECC is enabled, the
2323  * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2324  * and assumed as 0xffs when we read a page/oob. The ECC, unused and
2325  * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2326  * the sum of the three).
2327  */
2328 static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2329                                    struct mtd_oob_region *oobregion)
2330 {
2331         struct nand_chip *chip = mtd_to_nand(mtd);
2332         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2333         struct nand_ecc_ctrl *ecc = &chip->ecc;
2334
2335         if (section > 1)
2336                 return -ERANGE;
2337
2338         if (!section) {
2339                 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2340                                     host->bbm_size;
2341                 oobregion->offset = 0;
2342         } else {
2343                 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2344                 oobregion->offset = mtd->oobsize - oobregion->length;
2345         }
2346
2347         return 0;
2348 }
2349
2350 static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2351                                      struct mtd_oob_region *oobregion)
2352 {
2353         struct nand_chip *chip = mtd_to_nand(mtd);
2354         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2355         struct nand_ecc_ctrl *ecc = &chip->ecc;
2356
2357         if (section)
2358                 return -ERANGE;
2359
2360         oobregion->length = ecc->steps * 4;
2361         oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2362
2363         return 0;
2364 }
2365
2366 static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2367         .ecc = qcom_nand_ooblayout_ecc,
2368         .free = qcom_nand_ooblayout_free,
2369 };
2370
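/*
 * total OOB bytes consumed per 512-byte step: 12 for 4-bit ECC and 16 for
 * 8-bit ECC (parity + spare + bad block marker, matching the ecc->bytes
 * computed in qcom_nand_attach_chip())
 */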
2371 static int
2372 qcom_nandc_calc_ecc_bytes(int step_size, int strength)
2373 {
2374         return strength == 4 ? 12 : 16;
2375 }
2376 NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
2377                      NANDC_STEP_SIZE, 4, 8);
2378
2379 static int qcom_nand_attach_chip(struct nand_chip *chip)
2380 {
2381         struct mtd_info *mtd = nand_to_mtd(chip);
2382         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2383         struct nand_ecc_ctrl *ecc = &chip->ecc;
2384         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2385         int cwperpage, bad_block_byte, ret;
2386         bool wide_bus;
2387         int ecc_mode = 1;
2388
2389         /* controller only supports 512 bytes data steps */
2390         ecc->size = NANDC_STEP_SIZE;
2391         wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2392         cwperpage = mtd->writesize / NANDC_STEP_SIZE;
2393
2394         /*
         * Each CW has 4 available OOB bytes which are protected with ECC, so
         * the remaining OOB bytes can be used for ECC parity.
2397          */
2398         ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
2399                                    mtd->oobsize - (cwperpage * 4));
2400         if (ret) {
2401                 dev_err(nandc->dev, "No valid ECC settings possible\n");
2402                 return ret;
2403         }
2404
2405         if (ecc->strength >= 8) {
2406                 /* 8 bit ECC defaults to BCH ECC on all platforms */
2407                 host->bch_enabled = true;
2408                 ecc_mode = 1;
2409
2410                 if (wide_bus) {
2411                         host->ecc_bytes_hw = 14;
2412                         host->spare_bytes = 0;
2413                         host->bbm_size = 2;
2414                 } else {
2415                         host->ecc_bytes_hw = 13;
2416                         host->spare_bytes = 2;
2417                         host->bbm_size = 1;
2418                 }
2419         } else {
2420                 /*
2421                  * if the controller supports BCH for 4 bit ECC, the controller
                 * uses fewer bytes for ECC. If RS is used, the ECC bytes are
2423                  * always 10 bytes
2424                  */
2425                 if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
2426                         /* BCH */
2427                         host->bch_enabled = true;
2428                         ecc_mode = 0;
2429
2430                         if (wide_bus) {
2431                                 host->ecc_bytes_hw = 8;
2432                                 host->spare_bytes = 2;
2433                                 host->bbm_size = 2;
2434                         } else {
2435                                 host->ecc_bytes_hw = 7;
2436                                 host->spare_bytes = 4;
2437                                 host->bbm_size = 1;
2438                         }
2439                 } else {
2440                         /* RS */
2441                         host->ecc_bytes_hw = 10;
2442
2443                         if (wide_bus) {
2444                                 host->spare_bytes = 0;
2445                                 host->bbm_size = 2;
2446                         } else {
2447                                 host->spare_bytes = 1;
2448                                 host->bbm_size = 1;
2449                         }
2450                 }
2451         }
2452
2453         /*
2454          * we consider ecc->bytes as the sum of all the non-data content in a
2455          * step. It gives us a clean representation of the oob area (even if
         * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2457          * ECC and 12 bytes for 4 bit ECC
2458          */
2459         ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
2460
2461         ecc->read_page          = qcom_nandc_read_page;
2462         ecc->read_page_raw      = qcom_nandc_read_page_raw;
2463         ecc->read_oob           = qcom_nandc_read_oob;
2464         ecc->write_page         = qcom_nandc_write_page;
2465         ecc->write_page_raw     = qcom_nandc_write_page_raw;
2466         ecc->write_oob          = qcom_nandc_write_oob;
2467
2468         ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2469
2470         mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
2471         /* Free the initially allocated BAM transaction for reading the ONFI params */
2472         if (nandc->props->is_bam)
2473                 free_bam_transaction(nandc);
2474
2475         nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2476                                      cwperpage);
2477
2478         /* Now allocate the BAM transaction based on updated max_cwperpage */
2479         if (nandc->props->is_bam) {
2480                 nandc->bam_txn = alloc_bam_transaction(nandc);
2481                 if (!nandc->bam_txn) {
2482                         dev_err(nandc->dev,
2483                                 "failed to allocate bam transaction\n");
2484                         return -ENOMEM;
2485                 }
2486         }
2487
2488         /*
2489          * DATA_UD_BYTES varies based on whether the read/write command protects
2490          * spare data with ECC too. We protect spare data by default, so we set
2491          * it to main + spare data, which are 512 and 4 bytes respectively.
2492          */
2493         host->cw_data = 516;
2494
2495         /*
2496          * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2497          * for 8 bit ECC
2498          */
2499         host->cw_size = host->cw_data + ecc->bytes;
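
        /*
         * the real BBM lands right after the data portion of the last raw
         * codeword; e.g. for a 2K page with 4-bit ECC (cw_size 528):
         * 2048 - 3 * 528 + 1 = 465, where the +1 accounts for the
         * BAD_BLOCK_BYTE_NUM field counting bytes starting from 1
         */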
2500         bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
2501
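        /*
         * assemble the CFG0/CFG1 values used for ECC-protected accesses; the
         * _raw variants below expose the full cw_size as user data and
         * disable the ECC engine
         */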
2502         host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2503                                 | host->cw_data << UD_SIZE_BYTES
2504                                 | 0 << DISABLE_STATUS_AFTER_WRITE
2505                                 | 5 << NUM_ADDR_CYCLES
2506                                 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2507                                 | 0 << STATUS_BFR_READ
2508                                 | 1 << SET_RD_MODE_AFTER_STATUS
2509                                 | host->spare_bytes << SPARE_SIZE_BYTES;
2510
2511         host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2512                                 | 0 <<  CS_ACTIVE_BSY
2513                                 | bad_block_byte << BAD_BLOCK_BYTE_NUM
2514                                 | 0 << BAD_BLOCK_IN_SPARE_AREA
2515                                 | 2 << WR_RD_BSY_GAP
2516                                 | wide_bus << WIDE_FLASH
2517                                 | host->bch_enabled << ENABLE_BCH_ECC;
2518
2519         host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2520                                 | host->cw_size << UD_SIZE_BYTES
2521                                 | 5 << NUM_ADDR_CYCLES
2522                                 | 0 << SPARE_SIZE_BYTES;
2523
2524         host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2525                                 | 0 << CS_ACTIVE_BSY
2526                                 | 17 << BAD_BLOCK_BYTE_NUM
2527                                 | 1 << BAD_BLOCK_IN_SPARE_AREA
2528                                 | 2 << WR_RD_BSY_GAP
2529                                 | wide_bus << WIDE_FLASH
2530                                 | 1 << DEV0_CFG1_ECC_DISABLE;
2531
2532         host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
2533                                 | 0 << ECC_SW_RESET
2534                                 | host->cw_data << ECC_NUM_DATA_BYTES
2535                                 | 1 << ECC_FORCE_CLK_OPEN
2536                                 | ecc_mode << ECC_MODE
2537                                 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2538
2539         if (!nandc->props->qpic_v2)
2540                 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2541
2542         host->clrflashstatus = FS_READY_BSY_N;
2543         host->clrreadstatus = 0xc0;
2544         nandc->regs->erased_cw_detect_cfg_clr =
2545                 cpu_to_le32(CLR_ERASED_PAGE_DET);
2546         nandc->regs->erased_cw_detect_cfg_set =
2547                 cpu_to_le32(SET_ERASED_PAGE_DET);
2548
2549         dev_dbg(nandc->dev,
                "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch_cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2551                 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2552                 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2553                 cwperpage);
2554
2555         return 0;
2556 }
2557
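/* map a NAND framework opcode to the controller's internal operation code */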
2558 static int qcom_op_cmd_mapping(struct qcom_nand_controller *nandc, u8 cmd,
2559                                struct qcom_op *q_op)
2560 {
2561         int ret;
2562
2563         switch (cmd) {
2564         case NAND_CMD_RESET:
2565                 ret = OP_RESET_DEVICE;
2566                 break;
2567         case NAND_CMD_READID:
2568                 ret = OP_FETCH_ID;
2569                 break;
2570         case NAND_CMD_PARAM:
2571                 if (nandc->props->qpic_v2)
2572                         ret = OP_PAGE_READ_ONFI_READ;
2573                 else
2574                         ret = OP_PAGE_READ;
2575                 break;
2576         case NAND_CMD_ERASE1:
2577         case NAND_CMD_ERASE2:
2578                 ret = OP_BLOCK_ERASE;
2579                 break;
2580         case NAND_CMD_STATUS:
2581                 ret = OP_CHECK_STATUS;
2582                 break;
2583         case NAND_CMD_PAGEPROG:
2584                 ret = OP_PROGRAM_PAGE;
2585                 q_op->flag = OP_PROGRAM_PAGE;
2586                 nandc->exec_opwrite = true;
2587                 break;
        default:
                /* avoid returning an uninitialized value for unknown opcodes */
                dev_err(nandc->dev, "Opcode not supported: %u\n", cmd);
                return -EOPNOTSUPP;
        }
2589
2590         return ret;
2591 }
2592
2593 /* NAND framework ->exec_op() hooks and related helpers */
2594 static void qcom_parse_instructions(struct nand_chip *chip,
2595                                     const struct nand_subop *subop,
                                    struct qcom_op *q_op)
2597 {
2598         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2599         const struct nand_op_instr *instr = NULL;
2600         unsigned int op_id;
2601         int i;
2602
2603         memset(q_op, 0, sizeof(*q_op));
2604
2605         for (op_id = 0; op_id < subop->ninstrs; op_id++) {
2606                 unsigned int offset, naddrs;
2607                 const u8 *addrs;
2608
2609                 instr = &subop->instrs[op_id];
2610
2611                 switch (instr->type) {
2612                 case NAND_OP_CMD_INSTR:
2613                         q_op->cmd_reg = qcom_op_cmd_mapping(nandc, instr->ctx.cmd.opcode, q_op);
2614                         q_op->rdy_delay_ns = instr->delay_ns;
2615                         break;
2616
2617                 case NAND_OP_ADDR_INSTR:
2618                         offset = nand_subop_get_addr_start_off(subop, op_id);
2619                         naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2620                         addrs = &instr->ctx.addr.addrs[offset];
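                        /*
                         * pack the address cycles the framework provided:
                         * the first four go into ADDR0, one byte per cycle,
                         * and a fifth cycle goes into the low byte of ADDR1
                         */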
                        for (i = 0; i < min_t(unsigned int, MAX_ADDRESS_CYCLE, naddrs); i++) {
2622                                 if (i < 4)
2623                                         q_op->addr1_reg |= (u32)addrs[i] << i * 8;
2624                                 else
2625                                         q_op->addr2_reg |= addrs[i];
2626                         }
2627                         q_op->rdy_delay_ns = instr->delay_ns;
2628                         break;
2629
2630                 case NAND_OP_DATA_IN_INSTR:
2631                         q_op->data_instr = instr;
2632                         q_op->data_instr_idx = op_id;
2633                         q_op->rdy_delay_ns = instr->delay_ns;
2634                         fallthrough;
2635                 case NAND_OP_DATA_OUT_INSTR:
2636                         q_op->rdy_delay_ns = instr->delay_ns;
2637                         break;
2638
2639                 case NAND_OP_WAITRDY_INSTR:
2640                         q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
2641                         q_op->rdy_delay_ns = instr->delay_ns;
2642                         break;
2643                 }
2644         }
2645 }
2646
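/*
 * busy-wait for an instruction's post-command delay: ndelay() for waits
 * below 10us, udelay() rounded up to whole microseconds otherwise
 */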
2647 static void qcom_delay_ns(unsigned int ns)
2648 {
2649         if (!ns)
2650                 return;
2651
2652         if (ns < 10000)
2653                 ndelay(ns);
2654         else
2655                 udelay(DIV_ROUND_UP(ns, 1000));
2656 }
2657
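/*
 * poll FS_READY_BSY_N in the status word read back into reg_read_buf,
 * giving up after the time_ms deadline passes
 */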
2658 static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
2659 {
2660         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2661         unsigned long start = jiffies + msecs_to_jiffies(time_ms);
2662         u32 flash;
2663
2664         nandc_read_buffer_sync(nandc, true);
2665
2666         do {
2667                 flash = le32_to_cpu(nandc->reg_read_buf[0]);
2668                 if (flash & FS_READY_BSY_N)
2669                         return 0;
2670                 cpu_relax();
2671         } while (time_after(start, jiffies));
2672
2673         dev_err(nandc->dev, "Timeout waiting for device to be ready:0x%08x\n", flash);
2674
2675         return -ETIMEDOUT;
2676 }
2677
2678 static int qcom_read_status_exec(struct nand_chip *chip,
2679                                  const struct nand_subop *subop)
2680 {
2681         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2682         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2683         struct nand_ecc_ctrl *ecc = &chip->ecc;
2684         struct qcom_op q_op;
2685         const struct nand_op_instr *instr = NULL;
2686         unsigned int op_id = 0;
2687         unsigned int len = 0;
2688         int ret = 0, num_cw, i;
2689         u32 flash_status;
2690
2691         host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2692
2693         qcom_parse_instructions(chip, subop, &q_op);
2694
2695         num_cw = nandc->exec_opwrite ? ecc->steps : 1;
2696         nandc->exec_opwrite = false;
2697
2698         nandc->buf_count = 0;
2699         nandc->buf_start = 0;
2700         host->use_ecc = false;
2701
2702         clear_read_regs(nandc);
2703         clear_bam_transaction(nandc);
2704
2705         nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2706         nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2707
2708         write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
2709         write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
2710         read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
2711
2712         ret = submit_descs(nandc);
2713         if (ret) {
2714                 dev_err(nandc->dev, "failure in submitting status descriptor\n");
2715                 free_descs(nandc);
2716                 goto err_out;
2717         }
2718         free_descs(nandc);
2719
2720         nandc_read_buffer_sync(nandc, true);
2721
2722         for (i = 0; i < num_cw; i++) {
2723                 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
2724
                if (flash_status & FS_MPU_ERR)
                        host->status &= ~NAND_STATUS_WP;

                if (flash_status & FS_OP_ERR ||
                    (i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR)))
                        host->status |= NAND_STATUS_FAIL;
        }
2732
2733         flash_status = host->status;
2734         instr = q_op.data_instr;
2735         op_id = q_op.data_instr_idx;
2736         len = nand_subop_get_data_len(subop, op_id);
2737         memcpy(instr->ctx.data.buf.in, &flash_status, len);
2738
2739 err_out:
2740         return ret;
2741 }
2742
2743 static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
2744 {
2745         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2746         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2747         struct qcom_op q_op;
2748         const struct nand_op_instr *instr = NULL;
2749         unsigned int op_id = 0;
2750         unsigned int len = 0;
2751         int ret = 0;
2752
2753         qcom_parse_instructions(chip, subop, &q_op);
2754
2755         nandc->buf_count = 0;
2756         nandc->buf_start = 0;
2757         host->use_ecc = false;
2758
2759         clear_read_regs(nandc);
2760         clear_bam_transaction(nandc);
2761
2762         nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2763         nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
2764         nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
2765         nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
2766                       nandc->props->is_bam ? 0 : DM_EN);
2767
2768         nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2769
2770         write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
2771         write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
2772
2773         read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
2774
2775         ret = submit_descs(nandc);
2776         if (ret) {
2777                 dev_err(nandc->dev, "failure in submitting read id descriptor\n");
2778                 free_descs(nandc);
2779                 goto err_out;
2780         }
2781         free_descs(nandc);
2782
2783         instr = q_op.data_instr;
2784         op_id = q_op.data_instr_idx;
2785         len = nand_subop_get_data_len(subop, op_id);
2786
2787         nandc_read_buffer_sync(nandc, true);
2788         memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
2789
2790 err_out:
2791         return ret;
2792 }
2793
2794 static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
2795 {
2796         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2797         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2798         struct qcom_op q_op;
2799         int ret = 0;
2800
2801         qcom_parse_instructions(chip, subop, &q_op);
2802
2803         if (q_op.flag == OP_PROGRAM_PAGE)
2804                 goto wait_rdy;
2805
2806         nandc->buf_count = 0;
2807         nandc->buf_start = 0;
2808         host->use_ecc = false;
2809
2810         clear_read_regs(nandc);
2811         clear_bam_transaction(nandc);
2812
2813         nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2814         nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2815
2816         write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
2817         write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
2818
2819         read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
2820
2821         ret = submit_descs(nandc);
2822         if (ret) {
2823                 dev_err(nandc->dev, "failure in submitting misc descriptor\n");
2824                 free_descs(nandc);
2825                 goto err_out;
2826         }
2827         free_descs(nandc);
2828
2829 wait_rdy:
2830         qcom_delay_ns(q_op.rdy_delay_ns);
2831         ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
2832
2833 err_out:
2834         return ret;
2835 }
2836
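/*
 * Execute an ONFI parameter page read: ECC is disabled and a single
 * raw 512-byte codeword is read into the internal data buffer. On
 * QPIC v1 the CMD1/VLD registers are temporarily rewritten so the
 * controller issues the PARAM command, then restored through the
 * dummy NAND_DEV_CMD1_RESTORE/NAND_DEV_CMD_VLD_RESTORE offsets.
 */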
2837 static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
2838 {
2839         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2840         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2841         struct qcom_op q_op;
2842         const struct nand_op_instr *instr = NULL;
2843         unsigned int op_id = 0;
2844         unsigned int len = 0;
2845         int ret = 0;
2846
2847         qcom_parse_instructions(chip, subop, &q_op);
2848
2849         q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
2850
2851         nandc->buf_count = 0;
2852         nandc->buf_start = 0;
2853         host->use_ecc = false;
2854         clear_read_regs(nandc);
2855         clear_bam_transaction(nandc);
2856
2857         nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2858
2859         nandc_set_reg(chip, NAND_ADDR0, 0);
2860         nandc_set_reg(chip, NAND_ADDR1, 0);
2861         nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
2862                                         | 512 << UD_SIZE_BYTES
2863                                         | 5 << NUM_ADDR_CYCLES
2864                                         | 0 << SPARE_SIZE_BYTES);
2865         nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
2866                                         | 0 << CS_ACTIVE_BSY
2867                                         | 17 << BAD_BLOCK_BYTE_NUM
2868                                         | 1 << BAD_BLOCK_IN_SPARE_AREA
2869                                         | 2 << WR_RD_BSY_GAP
2870                                         | 0 << WIDE_FLASH
2871                                         | 1 << DEV0_CFG1_ECC_DISABLE);
2872         if (!nandc->props->qpic_v2)
2873                 nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
2874
2875         /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
2876         if (!nandc->props->qpic_v2) {
2877                 nandc_set_reg(chip, NAND_DEV_CMD_VLD,
2878                               (nandc->vld & ~READ_START_VLD));
2879                 nandc_set_reg(chip, NAND_DEV_CMD1,
2880                               (nandc->cmd1 & ~(0xFF << READ_ADDR))
2881                               | NAND_CMD_PARAM << READ_ADDR);
2882         }
2883
2884         nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2885
2886         if (!nandc->props->qpic_v2) {
2887                 nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
2888                 nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
2889         }
2890
2891         instr = q_op.data_instr;
2892         op_id = q_op.data_instr_idx;
2893         len = nand_subop_get_data_len(subop, op_id);
2894
2895         nandc_set_read_loc(chip, 0, 0, 0, len, 1);
2896
2897         if (!nandc->props->qpic_v2) {
2898                 write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
2899                 write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
2900         }
2901
2902         nandc->buf_count = len;
2903         memset(nandc->data_buffer, 0xff, nandc->buf_count);
2904
2905         config_nand_single_cw_page_read(chip, false, 0);
2906
2907         read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
2908                       nandc->buf_count, 0);
2909
2910         /* restore CMD1 and VLD regs */
2911         if (!nandc->props->qpic_v2) {
2912                 write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
2913                 write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
2914         }
2915
2916         ret = submit_descs(nandc);
2917         if (ret) {
2918                 dev_err(nandc->dev, "failure in submitting param page descriptor\n");
2919                 free_descs(nandc);
2920                 goto err_out;
2921         }
2922         free_descs(nandc);
2923
2924         ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
2925         if (ret)
2926                 goto err_out;
2927
2928         memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len);
2929
2930 err_out:
2931         return ret;
2932 }
2933
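/*
 * Execute a block erase: program the command and block address with a
 * single-codeword raw configuration, start the command and poll the
 * ready/busy status.
 */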
2934 static int qcom_erase_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
2935 {
2936         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2937         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2938         struct qcom_op q_op;
2939         int ret = 0;
2940
2941         qcom_parse_instructions(chip, subop, &q_op);
2942
2943         q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
2944
2945         nandc->buf_count = 0;
2946         nandc->buf_start = 0;
2947         host->use_ecc = false;
2948         clear_read_regs(nandc);
2949         clear_bam_transaction(nandc);
2950
2951         nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2952         nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
2953         nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
2954         nandc_set_reg(chip, NAND_DEV0_CFG0,
2955                       host->cfg0_raw & ~(7 << CW_PER_PAGE));
2956         nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
2957         nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2958
2959         write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
2960         write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
2961         write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
2962
2963         ret = submit_descs(nandc);
2964         if (ret) {
2965                 dev_err(nandc->dev, "failure in submitting erase descriptor\n");
2966                 free_descs(nandc);
2967                 goto err_out;
2968         }
2969         free_descs(nandc);
2970
2971         ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
2972
2975 err_out:
2976         return ret;
2977 }
2978
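/*
 * Pattern table for the generic ->exec_op() parser. Each supported
 * operation matches exactly one pattern, e.g.:
 *
 *   RESET  -> CMD + WAITRDY                         (misc_cmd_type)
 *   READID -> CMD + ADDR + DATA_IN(8)               (read_id_type)
 *   STATUS -> CMD + DATA_IN(1)                      (read_status)
 *   PARAM  -> CMD + ADDR [+ WAITRDY] + DATA_IN(512) (param_page_type)
 *   ERASE  -> CMD + ADDR + CMD + WAITRDY            (erase_cmd_type)
 */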
2979 static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
2980                 NAND_OP_PARSER_PATTERN(
2981                         qcom_misc_cmd_type_exec,
2982                         NAND_OP_PARSER_PAT_CMD_ELEM(false),
2983                         NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
2984                 NAND_OP_PARSER_PATTERN(
2985                         qcom_read_id_type_exec,
2986                         NAND_OP_PARSER_PAT_CMD_ELEM(false),
2987                         NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
2988                         NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
2989                 NAND_OP_PARSER_PATTERN(
2990                         qcom_read_status_exec,
2991                         NAND_OP_PARSER_PAT_CMD_ELEM(false),
2992                         NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
2993                 NAND_OP_PARSER_PATTERN(
2994                         qcom_param_page_type_exec,
2995                         NAND_OP_PARSER_PAT_CMD_ELEM(false),
2996                         NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
2997                         NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
2998                         NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)),
2999                 NAND_OP_PARSER_PATTERN(
3000                         qcom_erase_cmd_type_exec,
3001                         NAND_OP_PARSER_PAT_CMD_ELEM(false),
3002                         NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
3003                         NAND_OP_PARSER_PAT_CMD_ELEM(false),
3004                         NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
3005                 );
3006
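/*
 * check_only path of ->exec_op(): accept an operation only if every
 * command opcode it contains is one the pattern table above can run.
 */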
3007 static int qcom_check_op(struct nand_chip *chip,
3008                          const struct nand_operation *op)
3009 {
3010         const struct nand_op_instr *instr;
3011         int op_id;
3012
3013         for (op_id = 0; op_id < op->ninstrs; op_id++) {
3014                 instr = &op->instrs[op_id];
3015
3016                 switch (instr->type) {
3017                 case NAND_OP_CMD_INSTR:
3018                         if (instr->ctx.cmd.opcode != NAND_CMD_RESET &&
3019                             instr->ctx.cmd.opcode != NAND_CMD_READID &&
3020                             instr->ctx.cmd.opcode != NAND_CMD_PARAM &&
3021                             instr->ctx.cmd.opcode != NAND_CMD_ERASE1 &&
3022                             instr->ctx.cmd.opcode != NAND_CMD_ERASE2 &&
3023                             instr->ctx.cmd.opcode != NAND_CMD_STATUS &&
3024                             instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG)
3025                                 return -ENOTSUPP;
3026                         break;
3027                 default:
3028                         break;
3029                 }
3030         }
3031
3032         return 0;
3033 }
3034
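/* Controller ->exec_op() hook: validate or execute a generic NAND op */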
3035 static int qcom_nand_exec_op(struct nand_chip *chip,
3036                              const struct nand_operation *op,
3037                              bool check_only)
3038 {
3039         if (check_only)
3040                 return qcom_check_op(chip, op);
3041
3042         return nand_op_parser_exec_op(chip, &qcom_op_parser,
3043                                       op, check_only);
3044 }
3045
3046 static const struct nand_controller_ops qcom_nandc_ops = {
3047         .attach_chip = qcom_nand_attach_chip,
3048         .exec_op = qcom_nand_exec_op,
3049 };
3050
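/*
 * Release the DMA resources acquired in qcom_nandc_alloc(): the
 * register-read buffer mapping and the tx/rx/cmd channels for BAM,
 * or the single rxtx channel for ADM. Safe on partially set-up state.
 */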
3051 static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
3052 {
3053         if (nandc->props->is_bam) {
3054                 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
3055                         dma_unmap_single(nandc->dev, nandc->reg_read_dma,
3056                                          MAX_REG_RD *
3057                                          sizeof(*nandc->reg_read_buf),
3058                                          DMA_FROM_DEVICE);
3059
3060                 if (nandc->tx_chan)
3061                         dma_release_channel(nandc->tx_chan);
3062
3063                 if (nandc->rx_chan)
3064                         dma_release_channel(nandc->rx_chan);
3065
3066                 if (nandc->cmd_chan)
3067                         dma_release_channel(nandc->cmd_chan);
3068         } else {
3069                 if (nandc->chan)
3070                         dma_release_channel(nandc->chan);
3071         }
3072 }
3073
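/*
 * Allocate controller resources: the internal bounce buffer, the
 * register shadow, the DMA-able register-read buffer and the DMA
 * channels (tx/rx/cmd for BAM, rxtx for ADM).
 */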
3074 static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
3075 {
3076         int ret;
3077
3078         ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
3079         if (ret) {
3080                 dev_err(nandc->dev, "failed to set DMA mask\n");
3081                 return ret;
3082         }
3083
3084         /*
3085          * we use the internal buffer for reading ONFI params, reading small
3086          * data like ID and status, and performing read-copy-write operations
3087          * when writing to a codeword partially. 532 is the maximum possible
3088          * size of a codeword for our nand controller
3089          */
3090         nandc->buf_size = 532;
3091
3092         nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
3093                                         GFP_KERNEL);
3094         if (!nandc->data_buffer)
3095                 return -ENOMEM;
3096
3097         nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
3098                                         GFP_KERNEL);
3099         if (!nandc->regs)
3100                 return -ENOMEM;
3101
3102         nandc->reg_read_buf = devm_kcalloc(nandc->dev,
3103                                 MAX_REG_RD, sizeof(*nandc->reg_read_buf),
3104                                 GFP_KERNEL);
3105         if (!nandc->reg_read_buf)
3106                 return -ENOMEM;
3107
3108         if (nandc->props->is_bam) {
3109                 nandc->reg_read_dma =
3110                         dma_map_single(nandc->dev, nandc->reg_read_buf,
3111                                        MAX_REG_RD *
3112                                        sizeof(*nandc->reg_read_buf),
3113                                        DMA_FROM_DEVICE);
3114                 if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
3115                         dev_err(nandc->dev, "failed to DMA map reg buffer\n");
3116                         return -EIO;
3117                 }
3118
3119                 nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
3120                 if (IS_ERR(nandc->tx_chan)) {
3121                         ret = PTR_ERR(nandc->tx_chan);
3122                         nandc->tx_chan = NULL;
3123                         dev_err_probe(nandc->dev, ret,
3124                                       "tx DMA channel request failed\n");
3125                         goto unalloc;
3126                 }
3127
3128                 nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
3129                 if (IS_ERR(nandc->rx_chan)) {
3130                         ret = PTR_ERR(nandc->rx_chan);
3131                         nandc->rx_chan = NULL;
3132                         dev_err_probe(nandc->dev, ret,
3133                                       "rx DMA channel request failed\n");
3134                         goto unalloc;
3135                 }
3136
3137                 nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
3138                 if (IS_ERR(nandc->cmd_chan)) {
3139                         ret = PTR_ERR(nandc->cmd_chan);
3140                         nandc->cmd_chan = NULL;
3141                         dev_err_probe(nandc->dev, ret,
3142                                       "cmd DMA channel request failed\n");
3143                         goto unalloc;
3144                 }
3145
3146                 /*
3147                  * Initially allocate BAM transaction to read ONFI param page.
3148                  * After detecting all the devices, this BAM transaction will
3149                  * be freed and the next BAM transaction will be allocated with
3150                  * maximum codeword size
3151                  */
3152                 nandc->max_cwperpage = 1;
3153                 nandc->bam_txn = alloc_bam_transaction(nandc);
3154                 if (!nandc->bam_txn) {
3155                         dev_err(nandc->dev,
3156                                 "failed to allocate bam transaction\n");
3157                         ret = -ENOMEM;
3158                         goto unalloc;
3159                 }
3160         } else {
3161                 nandc->chan = dma_request_chan(nandc->dev, "rxtx");
3162                 if (IS_ERR(nandc->chan)) {
3163                         ret = PTR_ERR(nandc->chan);
3164                         nandc->chan = NULL;
3165                         dev_err_probe(nandc->dev, ret,
3166                                       "rxtx DMA channel request failed\n");
3167                         return ret;
3168                 }
3169         }
3170
3171         INIT_LIST_HEAD(&nandc->desc_list);
3172         INIT_LIST_HEAD(&nandc->host_list);
3173
3174         nand_controller_init(&nandc->controller);
3175         nandc->controller.ops = &qcom_nandc_ops;
3176
3177         return 0;
3178 unalloc:
3179         qcom_nandc_unalloc(nandc);
3180         return ret;
3181 }
3182
3183 /* one time setup of a few nand controller registers */
3184 static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
3185 {
3186         u32 nand_ctrl;
3187
3188         /* kill onenand */
3189         if (!nandc->props->is_qpic)
3190                 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
3191
3192         if (!nandc->props->qpic_v2)
3193                 nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
3194                             NAND_DEV_CMD_VLD_VAL);
3195
3196         /* enable ADM or BAM DMA */
3197         if (nandc->props->is_bam) {
3198                 nand_ctrl = nandc_read(nandc, NAND_CTRL);
3199
3200                 /*
3201                  * NAND_CTRL is an operational register, and CPU
3202                  * access to operational registers is read-only
3203                  * in BAM mode. So update the NAND_CTRL register
3204                  * only if the controller is not already in BAM mode;
3205                  * in most cases BAM mode is enabled by the bootloader.
3206                  */
3207                 if (!(nand_ctrl & BAM_MODE_EN))
3208                         nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
3209         } else {
3210                 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
3211         }
3212
3213         /* save the original values of these registers */
3214         if (!nandc->props->qpic_v2) {
3215                 nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
3216                 nandc->vld = NAND_DEV_CMD_VLD_VAL;
3217         }
3218
3219         return 0;
3220 }
3221
3222 static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
3223
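/*
 * Parse the optional "qcom,boot-partitions" DT property: pairs of
 * <offset size> in bytes describing the regions that need the boot
 * codeword fixup. Both values must be multiples of the page size and
 * are stored converted to units of NAND pages.
 */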
3224 static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
3225                                                 struct qcom_nand_host *host,
3226                                                 struct device_node *dn)
3227 {
3228         struct nand_chip *chip = &host->chip;
3229         struct mtd_info *mtd = nand_to_mtd(chip);
3230         struct qcom_nand_boot_partition *boot_partition;
3231         struct device *dev = nandc->dev;
3232         int partitions_count, i, j, ret;
3233
3234         if (!of_property_present(dn, "qcom,boot-partitions"))
3235                 return 0;
3236
3237         partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
3238         if (partitions_count <= 0) {
3239                 dev_err(dev, "Error parsing boot partition\n");
3240                 return partitions_count ? partitions_count : -EINVAL;
3241         }
3242
3243         host->nr_boot_partitions = partitions_count / 2;
3244         host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
3245                                              sizeof(*host->boot_partitions), GFP_KERNEL);
3246         if (!host->boot_partitions) {
3247                 host->nr_boot_partitions = 0;
3248                 return -ENOMEM;
3249         }
3250
3251         for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
3252                 boot_partition = &host->boot_partitions[i];
3253
3254                 ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
3255                                                  &boot_partition->page_offset);
3256                 if (ret) {
3257                         dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
3258                         host->nr_boot_partitions = 0;
3259                         return ret;
3260                 }
3261
3262                 if (boot_partition->page_offset % mtd->writesize) {
3263                         dev_err(dev, "Boot partition offset not a multiple of writesize at index %d\n",
3264                                 i);
3265                         host->nr_boot_partitions = 0;
3266                         return -EINVAL;
3267                 }
3268                 /* Convert offset to nand pages */
3269                 boot_partition->page_offset /= mtd->writesize;
3270
3271                 ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
3272                                                  &boot_partition->page_size);
3273                 if (ret) {
3274                         dev_err(dev, "Error parsing boot partition size at index %d\n", i);
3275                         host->nr_boot_partitions = 0;
3276                         return ret;
3277                 }
3278
3279                 if (boot_partition->page_size % mtd->writesize) {
3280                         dev_err(dev, "Boot partition size not a multiple of writesize at index %d\n",
3281                                 i);
3282                         host->nr_boot_partitions = 0;
3283                         return -EINVAL;
3284                 }
3285                 /* Convert size to nand pages */
3286                 boot_partition->page_size /= mtd->writesize;
3287         }
3288
3289         return 0;
3290 }
3291
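/*
 * Initialize one NAND chip behind the controller: read its chip
 * select from the DT "reg" property, run nand_scan() and register the
 * MTD device, parsing partitions via cmdline/ofpart/qcomsmem.
 */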
3292 static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
3293                                             struct qcom_nand_host *host,
3294                                             struct device_node *dn)
3295 {
3296         struct nand_chip *chip = &host->chip;
3297         struct mtd_info *mtd = nand_to_mtd(chip);
3298         struct device *dev = nandc->dev;
3299         int ret;
3300
3301         ret = of_property_read_u32(dn, "reg", &host->cs);
3302         if (ret) {
3303                 dev_err(dev, "can't get chip-select\n");
3304                 return -ENXIO;
3305         }
3306
3307         nand_set_flash_node(chip, dn);
3308         mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
3309         if (!mtd->name)
3310                 return -ENOMEM;
3311
3312         mtd->owner = THIS_MODULE;
3313         mtd->dev.parent = dev;
3314
3315         /*
3316          * the bad block marker is readable only when we read the last codeword
3317          * of a page with ECC disabled. currently, the nand_base and nand_bbt
3318          * helpers don't allow us to read BB from a nand chip with ECC
3319          * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
3320          * and block_markbad helpers until we permanently switch to using
3321          * MTD_OPS_RAW for all drivers (with the help of badblockbits)
3322          */
3323         chip->legacy.block_bad          = qcom_nandc_block_bad;
3324         chip->legacy.block_markbad      = qcom_nandc_block_markbad;
3325
3326         chip->controller = &nandc->controller;
3327         chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
3328                          NAND_SKIP_BBTSCAN;
3329
3330         /* set up initial status value */
3331         host->status = NAND_STATUS_READY | NAND_STATUS_WP;
3332
3333         ret = nand_scan(chip, 1);
3334         if (ret)
3335                 return ret;
3336
3337         ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
3338         if (ret)
3339                 goto err;
3340
3341         if (nandc->props->use_codeword_fixup) {
3342                 ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
3343                 if (ret)
3344                         goto err;
3345         }
3346
3347         return 0;
3348
3349 err:
3350         nand_cleanup(chip);
3351         return ret;
3352 }
3353
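/*
 * Walk the controller's DT children and register one host per chip.
 * Returns the result of the last registration attempt, or -ENODEV if
 * there are no child nodes.
 */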
3354 static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
3355 {
3356         struct device *dev = nandc->dev;
3357         struct device_node *dn = dev->of_node, *child;
3358         struct qcom_nand_host *host;
3359         int ret = -ENODEV;
3360
3361         for_each_available_child_of_node(dn, child) {
3362                 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
3363                 if (!host) {
3364                         of_node_put(child);
3365                         return -ENOMEM;
3366                 }
3367
3368                 ret = qcom_nand_host_init_and_register(nandc, host, child);
3369                 if (ret) {
3370                         devm_kfree(dev, host);
3371                         continue;
3372                 }
3373
3374                 list_add_tail(&host->node, &nandc->host_list);
3375         }
3376
3377         return ret;
3378 }
3379
3380 /* parse custom DT properties here */
3381 static int qcom_nandc_parse_dt(struct platform_device *pdev)
3382 {
3383         struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
3384         struct device_node *np = nandc->dev->of_node;
3385         int ret;
3386
3387         if (!nandc->props->is_bam) {
3388                 ret = of_property_read_u32(np, "qcom,cmd-crci",
3389                                            &nandc->cmd_crci);
3390                 if (ret) {
3391                         dev_err(nandc->dev, "command CRCI unspecified\n");
3392                         return ret;
3393                 }
3394
3395                 ret = of_property_read_u32(np, "qcom,data-crci",
3396                                            &nandc->data_crci);
3397                 if (ret) {
3398                         dev_err(nandc->dev, "data CRCI unspecified\n");
3399                         return ret;
3400                 }
3401         }
3402
3403         return 0;
3404 }
3405
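/*
 * Probe: map the controller registers (and their DMA address, used
 * for register-read descriptors), enable the core/aon clocks, set up
 * DMA and the one-time registers, then scan for attached chips.
 */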
3406 static int qcom_nandc_probe(struct platform_device *pdev)
3407 {
3408         struct qcom_nand_controller *nandc;
3409         const void *dev_data;
3410         struct device *dev = &pdev->dev;
3411         struct resource *res;
3412         int ret;
3413
3414         nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
3415         if (!nandc)
3416                 return -ENOMEM;
3417
3418         platform_set_drvdata(pdev, nandc);
3419         nandc->dev = dev;
3420
3421         dev_data = of_device_get_match_data(dev);
3422         if (!dev_data) {
3423                 dev_err(&pdev->dev, "failed to get device data\n");
3424                 return -ENODEV;
3425         }
3426
3427         nandc->props = dev_data;
3428
3429         nandc->core_clk = devm_clk_get(dev, "core");
3430         if (IS_ERR(nandc->core_clk))
3431                 return PTR_ERR(nandc->core_clk);
3432
3433         nandc->aon_clk = devm_clk_get(dev, "aon");
3434         if (IS_ERR(nandc->aon_clk))
3435                 return PTR_ERR(nandc->aon_clk);
3436
3437         ret = qcom_nandc_parse_dt(pdev);
3438         if (ret)
3439                 return ret;
3440
3441         nandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
3442         if (IS_ERR(nandc->base))
3443                 return PTR_ERR(nandc->base);
3444
3445         nandc->base_phys = res->start;
3446         nandc->base_dma = dma_map_resource(dev, res->start,
3447                                            resource_size(res),
3448                                            DMA_BIDIRECTIONAL, 0);
3449         if (dma_mapping_error(dev, nandc->base_dma))
3450                 return -ENXIO;
3451
3452         ret = clk_prepare_enable(nandc->core_clk);
3453         if (ret)
3454                 goto err_core_clk;
3455
3456         ret = clk_prepare_enable(nandc->aon_clk);
3457         if (ret)
3458                 goto err_aon_clk;
3459
3460         ret = qcom_nandc_alloc(nandc);
3461         if (ret)
3462                 goto err_nandc_alloc;
3463
3464         ret = qcom_nandc_setup(nandc);
3465         if (ret)
3466                 goto err_setup;
3467
3468         ret = qcom_probe_nand_devices(nandc);
3469         if (ret)
3470                 goto err_setup;
3471
3472         return 0;
3473
3474 err_setup:
3475         qcom_nandc_unalloc(nandc);
3476 err_nandc_alloc:
3477         clk_disable_unprepare(nandc->aon_clk);
3478 err_aon_clk:
3479         clk_disable_unprepare(nandc->core_clk);
3480 err_core_clk:
3481         dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
3482                            DMA_BIDIRECTIONAL, 0);
3483         return ret;
3484 }
3485
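/* Tear down in reverse probe order: MTDs, DMA resources, clocks, register mapping */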
3486 static void qcom_nandc_remove(struct platform_device *pdev)
3487 {
3488         struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
3489         struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3490         struct qcom_nand_host *host;
3491         struct nand_chip *chip;
3492         int ret;
3493
3494         list_for_each_entry(host, &nandc->host_list, node) {
3495                 chip = &host->chip;
3496                 ret = mtd_device_unregister(nand_to_mtd(chip));
3497                 WARN_ON(ret);
3498                 nand_cleanup(chip);
3499         }
3500
3501         qcom_nandc_unalloc(nandc);
3502
3503         clk_disable_unprepare(nandc->aon_clk);
3504         clk_disable_unprepare(nandc->core_clk);
3505
3506         dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
3507                            DMA_BIDIRECTIONAL, 0);
3508 }
3509
3510 static const struct qcom_nandc_props ipq806x_nandc_props = {
3511         .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
3512         .is_bam = false,
3513         .use_codeword_fixup = true,
3514         .dev_cmd_reg_start = 0x0,
3515 };
3516
3517 static const struct qcom_nandc_props ipq4019_nandc_props = {
3518         .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3519         .is_bam = true,
3520         .is_qpic = true,
3521         .dev_cmd_reg_start = 0x0,
3522 };
3523
3524 static const struct qcom_nandc_props ipq8074_nandc_props = {
3525         .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3526         .is_bam = true,
3527         .is_qpic = true,
3528         .dev_cmd_reg_start = 0x7000,
3529 };
3530
3531 static const struct qcom_nandc_props sdx55_nandc_props = {
3532         .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3533         .is_bam = true,
3534         .is_qpic = true,
3535         .qpic_v2 = true,
3536         .dev_cmd_reg_start = 0x7000,
3537 };
3538
3539 /*
3540  * data will hold a struct pointer containing more differences once we support
3541  * more controller variants
3542  */
3543 static const struct of_device_id qcom_nandc_of_match[] = {
3544         {
3545                 .compatible = "qcom,ipq806x-nand",
3546                 .data = &ipq806x_nandc_props,
3547         },
3548         {
3549                 .compatible = "qcom,ipq4019-nand",
3550                 .data = &ipq4019_nandc_props,
3551         },
3552         {
3553                 .compatible = "qcom,ipq6018-nand",
3554                 .data = &ipq8074_nandc_props,
3555         },
3556         {
3557                 .compatible = "qcom,ipq8074-nand",
3558                 .data = &ipq8074_nandc_props,
3559         },
3560         {
3561                 .compatible = "qcom,sdx55-nand",
3562                 .data = &sdx55_nandc_props,
3563         },
3564         {}
3565 };
3566 MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
3567
3568 static struct platform_driver qcom_nandc_driver = {
3569         .driver = {
3570                 .name = "qcom-nandc",
3571                 .of_match_table = qcom_nandc_of_match,
3572         },
3573         .probe   = qcom_nandc_probe,
3574         .remove_new = qcom_nandc_remove,
3575 };
3576 module_platform_driver(qcom_nandc_driver);
3577
3578 MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
3579 MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
3580 MODULE_LICENSE("GPL v2");