// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2010-2015 Broadcom Corporation
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/log2.h>

#include "brcmnand.h"
/*
 * This flag controls if WP stays on between erase/write commands to mitigate
 * flash corruption due to power glitches. Values:
 * 0: NAND_WP is not used or not available
 * 1: NAND_WP is set by default, cleared for erase/write operations
 * 2: NAND_WP is always cleared
 */
static int wp_on = 1;
module_param(wp_on, int, 0444);
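/*
 * Usage sketch (not part of the driver itself): since the parameter is
 * read-only (0444), it can only be chosen at boot/load time, e.g.
 * "brcmnand.wp_on=2" on the kernel command line, and the current value
 * can be inspected under /sys/module/brcmnand/parameters/wp_on.
 */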
/***********************************************************************
 * Definitions
 ***********************************************************************/

#define DRV_NAME			"brcmnand"
#define CMD_PAGE_READ			0x01
#define CMD_SPARE_AREA_READ		0x02
#define CMD_STATUS_READ			0x03
#define CMD_PROGRAM_PAGE		0x04
#define CMD_PROGRAM_SPARE_AREA		0x05
#define CMD_COPY_BACK			0x06
#define CMD_DEVICE_ID_READ		0x07
#define CMD_BLOCK_ERASE			0x08
#define CMD_FLASH_RESET			0x09
#define CMD_BLOCKS_LOCK			0x0a
#define CMD_BLOCKS_LOCK_DOWN		0x0b
#define CMD_BLOCKS_UNLOCK		0x0c
#define CMD_READ_BLOCKS_LOCK_STATUS	0x0d
#define CMD_PARAMETER_READ		0x0e
#define CMD_PARAMETER_CHANGE_COL	0x0f
#define CMD_LOW_LEVEL_OP		0x10
struct brcm_nand_dma_desc {
	u32 next_desc;
	u32 next_desc_ext;
	u32 cmd_irq;
	u32 dram_addr;
	u32 dram_addr_ext;
	u32 tfr_len;
	u32 total_len;
	u32 flash_addr;
	u32 flash_addr_ext;
	u32 cs;
	u32 pad2[5];
	u32 status_valid;
} __packed;
/* Bitfields for brcm_nand_dma_desc::status_valid */
#define FLASH_DMA_ECC_ERROR	(1 << 8)
#define FLASH_DMA_CORR_ERROR	(1 << 9)
/* Bitfields for DMA_MODE */
#define FLASH_DMA_MODE_STOP_ON_ERROR	BIT(1) /* stop on uncorrectable ECC error */
#define FLASH_DMA_MODE_MODE		BIT(0) /* linked-list mode */
#define FLASH_DMA_MODE_MASK		(FLASH_DMA_MODE_STOP_ON_ERROR |	\
					 FLASH_DMA_MODE_MODE)
/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT		9U
#define FC_BYTES		512U
#define FC_WORDS		(FC_BYTES >> 2)
#define BRCMNAND_MIN_PAGESIZE	512
#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)

#define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS	100
/* flash_dma registers */
enum flash_dma_reg {
	FLASH_DMA_REVISION = 0,
	FLASH_DMA_FIRST_DESC,
	FLASH_DMA_FIRST_DESC_EXT,
	FLASH_DMA_CTRL,
	FLASH_DMA_MODE,
	FLASH_DMA_STATUS,
	FLASH_DMA_INTERRUPT_DESC,
	FLASH_DMA_INTERRUPT_DESC_EXT,
	FLASH_DMA_ERROR_STATUS,
	FLASH_DMA_CURRENT_DESC,
	FLASH_DMA_CURRENT_DESC_EXT,
};
/* flash_dma registers v1 */
static const u16 flash_dma_regs_v1[] = {
	[FLASH_DMA_REVISION]		= 0x00,
	[FLASH_DMA_FIRST_DESC]		= 0x04,
	[FLASH_DMA_FIRST_DESC_EXT]	= 0x08,
	[FLASH_DMA_CTRL]		= 0x0c,
	[FLASH_DMA_MODE]		= 0x10,
	[FLASH_DMA_STATUS]		= 0x14,
	[FLASH_DMA_INTERRUPT_DESC]	= 0x18,
	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x1c,
	[FLASH_DMA_ERROR_STATUS]	= 0x20,
	[FLASH_DMA_CURRENT_DESC]	= 0x24,
	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x28,
};
/* flash_dma registers v4 */
static const u16 flash_dma_regs_v4[] = {
	[FLASH_DMA_REVISION]		= 0x00,
	[FLASH_DMA_FIRST_DESC]		= 0x08,
	[FLASH_DMA_FIRST_DESC_EXT]	= 0x0c,
	[FLASH_DMA_CTRL]		= 0x10,
	[FLASH_DMA_MODE]		= 0x14,
	[FLASH_DMA_STATUS]		= 0x18,
	[FLASH_DMA_INTERRUPT_DESC]	= 0x20,
	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x24,
	[FLASH_DMA_ERROR_STATUS]	= 0x28,
	[FLASH_DMA_CURRENT_DESC]	= 0x30,
	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x34,
};
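/*
 * Note that the v4 layout is not simply the v1 layout shifted by one
 * word: FIRST_DESC moves from 0x04 to 0x08, and reserved words appear
 * before INTERRUPT_DESC (0x20) and CURRENT_DESC (0x30). This is why
 * every access goes through an enum-indexed offset table rather than
 * hard-coded register offsets.
 */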
/* Controller feature flags */
enum {
	BRCMNAND_HAS_1K_SECTORS		= BIT(0),
	BRCMNAND_HAS_PREFETCH		= BIT(1),
	BRCMNAND_HAS_CACHE_MODE		= BIT(2),
	BRCMNAND_HAS_WP			= BIT(3),
};
struct brcmnand_controller {
	struct device		*dev;
	struct nand_controller	controller;
	void __iomem		*nand_base;
	void __iomem		*nand_fc; /* flash cache */
	void __iomem		*flash_dma_base;
	unsigned int		irq;
	unsigned int		dma_irq;
	int			nand_version;

	/* Some SoCs provide custom interrupt status register(s) */
	struct brcmnand_soc	*soc;

	/* Some SoCs have a gateable clock for the controller */
	struct clk		*clk;

	int			cmd_pending;
	bool			dma_pending;
	struct completion	done;
	struct completion	dma_done;

	/* List of NAND hosts (one for each chip-select) */
	struct list_head host_list;

	const u16		*flash_dma_offsets;
	struct brcm_nand_dma_desc *dma_desc;
	dma_addr_t		dma_pa;

	/* in-memory cache of the FLASH_CACHE, used only for some commands */
	u8			flash_cache[FC_BYTES];

	/* Controller revision details */
	const u16		*reg_offsets;
	unsigned int		reg_spacing; /* between CS1, CS2, ... regs */
	const u8		*cs_offsets; /* within each chip-select */
	const u8		*cs0_offsets; /* within CS0, if different */
	unsigned int		max_block_size;
	const unsigned int	*block_sizes;
	unsigned int		max_page_size;
	const unsigned int	*page_sizes;
	unsigned int		max_oob;
	u32			features;

	/* for low-power standby/resume only */
	u32			nand_cs_nand_select;
	u32			nand_cs_nand_xor;
	u32			corr_stat_threshold;
	u32			flash_dma_mode;
	bool			pio_poll_mode;
};
struct brcmnand_cfg {
	u64			device_size;
	unsigned int		block_size;
	unsigned int		page_size;
	unsigned int		spare_area_size;
	unsigned int		device_width;
	unsigned int		col_adr_bytes;
	unsigned int		blk_adr_bytes;
	unsigned int		ful_adr_bytes;
	unsigned int		sector_size_1k;
	unsigned int		ecc_level;
	/* use for low-power standby/resume only */
	u32			acc_control;
	u32			config;
	u32			config_ext;
	u32			timing_1;
	u32			timing_2;
};
struct brcmnand_host {
	struct list_head	node;

	struct nand_chip	chip;
	struct platform_device	*pdev;
	int			cs;

	unsigned int		last_cmd;
	unsigned int		last_byte;
	u64			last_addr;
	struct brcmnand_cfg	hwcfg;
	struct brcmnand_controller *ctrl;
};
enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_LL_OP,
	BRCMNAND_CS0_BASE,
	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
	BRCMNAND_CORR_THRESHOLD,
	BRCMNAND_CORR_THRESHOLD_EXT,
	BRCMNAND_UNCORR_COUNT,
	BRCMNAND_CORR_COUNT,
	BRCMNAND_CORR_EXT_ADDR,
	BRCMNAND_CORR_ADDR,
	BRCMNAND_UNCORR_EXT_ADDR,
	BRCMNAND_UNCORR_ADDR,
	BRCMNAND_SEMAPHORE,
	BRCMNAND_ID,
	BRCMNAND_ID_EXT,
	BRCMNAND_LL_RDATA,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_OOB_WRITE_BASE,
	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_FC_BASE,
};
/* BRCMNAND v4.0 */
static const u16 brcmnand_regs_v40[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x6c,
	[BRCMNAND_CS_SELECT]		= 0x14,
	[BRCMNAND_CS_XOR]		= 0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		= 0x40,
	[BRCMNAND_CS1_BASE]		= 0xd0,
	[BRCMNAND_CORR_THRESHOLD]	= 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0,
	[BRCMNAND_UNCORR_COUNT]		= 0,
	[BRCMNAND_CORR_COUNT]		= 0,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x70,
	[BRCMNAND_CORR_ADDR]		= 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x78,
	[BRCMNAND_UNCORR_ADDR]		= 0x7c,
	[BRCMNAND_SEMAPHORE]		= 0x58,
	[BRCMNAND_ID]			= 0x60,
	[BRCMNAND_ID_EXT]		= 0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	= 0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x200,
};
/* BRCMNAND v5.0 */
static const u16 brcmnand_regs_v50[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x6c,
	[BRCMNAND_CS_SELECT]		= 0x14,
	[BRCMNAND_CS_XOR]		= 0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		= 0x40,
	[BRCMNAND_CS1_BASE]		= 0xd0,
	[BRCMNAND_CORR_THRESHOLD]	= 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0,
	[BRCMNAND_UNCORR_COUNT]		= 0,
	[BRCMNAND_CORR_COUNT]		= 0,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x70,
	[BRCMNAND_CORR_ADDR]		= 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x78,
	[BRCMNAND_UNCORR_ADDR]		= 0x7c,
	[BRCMNAND_SEMAPHORE]		= 0x58,
	[BRCMNAND_ID]			= 0x60,
	[BRCMNAND_ID_EXT]		= 0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	= 0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0x140,
	[BRCMNAND_FC_BASE]		= 0x200,
};
/* BRCMNAND v6.0 - v7.1 */
static const u16 brcmnand_regs_v60[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x14,
	[BRCMNAND_CS_SELECT]		= 0x18,
	[BRCMNAND_CS_XOR]		= 0x1c,
	[BRCMNAND_LL_OP]		= 0x20,
	[BRCMNAND_CS0_BASE]		= 0x50,
	[BRCMNAND_CS1_BASE]		= 0,
	[BRCMNAND_CORR_THRESHOLD]	= 0xc0,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0xc4,
	[BRCMNAND_UNCORR_COUNT]		= 0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	= 0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x400,
};
/* BRCMNAND v7.1 */
static const u16 brcmnand_regs_v71[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x14,
	[BRCMNAND_CS_SELECT]		= 0x18,
	[BRCMNAND_CS_XOR]		= 0x1c,
	[BRCMNAND_LL_OP]		= 0x20,
	[BRCMNAND_CS0_BASE]		= 0x50,
	[BRCMNAND_CS1_BASE]		= 0,
	[BRCMNAND_CORR_THRESHOLD]	= 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0xe0,
	[BRCMNAND_UNCORR_COUNT]		= 0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	= 0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x400,
};
/* BRCMNAND v7.2 */
static const u16 brcmnand_regs_v72[] = {
	[BRCMNAND_CMD_START]		= 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
	[BRCMNAND_INTFC_STATUS]		= 0x14,
	[BRCMNAND_CS_SELECT]		= 0x18,
	[BRCMNAND_CS_XOR]		= 0x1c,
	[BRCMNAND_LL_OP]		= 0x20,
	[BRCMNAND_CS0_BASE]		= 0x50,
	[BRCMNAND_CS1_BASE]		= 0,
	[BRCMNAND_CORR_THRESHOLD]	= 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0xe0,
	[BRCMNAND_UNCORR_COUNT]		= 0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	= 0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x400,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
	[BRCMNAND_FC_BASE]		= 0x600,
};
enum brcmnand_cs_reg {
	BRCMNAND_CS_CFG_EXT = 0,
	BRCMNAND_CS_CFG,
	BRCMNAND_CS_ACC_CONTROL,
	BRCMNAND_CS_TIMING1,
	BRCMNAND_CS_TIMING2,
};
/* Per chip-select offsets for v7.1 */
static const u8 brcmnand_cs_offsets_v71[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x0c,
	[BRCMNAND_CS_TIMING2]		= 0x10,
};
/* Per chip-select offsets for pre-v7.1, except CS0 on <= v5.0 */
static const u8 brcmnand_cs_offsets[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x04,
	[BRCMNAND_CS_TIMING1]		= 0x08,
	[BRCMNAND_CS_TIMING2]		= 0x0c,
};
/* Per chip-select offsets for <= v5.0 on CS0 only */
static const u8 brcmnand_cs_offsets_cs0[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x08,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x10,
	[BRCMNAND_CS_TIMING2]		= 0x14,
};
/*
 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
 * one config register, but once the bitfields overflowed, newer controllers
 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
 */
enum {
	CFG_BLK_ADR_BYTES_SHIFT		= 8,
	CFG_COL_ADR_BYTES_SHIFT		= 12,
	CFG_FUL_ADR_BYTES_SHIFT		= 16,
	CFG_BUS_WIDTH_SHIFT		= 23,
	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
	CFG_DEVICE_SIZE_SHIFT		= 24,

	/* Only for pre-v7.1 (with no CFG_EXT register) */
	CFG_PAGE_SIZE_SHIFT		= 20,
	CFG_BLK_SIZE_SHIFT		= 28,

	/* Only for v7.1+ (with CFG_EXT register) */
	CFG_EXT_PAGE_SIZE_SHIFT		= 0,
	CFG_EXT_BLK_SIZE_SHIFT		= 4,
};
/* BRCMNAND_INTFC_STATUS */
enum {
	INTFC_FLASH_STATUS		= GENMASK(7, 0),

	INTFC_ERASED			= BIT(27),
	INTFC_OOB_VALID			= BIT(28),
	INTFC_CACHE_VALID		= BIT(29),
	INTFC_FLASH_READY		= BIT(30),
	INTFC_CTLR_READY		= BIT(31),
};
static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
	return brcmnand_readl(ctrl->nand_base + offs);
}

static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
				 u32 val)
{
	brcmnand_writel(val, ctrl->nand_base + offs);
}
static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
	static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };

	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;

	/* Only support v4.0+? */
	if (ctrl->nand_version < 0x0400) {
		dev_err(ctrl->dev, "version %#x not supported\n",
			ctrl->nand_version);
		return -ENODEV;
	}

	/* Register offsets */
	if (ctrl->nand_version >= 0x0702)
		ctrl->reg_offsets = brcmnand_regs_v72;
	else if (ctrl->nand_version == 0x0701)
		ctrl->reg_offsets = brcmnand_regs_v71;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->reg_offsets = brcmnand_regs_v60;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->reg_offsets = brcmnand_regs_v50;
	else if (ctrl->nand_version >= 0x0400)
		ctrl->reg_offsets = brcmnand_regs_v40;

	/* Chip-select stride */
	if (ctrl->nand_version >= 0x0701)
		ctrl->reg_spacing = 0x14;
	else
		ctrl->reg_spacing = 0x10;

	/* Per chip-select registers */
	if (ctrl->nand_version >= 0x0701) {
		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
	} else {
		ctrl->cs_offsets = brcmnand_cs_offsets;

		/* v5.0 and earlier has a different CS0 offset layout */
		if (ctrl->nand_version <= 0x0500)
			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
	}

	/* Page / block sizes */
	if (ctrl->nand_version >= 0x0701) {
		/* >= v7.1 use nice power-of-2 values! */
		ctrl->max_page_size = 16 * 1024;
		ctrl->max_block_size = 2 * 1024 * 1024;
	} else {
		ctrl->page_sizes = page_sizes;
		if (ctrl->nand_version >= 0x0600)
			ctrl->block_sizes = block_sizes_v6;
		else
			ctrl->block_sizes = block_sizes_v4;

		if (ctrl->nand_version < 0x0400) {
			ctrl->max_page_size = 4096;
			ctrl->max_block_size = 512 * 1024;
		}
	}

	/* Maximum spare area sector size (per 512B) */
	if (ctrl->nand_version == 0x0702)
		ctrl->max_oob = 128;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->max_oob = 64;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->max_oob = 32;
	else
		ctrl->max_oob = 16;

	/* v6.0 and newer (except v6.1) have prefetch support */
	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
		ctrl->features |= BRCMNAND_HAS_PREFETCH;

	/*
	 * v6.x has cache mode, but it's implemented differently. Ignore it for
	 * now.
	 */
	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;

	if (ctrl->nand_version >= 0x0500)
		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;

	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_WP;
	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
		ctrl->features |= BRCMNAND_HAS_WP;

	return 0;
}
static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
{
	/* flash_dma register offsets */
	if (ctrl->nand_version >= 0x0703)
		ctrl->flash_dma_offsets = flash_dma_regs_v4;
	else
		ctrl->flash_dma_offsets = flash_dma_regs_v1;
}
static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		return nand_readreg(ctrl, offs);
	else
		return 0;
}
static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
				      enum brcmnand_reg reg, u32 val)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		nand_writereg(ctrl, offs, val);
}
static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg, u32 mask,
				    unsigned int shift, u32 val)
{
	u32 tmp = brcmnand_read_reg(ctrl, reg);

	tmp &= ~mask;
	tmp |= val << shift;
	brcmnand_write_reg(ctrl, reg, tmp);
}
static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
	return __raw_readl(ctrl->nand_fc + word * 4);
}

static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
				     int word, u32 val)
{
	__raw_writel(val, ctrl->nand_fc + word * 4);
}
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
	/* Clear error addresses */
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
}
static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_UNCORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}

static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_CORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}
static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			   (host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
			   lower_32_bits(addr));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
}
static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
				     enum brcmnand_cs_reg reg)
{
	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
	u8 cs_offs;

	if (cs == 0 && ctrl->cs0_offsets)
		cs_offs = ctrl->cs0_offsets[reg];
	else
		cs_offs = ctrl->cs_offsets[reg];

	if (cs && offs_cs1)
		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;

	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}
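/*
 * A worked example from the tables above: on v7.1 (reg_spacing 0x14,
 * BRCMNAND_CS1_BASE of 0, BRCMNAND_CS0_BASE of 0x50), the ACC_CONTROL
 * register for CS2 resolves to 0x50 + 2 * 0x14 + 0x00 = 0x78.
 */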
static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0600)
		return 1;
	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
}
static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int shift = 0, bits;
	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
	int cs = host->cs;

	if (ctrl->nand_version == 0x0702)
		bits = 7;
	else if (ctrl->nand_version >= 0x0600)
		bits = 6;
	else if (ctrl->nand_version >= 0x0500)
		bits = 5;
	else
		bits = 4;

	if (ctrl->nand_version >= 0x0702) {
		if (cs >= 4)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 4) * bits;
	} else if (ctrl->nand_version >= 0x0600) {
		if (cs >= 5)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 5) * bits;
	}
	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
}
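/*
 * Example of the per-CS field packing above (arithmetic ours): on
 * v6.0-v7.1 each threshold field is 6 bits wide, so CS1 lands in
 * CORR_THRESHOLD at shift (1 % 5) * 6 = 6, while CS5 overflows into
 * CORR_THRESHOLD_EXT at shift (5 % 5) * 6 = 0.
 */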
static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0602)
		return 24;
	return 0;
}
/***********************************************************************
 * NAND ACC CONTROL bitfield
 *
 * Some bits have remained constant throughout hardware revisions, while
 * others have shifted around.
 ***********************************************************************/
/* Constant for all versions (where supported) */
enum {
	/* See BRCMNAND_HAS_CACHE_MODE */
	ACC_CONTROL_CACHE_MODE		= BIT(22),

	/* See BRCMNAND_HAS_PREFETCH */
	ACC_CONTROL_PREFETCH		= BIT(23),

	ACC_CONTROL_PAGE_HIT		= BIT(24),
	ACC_CONTROL_WR_PREEMPT		= BIT(25),
	ACC_CONTROL_PARTIAL_PAGE	= BIT(26),
	ACC_CONTROL_RD_ERASED		= BIT(27),
	ACC_CONTROL_FAST_PGM_RDIN	= BIT(28),
	ACC_CONTROL_WR_ECC		= BIT(30),
	ACC_CONTROL_RD_ECC		= BIT(31),
};
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version == 0x0702)
		return GENMASK(7, 0);
	else if (ctrl->nand_version >= 0x0600)
		return GENMASK(6, 0);
	else
		return GENMASK(5, 0);
}
#define NAND_ACC_CONTROL_ECC_SHIFT	16
#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13

static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;

	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;

	/* v7.2 includes additional ECC levels */
	if (ctrl->nand_version >= 0x0702)
		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;

	return mask;
}
static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	u32 acc_control = nand_readreg(ctrl, offs);
	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;

	if (en) {
		acc_control |= ecc_flags; /* enable RD/WR ECC */
		acc_control |= host->hwcfg.ecc_level
			       << NAND_ACC_CONTROL_ECC_SHIFT;
	} else {
		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
	}

	nand_writereg(ctrl, offs, acc_control);
}
static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version >= 0x0702)
		return 9;
	else if (ctrl->nand_version >= 0x0600)
		return 7;
	else if (ctrl->nand_version >= 0x0500)
		return 6;
	else
		return -1;
}
static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);

	if (shift < 0)
		return 0;

	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
}
static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u32 tmp;

	if (shift < 0)
		return;

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~(1 << shift);
	tmp |= (!!val) << shift;
	nand_writereg(ctrl, acc_control_offs, tmp);
}
/***********************************************************************
 * CS_NAND_SELECT
 ***********************************************************************/

enum {
	CS_SELECT_NAND_WP		= BIT(29),
	CS_SELECT_AUTO_DEVICE_ID_CFG	= BIT(30),
};
static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
				    u32 mask, u32 expected_val,
				    unsigned long timeout_ms)
{
	unsigned long limit;
	u32 val;

	if (!timeout_ms)
		timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
		if ((val & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));

	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
		 expected_val, val & mask);

	return -ETIMEDOUT;
}
static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
	u32 val = en ? CS_SELECT_NAND_WP : 0;

	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
}
/***********************************************************************
 * Flash DMA
 ***********************************************************************/

static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
	return ctrl->flash_dma_base;
}
static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
	if (ctrl->pio_poll_mode)
		return;

	if (has_flash_dma(ctrl)) {
		ctrl->flash_dma_base = 0;
		disable_irq(ctrl->dma_irq);
	}

	disable_irq(ctrl->irq);
	ctrl->pio_poll_mode = true;
}
static inline bool flash_dma_buf_ok(const void *buf)
{
	return buf && !is_vmalloc_addr(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, 4));
}
static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
				    enum flash_dma_reg dma_reg, u32 val)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	brcmnand_writel(val, ctrl->flash_dma_base + offs);
}

static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
				  enum flash_dma_reg dma_reg)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	return brcmnand_readl(ctrl->flash_dma_base + offs);
}
/* Low-level operation types: command, address, write, or read */
enum brcmnand_llop_type {
	LL_OP_CMD,
	LL_OP_ADDR,
	LL_OP_WR,
	LL_OP_RD,
};
/***********************************************************************
 * Internal support functions
 ***********************************************************************/
static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
				  struct brcmnand_cfg *cfg)
{
	if (ctrl->nand_version <= 0x0701)
		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15;

	return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
		cfg->ecc_level == 15) ||
		(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}
/*
 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
 * the layout/configuration.
 * Returns -ERRCODE on failure.
 */
static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = (section * sas) + 6;
	oobregion->length = 3;

	return 0;
}
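/*
 * Example layout (derived from the math above): for a 2KiB page with
 * 512B sectors and a 16B spare area per sector, sectors = 4 and the
 * Hamming ECC bytes occupy OOB offsets 6-8, 22-24, 38-40 and 54-56.
 */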
static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
					   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors * 2)
		return -ERANGE;

	oobregion->offset = (section / 2) * sas;

	if (section & 1) {
		oobregion->offset += 9;
		oobregion->length = 7;
	} else {
		oobregion->length = 6;

		/* First sector of each page may have BBI */
		if (!section) {
			/*
			 * Small-page NAND uses byte 6 for BBI while large-page
			 * NAND uses byte 0.
			 */
			if (cfg->page_size > 512)
				oobregion->offset++;
			oobregion->length--;
		}
	}

	return 0;
}
static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
	.ecc = brcmnand_hamming_ooblayout_ecc,
	.free = brcmnand_hamming_ooblayout_free,
};
static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
	oobregion->length = chip->ecc.bytes;

	return 0;
}
static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	if (sas <= chip->ecc.bytes)
		return 0;

	oobregion->offset = section * sas;
	oobregion->length = sas - chip->ecc.bytes;

	if (!section) {
		oobregion->offset++;
		oobregion->length--;
	}

	return 0;
}
static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;

	if (section > 1 || sas - chip->ecc.bytes < 6 ||
	    (section && sas - chip->ecc.bytes == 6))
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else {
		oobregion->offset = 6;
		oobregion->length = sas - chip->ecc.bytes - 6;
	}

	return 0;
}
static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_lp,
};

static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_sp,
};
static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
{
	struct brcmnand_cfg *p = &host->hwcfg;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
	unsigned int ecc_level = p->ecc_level;
	int sas = p->spare_area_size << p->sector_size_1k;
	int sectors = p->page_size / (512 << p->sector_size_1k);

	if (p->sector_size_1k)
		ecc_level <<= 1;

	if (is_hamming_ecc(host->ctrl, p)) {
		ecc->bytes = 3 * sectors;
		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
		return 0;
	}

	/*
	 * CONTROLLER_VERSION:
	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
	 * But we will just be conservative.
	 */
	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
	if (p->page_size == 512)
		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
	else
		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);

	if (ecc->bytes >= sas) {
		dev_err(&host->pdev->dev,
			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
			ecc->bytes, sas);
		return -EINVAL;
	}

	return 0;
}
static void brcmnand_wp(struct mtd_info *mtd, int wp)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
		static int old_wp = -1;
		int ret;

		if (old_wp != wp) {
			dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
			old_wp = wp;
		}

		/*
		 * make sure ctrl/flash ready before and after
		 * changing state of #WP pin
		 */
		ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
					       NAND_STATUS_READY,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY, 0);
		if (ret)
			return;

		brcmnand_set_wp(ctrl, wp);
		nand_status_op(chip, NULL);
		/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
		ret = bcmnand_ctrl_poll_status(ctrl,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       NAND_STATUS_WP,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       (wp ? 0 : NAND_STATUS_WP), 0);

		if (ret)
			dev_err_ratelimited(&host->pdev->dev,
					    "nand #WP expected %s\n",
					    wp ? "on" : "off");
	}
}
/* Helper functions for reading and writing OOB registers */
static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];

	if (offs >= ctrl->max_oob)
		return 0x77;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
}
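/*
 * Byte-lane illustration for the helper above: OOB bytes are packed
 * big-endian-style into 32-bit registers, so offs = 5 reads the word at
 * offset0 + 4 and shifts right by 24 - (1 << 3) = 16 bits to pick byte
 * lane 1 of that word.
 */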
static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
				 u32 data)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];

	if (offs >= ctrl->max_oob)
		return;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	nand_writereg(ctrl, reg_offs, data);
}
/*
 * read_oob_from_regs - read data from OOB registers
 * @ctrl: NAND controller
 * @i: sub-page sector index
 * @oob: buffer to read to
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
			      int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j++)
		oob[j] = oob_reg_read(ctrl, j);

	return tbytes;
}
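/*
 * Sketch of the 1KiB-sector adjustment above: with sas = 16 and
 * sector_1k = 1, tbytes starts at 32. The even 512B half of a sector
 * (i & 0x01 == 0) copies min(32, max_oob) bytes, and the odd half
 * copies whatever remains beyond max_oob, which may be zero.
 */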
/*
 * write_oob_to_regs - write data to OOB registers
 * @i: sub-page sector index
 * @oob: buffer to write from
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
			     const u8 *oob, int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j += 4)
		oob_reg_write(ctrl, j,
				(oob[j + 0] << 24) |
				(oob[j + 1] << 16) |
				(oob[j + 2] <<  8) |
				(oob[j + 3] <<  0));

	return tbytes;
}
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	/* Discard all NAND_CTLRDY interrupts during DMA */
	if (ctrl->dma_pending)
		return IRQ_HANDLED;

	complete(&ctrl->done);
	return IRQ_HANDLED;
}
/* Handle SoC-specific interrupt hardware */
static irqreturn_t brcmnand_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
		return brcmnand_ctlrdy_irq(irq, data);

	return IRQ_NONE;
}
static irqreturn_t brcmnand_dma_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	complete(&ctrl->dma_done);

	return IRQ_HANDLED;
}
static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int ret;
	u64 cmd_addr;

	cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);

	BUG_ON(ctrl->cmd_pending != 0);
	ctrl->cmd_pending = cmd;

	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
	WARN_ON(ret);

	mb(); /* flush previous writes */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
			   cmd << brcmnand_cmd_shift(ctrl));
}
/***********************************************************************
 * NAND MTD API: read/program/erase
 ***********************************************************************/

static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
			      unsigned int ctrl)
{
	/* intentionally left blank */
}
static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool err = false;
	int sts;

	if (mtd->oops_panic_write) {
		/* switch to interrupt polling and PIO mode */
		disable_ctrl_irqs(ctrl);
		sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
					       NAND_CTRL_RDY, 0);
		err = (sts < 0) ? true : false;
	} else {
		unsigned long timeo = msecs_to_jiffies(
						NAND_POLL_STATUS_TIMEOUT_MS);
		/* wait for completion interrupt */
		sts = wait_for_completion_timeout(&ctrl->done, timeo);
		err = (sts <= 0) ? true : false;
	}

	return err;
}
static int brcmnand_waitfunc(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	bool err = false;

	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
	if (ctrl->cmd_pending)
		err = brcmstb_nand_wait_for_completion(chip);

	if (err) {
		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
					>> brcmnand_cmd_shift(ctrl);

		dev_err_ratelimited(ctrl->dev,
			"timeout waiting for command %#02x\n", cmd);
		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
			brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
	}
	ctrl->cmd_pending = 0;
	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
				 INTFC_FLASH_STATUS;
}
enum {
	LLOP_RE				= BIT(16),
	LLOP_WE				= BIT(17),
	LLOP_ALE			= BIT(18),
	LLOP_CLE			= BIT(19),
	LLOP_RETURN_IDLE		= BIT(31),

	LLOP_DATA_MASK			= GENMASK(15, 0),
};
static int brcmnand_low_level_op(struct brcmnand_host *host,
				 enum brcmnand_llop_type type, u32 data,
				 bool last_op)
{
	struct nand_chip *chip = &host->chip;
	struct brcmnand_controller *ctrl = host->ctrl;
	u32 tmp;

	tmp = data & LLOP_DATA_MASK;
	switch (type) {
	case LL_OP_CMD:
		tmp |= LLOP_WE | LLOP_CLE;
		break;
	case LL_OP_ADDR:
		tmp |= LLOP_WE | LLOP_ALE;
		break;
	case LL_OP_WR:
		tmp |= LLOP_WE;
		break;
	case LL_OP_RD:
		tmp |= LLOP_RE;
		break;
	}
	if (last_op)
		tmp |= LLOP_RETURN_IDLE;

	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);

	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);

	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
	return brcmnand_waitfunc(chip);
}
static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
			     int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 addr = (u64)page_addr << chip->page_shift;
	int native_cmd = 0;

	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
	    command == NAND_CMD_RNDOUT)
		addr = (u64)column;
	/* Avoid propagating a negative, don't-care address */
	else if (page_addr < 0)
		addr = 0;

	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
		(unsigned long long)addr);

	host->last_cmd = command;
	host->last_byte = 0;
	host->last_addr = addr;

	switch (command) {
	case NAND_CMD_RESET:
		native_cmd = CMD_FLASH_RESET;
		break;
	case NAND_CMD_STATUS:
		native_cmd = CMD_STATUS_READ;
		break;
	case NAND_CMD_READID:
		native_cmd = CMD_DEVICE_ID_READ;
		break;
	case NAND_CMD_READOOB:
		native_cmd = CMD_SPARE_AREA_READ;
		break;
	case NAND_CMD_ERASE1:
		native_cmd = CMD_BLOCK_ERASE;
		brcmnand_wp(mtd, 0);
		break;
	case NAND_CMD_PARAM:
		native_cmd = CMD_PARAMETER_READ;
		break;
	case NAND_CMD_SET_FEATURES:
	case NAND_CMD_GET_FEATURES:
		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
		break;
	case NAND_CMD_RNDOUT:
		native_cmd = CMD_PARAMETER_CHANGE_COL;
		addr &= ~((u64)(FC_BYTES - 1));
		/*
		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
		 * NB: hwcfg.sector_size_1k may not be initialized yet
		 */
		if (brcmnand_get_sector_size_1k(host)) {
			host->hwcfg.sector_size_1k =
				brcmnand_get_sector_size_1k(host);
			brcmnand_set_sector_size_1k(host, 0);
		}
		break;
	}

	if (!native_cmd)
		return;

	brcmnand_set_cmd_addr(mtd, addr);
	brcmnand_send_cmd(host, native_cmd);
	brcmnand_waitfunc(chip);

	if (native_cmd == CMD_PARAMETER_READ ||
	    native_cmd == CMD_PARAMETER_CHANGE_COL) {
		/* Copy flash cache word-wise */
		u32 *flash_cache = (u32 *)ctrl->flash_cache;
		int i;

		brcmnand_soc_data_bus_prepare(ctrl->soc, true);

		/*
		 * Must cache the FLASH_CACHE now, since changes in
		 * SECTOR_SIZE_1K may invalidate it
		 */
		for (i = 0; i < FC_WORDS; i++)
			/*
			 * Flash cache is big endian for parameter pages, at
			 * least on STB SoCs
			 */
			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));

		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);

		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
		if (host->hwcfg.sector_size_1k)
			brcmnand_set_sector_size_1k(host,
						    host->hwcfg.sector_size_1k);
	}

	/* Re-enabling protection is necessary only after erase */
	if (command == NAND_CMD_ERASE1)
		brcmnand_wp(mtd, 1);
}
static uint8_t brcmnand_read_byte(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	uint8_t ret = 0;
	int addr, offs;

	switch (host->last_cmd) {
	case NAND_CMD_READID:
		if (host->last_byte < 4)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
				(24 - (host->last_byte << 3));
		else if (host->last_byte < 8)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
				(56 - (host->last_byte << 3));
		break;

	case NAND_CMD_READOOB:
		ret = oob_reg_read(ctrl, host->last_byte);
		break;

	case NAND_CMD_STATUS:
		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
					INTFC_FLASH_STATUS;
		if (wp_on) /* hide WP status */
			ret |= NAND_STATUS_WP;
		break;

	case NAND_CMD_PARAM:
	case NAND_CMD_RNDOUT:
		addr = host->last_addr + host->last_byte;
		offs = addr & (FC_BYTES - 1);

		/* At FC_BYTES boundary, switch to next column */
		if (host->last_byte > 0 && offs == 0)
			nand_change_read_column_op(chip, addr, NULL, 0, false);

		ret = ctrl->flash_cache[offs];
		break;
	case NAND_CMD_GET_FEATURES:
		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
			ret = 0;
		} else {
			bool last = host->last_byte ==
				ONFI_SUBFEATURE_PARAM_LEN - 1;
			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
		}
	}

	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
	host->last_byte++;

	return ret;
}
static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++, buf++)
		*buf = brcmnand_read_byte(chip);
}
static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
			       int len)
{
	int i;
	struct brcmnand_host *host = nand_get_controller_data(chip);

	switch (host->last_cmd) {
	case NAND_CMD_SET_FEATURES:
		for (i = 0; i < len; i++)
			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
					      (i + 1) == len);
		break;
	default:
		BUG();
		break;
	}
}
/*
 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
 * following ahead of time:
 * - Is this descriptor the beginning or end of a linked list?
 * - What is the (DMA) address of the next descriptor in the linked list?
 */
static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
				  struct brcm_nand_dma_desc *desc, u64 addr,
				  dma_addr_t buf, u32 len, u8 dma_cmd,
				  bool begin, bool end,
				  dma_addr_t next_desc)
{
	memset(desc, 0, sizeof(*desc));
	/* Descriptors are written in native byte order (wordwise) */
	desc->next_desc = lower_32_bits(next_desc);
	desc->next_desc_ext = upper_32_bits(next_desc);
	desc->cmd_irq = (dma_cmd << 24) |
		(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
		(!!begin) | ((!!end) << 1); /* head, tail */
#ifdef CONFIG_CPU_BIG_ENDIAN
	desc->cmd_irq |= 0x01 << 12;
#endif
	desc->dram_addr = lower_32_bits(buf);
	desc->dram_addr_ext = upper_32_bits(buf);
	desc->tfr_len = len;
	desc->total_len = len;
	desc->flash_addr = lower_32_bits(addr);
	desc->flash_addr_ext = upper_32_bits(addr);
	desc->cs = host->cs;
	desc->status_valid = 0x01;

	return 0;
}
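/*
 * The caller below (brcmnand_dma_trans) builds a single-entry list:
 * begin = end = true and next_desc = 0, so per the IRQ | STOP bits set
 * for an end descriptor, the engine interrupts and stops after this one
 * transfer.
 */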
/*
 * Kick the FLASH_DMA engine, with a given DMA descriptor
 */
static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned long timeo = msecs_to_jiffies(100);

	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);

	/* Start FLASH_DMA engine */
	ctrl->dma_pending = true;
	mb(); /* flush previous writes */
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */

	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
		dev_err(ctrl->dev,
			"timeout waiting for DMA; status %#x, error status %#x\n",
			flash_dma_readl(ctrl, FLASH_DMA_STATUS),
			flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
	}
	ctrl->dma_pending = false;
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
}
static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
			      u32 len, u8 dma_cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	dma_addr_t buf_pa;
	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
	if (dma_mapping_error(ctrl->dev, buf_pa)) {
		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
		return -ENOMEM;
	}

	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
			       dma_cmd, true, true, 0);

	brcmnand_dma_run(host, ctrl->dma_pa);

	dma_unmap_single(ctrl->dev, buf_pa, len, dir);

	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
		return -EBADMSG;
	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
		return -EUCLEAN;

	return 0;
}
/*
 * Assumes proper CS is already set
 */
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
				u64 addr, unsigned int trans, u32 *buf,
				u8 *oob, u64 *err_addr)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	int i, j, ret = 0;

	brcmnand_clear_ecc_addr(ctrl);

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		brcmnand_set_cmd_addr(mtd, addr);
		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
		brcmnand_send_cmd(host, CMD_PAGE_READ);
		brcmnand_waitfunc(chip);

		if (likely(buf)) {
			brcmnand_soc_data_bus_prepare(ctrl->soc, false);

			for (j = 0; j < FC_WORDS; j++, buf++)
				*buf = brcmnand_read_fc(ctrl, j);

			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
		}

		if (oob)
			oob += read_oob_from_regs(ctrl, i, oob,
						  mtd->oobsize / trans,
						  host->hwcfg.sector_size_1k);

		if (!ret) {
			*err_addr = brcmnand_get_uncorrecc_addr(ctrl);

			if (*err_addr)
				ret = -EBADMSG;
		}

		if (!ret) {
			*err_addr = brcmnand_get_correcc_addr(ctrl);

			if (*err_addr)
				ret = -EUCLEAN;
		}
	}

	return ret;
}
/*
 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
 * error
 *
 * Because the HW ECC signals an ECC error if an erased page has even a single
 * bitflip, we must check each ECC error to see if it is actually an erased
 * page with bitflips, not a truly corrupted page.
 *
 * On a real error, return a negative error code (-EBADMSG for ECC error), and
 * buf will contain raw data.
 * Otherwise, buf gets filled with 0xffs and the maximum number of
 * bitflips-per-ECC-sector is returned to the caller.
 */
static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
					   struct nand_chip *chip, void *buf,
					   u64 addr)
{
	int i, sas;
	void *oob = chip->oob_poi;
	int bitflips = 0;
	int page = addr >> chip->page_shift;
	int ret;

	if (!buf)
		buf = nand_get_data_buf(chip);

	sas = mtd->oobsize / chip->ecc.steps;

	/* read without ecc for verification */
	ret = chip->ecc.read_page_raw(chip, buf, true, page);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
		ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
						  oob, sas, NULL, 0,
						  chip->ecc.strength);
		if (ret < 0)
			return ret;

		bitflips = max(bitflips, ret);
	}

	return bitflips;
}
static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 err_addr = 0;
	int err;
	bool retry = true;

	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);

try_dmaread:
	brcmnand_clear_ecc_addr(ctrl);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
					 CMD_PAGE_READ);
		if (err) {
			if (mtd_is_bitflip_or_eccerr(err))
				err_addr = addr;
			else
				return -EIO;
		}
	} else {
		if (oob)
			memset(oob, 0x99, mtd->oobsize);

		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
					   oob, &err_addr);
	}

	if (mtd_is_eccerr(err)) {
		/*
		 * On controller versions 7.0 and 7.1, a DMA read performed
		 * after a PIO read that reported an uncorrectable error may
		 * latch that stale error, which is cleared only by a
		 * subsequent DMA read. Retry once to clear a possible false
		 * error reported for the current DMA read.
		 */
		if ((ctrl->nand_version == 0x0700) ||
		    (ctrl->nand_version == 0x0701)) {
			if (retry) {
				retry = false;
				goto try_dmaread;
			}
		}

		/*
		 * Controller version 7.2 has a hw encoder to detect erased
		 * page bitflips; apply sw verification for older controllers
		 * only.
		 */
		if (ctrl->nand_version < 0x0702) {
			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
							      addr);
			/* erased page bitflips corrected */
			if (err >= 0)
				return err;
		}

		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.failed++;
		/* NAND layer expects zero on ECC errors */
		return 0;
	}

	if (mtd_is_bitflip(err)) {
		unsigned int corrected = brcmnand_count_corrected(ctrl);

		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.corrected += corrected;
		/* Always exceed the software-imposed threshold */
		return max(mtd->bitflip_threshold, corrected);
	}

	return 0;
}
static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);

	return brcmnand_read(mtd, chip, host->last_addr,
			     mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
	int ret;

	nand_read_page_op(chip, page, 0, NULL, 0);

	brcmnand_set_ecc_enabled(host, 0);
	ret = brcmnand_read(mtd, chip, host->last_addr,
			    mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
	brcmnand_set_ecc_enabled(host, 1);
	return ret;
}
static int brcmnand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
			     mtd->writesize >> FC_SHIFT,
			     NULL, (u8 *)chip->oob_poi);
}
static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);

	brcmnand_set_ecc_enabled(host, 0);
	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
		      mtd->writesize >> FC_SHIFT,
		      NULL, (u8 *)chip->oob_poi);
	brcmnand_set_ecc_enabled(host, 1);
	return 0;
}
static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
			  u64 addr, const u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
	int status, ret = 0;

	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);

	if (unlikely((unsigned long)buf & 0x03)) {
		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
		buf = (u32 *)((unsigned long)buf & ~0x03);
	}

	brcmnand_wp(mtd, 0);

	for (i = 0; i < ctrl->max_oob; i += 4)
		oob_reg_write(ctrl, i, 0xffffffff);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
				       mtd->writesize, CMD_PROGRAM_PAGE))
			ret = -EIO;
		goto out;
	}

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		/* full address MUST be set before populating FC */
		brcmnand_set_cmd_addr(mtd, addr);

		if (buf) {
			brcmnand_soc_data_bus_prepare(ctrl->soc, false);

			for (j = 0; j < FC_WORDS; j++, buf++)
				brcmnand_write_fc(ctrl, j, *buf);

			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
		} else if (oob) {
			for (j = 0; j < FC_WORDS; j++)
				brcmnand_write_fc(ctrl, j, 0xffffffff);
		}

		if (oob) {
			oob += write_oob_to_regs(ctrl, i, oob,
						 mtd->oobsize / trans,
						 host->hwcfg.sector_size_1k);
		}

		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
		status = brcmnand_waitfunc(chip);

		if (status & NAND_STATUS_FAIL) {
			dev_info(ctrl->dev, "program failed at %llx\n",
				 (unsigned long long)addr);
			ret = -EIO;
			goto out;
		}
	}
out:
	brcmnand_wp(mtd, 1);
	return ret;
}
static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
			       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	void *oob = oob_required ? chip->oob_poi : NULL;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);

	return nand_prog_page_end_op(chip);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	void *oob = oob_required ? chip->oob_poi : NULL;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	brcmnand_set_ecc_enabled(host, 0);
	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
	brcmnand_set_ecc_enabled(host, 1);

	return nand_prog_page_end_op(chip);
}
static int brcmnand_write_oob(struct nand_chip *chip, int page)
{
	return brcmnand_write(nand_to_mtd(chip), chip,
			      (u64)page << chip->page_shift, NULL,
			      chip->oob_poi);
}
static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	int ret;

	brcmnand_set_ecc_enabled(host, 0);
	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
			     (u8 *)chip->oob_poi);
	brcmnand_set_ecc_enabled(host, 1);

	return ret;
}
/***********************************************************************
 * Per-CS setup (1 NAND device)
 ***********************************************************************/
static int brcmnand_set_cfg(struct brcmnand_host *host,
			    struct brcmnand_cfg *cfg)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	struct nand_chip *chip = &host->chip;
	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
					      BRCMNAND_CS_CFG_EXT);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u8 block_size = 0, page_size = 0, device_size = 0;
	u32 tmp;

	if (ctrl->block_sizes) {
		int i, found;

		for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
			if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
				block_size = i;
				found = 1;
			}
		if (!found) {
			dev_warn(ctrl->dev, "invalid block size %u\n",
				 cfg->block_size);
			return -EINVAL;
		}
	} else {
		block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
	}

	if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
			cfg->block_size > ctrl->max_block_size)) {
		dev_warn(ctrl->dev, "invalid block size %u\n",
			 cfg->block_size);
		block_size = 0;
	}

	if (ctrl->page_sizes) {
		int i, found;

		for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
			if (ctrl->page_sizes[i] == cfg->page_size) {
				page_size = i;
				found = 1;
			}
		if (!found) {
			dev_warn(ctrl->dev, "invalid page size %u\n",
				 cfg->page_size);
			return -EINVAL;
		}
	} else {
		page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
	}

	if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
			cfg->page_size > ctrl->max_page_size)) {
		dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
		return -EINVAL;
	}

	if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
		dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
			 (unsigned long long)cfg->device_size);
		return -EINVAL;
	}
	device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);

	tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
		(cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
		(cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
		(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
		(device_size << CFG_DEVICE_SIZE_SHIFT);
	if (cfg_offs == cfg_ext_offs) {
		tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
		       (block_size << CFG_BLK_SIZE_SHIFT);
		nand_writereg(ctrl, cfg_offs, tmp);
	} else {
		nand_writereg(ctrl, cfg_offs, tmp);
		tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
		      (block_size << CFG_EXT_BLK_SIZE_SHIFT);
		nand_writereg(ctrl, cfg_ext_offs, tmp);
	}

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~brcmnand_ecc_level_mask(ctrl);
	tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
	tmp &= ~brcmnand_spare_area_mask(ctrl);
	tmp |= cfg->spare_area_size;
	nand_writereg(ctrl, acc_control_offs, tmp);

	brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);

	/* threshold = ceil(BCH-level * 0.75) */
	brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));

	return 0;
}
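/*
 * Example for the threshold rule above (arithmetic ours): with
 * chip->ecc.strength = 8, DIV_ROUND_UP(8 * 3, 4) = 6, so the controller
 * reports a correctable-error status once 6 of the 8 correctable
 * bitflips are seen in a sector.
 */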
static void brcmnand_print_cfg(struct brcmnand_host *host,
			       char *buf, struct brcmnand_cfg *cfg)
{
	buf += sprintf(buf,
		"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
		(unsigned long long)cfg->device_size >> 20,
		cfg->block_size >> 10,
		cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
		cfg->page_size >= 1024 ? "KiB" : "B",
		cfg->spare_area_size, cfg->device_width);

	/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
	if (is_hamming_ecc(host->ctrl, cfg))
		sprintf(buf, ", Hamming ECC");
	else if (cfg->sector_size_1k)
		sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
	else
		sprintf(buf, ", BCH-%u", cfg->ecc_level);
}
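/*
 * Sample of the resulting log line (illustrative values, not from a
 * real device): "detected 512MiB total, 128KiB blocks, 2KiB pages,
 * 16B OOB, 8-bit, BCH-4".
 */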
/*
 * Minimum number of bytes to address a page. Calculated as:
 *     roundup(log2(size / page-size) / 8)
 *
 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
 *     OK because many other things will break if 'size' is irregular...
 */
static inline int get_blk_adr_bytes(u64 size, u32 writesize)
{
	return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
}
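/*
 * Example (arithmetic ours): a 1GiB device with 2KiB pages has
 * ilog2(size) - ilog2(writesize) = 30 - 11 = 19 bits of page address,
 * which ALIGN() rounds up to 24, i.e. 3 block-address bytes.
 */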
2199 static int brcmnand_setup_dev(struct brcmnand_host *host)
2201 struct mtd_info *mtd = nand_to_mtd(&host->chip);
2202 struct nand_chip *chip = &host->chip;
2203 struct brcmnand_controller *ctrl = host->ctrl;
2204 struct brcmnand_cfg *cfg = &host->hwcfg;
2206 u32 offs, tmp, oob_sector;
2209 memset(cfg, 0, sizeof(*cfg));

	ret = of_property_read_u32(nand_get_flash_node(chip),
				   "brcm,nand-oob-sector-size",
				   &oob_sector);
	if (ret) {
		/* Use detected size */
		cfg->spare_area_size = mtd->oobsize /
					(mtd->writesize >> FC_SHIFT);
	} else {
		cfg->spare_area_size = oob_sector;
	}
	if (cfg->spare_area_size > ctrl->max_oob)
		cfg->spare_area_size = ctrl->max_oob;
	/*
	 * Set oobsize to be consistent with controller's spare_area_size, as
	 * the rest is inaccessible.
	 */
	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
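	/*
	 * Illustrative numbers (not from the original source): a 2KiB page
	 * spans 2048 >> FC_SHIFT = 4 controller sectors, so 16B of spare
	 * area per sector yields mtd->oobsize = 4 * 16 = 64 bytes.
	 */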

	cfg->device_size = mtd->size;
	cfg->block_size = mtd->erasesize;
	cfg->page_size = mtd->writesize;
	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
	cfg->col_adr_bytes = 2;
	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);

	if (chip->ecc.mode != NAND_ECC_HW) {
		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
			chip->ecc.mode);
		return -EINVAL;
	}

	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
			/* Default to Hamming for 1-bit ECC, if unspecified */
			chip->ecc.algo = NAND_ECC_HAMMING;
		else
			/* Otherwise, BCH */
			chip->ecc.algo = NAND_ECC_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
						   chip->ecc.size != 512)) {
		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
			chip->ecc.strength, chip->ecc.size);
		return -EINVAL;
	}

	if (chip->ecc.mode != NAND_ECC_NONE &&
	    (!chip->ecc.size || !chip->ecc.strength)) {
		if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
			/* use detected ECC parameters */
			chip->ecc.size = chip->base.eccreq.step_size;
			chip->ecc.strength = chip->base.eccreq.strength;
			dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
				 chip->ecc.size, chip->ecc.strength);
		}
	}

	switch (chip->ecc.size) {
	case 512:
		if (chip->ecc.algo == NAND_ECC_HAMMING)
			cfg->ecc_level = 15;
		else
			cfg->ecc_level = chip->ecc.strength;
		cfg->sector_size_1k = 0;
		break;
	case 1024:
		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
			dev_err(ctrl->dev, "1KB sectors not supported\n");
			return -EINVAL;
		}
		if (chip->ecc.strength & 0x1) {
			dev_err(ctrl->dev,
				"odd ECC not supported with 1KB sectors\n");
			return -EINVAL;
		}
		/* The controller encodes the BCH level per 512B half-sector */
		cfg->ecc_level = chip->ecc.strength >> 1;
		cfg->sector_size_1k = 1;
		break;
	default:
		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
			chip->ecc.size);
		return -EINVAL;
	}

	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
	if (mtd->writesize > 512)
		cfg->ful_adr_bytes += cfg->col_adr_bytes;
	else
		cfg->ful_adr_bytes += 1;
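	/*
	 * Illustrative, not from the original source: with the 1GiB /
	 * 2KiB-page example above, blk_adr_bytes = 3 and col_adr_bytes = 2,
	 * so a full (row + column) address takes ful_adr_bytes = 5 bytes.
	 */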

	ret = brcmnand_set_cfg(host, cfg);
	if (ret)
		return ret;

	brcmnand_set_ecc_enabled(host, 1);

	brcmnand_print_cfg(host, msg, cfg);
	dev_info(ctrl->dev, "detected %s\n", msg);

	/* Configure ACC_CONTROL */
	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	tmp = nand_readreg(ctrl, offs);
	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
	tmp &= ~ACC_CONTROL_RD_ERASED;

	/* We need to turn on Read from erased pages protected by ECC */
	if (ctrl->nand_version >= 0x0702)
		tmp |= ACC_CONTROL_RD_ERASED;
	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
	if (ctrl->features & BRCMNAND_HAS_PREFETCH)
		tmp &= ~ACC_CONTROL_PREFETCH;

	nand_writereg(ctrl, offs, tmp);

	return 0;
}

static int brcmnand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	int ret;

	chip->options |= NAND_NO_SUBPAGE_WRITE;
	/*
	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
	 * to/from, and have nand_base pass us a bounce buffer instead, as
	 * needed.
	 */
	chip->options |= NAND_USE_BOUNCE_BUFFER;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	if (brcmnand_setup_dev(host))
		return -ENXIO;

	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;

	/* only use our internal HW threshold */
	mtd->bitflip_threshold = 1;
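	/*
	 * Explanatory note (added): with a software threshold of 1, any
	 * sector the controller reports as corrected counts toward
	 * -EUCLEAN; the actual filtering is done by the hardware
	 * correctable-error threshold programmed in brcmnand_set_cfg().
	 */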

	ret = brcmstb_choose_ecc_layout(host);

	return ret;
}

static const struct nand_controller_ops brcmnand_controller_ops = {
	.attach_chip = brcmnand_attach_chip,
};

static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	struct platform_device *pdev = host->pdev;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u16 cfg_offs;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(&pdev->dev, "can't get chip-select\n");
		return -ENXIO;
	}

	mtd = nand_to_mtd(&host->chip);
	chip = &host->chip;

	nand_set_flash_node(chip, dn);
	nand_set_controller_data(chip, host);
	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
				   host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
	chip->legacy.cmdfunc = brcmnand_cmdfunc;
	chip->legacy.waitfunc = brcmnand_waitfunc;
	chip->legacy.read_byte = brcmnand_read_byte;
	chip->legacy.read_buf = brcmnand_read_buf;
	chip->legacy.write_buf = brcmnand_write_buf;

	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.read_page = brcmnand_read_page;
	chip->ecc.write_page = brcmnand_write_page;
	chip->ecc.read_page_raw = brcmnand_read_page_raw;
	chip->ecc.write_page_raw = brcmnand_write_page_raw;
	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
	chip->ecc.read_oob = brcmnand_read_oob;
	chip->ecc.write_oob = brcmnand_write_oob;

	chip->controller = &ctrl->controller;

	/*
	 * The bootloader might have configured 16bit mode but
	 * the NAND READID command only works in 8bit mode. We force
	 * 8bit mode here to ensure that NAND READID commands work.
	 */
	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	nand_writereg(ctrl, cfg_offs,
		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}

static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
					    int restore)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
					      BRCMNAND_CS_CFG_EXT);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);

	if (restore) {
		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
		if (cfg_offs != cfg_ext_offs)
			nand_writereg(ctrl, cfg_ext_offs,
				      host->hwcfg.config_ext);
		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
	} else {
		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
		if (cfg_offs != cfg_ext_offs)
			host->hwcfg.config_ext =
				nand_readreg(ctrl, cfg_ext_offs);
		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
	}
}

static int brcmnand_suspend(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		brcmnand_save_restore_cs_config(host, 0);

	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
	ctrl->corr_stat_threshold =
		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);

	if (has_flash_dma(ctrl))
		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);

	return 0;
}

static int brcmnand_resume(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	if (has_flash_dma(ctrl)) {
		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
	}

	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
			   ctrl->corr_stat_threshold);

	if (ctrl->soc) {
		/* Clear/re-enable interrupt */
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	}

	list_for_each_entry(host, &ctrl->host_list, node) {
		struct nand_chip *chip = &host->chip;

		brcmnand_save_restore_cs_config(host, 1);

		/* Reset the chip, required by some chips after power-up */
		nand_reset_op(chip);
	}

	return 0;
}

const struct dev_pm_ops brcmnand_pm_ops = {
	.suspend = brcmnand_suspend,
	.resume = brcmnand_resume,
};
EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
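
/*
 * Usage sketch (illustrative, not part of this file): an SoC glue driver
 * hooks these PM ops, together with brcmnand_probe()/brcmnand_remove(),
 * into its own platform_driver. The names here are assumptions modeled
 * on the brcmstb glue layer, not definitions from this file:
 *
 *	static struct platform_driver brcmstb_nand_driver = {
 *		.probe	= brcmstb_nand_probe,
 *		.remove	= brcmnand_remove,
 *		.driver	= {
 *			.name		= "brcmstb_nand",
 *			.pm		= &brcmnand_pm_ops,
 *			.of_match_table	= brcmstb_nand_of_match,
 *		},
 *	};
 *
 * where brcmstb_nand_probe() simply calls brcmnand_probe(pdev, NULL).
 */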

static const struct of_device_id brcmnand_of_match[] = {
	{ .compatible = "brcm,brcmnand-v4.0" },
	{ .compatible = "brcm,brcmnand-v5.0" },
	{ .compatible = "brcm,brcmnand-v6.0" },
	{ .compatible = "brcm,brcmnand-v6.1" },
	{ .compatible = "brcm,brcmnand-v6.2" },
	{ .compatible = "brcm,brcmnand-v7.0" },
	{ .compatible = "brcm,brcmnand-v7.1" },
	{ .compatible = "brcm,brcmnand-v7.2" },
	{ .compatible = "brcm,brcmnand-v7.3" },
	{},
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);
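
/*
 * Illustrative device-tree fragment (assumed, based only on what this
 * driver reads: the "brcm,nandcs" child compatible, its "reg" chip-select
 * and the optional "brcm,nand-oob-sector-size" property). Addresses and
 * values are examples, not taken from a real board file:
 *
 *	nand-controller@f0442800 {
 *		compatible = "brcm,brcmnand-v7.1", "brcm,brcmnand";
 *		reg = <0xf0442800 0x600>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		nand@1 {
 *			compatible = "brcm,nandcs";
 *			reg = <1>;
 *			brcm,nand-oob-sector-size = <16>;
 *		};
 *	};
 */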

/***********************************************************************
 * Platform driver setup (per controller)
 ***********************************************************************/

int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct brcmnand_controller *ctrl;
	struct resource *res;
	int ret;

	/* We only support device-tree instantiation */
	if (!dn)
		return -ENODEV;

	if (!of_match_node(brcmnand_of_match, dn))
		return -ENODEV;

	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	dev_set_drvdata(dev, ctrl);
	ctrl->dev = dev;

	init_completion(&ctrl->done);
	init_completion(&ctrl->dma_done);
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &brcmnand_controller_ops;
	INIT_LIST_HEAD(&ctrl->host_list);

	/* NAND register range */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->nand_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctrl->nand_base))
		return PTR_ERR(ctrl->nand_base);

	/* Enable clock before using NAND registers */
	ctrl->clk = devm_clk_get(dev, "nand");
	if (!IS_ERR(ctrl->clk)) {
		ret = clk_prepare_enable(ctrl->clk);
		if (ret)
			return ret;
	} else {
		ret = PTR_ERR(ctrl->clk);
		if (ret == -EPROBE_DEFER)
			return ret;

		ctrl->clk = NULL;
	}

	/* Initialize NAND revision */
	ret = brcmnand_revision_init(ctrl);
	if (ret)
		goto err;

	/*
	 * Most chips have this cache at a fixed offset within 'nand' block.
	 * Some must specify this region separately.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
	if (res) {
		ctrl->nand_fc = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->nand_fc)) {
			ret = PTR_ERR(ctrl->nand_fc);
			goto err;
		}
	} else {
		ctrl->nand_fc = ctrl->nand_base +
				ctrl->reg_offsets[BRCMNAND_FC_BASE];
	}

	/* FLASH_DMA */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
	if (res) {
		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->flash_dma_base)) {
			ret = PTR_ERR(ctrl->flash_dma_base);
			goto err;
		}

		/* initialize the dma version */
		brcmnand_flash_dma_revision_init(ctrl);

		/* linked-list and stop on error */
		flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);

		/* Allocate descriptor(s) */
		ctrl->dma_desc = dmam_alloc_coherent(dev,
						     sizeof(*ctrl->dma_desc),
						     &ctrl->dma_pa, GFP_KERNEL);
		if (!ctrl->dma_desc) {
			ret = -ENOMEM;
			goto err;
		}

		ctrl->dma_irq = platform_get_irq(pdev, 1);
		if ((int)ctrl->dma_irq < 0) {
			dev_err(dev, "missing FLASH_DMA IRQ\n");
			ret = -ENODEV;
			goto err;
		}

		ret = devm_request_irq(dev, ctrl->dma_irq,
				       brcmnand_dma_irq, 0, DRV_NAME,
				       ctrl);
		if (ret < 0) {
			dev_err(dev, "can't allocate IRQ %d: error %d\n",
				ctrl->dma_irq, ret);
			goto err;
		}

		dev_info(dev, "enabling FLASH_DMA\n");
	}

	/* Disable automatic device ID config, direct addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
	/* Disable XOR addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);

	if (ctrl->features & BRCMNAND_HAS_WP) {
		/* Permanently disable write protection */
		if (wp_on == 2)
			brcmnand_set_wp(ctrl, false);
	} else {
		wp_on = 0;
	}

	/* IRQ */
	ctrl->irq = platform_get_irq(pdev, 0);
	if ((int)ctrl->irq < 0) {
		dev_err(dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto err;
	}

	/*
	 * Some SoCs integrate this controller (e.g., its interrupt bits) in
	 * interesting ways
	 */
	if (soc) {
		ctrl->soc = soc;

		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
				       DRV_NAME, ctrl);

		/* Enable interrupt */
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	} else {
		/* Use standard interrupt infrastructure */
		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
				       DRV_NAME, ctrl);
	}
	if (ret < 0) {
		dev_err(dev, "can't allocate IRQ %d: error %d\n",
			ctrl->irq, ret);
		goto err;
	}

	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "brcm,nandcs")) {
			struct brcmnand_host *host;

			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				of_node_put(child);
				ret = -ENOMEM;
				goto err;
			}
			host->pdev = pdev;
			host->ctrl = ctrl;

			ret = brcmnand_init_cs(host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue; /* Try all chip-selects */
			}

			list_add_tail(&host->node, &ctrl->host_list);
		}
	}

	/* No chip-selects could initialize properly */
	if (list_empty(&ctrl->host_list)) {
		ret = -ENODEV;
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(ctrl->clk);

	return ret;
}
EXPORT_SYMBOL_GPL(brcmnand_probe);

int brcmnand_remove(struct platform_device *pdev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		nand_release(&host->chip);

	clk_disable_unprepare(ctrl->clk);

	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kevin Cernekee");
MODULE_AUTHOR("Brian Norris");
MODULE_DESCRIPTION("NAND driver for Broadcom chips");
MODULE_ALIAS("platform:brcmnand");