Merge tag 'mtd/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Jul 2019 22:42:44 +0000 (15:42 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Jul 2019 22:42:44 +0000 (15:42 -0700)
Pull MTD updates from Miquel Raynal:
 "This contains the following changes for MTD:

  MTD core changes:
   - New HyperBus framework
   - New _is_locked (concat) implementation
   - Various cleanups

  NAND core changes:
   - use longest matching pattern in ->exec_op() default parser
   - export NAND operation tracer
   - add flag to indicate panic_write in MTD
   - use kzalloc() instead of kmalloc() and memset() (sketch below, after
     this summary)

  Raw NAND controller drivers changes:
   - brcmnand:
       - fix BCH ECC layout for large page NAND parts
       - fall back to the detected ecc-strength and ecc-step-size
       - when an oops is in progress, use PIO and interrupt polling
       - refactor code to introduce helper functions
       - add support for v7.3 controller
   - FSMC:
       - use nand_op_trace for operation tracing
   - GPMI:
       - move all driver code into single file
       - various cleanups (including dmaengine changes)
       - use runtime PM to manage clocks
       - implement exec_op
   - MTK:
       - correct low level time calculation of r/w cycle
       - improve data sampling timing for read cycle
       - add validity check for CE# pin setting
       - fix wrongly assigned OOB buffer pointer issue
       - re-license MTK NAND driver as Dual MIT/GPL
   - STM32:
       - manage the get_irq error case
       - increase DMA completion timeouts

  Raw NAND chip drivers changes:
   - Macronix: add read-retry support

  Onenand driver changes:
   - add support for 8Gb datasize chips
   - avoid fall-through warnings

  SPI-NAND changes:
   - define macros for page-read ops with three-byte addresses
   - add support for two-byte device IDs and then for GigaDevice
     GD5F1GQ4UFxxG
   - add initial support for Paragon PN26G0xA
   - handle the case where the last page read has bitflips

  SPI-NOR core changes:
   - add support for the mt25ql02g and w25q16jv flashes
   - print an error when reading the JEDEC ID fails
   - is25lp256: add post BFPT fix to correct the addr_width

  SPI NOR controller drivers changes:
   - intel-spi: Add support for Intel Elkhart Lake SPI serial flash
   - stm32: remove the driver, as it was replaced by spi-stm32-qspi.c
   - cadence-quadspi: add reset control"
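
A minimal sketch of the kzalloc() conversion mentioned in the NAND core
changes above; the "buf" and "len" locals are illustrative, not code taken
from this series:

    /* before: two calls, and the memset() is easy to forget */
    buf = kmalloc(len, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    memset(buf, 0, len);

    /* after: kzalloc() hands back memory that is already zeroed */
    buf = kzalloc(len, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;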

* tag 'mtd/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (60 commits)
  mtd: concat: implement _is_locked mtd operation
  mtd: concat: refactor concat_lock/concat_unlock
  mtd: abi: do not use C++ style comments in uapi header
  mtd: afs: remove unneeded NULL check
  mtd: rawnand: stm32_fmc2: increase DMA completion timeouts
  mtd: rawnand: Use kzalloc() instead of kmalloc() and memset()
  mtd: hyperbus: Add driver for TI's HyperBus memory controller
  mtd: spinand: read returns badly if the last page has bitflips
  mtd: spinand: Add initial support for Paragon PN26G0xA
  mtd: rawnand: mtk: Re-license MTK NAND driver as Dual MIT/GPL
  mtd: rawnand: gpmi: remove double assignment to block_size
  dt-bindings: mtd: brcmnand: Add brcmnand, brcmnand-v7.3 support
  mtd: rawnand: brcmnand: Add support for v7.3 controller
  mtd: rawnand: brcmnand: Refactored code to introduce helper functions
  mtd: rawnand: brcmnand: When oops in progress use pio and interrupt polling
  mtd: Add flag to indicate panic_write
  mtd: rawnand: Add Macronix NAND read retry support
  mtd: onenand: Avoid fall-through warnings
  mtd: spinand: Add support for GigaDevice GD5F1GQ4UFxxG
  mtd: spinand: Add support for two-byte device IDs
  ...

49 files changed:
Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/stm32-quadspi.txt [deleted file]
Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt [new file with mode: 0644]
MAINTAINERS
drivers/dma/mxs-dma.c
drivers/mtd/Kconfig
drivers/mtd/Makefile
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/hyperbus/Kconfig [new file with mode: 0644]
drivers/mtd/hyperbus/Makefile [new file with mode: 0644]
drivers/mtd/hyperbus/hbmc-am654.c [new file with mode: 0644]
drivers/mtd/hyperbus/hyperbus-core.c [new file with mode: 0644]
drivers/mtd/mtdconcat.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/onenand/onenand_base.c
drivers/mtd/nand/raw/brcmnand/brcmnand.c
drivers/mtd/nand/raw/fsmc_nand.c
drivers/mtd/nand/raw/gpmi-nand/Makefile
drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c [deleted file]
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
drivers/mtd/nand/raw/mtk_ecc.c
drivers/mtd/nand/raw/mtk_ecc.h
drivers/mtd/nand/raw/mtk_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_bch.c
drivers/mtd/nand/raw/nand_macronix.c
drivers/mtd/nand/raw/stm32_fmc2_nand.c
drivers/mtd/nand/spi/Makefile
drivers/mtd/nand/spi/core.c
drivers/mtd/nand/spi/gigadevice.c
drivers/mtd/nand/spi/paragon.c [new file with mode: 0644]
drivers/mtd/parsers/afs.c
drivers/mtd/spi-nor/Kconfig
drivers/mtd/spi-nor/Makefile
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/mtd/spi-nor/intel-spi-pci.c
drivers/mtd/spi-nor/spi-nor.c
drivers/mtd/spi-nor/stm32-quadspi.c [deleted file]
include/linux/dma/mxs-dma.h [new file with mode: 0644]
include/linux/mtd/cfi.h
include/linux/mtd/hyperbus.h [new file with mode: 0644]
include/linux/mtd/mtd.h
include/linux/mtd/onenand_regs.h
include/linux/mtd/rawnand.h
include/linux/mtd/spinand.h
include/uapi/mtd/mtd-abi.h

diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 0b7c373..82156dc 100644 (file)
@@ -28,6 +28,7 @@ Required properties:
                          brcm,brcmnand-v7.0
                          brcm,brcmnand-v7.1
                          brcm,brcmnand-v7.2
+                         brcm,brcmnand-v7.3
                          brcm,brcmnand
 - reg              : the register start and length for NAND register region.
                      (optional) Flash DMA register range (if present)
@@ -101,10 +102,10 @@ Required properties:
                               number (e.g., 0, 1, 2, etc.)
 - #address-cells            : see partition.txt
 - #size-cells               : see partition.txt
-- nand-ecc-strength         : see nand-controller.yaml
-- nand-ecc-step-size        : must be 512 or 1024. See nand-controller.yaml
 
 Optional properties:
+- nand-ecc-strength         : see nand-controller.yaml
+- nand-ecc-step-size        : must be 512 or 1024. See nand-controller.yaml
 - nand-on-flash-bbt         : boolean, to enable the on-flash BBT for this
                               chip-select. See nand-controller.yaml
 - brcm,nand-oob-sector-size : integer, to denote the spare area sector size
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
index 4345c3a..945be7d 100644 (file)
@@ -35,6 +35,9 @@ custom properties:
                  (qspi_n_ss_out).
 - cdns,tslch-ns : Delay in nanoseconds between setting qspi_n_ss_out low
                   and first bit transfer.
+- resets       : Must contain an entry for each entry in reset-names.
+                 See ../reset/reset.txt for details.
+- reset-names  : Must include either "qspi" and/or "qspi-ocp".
 
 Example:
 
@@ -50,6 +53,8 @@ Example:
                cdns,fifo-depth = <128>;
                cdns,fifo-width = <4>;
                cdns,trigger-address = <0x00000000>;
+               resets = <&rst QSPI_RESET>, <&rst QSPI_OCP_RESET>;
+               reset-names = "qspi", "qspi-ocp";
 
                flash0: n25q00@0 {
                        ...
diff --git a/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt b/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt
new file mode 100644 (file)
index 0000000..ad42f4d
--- /dev/null
@@ -0,0 +1,13 @@
+Bindings for HyperFlash NOR flash chips compliant with Cypress HyperBus
+specification and supports Cypress CFI specification 1.5 command set.
+
+Required properties:
+- compatible : "cypress,hyperflash", "cfi-flash" for HyperFlash NOR chips
+- reg : Address of flash's memory map
+
+Example:
+
+       flash@0 {
+               compatible = "cypress,hyperflash", "cfi-flash";
+               reg = <0x0 0x4000000>;
+       };
diff --git a/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt b/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt
deleted file mode 100644 (file)
index ddd18c1..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-* STMicroelectronics Quad Serial Peripheral Interface(QuadSPI)
-
-Required properties:
-- compatible: should be "st,stm32f469-qspi"
-- reg: the first contains the register location and length.
-       the second contains the memory mapping address and length
-- reg-names: should contain the reg names "qspi" "qspi_mm"
-- interrupts: should contain the interrupt for the device
-- clocks: the phandle of the clock needed by the QSPI controller
-- A pinctrl must be defined to set pins in mode of operation for QSPI transfer
-
-Optional properties:
-- resets: must contain the phandle to the reset controller.
-
-A spi flash must be a child of the nor_flash node and could have some
-properties. Also see jedec,spi-nor.txt.
-
-Required properties:
-- reg: chip-Select number (QSPI controller may connect 2 nor flashes)
-- spi-max-frequency: max frequency of spi bus
-
-Optional property:
-- spi-rx-bus-width: see ../spi/spi-bus.txt for the description
-
-Example:
-
-qspi: spi@a0001000 {
-       compatible = "st,stm32f469-qspi";
-       reg = <0xa0001000 0x1000>, <0x90000000 0x10000000>;
-       reg-names = "qspi", "qspi_mm";
-       interrupts = <91>;
-       resets = <&rcc STM32F4_AHB3_RESET(QSPI)>;
-       clocks = <&rcc 0 STM32F4_AHB3_CLOCK(QSPI)>;
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_qspi0>;
-
-       flash@0 {
-               reg = <0>;
-               spi-rx-bus-width = <4>;
-               spi-max-frequency = <108000000>;
-               ...
-       };
-};
diff --git a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt
new file mode 100644 (file)
index 0000000..faa81c2
--- /dev/null
@@ -0,0 +1,51 @@
+Bindings for HyperBus Memory Controller (HBMC) on TI's K3 family of SoCs
+
+Required properties:
+- compatible : "ti,am654-hbmc" for AM654 SoC
+- reg : Two entries:
+       First entry pointed to the register space of HBMC controller
+       Second entry pointing to the memory map region dedicated for
+       MMIO access to attached flash devices
+- ranges : Address translation from offset within CS to allocated MMIO
+          space in SoC
+
+Optional properties:
+- mux-controls : phandle to the multiplexer that controls selection of
+                HBMC vs OSPI inside Flash SubSystem (FSS). Default is OSPI,
+                if property is absent.
+                See Documentation/devicetree/bindings/mux/reg-mux.txt
+                for mmio-mux binding details
+
+Example:
+
+       system-controller@47000000 {
+               compatible = "syscon", "simple-mfd";
+               reg = <0x0 0x47000000 0x0 0x100>;
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               hbmc_mux: multiplexer {
+                       compatible = "mmio-mux";
+                       #mux-control-cells = <1>;
+                       mux-reg-masks = <0x4 0x2>; /* 0: reg 0x4, bit 1 */
+               };
+       };
+
+       hbmc: hyperbus@47034000 {
+               compatible = "ti,am654-hbmc";
+               reg = <0x0 0x47034000 0x0 0x100>,
+                       <0x5 0x00000000 0x1 0x0000000>;
+               power-domains = <&k3_pds 55>;
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0x0 0x0 0x5 0x00000000 0x4000000>, /* CS0 - 64MB */
+                        <0x1 0x0 0x5 0x04000000 0x4000000>; /* CS1 - 64MB */
+               mux-controls = <&hbmc_mux 0>;
+
+               /* Slave flash node */
+               flash@0,0 {
+                       compatible = "cypress,hyperflash", "cfi-flash";
+                       reg = <0x0 0x0 0x4000000>;
+               };
+       };
diff --git a/MAINTAINERS b/MAINTAINERS
index 32bb628..211ea3a 100644 (file)
@@ -7435,6 +7435,14 @@ F:       include/asm-generic/mshyperv.h
 F:     tools/hv/
 F:     Documentation/ABI/stable/sysfs-bus-vmbus
 
+HYPERBUS SUPPORT
+M:     Vignesh Raghavendra <vigneshr@ti.com>
+S:     Supported
+F:     drivers/mtd/hyperbus/
+F:     include/linux/mtd/hyperbus.h
+F:     Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt
+F:     Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt
+
 HYPERVISOR VIRTUAL CONSOLE DRIVER
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Odd Fixes
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 22cc7f6..20a9cb7 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/list.h>
+#include <linux/dma/mxs-dma.h>
 
 #include <asm/irq.h>
 
@@ -77,6 +78,7 @@
 #define BM_CCW_COMMAND         (3 << 0)
 #define CCW_CHAIN              (1 << 2)
 #define CCW_IRQ                        (1 << 3)
+#define CCW_WAIT4RDY           (1 << 5)
 #define CCW_DEC_SEM            (1 << 6)
 #define CCW_WAIT4END           (1 << 7)
 #define CCW_HALT_ON_TERM       (1 << 8)
@@ -477,16 +479,16 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
  *            ......
  *            ->device_prep_slave_sg(0);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK);
  *            ......
  *    [3] If there are more than two DMA commands in the DMA chain, the code
  *        should be:
  *            ......
  *            ->device_prep_slave_sg(0);                                // First
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK); // Last
  *            ......
  */
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
@@ -500,13 +502,12 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
        struct scatterlist *sg;
        u32 i, j;
        u32 *pio;
-       bool append = flags & DMA_PREP_INTERRUPT;
-       int idx = append ? mxs_chan->desc_count : 0;
+       int idx = 0;
 
-       if (mxs_chan->status == DMA_IN_PROGRESS && !append)
-               return NULL;
+       if (mxs_chan->status == DMA_IN_PROGRESS)
+               idx = mxs_chan->desc_count;
 
-       if (sg_len + (append ? idx : 0) > NUM_CCW) {
+       if (sg_len + idx > NUM_CCW) {
                dev_err(mxs_dma->dma_device.dev,
                                "maximum number of sg exceeded: %d > %d\n",
                                sg_len, NUM_CCW);
@@ -520,7 +521,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
         * If the sg is prepared with append flag set, the sg
         * will be appended to the last prepared sg.
         */
-       if (append) {
+       if (idx) {
                BUG_ON(idx < 1);
                ccw = &mxs_chan->ccw[idx - 1];
                ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
@@ -541,12 +542,14 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                ccw->bits = 0;
                ccw->bits |= CCW_IRQ;
                ccw->bits |= CCW_DEC_SEM;
-               if (flags & DMA_CTRL_ACK)
+               if (flags & MXS_DMA_CTRL_WAIT4END)
                        ccw->bits |= CCW_WAIT4END;
                ccw->bits |= CCW_HALT_ON_TERM;
                ccw->bits |= CCW_TERM_FLUSH;
                ccw->bits |= BF_CCW(sg_len, PIO_NUM);
                ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+               if (flags & MXS_DMA_CTRL_WAIT4RDY)
+                       ccw->bits |= CCW_WAIT4RDY;
        } else {
                for_each_sg(sgl, sg, sg_len, i) {
                        if (sg_dma_len(sg) > MAX_XFER_BYTES) {
@@ -573,7 +576,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                                ccw->bits &= ~CCW_CHAIN;
                                ccw->bits |= CCW_IRQ;
                                ccw->bits |= CCW_DEC_SEM;
-                               if (flags & DMA_CTRL_ACK)
+                               if (flags & MXS_DMA_CTRL_WAIT4END)
                                        ccw->bits |= CCW_WAIT4END;
                        }
                }
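
The comment block above documents the new calling convention: the first
descriptor of a chain is prepared with no flags and the last one with the
controller-specific MXS_DMA_CTRL_WAIT4END flag (presumably exported by the
new include/linux/dma/mxs-dma.h header added in this series) instead of the
generic DMA_PREP_INTERRUPT/DMA_CTRL_ACK flags. A rough client-side sketch;
the function name and parameters are illustrative, not code from this series:

    #include <linux/dmaengine.h>
    #include <linux/kernel.h>
    #include <linux/dma/mxs-dma.h>

    /*
     * Illustrative only: issue a command descriptor (PIO words, the mxs/gpmi
     * idiom) followed by a data descriptor; only the last descriptor waits
     * for the controller command to end.
     */
    static int example_issue_chain(struct dma_chan *chan, u32 *pio_words,
                                   unsigned int pio_count,
                                   struct scatterlist *data_sg,
                                   unsigned int data_nents)
    {
            struct dma_async_tx_descriptor *desc;

            /* first descriptor of the chain: PIO words, no flags */
            desc = dmaengine_prep_slave_sg(chan,
                                           (struct scatterlist *)pio_words,
                                           pio_count, DMA_TRANS_NONE, 0);
            if (!desc)
                    return -EINVAL;

            /* last descriptor: wait for the controller command to end */
            desc = dmaengine_prep_slave_sg(chan, data_sg, data_nents,
                                           DMA_DEV_TO_MEM,
                                           MXS_DMA_CTRL_WAIT4END);
            if (!desc)
                    return -EINVAL;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }
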
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index fb31a7f..80a6e2d 100644 (file)
@@ -274,4 +274,6 @@ source "drivers/mtd/spi-nor/Kconfig"
 
 source "drivers/mtd/ubi/Kconfig"
 
+source "drivers/mtd/hyperbus/Kconfig"
+
 endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 806287e..62d649a 100644 (file)
@@ -34,3 +34,4 @@ obj-y         += chips/ lpddr/ maps/ devices/ nand/ tests/
 
 obj-$(CONFIG_MTD_SPI_NOR)      += spi-nor/
 obj-$(CONFIG_MTD_UBI)          += ubi/
+obj-$(CONFIG_MTD_HYPERBUS)     += hyperbus/
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index c8fa590..f4da7bd 100644 (file)
 #define SST49LF008A            0x005a
 #define AT49BV6416             0x00d6
 
+/*
+ * Status Register bit description. Used by flash devices that don't
+ * support DQ polling (e.g. HyperFlash)
+ */
+#define CFI_SR_DRB             BIT(7)
+#define CFI_SR_ESB             BIT(5)
+#define CFI_SR_PSB             BIT(4)
+#define CFI_SR_WBASB           BIT(3)
+#define CFI_SR_SLSB            BIT(1)
+
 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -97,6 +107,50 @@ static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .module         = THIS_MODULE
 };
 
+/*
+ * Use status register to poll for Erase/write completion when DQ is not
+ * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
+ * CFI Primary Vendor-Specific Extended Query table 1.5
+ */
+static int cfi_use_status_reg(struct cfi_private *cfi)
+{
+       struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+       u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
+
+       return extp->MinorVersion >= '5' &&
+               (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
+}
+
+static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
+                                unsigned long adr)
+{
+       struct cfi_private *cfi = map->fldrv_priv;
+       map_word status;
+
+       if (!cfi_use_status_reg(cfi))
+               return;
+
+       cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+                        cfi->device_type, NULL);
+       status = map_read(map, adr);
+
+       if (map_word_bitsset(map, status, CMD(0x3a))) {
+               unsigned long chipstatus = MERGESTATUS(status);
+
+               if (chipstatus & CFI_SR_ESB)
+                       pr_err("%s erase operation failed, status %lx\n",
+                              map->name, chipstatus);
+               if (chipstatus & CFI_SR_PSB)
+                       pr_err("%s program operation failed, status %lx\n",
+                              map->name, chipstatus);
+               if (chipstatus & CFI_SR_WBASB)
+                       pr_err("%s buffer program command aborted, status %lx\n",
+                              map->name, chipstatus);
+               if (chipstatus & CFI_SR_SLSB)
+                       pr_err("%s sector write protected, status %lx\n",
+                              map->name, chipstatus);
+       }
+}
 
 /* #define DEBUG_CFI_FEATURES */
 
@@ -742,10 +796,25 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
  * correctly and is therefore not done (particularly with interleaved chips
  * as each chip must be checked independently of the others).
  */
-static int __xipram chip_ready(struct map_info *map, unsigned long addr)
+static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
+                              unsigned long addr)
 {
+       struct cfi_private *cfi = map->fldrv_priv;
        map_word d, t;
 
+       if (cfi_use_status_reg(cfi)) {
+               map_word ready = CMD(CFI_SR_DRB);
+               /*
+                * For chips that support status register, check device
+                * ready bit
+                */
+               cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+                                cfi->device_type, NULL);
+               d = map_read(map, addr);
+
+               return map_word_andequal(map, d, ready, ready);
+       }
+
        d = map_read(map, addr);
        t = map_read(map, addr);
 
@@ -767,10 +836,30 @@ static int __xipram chip_ready(struct map_info *map, unsigned long addr)
  * as each chip must be checked independently of the others).
  *
  */
-static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
+static int __xipram chip_good(struct map_info *map, struct flchip *chip,
+                             unsigned long addr, map_word expected)
 {
+       struct cfi_private *cfi = map->fldrv_priv;
        map_word oldd, curd;
 
+       if (cfi_use_status_reg(cfi)) {
+               map_word ready = CMD(CFI_SR_DRB);
+               map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);
+               /*
+                * For chips that support status register, check device
+                * ready bit and Erase/Program status bit to know if
+                * operation succeeded.
+                */
+               cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+                                cfi->device_type, NULL);
+               curd = map_read(map, addr);
+
+               if (map_word_andequal(map, curd, ready, ready))
+                       return !map_word_bitsset(map, curd, err);
+
+               return 0;
+       }
+
        oldd = map_read(map, addr);
        curd = map_read(map, addr);
 
@@ -792,7 +881,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 
        case FL_STATUS:
                for (;;) {
-                       if (chip_ready(map, adr))
+                       if (chip_ready(map, chip, adr))
                                break;
 
                        if (time_after(jiffies, timeo)) {
@@ -830,7 +919,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
-                       if (chip_ready(map, adr))
+                       if (chip_ready(map, chip, adr))
                                break;
 
                        if (time_after(jiffies, timeo)) {
@@ -1362,7 +1451,7 @@ static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
        /* wait for chip to become ready */
        timeo = jiffies + msecs_to_jiffies(2);
        for (;;) {
-               if (chip_ready(map, adr))
+               if (chip_ready(map, chip, adr))
                        break;
 
                if (time_after(jiffies, timeo)) {
@@ -1628,22 +1717,24 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                        continue;
                }
 
-               if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
+               if (time_after(jiffies, timeo) &&
+                   !chip_ready(map, chip, adr)) {
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
                        xip_disable(map, chip, adr);
                        break;
                }
 
-               if (chip_ready(map, adr))
+               if (chip_ready(map, chip, adr))
                        break;
 
                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }
        /* Did we succeed? */
-       if (!chip_good(map, adr, datum)) {
+       if (!chip_good(map, chip, adr, datum)) {
                /* reset on all failures. */
+               cfi_check_err_status(map, chip, adr);
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */
 
@@ -1881,10 +1972,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                 * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
                 * the failure due to scheduling.
                 */
-               if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
+               if (time_after(jiffies, timeo) &&
+                   !chip_good(map, chip, adr, datum))
                        break;
 
-               if (chip_good(map, adr, datum)) {
+               if (chip_good(map, chip, adr, datum)) {
                        xip_enable(map, chip, adr);
                        goto op_done;
                }
@@ -1901,6 +1993,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
         * See e.g.
         * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
         */
+       cfi_check_err_status(map, chip, adr);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
@@ -2018,7 +2111,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
         * If the driver thinks the chip is idle, and no toggle bits
         * are changing, then the chip is actually idle for sure.
         */
-       if (chip->state == FL_READY && chip_ready(map, adr))
+       if (chip->state == FL_READY && chip_ready(map, chip, adr))
                return 0;
 
        /*
@@ -2035,7 +2128,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
 
                /* wait for the chip to become ready */
                for (i = 0; i < jiffies_to_usecs(timeo); i++) {
-                       if (chip_ready(map, adr))
+                       if (chip_ready(map, chip, adr))
                                return 0;
 
                        udelay(1);
@@ -2099,14 +2192,15 @@ retry:
        map_write(map, datum, adr);
 
        for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
-               if (chip_ready(map, adr))
+               if (chip_ready(map, chip, adr))
                        break;
 
                udelay(1);
        }
 
-       if (!chip_good(map, adr, datum)) {
+       if (!chip_good(map, chip, adr, datum)) {
                /* reset on all failures. */
+               cfi_check_err_status(map, chip, adr);
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */
 
@@ -2300,7 +2394,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
                        chip->erase_suspended = 0;
                }
 
-               if (chip_good(map, adr, map_word_ff(map)))
+               if (chip_good(map, chip, adr, map_word_ff(map)))
                        break;
 
                if (time_after(jiffies, timeo)) {
@@ -2316,6 +2410,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
        /* Did we succeed? */
        if (ret) {
                /* reset on all failures. */
+               cfi_check_err_status(map, chip, adr);
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */
 
@@ -2396,7 +2491,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
                        chip->erase_suspended = 0;
                }
 
-               if (chip_good(map, adr, map_word_ff(map)))
+               if (chip_good(map, chip, adr, map_word_ff(map)))
                        break;
 
                if (time_after(jiffies, timeo)) {
@@ -2412,6 +2507,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
        /* Did we succeed? */
        if (ret) {
                /* reset on all failures. */
+               cfi_check_err_status(map, chip, adr);
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */
 
@@ -2533,8 +2629,6 @@ struct ppb_lock {
        int locked;
 };
 
-#define MAX_SECTORS                    512
-
 #define DO_XXLOCK_ONEBLOCK_LOCK                ((void *)1)
 #define DO_XXLOCK_ONEBLOCK_UNLOCK      ((void *)2)
 #define DO_XXLOCK_ONEBLOCK_GETLOCK     ((void *)3)
@@ -2589,7 +2683,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
         */
        timeo = jiffies + msecs_to_jiffies(2000);       /* 2s max (un)locking */
        for (;;) {
-               if (chip_ready(map, adr))
+               if (chip_ready(map, chip, adr))
                        break;
 
                if (time_after(jiffies, timeo)) {
@@ -2633,6 +2727,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
        int i;
        int sectors;
        int ret;
+       int max_sectors;
 
        /*
         * PPB unlocking always unlocks all sectors of the flash chip.
@@ -2640,7 +2735,11 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
         * first check the locking status of all sectors and save
         * it for future use.
         */
-       sect = kcalloc(MAX_SECTORS, sizeof(struct ppb_lock), GFP_KERNEL);
+       max_sectors = 0;
+       for (i = 0; i < mtd->numeraseregions; i++)
+               max_sectors += regions[i].numblocks;
+
+       sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
        if (!sect)
                return -ENOMEM;
 
@@ -2689,9 +2788,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                }
 
                sectors++;
-               if (sectors >= MAX_SECTORS) {
+               if (sectors >= max_sectors) {
                        printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
-                              MAX_SECTORS);
+                              max_sectors);
                        kfree(sect);
                        return -EINVAL;
                }
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
new file mode 100644 (file)
index 0000000..cff6bbd
--- /dev/null
@@ -0,0 +1,23 @@
+menuconfig MTD_HYPERBUS
+       tristate "HyperBus support"
+       select MTD_CFI
+       select MTD_MAP_BANK_WIDTH_2
+       select MTD_CFI_AMDSTD
+       select MTD_COMPLEX_MAPPINGS
+       help
+         This is the framework for the HyperBus which can be used by
+         the HyperBus Controller driver to communicate with
+         HyperFlash. See Cypress HyperBus specification for more
+         details
+
+if MTD_HYPERBUS
+
+config HBMC_AM654
+       tristate "HyperBus controller driver for AM65x SoC"
+       select MULTIPLEXER
+       select MUX_MMIO
+       help
+        This is the driver for HyperBus controller on TI's AM65x and
+        other SoCs
+
+endif # MTD_HYPERBUS
diff --git a/drivers/mtd/hyperbus/Makefile b/drivers/mtd/hyperbus/Makefile
new file mode 100644 (file)
index 0000000..8a936e0
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_MTD_HYPERBUS)     += hyperbus-core.o
+obj-$(CONFIG_HBMC_AM654)       += hbmc-am654.o
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
new file mode 100644 (file)
index 0000000..08d543b
--- /dev/null
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Author: Vignesh Raghavendra <vigneshr@ti.com>
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
+#define AM654_HBMC_CALIB_COUNT 25
+
+struct am654_hbmc_priv {
+       struct hyperbus_ctlr ctlr;
+       struct hyperbus_device hbdev;
+       struct mux_control *mux_ctrl;
+};
+
+static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
+{
+       struct map_info *map = &hbdev->map;
+       struct cfi_private cfi;
+       int count = AM654_HBMC_CALIB_COUNT;
+       int pass_count = 0;
+       int ret;
+
+       cfi.interleave = 1;
+       cfi.device_type = CFI_DEVICETYPE_X16;
+       cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
+       cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);
+
+       while (count--) {
+               ret = cfi_qry_present(map, 0, &cfi);
+               if (ret)
+                       pass_count++;
+               else
+                       pass_count = 0;
+               if (pass_count == 5)
+                       break;
+       }
+
+       cfi_qry_mode_off(0, map, &cfi);
+
+       return ret;
+}
+
+static const struct hyperbus_ops am654_hbmc_ops = {
+       .calibrate = am654_hbmc_calibrate,
+};
+
+static int am654_hbmc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct am654_hbmc_priv *priv;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, priv);
+
+       if (of_property_read_bool(dev->of_node, "mux-controls")) {
+               struct mux_control *control = devm_mux_control_get(dev, NULL);
+
+               if (IS_ERR(control))
+                       return PTR_ERR(control);
+
+               ret = mux_control_select(control, 1);
+               if (ret) {
+                       dev_err(dev, "Failed to select HBMC mux\n");
+                       return ret;
+               }
+               priv->mux_ctrl = control;
+       }
+
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               pm_runtime_put_noidle(dev);
+               goto disable_pm;
+       }
+
+       priv->ctlr.dev = dev;
+       priv->ctlr.ops = &am654_hbmc_ops;
+       priv->hbdev.ctlr = &priv->ctlr;
+       priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
+       ret = hyperbus_register_device(&priv->hbdev);
+       if (ret) {
+               dev_err(dev, "failed to register controller\n");
+               pm_runtime_put_sync(&pdev->dev);
+               goto disable_pm;
+       }
+
+       return 0;
+disable_pm:
+       pm_runtime_disable(dev);
+       if (priv->mux_ctrl)
+               mux_control_deselect(priv->mux_ctrl);
+       return ret;
+}
+
+static int am654_hbmc_remove(struct platform_device *pdev)
+{
+       struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = hyperbus_unregister_device(&priv->hbdev);
+       if (priv->mux_ctrl)
+               mux_control_deselect(priv->mux_ctrl);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       return ret;
+}
+
+static const struct of_device_id am654_hbmc_dt_ids[] = {
+       {
+               .compatible = "ti,am654-hbmc",
+       },
+       { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);
+
+static struct platform_driver am654_hbmc_platform_driver = {
+       .probe = am654_hbmc_probe,
+       .remove = am654_hbmc_remove,
+       .driver = {
+               .name = "hbmc-am654",
+               .of_match_table = am654_hbmc_dt_ids,
+       },
+};
+
+module_platform_driver(am654_hbmc_platform_driver);
+
+MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hbmc-am654");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
new file mode 100644 (file)
index 0000000..6af9ea3
--- /dev/null
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Author: Vignesh Raghavendra <vigneshr@ti.com>
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/types.h>
+
+static struct hyperbus_device *map_to_hbdev(struct map_info *map)
+{
+       return container_of(map, struct hyperbus_device, map);
+}
+
+static map_word hyperbus_read16(struct map_info *map, unsigned long addr)
+{
+       struct hyperbus_device *hbdev = map_to_hbdev(map);
+       struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+       map_word read_data;
+
+       read_data.x[0] = ctlr->ops->read16(hbdev, addr);
+
+       return read_data;
+}
+
+static void hyperbus_write16(struct map_info *map, map_word d,
+                            unsigned long addr)
+{
+       struct hyperbus_device *hbdev = map_to_hbdev(map);
+       struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+       ctlr->ops->write16(hbdev, addr, d.x[0]);
+}
+
+static void hyperbus_copy_from(struct map_info *map, void *to,
+                              unsigned long from, ssize_t len)
+{
+       struct hyperbus_device *hbdev = map_to_hbdev(map);
+       struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+       ctlr->ops->copy_from(hbdev, to, from, len);
+}
+
+static void hyperbus_copy_to(struct map_info *map, unsigned long to,
+                            const void *from, ssize_t len)
+{
+       struct hyperbus_device *hbdev = map_to_hbdev(map);
+       struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+       ctlr->ops->copy_to(hbdev, to, from, len);
+}
+
+int hyperbus_register_device(struct hyperbus_device *hbdev)
+{
+       const struct hyperbus_ops *ops;
+       struct hyperbus_ctlr *ctlr;
+       struct device_node *np;
+       struct map_info *map;
+       struct resource res;
+       struct device *dev;
+       int ret;
+
+       if (!hbdev || !hbdev->np || !hbdev->ctlr || !hbdev->ctlr->dev) {
+               pr_err("hyperbus: please fill all the necessary fields!\n");
+               return -EINVAL;
+       }
+
+       np = hbdev->np;
+       ctlr = hbdev->ctlr;
+       if (!of_device_is_compatible(np, "cypress,hyperflash"))
+               return -ENODEV;
+
+       hbdev->memtype = HYPERFLASH;
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret)
+               return ret;
+
+       dev = ctlr->dev;
+       map = &hbdev->map;
+       map->size = resource_size(&res);
+       map->virt = devm_ioremap_resource(dev, &res);
+       if (IS_ERR(map->virt))
+               return PTR_ERR(map->virt);
+
+       map->name = dev_name(dev);
+       map->bankwidth = 2;
+       map->device_node = np;
+
+       simple_map_init(map);
+       ops = ctlr->ops;
+       if (ops) {
+               if (ops->read16)
+                       map->read = hyperbus_read16;
+               if (ops->write16)
+                       map->write = hyperbus_write16;
+               if (ops->copy_to)
+                       map->copy_to = hyperbus_copy_to;
+               if (ops->copy_from)
+                       map->copy_from = hyperbus_copy_from;
+
+               if (ops->calibrate && !ctlr->calibrated) {
+                       ret = ops->calibrate(hbdev);
+                       if (!ret) {
+                               dev_err(dev, "Calibration failed\n");
+                               return -ENODEV;
+                       }
+                       ctlr->calibrated = true;
+               }
+       }
+
+       hbdev->mtd = do_map_probe("cfi_probe", map);
+       if (!hbdev->mtd) {
+               dev_err(dev, "probing of hyperbus device failed\n");
+               return -ENODEV;
+       }
+
+       hbdev->mtd->dev.parent = dev;
+       mtd_set_of_node(hbdev->mtd, np);
+
+       ret = mtd_device_register(hbdev->mtd, NULL, 0);
+       if (ret) {
+               dev_err(dev, "failed to register mtd device\n");
+               map_destroy(hbdev->mtd);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(hyperbus_register_device);
+
+int hyperbus_unregister_device(struct hyperbus_device *hbdev)
+{
+       int ret = 0;
+
+       if (hbdev && hbdev->mtd) {
+               ret = mtd_device_unregister(hbdev->mtd);
+               map_destroy(hbdev->mtd);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
+
+MODULE_DESCRIPTION("HyperBus Framework");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 7324ff8..170a722 100644 (file)
@@ -437,7 +437,8 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
        return err;
 }
 
-static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
+                        bool is_lock)
 {
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;
@@ -456,7 +457,10 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                else
                        size = len;
 
-               err = mtd_lock(subdev, ofs, size);
+               if (is_lock)
+                       err = mtd_lock(subdev, ofs, size);
+               else
+                       err = mtd_unlock(subdev, ofs, size);
                if (err)
                        break;
 
@@ -471,35 +475,33 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
        return err;
 }
 
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+       return concat_xxlock(mtd, ofs, len, true);
+}
+
 static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
+       return concat_xxlock(mtd, ofs, len, false);
+}
+
+static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
        struct mtd_concat *concat = CONCAT(mtd);
-       int i, err = 0;
+       int i, err = -EINVAL;
 
        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
-               uint64_t size;
 
                if (ofs >= subdev->size) {
-                       size = 0;
                        ofs -= subdev->size;
                        continue;
                }
-               if (ofs + len > subdev->size)
-                       size = subdev->size - ofs;
-               else
-                       size = len;
-
-               err = mtd_unlock(subdev, ofs, size);
-               if (err)
-                       break;
 
-               len -= size;
-               if (len == 0)
+               if (ofs + len > subdev->size)
                        break;
 
-               err = -EINVAL;
-               ofs = 0;
+               return mtd_is_locked(subdev, ofs, len);
        }
 
        return err;
@@ -704,6 +706,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],       /* subdevices to c
        concat->mtd._sync = concat_sync;
        concat->mtd._lock = concat_lock;
        concat->mtd._unlock = concat_unlock;
+       concat->mtd._is_locked = concat_is_locked;
        concat->mtd._suspend = concat_suspend;
        concat->mtd._resume = concat_resume;
 
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 453242d..408615f 100644 (file)
@@ -1124,6 +1124,9 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
                return -EROFS;
        if (!len)
                return 0;
+       if (!mtd->oops_panic_write)
+               mtd->oops_panic_write = true;
+
        return mtd->_panic_write(mtd, to, len, retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_panic_write);
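
mtd_panic_write() now latches mtd->oops_panic_write before calling the
driver's ->_panic_write(), so controller drivers can detect that a panic
write is in flight and fall back to polled, interrupt-free I/O (the brcmnand
changes below do exactly this). A rough sketch with hypothetical helper
names, not code from this series:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/rawnand.h>

    static int example_poll_ready(struct nand_chip *chip);  /* hypothetical */
    static int example_wait_irq(struct nand_chip *chip);    /* hypothetical */

    static int example_wait_ready(struct nand_chip *chip)
    {
            struct mtd_info *mtd = nand_to_mtd(chip);

            /* set by mtd_panic_write() before ->_panic_write() runs */
            if (mtd->oops_panic_write)
                    return example_poll_ready(chip); /* busy-wait, IRQs may be off */

            return example_wait_irq(chip); /* normal interrupt-driven wait */
    }
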
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index d759c02..a1f8fe1 100644 (file)
@@ -3257,6 +3257,8 @@ static void onenand_check_features(struct mtd_info *mtd)
 
        /* Lock scheme */
        switch (density) {
+       case ONENAND_DEVICE_DENSITY_8Gb:
+               this->options |= ONENAND_HAS_NOP_1;
        case ONENAND_DEVICE_DENSITY_4Gb:
                if (ONENAND_IS_DDP(this))
                        this->options |= ONENAND_HAS_2PLANE;
@@ -3277,12 +3279,15 @@ static void onenand_check_features(struct mtd_info *mtd)
                        if ((this->version_id & 0xf) == 0xe)
                                this->options |= ONENAND_HAS_NOP_1;
                }
+               this->options |= ONENAND_HAS_UNLOCK_ALL;
+               break;
 
        case ONENAND_DEVICE_DENSITY_2Gb:
                /* 2Gb DDP does not have 2 plane */
                if (!ONENAND_IS_DDP(this))
                        this->options |= ONENAND_HAS_2PLANE;
                this->options |= ONENAND_HAS_UNLOCK_ALL;
+               break;
 
        case ONENAND_DEVICE_DENSITY_1Gb:
                /* A-Die has all block unlock */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 8735277..33310b8 100644 (file)
@@ -84,6 +84,12 @@ struct brcm_nand_dma_desc {
 #define FLASH_DMA_ECC_ERROR    (1 << 8)
 #define FLASH_DMA_CORR_ERROR   (1 << 9)
 
+/* Bitfields for DMA_MODE */
+#define FLASH_DMA_MODE_STOP_ON_ERROR   BIT(1) /* stop in Uncorr ECC error */
+#define FLASH_DMA_MODE_MODE            BIT(0) /* link list */
+#define FLASH_DMA_MODE_MASK            (FLASH_DMA_MODE_STOP_ON_ERROR | \
+                                               FLASH_DMA_MODE_MODE)
+
 /* 512B flash cache in the NAND controller HW */
 #define FC_SHIFT               9U
 #define FC_BYTES               512U
@@ -96,6 +102,51 @@ struct brcm_nand_dma_desc {
 #define NAND_CTRL_RDY                  (INTFC_CTLR_READY | INTFC_FLASH_READY)
 #define NAND_POLL_STATUS_TIMEOUT_MS    100
 
+/* flash_dma registers */
+enum flash_dma_reg {
+       FLASH_DMA_REVISION = 0,
+       FLASH_DMA_FIRST_DESC,
+       FLASH_DMA_FIRST_DESC_EXT,
+       FLASH_DMA_CTRL,
+       FLASH_DMA_MODE,
+       FLASH_DMA_STATUS,
+       FLASH_DMA_INTERRUPT_DESC,
+       FLASH_DMA_INTERRUPT_DESC_EXT,
+       FLASH_DMA_ERROR_STATUS,
+       FLASH_DMA_CURRENT_DESC,
+       FLASH_DMA_CURRENT_DESC_EXT,
+};
+
+/* flash_dma registers v1*/
+static const u16 flash_dma_regs_v1[] = {
+       [FLASH_DMA_REVISION]            = 0x00,
+       [FLASH_DMA_FIRST_DESC]          = 0x04,
+       [FLASH_DMA_FIRST_DESC_EXT]      = 0x08,
+       [FLASH_DMA_CTRL]                = 0x0c,
+       [FLASH_DMA_MODE]                = 0x10,
+       [FLASH_DMA_STATUS]              = 0x14,
+       [FLASH_DMA_INTERRUPT_DESC]      = 0x18,
+       [FLASH_DMA_INTERRUPT_DESC_EXT]  = 0x1c,
+       [FLASH_DMA_ERROR_STATUS]        = 0x20,
+       [FLASH_DMA_CURRENT_DESC]        = 0x24,
+       [FLASH_DMA_CURRENT_DESC_EXT]    = 0x28,
+};
+
+/* flash_dma registers v4 */
+static const u16 flash_dma_regs_v4[] = {
+       [FLASH_DMA_REVISION]            = 0x00,
+       [FLASH_DMA_FIRST_DESC]          = 0x08,
+       [FLASH_DMA_FIRST_DESC_EXT]      = 0x0c,
+       [FLASH_DMA_CTRL]                = 0x10,
+       [FLASH_DMA_MODE]                = 0x14,
+       [FLASH_DMA_STATUS]              = 0x18,
+       [FLASH_DMA_INTERRUPT_DESC]      = 0x20,
+       [FLASH_DMA_INTERRUPT_DESC_EXT]  = 0x24,
+       [FLASH_DMA_ERROR_STATUS]        = 0x28,
+       [FLASH_DMA_CURRENT_DESC]        = 0x30,
+       [FLASH_DMA_CURRENT_DESC_EXT]    = 0x34,
+};
+
 /* Controller feature flags */
 enum {
        BRCMNAND_HAS_1K_SECTORS                 = BIT(0),
@@ -128,6 +179,8 @@ struct brcmnand_controller {
        /* List of NAND hosts (one for each chip-select) */
        struct list_head host_list;
 
+       /* flash_dma reg */
+       const u16               *flash_dma_offsets;
        struct brcm_nand_dma_desc *dma_desc;
        dma_addr_t              dma_pa;
 
@@ -151,6 +204,7 @@ struct brcmnand_controller {
        u32                     nand_cs_nand_xor;
        u32                     corr_stat_threshold;
        u32                     flash_dma_mode;
+       bool                    pio_poll_mode;
 };
 
 struct brcmnand_cfg {
@@ -462,7 +516,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
        /* Register offsets */
        if (ctrl->nand_version >= 0x0702)
                ctrl->reg_offsets = brcmnand_regs_v72;
-       else if (ctrl->nand_version >= 0x0701)
+       else if (ctrl->nand_version == 0x0701)
                ctrl->reg_offsets = brcmnand_regs_v71;
        else if (ctrl->nand_version >= 0x0600)
                ctrl->reg_offsets = brcmnand_regs_v60;
@@ -507,7 +561,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
        }
 
        /* Maximum spare area sector size (per 512B) */
-       if (ctrl->nand_version >= 0x0702)
+       if (ctrl->nand_version == 0x0702)
                ctrl->max_oob = 128;
        else if (ctrl->nand_version >= 0x0600)
                ctrl->max_oob = 64;
@@ -538,6 +592,15 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
        return 0;
 }
 
+static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
+{
+       /* flash_dma register offsets */
+       if (ctrl->nand_version >= 0x0703)
+               ctrl->flash_dma_offsets = flash_dma_regs_v4;
+       else
+               ctrl->flash_dma_offsets = flash_dma_regs_v1;
+}
+
 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
                enum brcmnand_reg reg)
 {
@@ -580,6 +643,54 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
        __raw_writel(val, ctrl->nand_fc + word * 4);
 }
 
+static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
+{
+
+       /* Clear error addresses */
+       brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+       brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+       brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+       brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
+}
+
+static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
+{
+       u64 err_addr;
+
+       err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
+       err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+                                            BRCMNAND_UNCORR_EXT_ADDR)
+                                            & 0xffff) << 32);
+
+       return err_addr;
+}
+
+static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
+{
+       u64 err_addr;
+
+       err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
+       err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+                                            BRCMNAND_CORR_EXT_ADDR)
+                                            & 0xffff) << 32);
+
+       return err_addr;
+}
+
+static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
+{
+       struct nand_chip *chip =  mtd_to_nand(mtd);
+       struct brcmnand_host *host = nand_get_controller_data(chip);
+       struct brcmnand_controller *ctrl = host->ctrl;
+
+       brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+                          (host->cs << 16) | ((addr >> 32) & 0xffff));
+       (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+       brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+                          lower_32_bits(addr));
+       (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+}
+
 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
                                     enum brcmnand_cs_reg reg)
 {
@@ -612,7 +723,7 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
        enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
        int cs = host->cs;
 
-       if (ctrl->nand_version >= 0x0702)
+       if (ctrl->nand_version == 0x0702)
                bits = 7;
        else if (ctrl->nand_version >= 0x0600)
                bits = 6;
@@ -666,7 +777,7 @@ enum {
 
 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
 {
-       if (ctrl->nand_version >= 0x0702)
+       if (ctrl->nand_version == 0x0702)
                return GENMASK(7, 0);
        else if (ctrl->nand_version >= 0x0600)
                return GENMASK(6, 0);
@@ -796,39 +907,44 @@ static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
  * Flash DMA
  ***********************************************************************/
 
-enum flash_dma_reg {
-       FLASH_DMA_REVISION              = 0x00,
-       FLASH_DMA_FIRST_DESC            = 0x04,
-       FLASH_DMA_FIRST_DESC_EXT        = 0x08,
-       FLASH_DMA_CTRL                  = 0x0c,
-       FLASH_DMA_MODE                  = 0x10,
-       FLASH_DMA_STATUS                = 0x14,
-       FLASH_DMA_INTERRUPT_DESC        = 0x18,
-       FLASH_DMA_INTERRUPT_DESC_EXT    = 0x1c,
-       FLASH_DMA_ERROR_STATUS          = 0x20,
-       FLASH_DMA_CURRENT_DESC          = 0x24,
-       FLASH_DMA_CURRENT_DESC_EXT      = 0x28,
-};
-
 static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
 {
        return ctrl->flash_dma_base;
 }
 
+static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
+{
+       if (ctrl->pio_poll_mode)
+               return;
+
+       if (has_flash_dma(ctrl)) {
+               ctrl->flash_dma_base = 0;
+               disable_irq(ctrl->dma_irq);
+       }
+
+       disable_irq(ctrl->irq);
+       ctrl->pio_poll_mode = true;
+}
+
 static inline bool flash_dma_buf_ok(const void *buf)
 {
        return buf && !is_vmalloc_addr(buf) &&
                likely(IS_ALIGNED((uintptr_t)buf, 4));
 }
 
-static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
-                                   u32 val)
+static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
+                                   enum flash_dma_reg dma_reg, u32 val)
 {
+       u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
        brcmnand_writel(val, ctrl->flash_dma_base + offs);
 }
 
-static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
+static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
+                                 enum flash_dma_reg dma_reg)
 {
+       u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
        return brcmnand_readl(ctrl->flash_dma_base + offs);
 }
 
@@ -931,7 +1047,7 @@ static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
        if (section >= sectors)
                return -ERANGE;
 
-       oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes;
+       oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
        oobregion->length = chip->ecc.bytes;
 
        return 0;
@@ -1205,9 +1321,12 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
 {
        struct brcmnand_controller *ctrl = host->ctrl;
        int ret;
+       u64 cmd_addr;
+
+       cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+       dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
 
-       dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
-               brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
        BUG_ON(ctrl->cmd_pending != 0);
        ctrl->cmd_pending = cmd;
 
@@ -1229,15 +1348,42 @@ static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
        /* intentionally left blank */
 }
 
+static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
+{
+       struct brcmnand_host *host = nand_get_controller_data(chip);
+       struct brcmnand_controller *ctrl = host->ctrl;
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       bool err = false;
+       int sts;
+
+       if (mtd->oops_panic_write) {
+               /* switch to interrupt polling and PIO mode */
+               disable_ctrl_irqs(ctrl);
+               sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
+                                              NAND_CTRL_RDY, 0);
+               err = (sts < 0) ? true : false;
+       } else {
+               unsigned long timeo = msecs_to_jiffies(
+                                               NAND_POLL_STATUS_TIMEOUT_MS);
+               /* wait for completion interrupt */
+               sts = wait_for_completion_timeout(&ctrl->done, timeo);
+               err = (sts <= 0) ? true : false;
+       }
+
+       return err;
+}
+
 static int brcmnand_waitfunc(struct nand_chip *chip)
 {
        struct brcmnand_host *host = nand_get_controller_data(chip);
        struct brcmnand_controller *ctrl = host->ctrl;
-       unsigned long timeo = msecs_to_jiffies(100);
+       bool err = false;
 
        dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
-       if (ctrl->cmd_pending &&
-                       wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
+       if (ctrl->cmd_pending)
+               err = brcmstb_nand_wait_for_completion(chip);
+
+       if (err) {
                u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
                                        >> brcmnand_cmd_shift(ctrl);
 
@@ -1366,12 +1512,7 @@ static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
        if (!native_cmd)
                return;
 
-       brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-               (host->cs << 16) | ((addr >> 32) & 0xffff));
-       (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
-       brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
-       (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
-
+       brcmnand_set_cmd_addr(mtd, addr);
        brcmnand_send_cmd(host, native_cmd);
        brcmnand_waitfunc(chip);
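
Several hunks in this patch collapse the open-coded EXT_ADDRESS/CMD_ADDRESS programming into a single brcmnand_set_cmd_addr() call. Reconstructed from the lines removed just above, the helper presumably amounts to:

	static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
	{
		struct nand_chip *chip = mtd_to_nand(mtd);
		struct brcmnand_host *host = nand_get_controller_data(chip);
		struct brcmnand_controller *ctrl = host->ctrl;

		/* chip select in the upper half-word, address bits 47:32 in the lower */
		brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
				   (host->cs << 16) | ((addr >> 32) & 0xffff));
		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);

		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
	}

The read-backs after each write preserve the ordering the removed code relied on.
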
 
@@ -1589,20 +1730,10 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
        struct brcmnand_controller *ctrl = host->ctrl;
        int i, j, ret = 0;
 
-       /* Clear error addresses */
-       brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
-       brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
-       brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
-       brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
-
-       brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-                       (host->cs << 16) | ((addr >> 32) & 0xffff));
-       (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+       brcmnand_clear_ecc_addr(ctrl);
 
        for (i = 0; i < trans; i++, addr += FC_BYTES) {
-               brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
-                                  lower_32_bits(addr));
-               (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+               brcmnand_set_cmd_addr(mtd, addr);
                /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
                brcmnand_send_cmd(host, CMD_PAGE_READ);
                brcmnand_waitfunc(chip);
@@ -1622,21 +1753,15 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
                                        host->hwcfg.sector_size_1k);
 
                if (!ret) {
-                       *err_addr = brcmnand_read_reg(ctrl,
-                                       BRCMNAND_UNCORR_ADDR) |
-                               ((u64)(brcmnand_read_reg(ctrl,
-                                               BRCMNAND_UNCORR_EXT_ADDR)
-                                       & 0xffff) << 32);
+                       *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+
                        if (*err_addr)
                                ret = -EBADMSG;
                }
 
                if (!ret) {
-                       *err_addr = brcmnand_read_reg(ctrl,
-                                       BRCMNAND_CORR_ADDR) |
-                               ((u64)(brcmnand_read_reg(ctrl,
-                                               BRCMNAND_CORR_EXT_ADDR)
-                                       & 0xffff) << 32);
+                       *err_addr = brcmnand_get_correcc_addr(ctrl);
+
                        if (*err_addr)
                                ret = -EUCLEAN;
                }
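
The 64-bit error-address assembly is factored out the same way: the expressions removed above are what brcmnand_get_uncorrecc_addr() and brcmnand_get_correcc_addr() are expected to wrap, along the lines of:

	static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
	{
		return brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR) |
		       ((u64)(brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR) &
			      0xffff) << 32);
	}

with the CORR_ADDR/CORR_EXT_ADDR pair used for the correctable case.
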
@@ -1703,7 +1828,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
        dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
 
 try_dmaread:
-       brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
+       brcmnand_clear_ecc_addr(ctrl);
 
        if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
                err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
@@ -1850,15 +1975,9 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
                goto out;
        }
 
-       brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-                       (host->cs << 16) | ((addr >> 32) & 0xffff));
-       (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
-
        for (i = 0; i < trans; i++, addr += FC_BYTES) {
                /* full address MUST be set before populating FC */
-               brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
-                                  lower_32_bits(addr));
-               (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+               brcmnand_set_cmd_addr(mtd, addr);
 
                if (buf) {
                        brcmnand_soc_data_bus_prepare(ctrl->soc, false);
@@ -2136,6 +2255,17 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
                return -EINVAL;
        }
 
+       if (chip->ecc.mode != NAND_ECC_NONE &&
+           (!chip->ecc.size || !chip->ecc.strength)) {
+               if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
+                       /* use detected ECC parameters */
+                       chip->ecc.size = chip->base.eccreq.step_size;
+                       chip->ecc.strength = chip->base.eccreq.strength;
+                       dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
+                               chip->ecc.size, chip->ecc.strength);
+               }
+       }
+
        switch (chip->ecc.size) {
        case 512:
                if (chip->ecc.algo == NAND_ECC_HAMMING)
@@ -2395,6 +2525,7 @@ static const struct of_device_id brcmnand_of_match[] = {
        { .compatible = "brcm,brcmnand-v7.0" },
        { .compatible = "brcm,brcmnand-v7.1" },
        { .compatible = "brcm,brcmnand-v7.2" },
+       { .compatible = "brcm,brcmnand-v7.3" },
        {},
 };
 MODULE_DEVICE_TABLE(of, brcmnand_of_match);
@@ -2481,7 +2612,11 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
                        goto err;
                }
 
-               flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
+               /* initialize the DMA register offsets for this controller revision */
+               brcmnand_flash_dma_revision_init(ctrl);
+
+               /* linked-list and stop on error */
+               flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
                flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
 
                /* Allocate descriptor(s) */
index 6c7ca41..a6964fe 100644
@@ -613,28 +613,20 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
        for (op_id = 0; op_id < op->ninstrs; op_id++) {
                instr = &op->instrs[op_id];
 
+               nand_op_trace("  ", instr);
+
                switch (instr->type) {
                case NAND_OP_CMD_INSTR:
-                       pr_debug("  ->CMD      [0x%02x]\n",
-                                instr->ctx.cmd.opcode);
-
                        writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
                        break;
 
                case NAND_OP_ADDR_INSTR:
-                       pr_debug("  ->ADDR     [%d cyc]",
-                                instr->ctx.addr.naddrs);
-
                        for (i = 0; i < instr->ctx.addr.naddrs; i++)
                                writeb_relaxed(instr->ctx.addr.addrs[i],
                                               host->addr_va);
                        break;
 
                case NAND_OP_DATA_IN_INSTR:
-                       pr_debug("  ->DATA_IN  [%d B%s]\n", instr->ctx.data.len,
-                                instr->ctx.data.force_8bit ?
-                                ", force 8-bit" : "");
-
                        if (host->mode == USE_DMA_ACCESS)
                                fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
                                                  instr->ctx.data.len);
@@ -644,10 +636,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
                        break;
 
                case NAND_OP_DATA_OUT_INSTR:
-                       pr_debug("  ->DATA_OUT [%d B%s]\n", instr->ctx.data.len,
-                                instr->ctx.data.force_8bit ?
-                                ", force 8-bit" : "");
-
                        if (host->mode == USE_DMA_ACCESS)
                                fsmc_write_buf_dma(host,
                                                   instr->ctx.data.buf.out,
@@ -658,9 +646,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
                        break;
 
                case NAND_OP_WAITRDY_INSTR:
-                       pr_debug("  ->WAITRDY  [max %d ms]\n",
-                                instr->ctx.waitrdy.timeout_ms);
-
                        ret = nand_soft_waitrdy(chip,
                                                instr->ctx.waitrdy.timeout_ms);
                        break;
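
The change above drops four nearly identical pr_debug() calls in favour of the core's nand_op_trace() helper (the "export NAND operation tracer" patch from this series), invoked once per instruction before the switch. For reference, a tracer equivalent to what the removed lines printed would look roughly like this (a sketch, not the core implementation; it also folds DATA_IN and DATA_OUT into one case):

	static void nand_op_trace_sketch(const char *prefix,
					 const struct nand_op_instr *instr)
	{
		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("%s->CMD      [0x%02x]\n", prefix,
				 instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			pr_debug("%s->ADDR     [%d cyc]\n", prefix,
				 instr->ctx.addr.naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%s->DATA     [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ? ", force 8-bit" : "");
			break;
		case NAND_OP_WAITRDY_INSTR:
			pr_debug("%s->WAITRDY  [max %d ms]\n", prefix,
				 instr->ctx.waitrdy.timeout_ms);
			break;
		}
	}
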
index 30ceee9..9bd81a3 100644
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
 gpmi_nand-objs += gpmi-nand.o
-gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
deleted file mode 100644
index a8b26d2..0000000
+++ /dev/null
@@ -1,934 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Freescale GPMI NAND Flash Driver
- *
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
- * Copyright (C) 2008 Embedded Alley Solutions, Inc.
- */
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-
-#include "gpmi-nand.h"
-#include "gpmi-regs.h"
-#include "bch-regs.h"
-
-/* Converts time to clock cycles */
-#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
-
-#define MXS_SET_ADDR           0x4
-#define MXS_CLR_ADDR           0x8
-/*
- * Clear the bit and poll it cleared.  This is usually called with
- * a reset address and mask being either SFTRST(bit 31) or CLKGATE
- * (bit 30).
- */
-static int clear_poll_bit(void __iomem *addr, u32 mask)
-{
-       int timeout = 0x400;
-
-       /* clear the bit */
-       writel(mask, addr + MXS_CLR_ADDR);
-
-       /*
-        * SFTRST needs 3 GPMI clocks to settle, the reference manual
-        * recommends to wait 1us.
-        */
-       udelay(1);
-
-       /* poll the bit becoming clear */
-       while ((readl(addr) & mask) && --timeout)
-               /* nothing */;
-
-       return !timeout;
-}
-
-#define MODULE_CLKGATE         (1 << 30)
-#define MODULE_SFTRST          (1 << 31)
-/*
- * The current mxs_reset_block() will do two things:
- *  [1] enable the module.
- *  [2] reset the module.
- *
- * In most of the cases, it's ok.
- * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
- * If you try to soft reset the BCH block, it becomes unusable until
- * the next hard reset. This case occurs in the NAND boot mode. When the board
- * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
- * So If the driver tries to reset the BCH again, the BCH will not work anymore.
- * You will see a DMA timeout in this case. The bug has been fixed
- * in the following chips, such as MX28.
- *
- * To avoid this bug, just add a new parameter `just_enable` for
- * the mxs_reset_block(), and rewrite it here.
- */
-static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
-{
-       int ret;
-       int timeout = 0x400;
-
-       /* clear and poll SFTRST */
-       ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
-       if (unlikely(ret))
-               goto error;
-
-       /* clear CLKGATE */
-       writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
-
-       if (!just_enable) {
-               /* set SFTRST to reset the block */
-               writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
-               udelay(1);
-
-               /* poll CLKGATE becoming set */
-               while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
-                       /* nothing */;
-               if (unlikely(!timeout))
-                       goto error;
-       }
-
-       /* clear and poll SFTRST */
-       ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
-       if (unlikely(ret))
-               goto error;
-
-       /* clear and poll CLKGATE */
-       ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
-       if (unlikely(ret))
-               goto error;
-
-       return 0;
-
-error:
-       pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
-       return -ETIMEDOUT;
-}
-
-static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
-{
-       struct clk *clk;
-       int ret;
-       int i;
-
-       for (i = 0; i < GPMI_CLK_MAX; i++) {
-               clk = this->resources.clock[i];
-               if (!clk)
-                       break;
-
-               if (v) {
-                       ret = clk_prepare_enable(clk);
-                       if (ret)
-                               goto err_clk;
-               } else {
-                       clk_disable_unprepare(clk);
-               }
-       }
-       return 0;
-
-err_clk:
-       for (; i > 0; i--)
-               clk_disable_unprepare(this->resources.clock[i - 1]);
-       return ret;
-}
-
-int gpmi_enable_clk(struct gpmi_nand_data *this)
-{
-       return __gpmi_enable_clk(this, true);
-}
-
-int gpmi_disable_clk(struct gpmi_nand_data *this)
-{
-       return __gpmi_enable_clk(this, false);
-}
-
-int gpmi_init(struct gpmi_nand_data *this)
-{
-       struct resources *r = &this->resources;
-       int ret;
-
-       ret = gpmi_enable_clk(this);
-       if (ret)
-               return ret;
-       ret = gpmi_reset_block(r->gpmi_regs, false);
-       if (ret)
-               goto err_out;
-
-       /*
-        * Reset BCH here, too. We got failures otherwise :(
-        * See later BCH reset for explanation of MX23 and MX28 handling
-        */
-       ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
-       if (ret)
-               goto err_out;
-
-       /* Choose NAND mode. */
-       writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
-
-       /* Set the IRQ polarity. */
-       writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
-                               r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-       /* Disable Write-Protection. */
-       writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-       /* Select BCH ECC. */
-       writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-       /*
-        * Decouple the chip select from dma channel. We use dma0 for all
-        * the chips.
-        */
-       writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-       gpmi_disable_clk(this);
-       return 0;
-err_out:
-       gpmi_disable_clk(this);
-       return ret;
-}
-
-/* This function is very useful. It is called only when the bug occur. */
-void gpmi_dump_info(struct gpmi_nand_data *this)
-{
-       struct resources *r = &this->resources;
-       struct bch_geometry *geo = &this->bch_geometry;
-       u32 reg;
-       int i;
-
-       dev_err(this->dev, "Show GPMI registers :\n");
-       for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
-               reg = readl(r->gpmi_regs + i * 0x10);
-               dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
-       }
-
-       /* start to print out the BCH info */
-       dev_err(this->dev, "Show BCH registers :\n");
-       for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
-               reg = readl(r->bch_regs + i * 0x10);
-               dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
-       }
-       dev_err(this->dev, "BCH Geometry :\n"
-               "GF length              : %u\n"
-               "ECC Strength           : %u\n"
-               "Page Size in Bytes     : %u\n"
-               "Metadata Size in Bytes : %u\n"
-               "ECC Chunk Size in Bytes: %u\n"
-               "ECC Chunk Count        : %u\n"
-               "Payload Size in Bytes  : %u\n"
-               "Auxiliary Size in Bytes: %u\n"
-               "Auxiliary Status Offset: %u\n"
-               "Block Mark Byte Offset : %u\n"
-               "Block Mark Bit Offset  : %u\n",
-               geo->gf_len,
-               geo->ecc_strength,
-               geo->page_size,
-               geo->metadata_size,
-               geo->ecc_chunk_size,
-               geo->ecc_chunk_count,
-               geo->payload_size,
-               geo->auxiliary_size,
-               geo->auxiliary_status_offset,
-               geo->block_mark_byte_offset,
-               geo->block_mark_bit_offset);
-}
-
-/* Configures the geometry for BCH.  */
-int bch_set_geometry(struct gpmi_nand_data *this)
-{
-       struct resources *r = &this->resources;
-       struct bch_geometry *bch_geo = &this->bch_geometry;
-       unsigned int block_count;
-       unsigned int block_size;
-       unsigned int metadata_size;
-       unsigned int ecc_strength;
-       unsigned int page_size;
-       unsigned int gf_len;
-       int ret;
-
-       ret = common_nfc_set_geometry(this);
-       if (ret)
-               return ret;
-
-       block_count   = bch_geo->ecc_chunk_count - 1;
-       block_size    = bch_geo->ecc_chunk_size;
-       metadata_size = bch_geo->metadata_size;
-       ecc_strength  = bch_geo->ecc_strength >> 1;
-       page_size     = bch_geo->page_size;
-       gf_len        = bch_geo->gf_len;
-
-       ret = gpmi_enable_clk(this);
-       if (ret)
-               return ret;
-
-       /*
-       * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
-       * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
-       * and MX28.
-       */
-       ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
-       if (ret)
-               goto err_out;
-
-       /* Configure layout 0. */
-       writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
-                       | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
-                       | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
-                       | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
-                       | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
-                       r->bch_regs + HW_BCH_FLASH0LAYOUT0);
-
-       writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
-                       | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
-                       | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
-                       | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
-                       r->bch_regs + HW_BCH_FLASH0LAYOUT1);
-
-       /* Set *all* chip selects to use layout 0. */
-       writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
-
-       /* Enable interrupts. */
-       writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
-                               r->bch_regs + HW_BCH_CTRL_SET);
-
-       gpmi_disable_clk(this);
-       return 0;
-err_out:
-       gpmi_disable_clk(this);
-       return ret;
-}
-
-/*
- * <1> Firstly, we should know what's the GPMI-clock means.
- *     The GPMI-clock is the internal clock in the gpmi nand controller.
- *     If you set 100MHz to gpmi nand controller, the GPMI-clock's period
- *     is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
- *
- * <2> Secondly, we should know what's the frequency on the nand chip pins.
- *     The frequency on the nand chip pins is derived from the GPMI-clock.
- *     We can get it from the following equation:
- *
- *         F = G / (DS + DH)
- *
- *         F  : the frequency on the nand chip pins.
- *         G  : the GPMI clock, such as 100MHz.
- *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
- *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
- *
- * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
- *     the nand EDO(extended Data Out) timing could be applied.
- *     The GPMI implements a feedback read strobe to sample the read data.
- *     The feedback read strobe can be delayed to support the nand EDO timing
- *     where the read strobe may deasserts before the read data is valid, and
- *     read data is valid for some time after read strobe.
- *
- *     The following figure illustrates some aspects of a NAND Flash read:
- *
- *                   |<---tREA---->|
- *                   |             |
- *                   |         |   |
- *                   |<--tRP-->|   |
- *                   |         |   |
- *                  __          ___|__________________________________
- *     RDN            \________/   |
- *                                 |
- *                                 /---------\
- *     Read Data    --------------<           >---------
- *                                 \---------/
- *                                |     |
- *                                |<-D->|
- *     FeedbackRDN  ________             ____________
- *                          \___________/
- *
- *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
- *
- *
- * <4> Now, we begin to describe how to compute the right RDN_DELAY.
- *
- *  4.1) From the aspect of the nand chip pins:
- *        Delay = (tREA + C - tRP)               {1}
- *
- *        tREA : the maximum read access time.
- *        C    : a constant to adjust the delay. default is 4000ps.
- *        tRP  : the read pulse width, which is exactly:
- *                   tRP = (GPMI-clock-period) * DATA_SETUP
- *
- *  4.2) From the aspect of the GPMI nand controller:
- *         Delay = RDN_DELAY * 0.125 * RP        {2}
- *
- *         RP   : the DLL reference period.
- *            if (GPMI-clock-period > DLL_THRETHOLD)
- *                   RP = GPMI-clock-period / 2;
- *            else
- *                   RP = GPMI-clock-period;
- *
- *            Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
- *            is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD
- *            is 16000ps, but in mx6q, we use 12000ps.
- *
- *  4.3) since {1} equals {2}, we get:
- *
- *                     (tREA + 4000 - tRP) * 8
- *         RDN_DELAY = -----------------------     {3}
- *                           RP
- */
-static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
-                                    const struct nand_sdr_timings *sdr)
-{
-       struct gpmi_nfc_hardware_timing *hw = &this->hw;
-       unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
-       unsigned int period_ps, reference_period_ps;
-       unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
-       unsigned int tRP_ps;
-       bool use_half_period;
-       int sample_delay_ps, sample_delay_factor;
-       u16 busy_timeout_cycles;
-       u8 wrn_dly_sel;
-
-       if (sdr->tRC_min >= 30000) {
-               /* ONFI non-EDO modes [0-3] */
-               hw->clk_rate = 22000000;
-               wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
-       } else if (sdr->tRC_min >= 25000) {
-               /* ONFI EDO mode 4 */
-               hw->clk_rate = 80000000;
-               wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
-       } else {
-               /* ONFI EDO mode 5 */
-               hw->clk_rate = 100000000;
-               wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
-       }
-
-       /* SDR core timings are given in picoseconds */
-       period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
-
-       addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
-       data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
-       data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
-       busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
-
-       hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
-                     BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
-                     BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
-       hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
-
-       /*
-        * Derive NFC ideal delay from {3}:
-        *
-        *                     (tREA + 4000 - tRP) * 8
-        *         RDN_DELAY = -----------------------
-        *                                RP
-        */
-       if (period_ps > dll_threshold_ps) {
-               use_half_period = true;
-               reference_period_ps = period_ps / 2;
-       } else {
-               use_half_period = false;
-               reference_period_ps = period_ps;
-       }
-
-       tRP_ps = data_setup_cycles * period_ps;
-       sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
-       if (sample_delay_ps > 0)
-               sample_delay_factor = sample_delay_ps / reference_period_ps;
-       else
-               sample_delay_factor = 0;
-
-       hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
-       if (sample_delay_factor)
-               hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
-                             BM_GPMI_CTRL1_DLL_ENABLE |
-                             (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
-}
-
-void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
-{
-       struct gpmi_nfc_hardware_timing *hw = &this->hw;
-       struct resources *r = &this->resources;
-       void __iomem *gpmi_regs = r->gpmi_regs;
-       unsigned int dll_wait_time_us;
-
-       clk_set_rate(r->clock[0], hw->clk_rate);
-
-       writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
-       writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
-
-       /*
-        * Clear several CTRL1 fields, DLL must be disabled when setting
-        * RDN_DELAY or HALF_PERIOD.
-        */
-       writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
-       writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
-
-       /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
-       dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
-       if (!dll_wait_time_us)
-               dll_wait_time_us = 1;
-
-       /* Wait for the DLL to settle. */
-       udelay(dll_wait_time_us);
-}
-
-int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
-                             const struct nand_data_interface *conf)
-{
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-       const struct nand_sdr_timings *sdr;
-
-       /* Retrieve required NAND timings */
-       sdr = nand_get_sdr_timings(conf);
-       if (IS_ERR(sdr))
-               return PTR_ERR(sdr);
-
-       /* Only MX6 GPMI controller can reach EDO timings */
-       if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
-               return -ENOTSUPP;
-
-       /* Stop here if this call was just a check */
-       if (chipnr < 0)
-               return 0;
-
-       /* Do the actual derivation of the controller timings */
-       gpmi_nfc_compute_timings(this, sdr);
-
-       this->hw.must_apply_timings = true;
-
-       return 0;
-}
-
-/* Clears a BCH interrupt. */
-void gpmi_clear_bch(struct gpmi_nand_data *this)
-{
-       struct resources *r = &this->resources;
-       writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
-}
-
-/* Returns the Ready/Busy status of the given chip. */
-int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
-{
-       struct resources *r = &this->resources;
-       uint32_t mask = 0;
-       uint32_t reg = 0;
-
-       if (GPMI_IS_MX23(this)) {
-               mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
-               reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
-       } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
-               /*
-                * In the imx6, all the ready/busy pins are bound
-                * together. So we only need to check chip 0.
-                */
-               if (GPMI_IS_MX6(this))
-                       chip = 0;
-
-               /* MX28 shares the same R/B register as MX6Q. */
-               mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
-               reg = readl(r->gpmi_regs + HW_GPMI_STAT);
-       } else
-               dev_err(this->dev, "unknown arch.\n");
-       return reg & mask;
-}
-
-int gpmi_send_command(struct gpmi_nand_data *this)
-{
-       struct dma_chan *channel = get_dma_chan(this);
-       struct dma_async_tx_descriptor *desc;
-       struct scatterlist *sgl;
-       int chip = this->current_chip;
-       int ret;
-       u32 pio[3];
-
-       /* [1] send out the PIO words */
-       pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
-               | BM_GPMI_CTRL0_WORD_LENGTH
-               | BF_GPMI_CTRL0_CS(chip, this)
-               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-               | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
-               | BM_GPMI_CTRL0_ADDRESS_INCREMENT
-               | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
-       pio[1] = pio[2] = 0;
-       desc = dmaengine_prep_slave_sg(channel,
-                                       (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-       if (!desc)
-               return -EINVAL;
-
-       /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
-       sgl = &this->cmd_sgl;
-
-       sg_init_one(sgl, this->cmd_buffer, this->command_length);
-       dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
-       desc = dmaengine_prep_slave_sg(channel,
-                               sgl, 1, DMA_MEM_TO_DEV,
-                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc)
-               return -EINVAL;
-
-       /* [3] submit the DMA */
-       ret = start_dma_without_bch_irq(this, desc);
-
-       dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
-
-       return ret;
-}
-
-int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len)
-{
-       struct dma_async_tx_descriptor *desc;
-       struct dma_chan *channel = get_dma_chan(this);
-       int chip = this->current_chip;
-       int ret;
-       uint32_t command_mode;
-       uint32_t address;
-       u32 pio[2];
-
-       /* [1] PIO */
-       command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
-       address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-
-       pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-               | BM_GPMI_CTRL0_WORD_LENGTH
-               | BF_GPMI_CTRL0_CS(chip, this)
-               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-               | BF_GPMI_CTRL0_ADDRESS(address)
-               | BF_GPMI_CTRL0_XFER_COUNT(len);
-       pio[1] = 0;
-       desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-       if (!desc)
-               return -EINVAL;
-
-       /* [2] send DMA request */
-       prepare_data_dma(this, buf, len, DMA_TO_DEVICE);
-       desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
-                                       1, DMA_MEM_TO_DEV,
-                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc)
-               return -EINVAL;
-
-       /* [3] submit the DMA */
-       ret = start_dma_without_bch_irq(this, desc);
-
-       dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
-
-       return ret;
-}
-
-int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len)
-{
-       struct dma_async_tx_descriptor *desc;
-       struct dma_chan *channel = get_dma_chan(this);
-       int chip = this->current_chip;
-       int ret;
-       u32 pio[2];
-       bool direct;
-
-       /* [1] : send PIO */
-       pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
-               | BM_GPMI_CTRL0_WORD_LENGTH
-               | BF_GPMI_CTRL0_CS(chip, this)
-               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-               | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
-               | BF_GPMI_CTRL0_XFER_COUNT(len);
-       pio[1] = 0;
-       desc = dmaengine_prep_slave_sg(channel,
-                                       (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-       if (!desc)
-               return -EINVAL;
-
-       /* [2] : send DMA request */
-       direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE);
-       desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
-                                       1, DMA_DEV_TO_MEM,
-                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc)
-               return -EINVAL;
-
-       /* [3] : submit the DMA */
-
-       ret = start_dma_without_bch_irq(this, desc);
-
-       dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
-       if (!direct)
-               memcpy(buf, this->data_buffer_dma, len);
-
-       return ret;
-}
-
-int gpmi_send_page(struct gpmi_nand_data *this,
-                       dma_addr_t payload, dma_addr_t auxiliary)
-{
-       struct bch_geometry *geo = &this->bch_geometry;
-       uint32_t command_mode;
-       uint32_t address;
-       uint32_t ecc_command;
-       uint32_t buffer_mask;
-       struct dma_async_tx_descriptor *desc;
-       struct dma_chan *channel = get_dma_chan(this);
-       int chip = this->current_chip;
-       u32 pio[6];
-
-       /* A DMA descriptor that does an ECC page read. */
-       command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
-       address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-       ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
-       buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
-                               BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
-
-       pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-               | BM_GPMI_CTRL0_WORD_LENGTH
-               | BF_GPMI_CTRL0_CS(chip, this)
-               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-               | BF_GPMI_CTRL0_ADDRESS(address)
-               | BF_GPMI_CTRL0_XFER_COUNT(0);
-       pio[1] = 0;
-       pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
-               | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
-               | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
-       pio[3] = geo->page_size;
-       pio[4] = payload;
-       pio[5] = auxiliary;
-
-       desc = dmaengine_prep_slave_sg(channel,
-                                       (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_TRANS_NONE,
-                                       DMA_CTRL_ACK);
-       if (!desc)
-               return -EINVAL;
-
-       return start_dma_with_bch_irq(this, desc);
-}
-
-int gpmi_read_page(struct gpmi_nand_data *this,
-                               dma_addr_t payload, dma_addr_t auxiliary)
-{
-       struct bch_geometry *geo = &this->bch_geometry;
-       uint32_t command_mode;
-       uint32_t address;
-       uint32_t ecc_command;
-       uint32_t buffer_mask;
-       struct dma_async_tx_descriptor *desc;
-       struct dma_chan *channel = get_dma_chan(this);
-       int chip = this->current_chip;
-       u32 pio[6];
-
-       /* [1] Wait for the chip to report ready. */
-       command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
-       address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-
-       pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-               | BM_GPMI_CTRL0_WORD_LENGTH
-               | BF_GPMI_CTRL0_CS(chip, this)
-               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-               | BF_GPMI_CTRL0_ADDRESS(address)
-               | BF_GPMI_CTRL0_XFER_COUNT(0);
-       pio[1] = 0;
-       desc = dmaengine_prep_slave_sg(channel,
-                               (struct scatterlist *)pio, 2,
-                               DMA_TRANS_NONE, 0);
-       if (!desc)
-               return -EINVAL;
-
-       /* [2] Enable the BCH block and read. */
-       command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
-       address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-       ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
-       buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
-                       | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
-
-       pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-               | BM_GPMI_CTRL0_WORD_LENGTH
-               | BF_GPMI_CTRL0_CS(chip, this)
-               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-               | BF_GPMI_CTRL0_ADDRESS(address)
-               | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
-
-       pio[1] = 0;
-       pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
-               | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
-               | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
-       pio[3] = geo->page_size;
-       pio[4] = payload;
-       pio[5] = auxiliary;
-       desc = dmaengine_prep_slave_sg(channel,
-                                       (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_TRANS_NONE,
-                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc)
-               return -EINVAL;
-
-       /* [3] Disable the BCH block */
-       command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
-       address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-
-       pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-               | BM_GPMI_CTRL0_WORD_LENGTH
-               | BF_GPMI_CTRL0_CS(chip, this)
-               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-               | BF_GPMI_CTRL0_ADDRESS(address)
-               | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
-       pio[1] = 0;
-       pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
-       desc = dmaengine_prep_slave_sg(channel,
-                               (struct scatterlist *)pio, 3,
-                               DMA_TRANS_NONE,
-                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc)
-               return -EINVAL;
-
-       /* [4] submit the DMA */
-       return start_dma_with_bch_irq(this, desc);
-}
-
-/**
- * gpmi_copy_bits - copy bits from one memory region to another
- * @dst: destination buffer
- * @dst_bit_off: bit offset we're starting to write at
- * @src: source buffer
- * @src_bit_off: bit offset we're starting to read from
- * @nbits: number of bits to copy
- *
- * This functions copies bits from one memory region to another, and is used by
- * the GPMI driver to copy ECC sections which are not guaranteed to be byte
- * aligned.
- *
- * src and dst should not overlap.
- *
- */
-void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
-                   const u8 *src, size_t src_bit_off,
-                   size_t nbits)
-{
-       size_t i;
-       size_t nbytes;
-       u32 src_buffer = 0;
-       size_t bits_in_src_buffer = 0;
-
-       if (!nbits)
-               return;
-
-       /*
-        * Move src and dst pointers to the closest byte pointer and store bit
-        * offsets within a byte.
-        */
-       src += src_bit_off / 8;
-       src_bit_off %= 8;
-
-       dst += dst_bit_off / 8;
-       dst_bit_off %= 8;
-
-       /*
-        * Initialize the src_buffer value with bits available in the first
-        * byte of data so that we end up with a byte aligned src pointer.
-        */
-       if (src_bit_off) {
-               src_buffer = src[0] >> src_bit_off;
-               if (nbits >= (8 - src_bit_off)) {
-                       bits_in_src_buffer += 8 - src_bit_off;
-               } else {
-                       src_buffer &= GENMASK(nbits - 1, 0);
-                       bits_in_src_buffer += nbits;
-               }
-               nbits -= bits_in_src_buffer;
-               src++;
-       }
-
-       /* Calculate the number of bytes that can be copied from src to dst. */
-       nbytes = nbits / 8;
-
-       /* Try to align dst to a byte boundary. */
-       if (dst_bit_off) {
-               if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
-                       src_buffer |= src[0] << bits_in_src_buffer;
-                       bits_in_src_buffer += 8;
-                       src++;
-                       nbytes--;
-               }
-
-               if (bits_in_src_buffer >= (8 - dst_bit_off)) {
-                       dst[0] &= GENMASK(dst_bit_off - 1, 0);
-                       dst[0] |= src_buffer << dst_bit_off;
-                       src_buffer >>= (8 - dst_bit_off);
-                       bits_in_src_buffer -= (8 - dst_bit_off);
-                       dst_bit_off = 0;
-                       dst++;
-                       if (bits_in_src_buffer > 7) {
-                               bits_in_src_buffer -= 8;
-                               dst[0] = src_buffer;
-                               dst++;
-                               src_buffer >>= 8;
-                       }
-               }
-       }
-
-       if (!bits_in_src_buffer && !dst_bit_off) {
-               /*
-                * Both src and dst pointers are byte aligned, thus we can
-                * just use the optimized memcpy function.
-                */
-               if (nbytes)
-                       memcpy(dst, src, nbytes);
-       } else {
-               /*
-                * src buffer is not byte aligned, hence we have to copy each
-                * src byte to the src_buffer variable before extracting a byte
-                * to store in dst.
-                */
-               for (i = 0; i < nbytes; i++) {
-                       src_buffer |= src[i] << bits_in_src_buffer;
-                       dst[i] = src_buffer;
-                       src_buffer >>= 8;
-               }
-       }
-       /* Update dst and src pointers */
-       dst += nbytes;
-       src += nbytes;
-
-       /*
-        * nbits is the number of remaining bits. It should not exceed 8 as
-        * we've already copied as much bytes as possible.
-        */
-       nbits %= 8;
-
-       /*
-        * If there's no more bits to copy to the destination and src buffer
-        * was already byte aligned, then we're done.
-        */
-       if (!nbits && !bits_in_src_buffer)
-               return;
-
-       /* Copy the remaining bits to src_buffer */
-       if (nbits)
-               src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
-                             bits_in_src_buffer;
-       bits_in_src_buffer += nbits;
-
-       /*
-        * In case there were not enough bits to get a byte aligned dst buffer
-        * prepare the src_buffer variable to match the dst organization (shift
-        * src_buffer by dst_bit_off and retrieve the least significant bits
-        * from dst).
-        */
-       if (dst_bit_off)
-               src_buffer = (src_buffer << dst_bit_off) |
-                            (*dst & GENMASK(dst_bit_off - 1, 0));
-       bits_in_src_buffer += dst_bit_off;
-
-       /*
-        * Keep most significant bits from dst if we end up with an unaligned
-        * number of bits.
-        */
-       nbytes = bits_in_src_buffer / 8;
-       if (bits_in_src_buffer % 8) {
-               src_buffer |= (dst[nbytes] &
-                              GENMASK(7, bits_in_src_buffer % 8)) <<
-                             (nbytes * 8);
-               nbytes++;
-       }
-
-       /* Copy the remaining bytes to dst */
-       for (i = 0; i < nbytes; i++) {
-               dst[i] = src_buffer;
-               src_buffer >>= 8;
-       }
-}
index 40df20d..334fe31 100644
@@ -6,6 +6,7 @@
  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
  */
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/sched/task_stack.h>
 #include <linux/interrupt.h>
 #include <linux/mtd/partitions.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma/mxs-dma.h>
 #include "gpmi-nand.h"
+#include "gpmi-regs.h"
 #include "bch-regs.h"
 
 /* Resource names for the GPMI NAND driver. */
 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
 
-/* add our owner bbt descriptor */
-static uint8_t scan_ff_pattern[] = { 0xff };
-static struct nand_bbt_descr gpmi_bbt_descr = {
-       .options        = 0,
-       .offs           = 0,
-       .len            = 1,
-       .pattern        = scan_ff_pattern
-};
+/* Converts time to clock cycles */
+#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
 
+#define MXS_SET_ADDR           0x4
+#define MXS_CLR_ADDR           0x8
 /*
- * We may change the layout if we can get the ECC info from the datasheet,
- * else we will use all the (page + OOB).
+ * Clear the bit and poll until it reads back as cleared.  This is usually
+ * called with a reset address and a mask of either SFTRST (bit 31) or
+ * CLKGATE (bit 30).
  */
-static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
-                             struct mtd_oob_region *oobregion)
+static int clear_poll_bit(void __iomem *addr, u32 mask)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-       struct bch_geometry *geo = &this->bch_geometry;
+       int timeout = 0x400;
 
-       if (section)
-               return -ERANGE;
+       /* clear the bit */
+       writel(mask, addr + MXS_CLR_ADDR);
 
-       oobregion->offset = 0;
-       oobregion->length = geo->page_size - mtd->writesize;
+       /*
+        * SFTRST needs 3 GPMI clocks to settle; the reference manual
+        * recommends waiting 1us.
+        */
+       udelay(1);
 
-       return 0;
+       /* poll the bit becoming clear */
+       while ((readl(addr) & mask) && --timeout)
+               /* nothing */;
+
+       return !timeout;
 }
 
-static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
-                              struct mtd_oob_region *oobregion)
+#define MODULE_CLKGATE         (1 << 30)
+#define MODULE_SFTRST          (1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ *  [1] enable the module.
+ *  [2] reset the module.
+ *
+ * In most cases this is fine.
+ * But on the MX23 there is a hardware bug in the BCH block (see erratum #2847):
+ * if you try to soft reset the BCH block, it becomes unusable until
+ * the next hard reset. This case occurs in NAND boot mode: when the board
+ * boots from NAND, the ROM of the chip initializes the BCH block itself,
+ * so if the driver then resets the BCH again, the BCH stops working and
+ * you will see a DMA timeout. The bug has been fixed in later chips,
+ * such as the MX28.
+ *
+ * To avoid this bug, just add a new parameter `just_enable` for
+ * the mxs_reset_block(), and rewrite it here.
+ */
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-       struct bch_geometry *geo = &this->bch_geometry;
+       int ret;
+       int timeout = 0x400;
+
+       /* clear and poll SFTRST */
+       ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+       if (unlikely(ret))
+               goto error;
+
+       /* clear CLKGATE */
+       writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+       if (!just_enable) {
+               /* set SFTRST to reset the block */
+               writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
+               udelay(1);
+
+               /* poll CLKGATE becoming set */
+               while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+                       /* nothing */;
+               if (unlikely(!timeout))
+                       goto error;
+       }
 
-       if (section)
-               return -ERANGE;
+       /* clear and poll SFTRST */
+       ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+       if (unlikely(ret))
+               goto error;
 
-       /* The available oob size we have. */
-       if (geo->page_size < mtd->writesize + mtd->oobsize) {
-               oobregion->offset = geo->page_size - mtd->writesize;
-               oobregion->length = mtd->oobsize - oobregion->offset;
-       }
+       /* clear and poll CLKGATE */
+       ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+       if (unlikely(ret))
+               goto error;
 
        return 0;
+
+error:
+       pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+       return -ETIMEDOUT;
 }
 
-static const char * const gpmi_clks_for_mx2x[] = {
-       "gpmi_io",
-};
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
+{
+       struct clk *clk;
+       int ret;
+       int i;
 
-static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
-       .ecc = gpmi_ooblayout_ecc,
-       .free = gpmi_ooblayout_free,
-};
+       for (i = 0; i < GPMI_CLK_MAX; i++) {
+               clk = this->resources.clock[i];
+               if (!clk)
+                       break;
 
-static const struct gpmi_devdata gpmi_devdata_imx23 = {
-       .type = IS_MX23,
-       .bch_max_ecc_strength = 20,
-       .max_chain_delay = 16000,
-       .clks = gpmi_clks_for_mx2x,
-       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
-};
+               if (v) {
+                       ret = clk_prepare_enable(clk);
+                       if (ret)
+                               goto err_clk;
+               } else {
+                       clk_disable_unprepare(clk);
+               }
+       }
+       return 0;
 
-static const struct gpmi_devdata gpmi_devdata_imx28 = {
-       .type = IS_MX28,
-       .bch_max_ecc_strength = 20,
-       .max_chain_delay = 16000,
-       .clks = gpmi_clks_for_mx2x,
-       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
-};
+err_clk:
+       for (; i > 0; i--)
+               clk_disable_unprepare(this->resources.clock[i - 1]);
+       return ret;
+}
 
-static const char * const gpmi_clks_for_mx6[] = {
-       "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
-};
+static int gpmi_init(struct gpmi_nand_data *this)
+{
+       struct resources *r = &this->resources;
+       int ret;
 
-static const struct gpmi_devdata gpmi_devdata_imx6q = {
-       .type = IS_MX6Q,
-       .bch_max_ecc_strength = 40,
-       .max_chain_delay = 12000,
-       .clks = gpmi_clks_for_mx6,
-       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
-};
+       ret = gpmi_reset_block(r->gpmi_regs, false);
+       if (ret)
+               goto err_out;
 
-static const struct gpmi_devdata gpmi_devdata_imx6sx = {
-       .type = IS_MX6SX,
-       .bch_max_ecc_strength = 62,
-       .max_chain_delay = 12000,
-       .clks = gpmi_clks_for_mx6,
-       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
-};
+       /*
+        * Reset BCH here, too. We got failures otherwise :(
+        * See later BCH reset for explanation of MX23 and MX28 handling
+        */
+       ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
+       if (ret)
+               goto err_out;
 
-static const char * const gpmi_clks_for_mx7d[] = {
-       "gpmi_io", "gpmi_bch_apb",
-};
+       /* Choose NAND mode. */
+       writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
 
-static const struct gpmi_devdata gpmi_devdata_imx7d = {
-       .type = IS_MX7D,
-       .bch_max_ecc_strength = 62,
-       .max_chain_delay = 12000,
-       .clks = gpmi_clks_for_mx7d,
-       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
-};
+       /* Set the IRQ polarity. */
+       writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+                               r->gpmi_regs + HW_GPMI_CTRL1_SET);
 
-static irqreturn_t bch_irq(int irq, void *cookie)
-{
-       struct gpmi_nand_data *this = cookie;
+       /* Disable Write-Protection. */
+       writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 
-       gpmi_clear_bch(this);
-       complete(&this->bch_done);
-       return IRQ_HANDLED;
+       /* Select BCH ECC. */
+       writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+       /*
+        * Decouple the chip select from dma channel. We use dma0 for all
+        * the chips.
+        */
+       writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+       return 0;
+err_out:
+       return ret;
 }
 
-/*
- *  Calculate the ECC strength by hand:
- *     E : The ECC strength.
- *     G : the length of Galois Field.
- *     N : The chunk count of per page.
- *     O : the oobsize of the NAND chip.
- *     M : the metasize of per page.
- *
- *     The formula is :
- *             E * G * N
- *           ------------ <= (O - M)
- *                  8
- *
- *      So, we get E by:
- *                    (O - M) * 8
- *              E <= -------------
- *                       G * N
- */
-static inline int get_ecc_strength(struct gpmi_nand_data *this)
+/* This function is very useful. It is called only when a bug occurs. */
+static void gpmi_dump_info(struct gpmi_nand_data *this)
 {
+       struct resources *r = &this->resources;
        struct bch_geometry *geo = &this->bch_geometry;
-       struct mtd_info *mtd = nand_to_mtd(&this->nand);
-       int ecc_strength;
+       u32 reg;
+       int i;
 
-       ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
-                       / (geo->gf_len * geo->ecc_chunk_count);
+       dev_err(this->dev, "Show GPMI registers :\n");
+       for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+               reg = readl(r->gpmi_regs + i * 0x10);
+               dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+       }
 
-       /* We need the minor even number. */
-       return round_down(ecc_strength, 2);
+       /* start to print out the BCH info */
+       dev_err(this->dev, "Show BCH registers :\n");
+       for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+               reg = readl(r->bch_regs + i * 0x10);
+               dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+       }
+       dev_err(this->dev, "BCH Geometry :\n"
+               "GF length              : %u\n"
+               "ECC Strength           : %u\n"
+               "Page Size in Bytes     : %u\n"
+               "Metadata Size in Bytes : %u\n"
+               "ECC Chunk Size in Bytes: %u\n"
+               "ECC Chunk Count        : %u\n"
+               "Payload Size in Bytes  : %u\n"
+               "Auxiliary Size in Bytes: %u\n"
+               "Auxiliary Status Offset: %u\n"
+               "Block Mark Byte Offset : %u\n"
+               "Block Mark Bit Offset  : %u\n",
+               geo->gf_len,
+               geo->ecc_strength,
+               geo->page_size,
+               geo->metadata_size,
+               geo->ecc_chunk_size,
+               geo->ecc_chunk_count,
+               geo->payload_size,
+               geo->auxiliary_size,
+               geo->auxiliary_status_offset,
+               geo->block_mark_byte_offset,
+               geo->block_mark_bit_offset);
 }
 
 static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
@@ -296,6 +359,37 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
        return 0;
 }
 
+/*
+ *  Calculate the ECC strength by hand:
+ *     E : the ECC strength.
+ *     G : the length of the Galois Field.
+ *     N : the ECC chunk count per page.
+ *     O : the oobsize of the NAND chip.
+ *     M : the metadata size per page.
+ *
+ *     The formula is :
+ *             E * G * N
+ *           ------------ <= (O - M)
+ *                  8
+ *
+ *      So, we get E by:
+ *                    (O - M) * 8
+ *              E <= -------------
+ *                       G * N
+ */
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+       struct bch_geometry *geo = &this->bch_geometry;
+       struct mtd_info *mtd = nand_to_mtd(&this->nand);
+       int ecc_strength;
+
+       ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+                       / (geo->gf_len * geo->ecc_chunk_count);
+
+       /* Round down to the nearest even number. */
+       return round_down(ecc_strength, 2);
+}
+
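
Plugging typical numbers into the formula makes the rounding concrete. A hypothetical chip (not taken from this patch) with 224 bytes of OOB, 10 bytes of metadata, gf_len = 14 and 8 ECC chunks per page gives:

	E <= (O - M) * 8 / (G * N)
	   = (224 - 10) * 8 / (14 * 8)
	   = 1712 / 112
	   = 15   (integer division)
	round_down(15, 2) = 14

so the driver would advertise an ECC strength of 14 bits per chunk.
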
 static int legacy_set_geometry(struct gpmi_nand_data *this)
 {
        struct bch_geometry *geo = &this->bch_geometry;
@@ -408,7 +502,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
        return 0;
 }
 
-int common_nfc_set_geometry(struct gpmi_nand_data *this)
+static int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
        struct nand_chip *chip = &this->nand;
 
@@ -430,18 +524,288 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
        return 0;
 }
 
-struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+/* Configures the geometry for BCH.  */
+static int bch_set_geometry(struct gpmi_nand_data *this)
+{
+       struct resources *r = &this->resources;
+       int ret;
+
+       ret = common_nfc_set_geometry(this);
+       if (ret)
+               return ret;
+
+       ret = pm_runtime_get_sync(this->dev);
+       if (ret < 0)
+               return ret;
+
+       /*
+       * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+       * chip, otherwise it will lock up. So we skip resetting BCH on the MX23
+       * and MX28.
+       */
+       ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
+       if (ret)
+               goto err_out;
+
+       /* Set *all* chip selects to use layout 0. */
+       writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+       ret = 0;
+err_out:
+       pm_runtime_mark_last_busy(this->dev);
+       pm_runtime_put_autosuspend(this->dev);
+
+       return ret;
+}
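
Note that the clock handling that used to wrap this function (gpmi_enable_clk()/gpmi_disable_clk() in the deleted gpmi-lib.c) is gone: register access is now bracketed by pm_runtime_get_sync()/pm_runtime_put_autosuspend(), matching the "use runtime PM to manage clocks" change in this series. The clocks are then expected to be toggled from the runtime-PM callbacks, roughly as in the sketch below (callback names are assumptions; __gpmi_enable_clk() is the helper defined above):

	static int gpmi_runtime_suspend(struct device *dev)
	{
		struct gpmi_nand_data *this = dev_get_drvdata(dev);

		return __gpmi_enable_clk(this, false);
	}

	static int gpmi_runtime_resume(struct device *dev)
	{
		struct gpmi_nand_data *this = dev_get_drvdata(dev);

		return __gpmi_enable_clk(this, true);
	}
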
+
+/*
+ * <1> First, we should know what the GPMI-clock means.
+ *     The GPMI-clock is the internal clock in the gpmi nand controller.
+ *     If you set the gpmi nand controller to 100MHz, the GPMI-clock's period
+ *     is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
+ *
+ * <2> Second, we should know the frequency on the nand chip pins.
+ *     The frequency on the nand chip pins is derived from the GPMI-clock.
+ *     We can get it from the following equation:
+ *
+ *         F = G / (DS + DH)
+ *
+ *         F  : the frequency on the nand chip pins.
+ *         G  : the GPMI clock, such as 100MHz.
+ *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
+ *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
+ *
+ * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
+ *     the nand EDO(extended Data Out) timing could be applied.
+ *     The GPMI implements a feedback read strobe to sample the read data.
+ *     The feedback read strobe can be delayed to support the nand EDO timing
+ *     where the read strobe may deasserts before the read data is valid, and
+ *     read data is valid for some time after read strobe.
+ *
+ *     The following figure illustrates some aspects of a NAND Flash read:
+ *
+ *                   |<---tREA---->|
+ *                   |             |
+ *                   |         |   |
+ *                   |<--tRP-->|   |
+ *                   |         |   |
+ *                  __          ___|__________________________________
+ *     RDN            \________/   |
+ *                                 |
+ *                                 /---------\
+ *     Read Data    --------------<           >---------
+ *                                 \---------/
+ *                                |     |
+ *                                |<-D->|
+ *     FeedbackRDN  ________             ____________
+ *                          \___________/
+ *
+ *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
+ *
+ *
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
+ *
+ *  4.1) From the point of view of the NAND chip pins:
+ *        Delay = (tREA + C - tRP)               {1}
+ *
+ *        tREA : the maximum read access time.
+ *        C    : a constant used to adjust the delay; the default is 4000ps.
+ *        tRP  : the read pulse width, which is exactly:
+ *                   tRP = (GPMI-clock-period) * DATA_SETUP
+ *
+ *  4.2) From the point of view of the GPMI NAND controller:
+ *         Delay = RDN_DELAY * 0.125 * RP        {2}
+ *
+ *         RP   : the DLL reference period.
+ *            if (GPMI-clock-period > DLL_THRESHOLD)
+ *                   RP = GPMI-clock-period / 2;
+ *            else
+ *                   RP = GPMI-clock-period;
+ *
+ *            Set HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period is
+ *            greater than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD
+ *            is 16000ps, but on mx6q we use 12000ps.
+ *
+ *  4.3) Since {1} equals {2}, we get:
+ *
+ *                     (tREA + 4000 - tRP) * 8
+ *         RDN_DELAY = -----------------------     {3}
+ *                           RP
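+ *
+ *         A purely illustrative example (the numbers are hypothetical, not
+ *         taken from a specific chip): with a 100MHz GPMI clock below the
+ *         DLL threshold (RP = 10000ps), one DATA_SETUP cycle
+ *         (tRP = 10000ps) and tREA = 16000ps, {3} gives
+ *         RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8.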
+ */
+static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+                                    const struct nand_sdr_timings *sdr)
+{
+       struct gpmi_nfc_hardware_timing *hw = &this->hw;
+       unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
+       unsigned int period_ps, reference_period_ps;
+       unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
+       unsigned int tRP_ps;
+       bool use_half_period;
+       int sample_delay_ps, sample_delay_factor;
+       u16 busy_timeout_cycles;
+       u8 wrn_dly_sel;
+
+       if (sdr->tRC_min >= 30000) {
+               /* ONFI non-EDO modes [0-3] */
+               hw->clk_rate = 22000000;
+               wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
+       } else if (sdr->tRC_min >= 25000) {
+               /* ONFI EDO mode 4 */
+               hw->clk_rate = 80000000;
+               wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+       } else {
+               /* ONFI EDO mode 5 */
+               hw->clk_rate = 100000000;
+               wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+       }
+
+       /* SDR core timings are given in picoseconds */
+       period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
+
+       addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
+       data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
+       data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
+       busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+
+       hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
+                     BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
+                     BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
+       hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
+
+       /*
+        * Derive NFC ideal delay from {3}:
+        *
+        *                     (tREA + 4000 - tRP) * 8
+        *         RDN_DELAY = -----------------------
+        *                                RP
+        */
+       if (period_ps > dll_threshold_ps) {
+               use_half_period = true;
+               reference_period_ps = period_ps / 2;
+       } else {
+               use_half_period = false;
+               reference_period_ps = period_ps;
+       }
+
+       tRP_ps = data_setup_cycles * period_ps;
+       sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
+       if (sample_delay_ps > 0)
+               sample_delay_factor = sample_delay_ps / reference_period_ps;
+       else
+               sample_delay_factor = 0;
+
+       hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
+       if (sample_delay_factor)
+               hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
+                             BM_GPMI_CTRL1_DLL_ENABLE |
+                             (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
+}
+
+static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
+{
+       struct gpmi_nfc_hardware_timing *hw = &this->hw;
+       struct resources *r = &this->resources;
+       void __iomem *gpmi_regs = r->gpmi_regs;
+       unsigned int dll_wait_time_us;
+
+       clk_set_rate(r->clock[0], hw->clk_rate);
+
+       writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
+       writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
+
+       /*
+        * Clear several CTRL1 fields; the DLL must be disabled when setting
+        * RDN_DELAY or HALF_PERIOD.
+        */
+       writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
+       writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+       /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
+       dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
+       if (!dll_wait_time_us)
+               dll_wait_time_us = 1;
+
+       /* Wait for the DLL to settle. */
+       udelay(dll_wait_time_us);
+}
+
+static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
+                                    const struct nand_data_interface *conf)
+{
+       struct gpmi_nand_data *this = nand_get_controller_data(chip);
+       const struct nand_sdr_timings *sdr;
+
+       /* Retrieve required NAND timings */
+       sdr = nand_get_sdr_timings(conf);
+       if (IS_ERR(sdr))
+               return PTR_ERR(sdr);
+
+       /* Only MX6 GPMI controller can reach EDO timings */
+       if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
+               return -ENOTSUPP;
+
+       /* Stop here if this call was just a check */
+       if (chipnr < 0)
+               return 0;
+
+       /* Do the actual derivation of the controller timings */
+       gpmi_nfc_compute_timings(this, sdr);
+
+       this->hw.must_apply_timings = true;
+
+       return 0;
+}
+
+/* Clears a BCH interrupt. */
+static void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+       struct resources *r = &this->resources;
+       writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 {
        /* We use the DMA channel 0 to access all the nand chips. */
        return this->dma_chans[0];
 }
 
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+       struct gpmi_nand_data *this = param;
+       struct completion *dma_c = &this->dma_done;
+
+       complete(dma_c);
+}
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+       struct gpmi_nand_data *this = cookie;
+
+       gpmi_clear_bch(this);
+       complete(&this->bch_done);
+       return IRQ_HANDLED;
+}
+
+static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
+{
+       /*
+        * raw_len is the length to read/write, including the BCH data, that
+        * we are passed in exec_op. Calculate the data length from it.
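+        * As a purely illustrative example (the chunk size is hypothetical):
+        * with 512-byte ECC chunks, a raw_len of 4378 bytes rounds down to a
+        * data length of 4096 bytes.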
+        */
+       if (this->bch)
+               return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
+       else
+               return raw_len;
+}
+
 /* Can we use the upper's buffer directly for DMA? */
-bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len,
-                     enum dma_data_direction dr)
+static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
+                            int raw_len, struct scatterlist *sgl,
+                            enum dma_data_direction dr)
 {
-       struct scatterlist *sgl = &this->data_sgl;
        int ret;
+       int len = gpmi_raw_len_to_len(this, raw_len);
 
        /* first try to map the upper buffer directly */
        if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
@@ -457,7 +821,7 @@ map_fail:
        /* We have to use our own DMA buffer. */
        sg_init_one(sgl, this->data_buffer_dma, len);
 
-       if (dr == DMA_TO_DEVICE)
+       if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
                memcpy(this->data_buffer_dma, buf, len);
 
        dma_map_sg(this->dev, sgl, 1, dr);
@@ -465,67 +829,263 @@ map_fail:
        return false;
 }
 
-/* This will be called after the DMA operation is finished. */
-static void dma_irq_callback(void *param)
+/**
+ * gpmi_copy_bits - copy bits from one memory region to another
+ * @dst: destination buffer
+ * @dst_bit_off: bit offset we're starting to write at
+ * @src: source buffer
+ * @src_bit_off: bit offset we're starting to read from
+ * @nbits: number of bits to copy
+ *
+ * This function copies bits from one memory region to another, and is used by
+ * the GPMI driver to copy ECC sections which are not guaranteed to be byte
+ * aligned.
+ *
+ * src and dst should not overlap.
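+ *
+ * A purely illustrative call: gpmi_copy_bits(dst, 3, src, 0, 10) copies the
+ * first 10 bits of src into dst, starting at bit 3 of dst[0].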
+ *
+ */
+static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
+                          size_t src_bit_off, size_t nbits)
 {
-       struct gpmi_nand_data *this = param;
-       struct completion *dma_c = &this->dma_done;
+       size_t i;
+       size_t nbytes;
+       u32 src_buffer = 0;
+       size_t bits_in_src_buffer = 0;
 
-       complete(dma_c);
-}
+       if (!nbits)
+               return;
 
-int start_dma_without_bch_irq(struct gpmi_nand_data *this,
-                               struct dma_async_tx_descriptor *desc)
-{
-       struct completion *dma_c = &this->dma_done;
-       unsigned long timeout;
+       /*
+        * Advance the src and dst pointers to the byte containing their bit
+        * offsets and keep the remaining bit offsets within that byte.
+        */
+       src += src_bit_off / 8;
+       src_bit_off %= 8;
 
-       init_completion(dma_c);
+       dst += dst_bit_off / 8;
+       dst_bit_off %= 8;
 
-       desc->callback          = dma_irq_callback;
-       desc->callback_param    = this;
-       dmaengine_submit(desc);
-       dma_async_issue_pending(get_dma_chan(this));
+       /*
+        * Initialize the src_buffer value with bits available in the first
+        * byte of data so that we end up with a byte aligned src pointer.
+        */
+       if (src_bit_off) {
+               src_buffer = src[0] >> src_bit_off;
+               if (nbits >= (8 - src_bit_off)) {
+                       bits_in_src_buffer += 8 - src_bit_off;
+               } else {
+                       src_buffer &= GENMASK(nbits - 1, 0);
+                       bits_in_src_buffer += nbits;
+               }
+               nbits -= bits_in_src_buffer;
+               src++;
+       }
 
-       /* Wait for the interrupt from the DMA block. */
-       timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
-       if (!timeout) {
-               dev_err(this->dev, "DMA timeout, last DMA\n");
-               gpmi_dump_info(this);
-               return -ETIMEDOUT;
+       /* Calculate the number of bytes that can be copied from src to dst. */
+       nbytes = nbits / 8;
+
+       /* Try to align dst to a byte boundary. */
+       if (dst_bit_off) {
+               if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
+                       src_buffer |= src[0] << bits_in_src_buffer;
+                       bits_in_src_buffer += 8;
+                       src++;
+                       nbytes--;
+               }
+
+               if (bits_in_src_buffer >= (8 - dst_bit_off)) {
+                       dst[0] &= GENMASK(dst_bit_off - 1, 0);
+                       dst[0] |= src_buffer << dst_bit_off;
+                       src_buffer >>= (8 - dst_bit_off);
+                       bits_in_src_buffer -= (8 - dst_bit_off);
+                       dst_bit_off = 0;
+                       dst++;
+                       if (bits_in_src_buffer > 7) {
+                               bits_in_src_buffer -= 8;
+                               dst[0] = src_buffer;
+                               dst++;
+                               src_buffer >>= 8;
+                       }
+               }
+       }
+
+       if (!bits_in_src_buffer && !dst_bit_off) {
+               /*
+                * Both src and dst pointers are byte aligned, thus we can
+                * just use the optimized memcpy function.
+                */
+               if (nbytes)
+                       memcpy(dst, src, nbytes);
+       } else {
+               /*
+                * src buffer is not byte aligned, hence we have to copy each
+                * src byte to the src_buffer variable before extracting a byte
+                * to store in dst.
+                */
+               for (i = 0; i < nbytes; i++) {
+                       src_buffer |= src[i] << bits_in_src_buffer;
+                       dst[i] = src_buffer;
+                       src_buffer >>= 8;
+               }
+       }
+       /* Update dst and src pointers */
+       dst += nbytes;
+       src += nbytes;
+
+       /*
+        * nbits is the number of remaining bits. It should not exceed 8 as
+        * we've already copied as many bytes as possible.
+        */
+       nbits %= 8;
+
+       /*
+        * If there are no more bits to copy to the destination and the src buffer
+        * was already byte aligned, then we're done.
+        */
+       if (!nbits && !bits_in_src_buffer)
+               return;
+
+       /* Copy the remaining bits to src_buffer */
+       if (nbits)
+               src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
+                             bits_in_src_buffer;
+       bits_in_src_buffer += nbits;
+
+       /*
+        * In case there were not enough bits to get a byte-aligned dst buffer,
+        * prepare the src_buffer variable to match the dst organization (shift
+        * src_buffer by dst_bit_off and retrieve the least significant bits
+        * from dst).
+        */
+       if (dst_bit_off)
+               src_buffer = (src_buffer << dst_bit_off) |
+                            (*dst & GENMASK(dst_bit_off - 1, 0));
+       bits_in_src_buffer += dst_bit_off;
+
+       /*
+        * Keep most significant bits from dst if we end up with an unaligned
+        * number of bits.
+        */
+       nbytes = bits_in_src_buffer / 8;
+       if (bits_in_src_buffer % 8) {
+               src_buffer |= (dst[nbytes] &
+                              GENMASK(7, bits_in_src_buffer % 8)) <<
+                             (nbytes * 8);
+               nbytes++;
+       }
+
+       /* Copy the remaining bytes to dst */
+       for (i = 0; i < nbytes; i++) {
+               dst[i] = src_buffer;
+               src_buffer >>= 8;
        }
-       return 0;
 }
 
+/* Add our own bad block table (BBT) descriptor. */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+       .options        = 0,
+       .offs           = 0,
+       .len            = 1,
+       .pattern        = scan_ff_pattern
+};
+
 /*
- * This function is used in BCH reading or BCH writing pages.
- * It will wait for the BCH interrupt as long as ONE second.
- * Actually, we must wait for two interrupts :
- *     [1] firstly the DMA interrupt and
- *     [2] secondly the BCH interrupt.
+ * We may change the layout if we can get the ECC info from the datasheet;
+ * otherwise we will use all of the (page + OOB) area.
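+ *
+ * An illustrative example (all sizes hypothetical): with a 4096-byte
+ * writesize, a 224-byte oobsize and geo->page_size = 4296, the ECC region
+ * covers OOB bytes 0..199 and the free region covers OOB bytes 200..223.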
  */
-int start_dma_with_bch_irq(struct gpmi_nand_data *this,
-                       struct dma_async_tx_descriptor *desc)
+static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
+                             struct mtd_oob_region *oobregion)
 {
-       struct completion *bch_c = &this->bch_done;
-       unsigned long timeout;
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct gpmi_nand_data *this = nand_get_controller_data(chip);
+       struct bch_geometry *geo = &this->bch_geometry;
 
-       /* Prepare to receive an interrupt from the BCH block. */
-       init_completion(bch_c);
+       if (section)
+               return -ERANGE;
 
-       /* start the DMA */
-       start_dma_without_bch_irq(this, desc);
+       oobregion->offset = 0;
+       oobregion->length = geo->page_size - mtd->writesize;
 
-       /* Wait for the interrupt from the BCH block. */
-       timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
-       if (!timeout) {
-               dev_err(this->dev, "BCH timeout\n");
-               gpmi_dump_info(this);
-               return -ETIMEDOUT;
+       return 0;
+}
+
+static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
+                              struct mtd_oob_region *oobregion)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct gpmi_nand_data *this = nand_get_controller_data(chip);
+       struct bch_geometry *geo = &this->bch_geometry;
+
+       if (section)
+               return -ERANGE;
+
+       /* The OOB size available to us. */
+       if (geo->page_size < mtd->writesize + mtd->oobsize) {
+               oobregion->offset = geo->page_size - mtd->writesize;
+               oobregion->length = mtd->oobsize - oobregion->offset;
        }
+
        return 0;
 }
 
+static const char * const gpmi_clks_for_mx2x[] = {
+       "gpmi_io",
+};
+
+static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
+       .ecc = gpmi_ooblayout_ecc,
+       .free = gpmi_ooblayout_free,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
+       .type = IS_MX23,
+       .bch_max_ecc_strength = 20,
+       .max_chain_delay = 16000,
+       .clks = gpmi_clks_for_mx2x,
+       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
+       .type = IS_MX28,
+       .bch_max_ecc_strength = 20,
+       .max_chain_delay = 16000,
+       .clks = gpmi_clks_for_mx2x,
+       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
+};
+
+static const char * const gpmi_clks_for_mx6[] = {
+       "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
+       .type = IS_MX6Q,
+       .bch_max_ecc_strength = 40,
+       .max_chain_delay = 12000,
+       .clks = gpmi_clks_for_mx6,
+       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
+       .type = IS_MX6SX,
+       .bch_max_ecc_strength = 62,
+       .max_chain_delay = 12000,
+       .clks = gpmi_clks_for_mx6,
+       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
+};
+
+static const char * const gpmi_clks_for_mx7d[] = {
+       "gpmi_io", "gpmi_bch_apb",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx7d = {
+       .type = IS_MX7D,
+       .bch_max_ecc_strength = 62,
+       .max_chain_delay = 12000,
+       .clks = gpmi_clks_for_mx7d,
+       .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
+};
+
 static int acquire_register_block(struct gpmi_nand_data *this,
                                  const char *res_name)
 {
@@ -667,68 +1227,20 @@ static void release_resources(struct gpmi_nand_data *this)
        release_dma_channels(this);
 }
 
-static int send_page_prepare(struct gpmi_nand_data *this,
-                       const void *source, unsigned length,
-                       void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-                       const void **use_virt, dma_addr_t *use_phys)
-{
-       struct device *dev = this->dev;
-
-       if (virt_addr_valid(source)) {
-               dma_addr_t source_phys;
-
-               source_phys = dma_map_single(dev, (void *)source, length,
-                                               DMA_TO_DEVICE);
-               if (dma_mapping_error(dev, source_phys)) {
-                       if (alt_size < length) {
-                               dev_err(dev, "Alternate buffer is too small\n");
-                               return -ENOMEM;
-                       }
-                       goto map_failed;
-               }
-               *use_virt = source;
-               *use_phys = source_phys;
-               return 0;
-       }
-map_failed:
-       /*
-        * Copy the content of the source buffer into the alternate
-        * buffer and set up the return values accordingly.
-        */
-       memcpy(alt_virt, source, length);
-
-       *use_virt = alt_virt;
-       *use_phys = alt_phys;
-       return 0;
-}
-
-static void send_page_end(struct gpmi_nand_data *this,
-                       const void *source, unsigned length,
-                       void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-                       const void *used_virt, dma_addr_t used_phys)
-{
-       struct device *dev = this->dev;
-       if (used_virt == source)
-               dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
-}
-
 static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
 {
        struct device *dev = this->dev;
+       struct bch_geometry *geo = &this->bch_geometry;
 
-       if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
-               dma_free_coherent(dev, this->page_buffer_size,
-                                       this->page_buffer_virt,
-                                       this->page_buffer_phys);
-       kfree(this->cmd_buffer);
+       if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
+               dma_free_coherent(dev, geo->auxiliary_size,
+                                       this->auxiliary_virt,
+                                       this->auxiliary_phys);
        kfree(this->data_buffer_dma);
        kfree(this->raw_buffer);
 
-       this->cmd_buffer        = NULL;
        this->data_buffer_dma   = NULL;
        this->raw_buffer        = NULL;
-       this->page_buffer_virt  = NULL;
-       this->page_buffer_size  =  0;
 }
 
 /* Allocate the DMA buffers */
@@ -738,11 +1250,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
        struct device *dev = this->dev;
        struct mtd_info *mtd = nand_to_mtd(&this->nand);
 
-       /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
-       this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
-       if (this->cmd_buffer == NULL)
-               goto error_alloc;
-
        /*
         * [2] Allocate a read/write data buffer.
         *     The gpmi_alloc_dma_buffer can be called twice.
@@ -756,29 +1263,15 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
        if (this->data_buffer_dma == NULL)
                goto error_alloc;
 
-       /*
-        * [3] Allocate the page buffer.
-        *
-        * Both the payload buffer and the auxiliary buffer must appear on
-        * 32-bit boundaries. We presume the size of the payload buffer is a
-        * power of two and is much larger than four, which guarantees the
-        * auxiliary buffer will appear on a 32-bit boundary.
-        */
-       this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
-       this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
-                                       &this->page_buffer_phys, GFP_DMA);
-       if (!this->page_buffer_virt)
+       this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
+                                       &this->auxiliary_phys, GFP_DMA);
+       if (!this->auxiliary_virt)
                goto error_alloc;
 
-       this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+       this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
        if (!this->raw_buffer)
                goto error_alloc;
 
-       /* Slice up the page buffer. */
-       this->payload_virt = this->page_buffer_virt;
-       this->payload_phys = this->page_buffer_phys;
-       this->auxiliary_virt = this->payload_virt + geo->payload_size;
-       this->auxiliary_phys = this->payload_phys + geo->payload_size;
        return 0;
 
 error_alloc:
@@ -786,106 +1279,6 @@ error_alloc:
        return -ENOMEM;
 }
 
-static void gpmi_cmd_ctrl(struct nand_chip *chip, int data, unsigned int ctrl)
-{
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-       int ret;
-
-       /*
-        * Every operation begins with a command byte and a series of zero or
-        * more address bytes. These are distinguished by either the Address
-        * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
-        * asserted. When MTD is ready to execute the command, it will deassert
-        * both latch enables.
-        *
-        * Rather than run a separate DMA operation for every single byte, we
-        * queue them up and run a single DMA operation for the entire series
-        * of command and data bytes. NAND_CMD_NONE means the END of the queue.
-        */
-       if ((ctrl & (NAND_ALE | NAND_CLE))) {
-               if (data != NAND_CMD_NONE)
-                       this->cmd_buffer[this->command_length++] = data;
-               return;
-       }
-
-       if (!this->command_length)
-               return;
-
-       ret = gpmi_send_command(this);
-       if (ret)
-               dev_err(this->dev, "Chip: %u, Error %d\n",
-                       this->current_chip, ret);
-
-       this->command_length = 0;
-}
-
-static int gpmi_dev_ready(struct nand_chip *chip)
-{
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-       return gpmi_is_ready(this, this->current_chip);
-}
-
-static void gpmi_select_chip(struct nand_chip *chip, int chipnr)
-{
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-       int ret;
-
-       /*
-        * For power consumption matters, disable/enable the clock each time a
-        * die is selected/unselected.
-        */
-       if (this->current_chip < 0 && chipnr >= 0) {
-               ret = gpmi_enable_clk(this);
-               if (ret)
-                       dev_err(this->dev, "Failed to enable the clock\n");
-       } else if (this->current_chip >= 0 && chipnr < 0) {
-               ret = gpmi_disable_clk(this);
-               if (ret)
-                       dev_err(this->dev, "Failed to disable the clock\n");
-       }
-
-       /*
-        * This driver currently supports only one NAND chip. Plus, dies share
-        * the same configuration. So once timings have been applied on the
-        * controller side, they will not change anymore. When the time will
-        * come, the check on must_apply_timings will have to be dropped.
-        */
-       if (chipnr >= 0 && this->hw.must_apply_timings) {
-               this->hw.must_apply_timings = false;
-               gpmi_nfc_apply_timings(this);
-       }
-
-       this->current_chip = chipnr;
-}
-
-static void gpmi_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-       dev_dbg(this->dev, "len is %d\n", len);
-
-       gpmi_read_data(this, buf, len);
-}
-
-static void gpmi_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
-{
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-       dev_dbg(this->dev, "len is %d\n", len);
-
-       gpmi_send_data(this, buf, len);
-}
-
-static uint8_t gpmi_read_byte(struct nand_chip *chip)
-{
-       struct gpmi_nand_data *this = nand_get_controller_data(chip);
-       uint8_t *buf = this->data_buffer_dma;
-
-       gpmi_read_buf(chip, buf, 1);
-       return buf[0];
-}
-
 /*
  * Handles block mark swapping.
  * It can be called in swapping the block mark, or swapping it back,
@@ -934,54 +1327,20 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
        p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
 }
 
-static int gpmi_ecc_read_page_data(struct nand_chip *chip,
-                                  uint8_t *buf, int oob_required,
-                                  int page)
+static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
+                              int last, int meta)
 {
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *nfc_geo = &this->bch_geometry;
        struct mtd_info *mtd = nand_to_mtd(chip);
-       dma_addr_t    payload_phys;
-       unsigned int  i;
+       int i;
        unsigned char *status;
-       unsigned int  max_bitflips = 0;
-       int           ret;
-       bool          direct = false;
-
-       dev_dbg(this->dev, "page number is : %d\n", page);
-
-       payload_phys = this->payload_phys;
-
-       if (virt_addr_valid(buf)) {
-               dma_addr_t dest_phys;
-
-               dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
-                                          DMA_FROM_DEVICE);
-               if (!dma_mapping_error(this->dev, dest_phys)) {
-                       payload_phys = dest_phys;
-                       direct = true;
-               }
-       }
-
-       /* go! */
-       ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys);
-
-       if (direct)
-               dma_unmap_single(this->dev, payload_phys, nfc_geo->payload_size,
-                                DMA_FROM_DEVICE);
-
-       if (ret) {
-               dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
-               return ret;
-       }
+       unsigned int max_bitflips = 0;
 
        /* Loop over status bytes, accumulating ECC status. */
-       status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset;
+       status = this->auxiliary_virt + ALIGN(meta, 4);
 
-       if (!direct)
-               memcpy(buf, this->payload_virt, nfc_geo->payload_size);
-
-       for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
+       for (i = first; i < last; i++, status++) {
                if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
                        continue;
 
@@ -1061,6 +1420,50 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
                max_bitflips = max_t(unsigned int, max_bitflips, *status);
        }
 
+       return max_bitflips;
+}
+
+static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
+{
+       struct bch_geometry *geo = &this->bch_geometry;
+       unsigned int ecc_strength = geo->ecc_strength >> 1;
+       unsigned int gf_len = geo->gf_len;
+       unsigned int block_size = geo->ecc_chunk_size;
+
+       this->bch_flashlayout0 =
+               BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
+               BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
+               BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
+               BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
+               BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
+
+       this->bch_flashlayout1 =
+               BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
+               BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
+               BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
+               BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
+}
+
+static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
+                             int oob_required, int page)
+{
+       struct gpmi_nand_data *this = nand_get_controller_data(chip);
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct bch_geometry *geo = &this->bch_geometry;
+       unsigned int max_bitflips;
+       int ret;
+
+       gpmi_bch_layout_std(this);
+       this->bch = true;
+
+       ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
+       if (ret)
+               return ret;
+
+       max_bitflips = gpmi_count_bitflips(chip, buf, 0,
+                                          geo->ecc_chunk_count,
+                                          geo->auxiliary_status_offset);
+
        /* handle the block mark swapping */
        block_mark_swapping(this, buf, this->auxiliary_virt);
 
@@ -1082,30 +1485,20 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
        return max_bitflips;
 }
 
-static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
-                             int oob_required, int page)
-{
-       nand_read_page_op(chip, page, 0, NULL, 0);
-
-       return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
-}
-
 /* Fake a virtual small page for the subpage read */
 static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
                                 uint32_t len, uint8_t *buf, int page)
 {
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
-       void __iomem *bch_regs = this->resources.bch_regs;
-       struct bch_geometry old_geo = this->bch_geometry;
        struct bch_geometry *geo = &this->bch_geometry;
        int size = chip->ecc.size; /* ECC chunk size */
        int meta, n, page_size;
-       u32 r1_old, r2_old, r1_new, r2_new;
        unsigned int max_bitflips;
+       unsigned int ecc_strength;
        int first, last, marker_pos;
        int ecc_parity_size;
        int col = 0;
-       int old_swap_block_mark = this->swap_block_mark;
+       int ret;
 
        /* The size of ECC parity */
        ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
@@ -1138,43 +1531,33 @@ static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
                buf = buf + first * size;
        }
 
-       nand_read_page_op(chip, page, col, NULL, 0);
-
-       /* Save the old environment */
-       r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
-       r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
+       ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
 
-       /* change the BCH registers and bch_geometry{} */
        n = last - first + 1;
        page_size = meta + (size + ecc_parity_size) * n;
+       ecc_strength = geo->ecc_strength >> 1;
+
+       this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
+               BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
+               BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
+               BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
+               BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
 
-       r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
-                       BM_BCH_FLASH0LAYOUT0_META_SIZE);
-       r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
-                       | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
-       writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
+       this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
+               BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
+               BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
+               BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
 
-       r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
-       r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
-       writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
+       this->bch = true;
 
-       geo->ecc_chunk_count = n;
-       geo->payload_size = n * size;
-       geo->page_size = page_size;
-       geo->auxiliary_status_offset = ALIGN(meta, 4);
+       ret = nand_read_page_op(chip, page, col, buf, page_size);
+       if (ret)
+               return ret;
 
        dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
                page, offs, len, col, first, n, page_size);
 
-       /* Read the subpage now */
-       this->swap_block_mark = false;
-       max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
-
-       /* Restore */
-       writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
-       writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
-       this->bch_geometry = old_geo;
-       this->swap_block_mark = old_swap_block_mark;
+       max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
 
        return max_bitflips;
 }
@@ -1185,81 +1568,29 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        struct bch_geometry *nfc_geo = &this->bch_geometry;
-       const void *payload_virt;
-       dma_addr_t payload_phys;
-       const void *auxiliary_virt;
-       dma_addr_t auxiliary_phys;
-       int        ret;
+       int ret;
 
        dev_dbg(this->dev, "ecc write page.\n");
 
-       nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+       gpmi_bch_layout_std(this);
+       this->bch = true;
+
+       memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
 
        if (this->swap_block_mark) {
                /*
-                * If control arrives here, we're doing block mark swapping.
-                * Since we can't modify the caller's buffers, we must copy them
-                * into our own.
-                */
-               memcpy(this->payload_virt, buf, mtd->writesize);
-               payload_virt = this->payload_virt;
-               payload_phys = this->payload_phys;
-
-               memcpy(this->auxiliary_virt, chip->oob_poi,
-                               nfc_geo->auxiliary_size);
-               auxiliary_virt = this->auxiliary_virt;
-               auxiliary_phys = this->auxiliary_phys;
-
-               /* Handle block mark swapping. */
-               block_mark_swapping(this,
-                               (void *)payload_virt, (void *)auxiliary_virt);
-       } else {
-               /*
-                * If control arrives here, we're not doing block mark swapping,
-                * so we can to try and use the caller's buffers.
+                * When doing bad block marker swapping, we must always copy
+                * the input buffer, as we can't modify the const buffer.
                 */
-               ret = send_page_prepare(this,
-                               buf, mtd->writesize,
-                               this->payload_virt, this->payload_phys,
-                               nfc_geo->payload_size,
-                               &payload_virt, &payload_phys);
-               if (ret) {
-                       dev_err(this->dev, "Inadequate payload DMA buffer\n");
-                       return 0;
-               }
-
-               ret = send_page_prepare(this,
-                               chip->oob_poi, mtd->oobsize,
-                               this->auxiliary_virt, this->auxiliary_phys,
-                               nfc_geo->auxiliary_size,
-                               &auxiliary_virt, &auxiliary_phys);
-               if (ret) {
-                       dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
-                       goto exit_auxiliary;
-               }
-       }
-
-       /* Ask the NFC. */
-       ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
-       if (ret)
-               dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
-
-       if (!this->swap_block_mark) {
-               send_page_end(this, chip->oob_poi, mtd->oobsize,
-                               this->auxiliary_virt, this->auxiliary_phys,
-                               nfc_geo->auxiliary_size,
-                               auxiliary_virt, auxiliary_phys);
-exit_auxiliary:
-               send_page_end(this, buf, mtd->writesize,
-                               this->payload_virt, this->payload_phys,
-                               nfc_geo->payload_size,
-                               payload_virt, payload_phys);
+               memcpy(this->data_buffer_dma, buf, mtd->writesize);
+               buf = this->data_buffer_dma;
+               block_mark_swapping(this, this->data_buffer_dma,
+                                   this->auxiliary_virt);
        }
 
-       if (ret)
-               return ret;
+       ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
 
-       return nand_prog_page_end_op(chip);
+       return ret;
 }
 
 /*
@@ -1326,14 +1657,16 @@ static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
+       int ret;
 
-       dev_dbg(this->dev, "page number is %d\n", page);
        /* clear the OOB buffer */
        memset(chip->oob_poi, ~0, mtd->oobsize);
 
        /* Read out the conventional OOB. */
-       nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
-       chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+       ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
+                               mtd->oobsize);
+       if (ret)
+               return ret;
 
        /*
         * Now, we want to make sure the block mark is correct. In the
@@ -1342,8 +1675,9 @@ static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
         */
        if (GPMI_IS_MX23(this)) {
                /* Read the block mark into the first byte of the OOB buffer. */
-               nand_read_page_op(chip, page, 0, NULL, 0);
-               chip->oob_poi[0] = chip->legacy.read_byte(chip);
+               ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -1392,9 +1726,12 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
        size_t oob_byte_off;
        uint8_t *oob = chip->oob_poi;
        int step;
+       int ret;
 
-       nand_read_page_op(chip, page, 0, tmp_buf,
-                         mtd->writesize + mtd->oobsize);
+       ret = nand_read_page_op(chip, page, 0, tmp_buf,
+                               mtd->writesize + mtd->oobsize);
+       if (ret)
+               return ret;
 
        /*
         * If required, swap the bad block marker and the data stored in the
@@ -1606,13 +1943,12 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
        unsigned int stride;
        unsigned int page;
        u8 *buffer = nand_get_data_buf(chip);
-       int saved_chip_number;
        int found_an_ncb_fingerprint = false;
+       int ret;
 
        /* Compute the number of strides in a search area. */
        search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
 
-       saved_chip_number = this->current_chip;
        nand_select_target(chip, 0);
 
        /*
@@ -1630,8 +1966,10 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
                 * Read the NCB fingerprint. The fingerprint is four bytes long
                 * and starts in the 12th byte of the page.
                 */
-               nand_read_page_op(chip, page, 12, NULL, 0);
-               chip->legacy.read_buf(chip, buffer, strlen(fingerprint));
+               ret = nand_read_page_op(chip, page, 12, buffer,
+                                       strlen(fingerprint));
+               if (ret)
+                       continue;
 
                /* Look for the fingerprint. */
                if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
@@ -1641,10 +1979,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
 
        }
 
-       if (saved_chip_number >= 0)
-               nand_select_target(chip, saved_chip_number);
-       else
-               nand_deselect_target(chip);
+       nand_deselect_target(chip);
 
        if (found_an_ncb_fingerprint)
                dev_dbg(dev, "\tFound a fingerprint\n");
@@ -1668,7 +2003,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
        unsigned int stride;
        unsigned int page;
        u8 *buffer = nand_get_data_buf(chip);
-       int saved_chip_number;
        int status;
 
        /* Compute the search area geometry. */
@@ -1685,8 +2019,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
        dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
        dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
 
-       /* Select chip 0. */
-       saved_chip_number = this->current_chip;
        nand_select_target(chip, 0);
 
        /* Loop over blocks in the first search area, erasing them. */
@@ -1718,11 +2050,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
                        dev_err(dev, "[%s] Write failed.\n", __func__);
        }
 
-       /* Deselect chip 0. */
-       if (saved_chip_number >= 0)
-               nand_select_target(chip, saved_chip_number);
-       else
-               nand_deselect_target(chip);
+       nand_deselect_target(chip);
 
        return 0;
 }
@@ -1773,10 +2101,13 @@ static int mx23_boot_init(struct gpmi_nand_data  *this)
 
                /* Send the command to read the conventional block mark. */
                nand_select_target(chip, chipnr);
-               nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
-               block_mark = chip->legacy.read_byte(chip);
+               ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
+                                       1);
                nand_deselect_target(chip);
 
+               if (ret)
+                       continue;
+
                /*
                 * Check if the block is marked bad. If so, we need to mark it
                 * again, but this time the result will be a mark in the
@@ -1890,9 +2221,330 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
        return 0;
 }
 
+static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
+{
+       struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
+
+       this->ntransfers++;
+
+       if (this->ntransfers == GPMI_MAX_TRANSFERS)
+               return NULL;
+
+       return transfer;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_command(
+       struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
+{
+       struct dma_chan *channel = get_dma_chan(this);
+       struct dma_async_tx_descriptor *desc;
+       struct gpmi_transfer *transfer;
+       int chip = this->nand.cur_cs;
+       u32 pio[3];
+
+       /* [1] send out the PIO words */
+       pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+               | BM_GPMI_CTRL0_WORD_LENGTH
+               | BF_GPMI_CTRL0_CS(chip, this)
+               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+               | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+               | BM_GPMI_CTRL0_ADDRESS_INCREMENT
+               | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
+       pio[1] = 0;
+       pio[2] = 0;
+       desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+                                     DMA_TRANS_NONE, 0);
+       if (!desc)
+               return NULL;
+
+       transfer = get_next_transfer(this);
+       if (!transfer)
+               return NULL;
+
+       transfer->cmdbuf[0] = cmd;
+       if (naddr)
+               memcpy(&transfer->cmdbuf[1], addr, naddr);
+
+       sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
+       dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
+
+       transfer->direction = DMA_TO_DEVICE;
+
+       desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
+                                      MXS_DMA_CTRL_WAIT4END);
+       return desc;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
+       struct gpmi_nand_data *this)
+{
+       struct dma_chan *channel = get_dma_chan(this);
+       u32 pio[2];
+
+       pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
+               | BM_GPMI_CTRL0_WORD_LENGTH
+               | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+               | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+               | BF_GPMI_CTRL0_XFER_COUNT(0);
+       pio[1] = 0;
+
+       return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
+                               MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_data_read(
+       struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
+{
+       struct dma_async_tx_descriptor *desc;
+       struct dma_chan *channel = get_dma_chan(this);
+       struct gpmi_transfer *transfer;
+       u32 pio[6] = {};
+
+       transfer = get_next_transfer(this);
+       if (!transfer)
+               return NULL;
+
+       transfer->direction = DMA_FROM_DEVICE;
+
+       *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
+                                  DMA_FROM_DEVICE);
+
+       pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+               | BM_GPMI_CTRL0_WORD_LENGTH
+               | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+               | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+               | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
+
+       if (this->bch) {
+               pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
+                       | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
+                       | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+                               | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
+               pio[3] = raw_len;
+               pio[4] = transfer->sgl.dma_address;
+               pio[5] = this->auxiliary_phys;
+       }
+
+       desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+                                     DMA_TRANS_NONE, 0);
+       if (!desc)
+               return NULL;
+
+       if (!this->bch)
+               desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
+                                            DMA_DEV_TO_MEM,
+                                            MXS_DMA_CTRL_WAIT4END);
+
+       return desc;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_data_write(
+       struct gpmi_nand_data *this, const void *buf, int raw_len)
+{
+       struct dma_chan *channel = get_dma_chan(this);
+       struct dma_async_tx_descriptor *desc;
+       struct gpmi_transfer *transfer;
+       u32 pio[6] = {};
+
+       transfer = get_next_transfer(this);
+       if (!transfer)
+               return NULL;
+
+       transfer->direction = DMA_TO_DEVICE;
+
+       prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
+
+       pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+               | BM_GPMI_CTRL0_WORD_LENGTH
+               | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+               | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+               | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+               | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
+
+       if (this->bch) {
+               pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+                       | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
+                       | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+                                       BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
+               pio[3] = raw_len;
+               pio[4] = transfer->sgl.dma_address;
+               pio[5] = this->auxiliary_phys;
+       }
+
+       desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+                                     DMA_TRANS_NONE,
+                                     (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
+       if (!desc)
+               return NULL;
+
+       if (!this->bch)
+               desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
+                                              DMA_MEM_TO_DEV,
+                                              MXS_DMA_CTRL_WAIT4END);
+
+       return desc;
+}
+
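+/*
+ * Illustration only: the NAND core typically delivers a page read as
+ * CMD + ADDR cycles + CMD + WAITRDY + DATA_IN. Each instruction below is
+ * turned into one DMA descriptor, and the descriptors are chained on DMA
+ * channel 0.
+ */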
+static int gpmi_nfc_exec_op(struct nand_chip *chip,
+                            const struct nand_operation *op,
+                            bool check_only)
+{
+       const struct nand_op_instr *instr;
+       struct gpmi_nand_data *this = nand_get_controller_data(chip);
+       struct dma_async_tx_descriptor *desc = NULL;
+       int i, ret, buf_len = 0, nbufs = 0;
+       u8 cmd = 0;
+       void *buf_read = NULL;
+       const void *buf_write = NULL;
+       bool direct = false;
+       struct completion *completion;
+       unsigned long to;
+
+       this->ntransfers = 0;
+       for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
+               this->transfers[i].direction = DMA_NONE;
+
+       ret = pm_runtime_get_sync(this->dev);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * This driver currently supports only one NAND chip. Plus, dies share
+        * the same configuration. So once timings have been applied on the
+        * controller side, they will not change anymore. When the time comes,
+        * the check on must_apply_timings will have to be dropped.
+        */
+       if (this->hw.must_apply_timings) {
+               this->hw.must_apply_timings = false;
+               gpmi_nfc_apply_timings(this);
+       }
+
+       dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
+
+       for (i = 0; i < op->ninstrs; i++) {
+               instr = &op->instrs[i];
+
+               nand_op_trace("  ", instr);
+
+               switch (instr->type) {
+               case NAND_OP_WAITRDY_INSTR:
+                       desc = gpmi_chain_wait_ready(this);
+                       break;
+               case NAND_OP_CMD_INSTR:
+                       cmd = instr->ctx.cmd.opcode;
+
+                       /*
+                        * When this command has an address cycle, chain it
+                        * together with the address cycle.
+                        */
+                       if (i + 1 != op->ninstrs &&
+                           op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
+                               continue;
+
+                       desc = gpmi_chain_command(this, cmd, NULL, 0);
+
+                       break;
+               case NAND_OP_ADDR_INSTR:
+                       desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
+                                                 instr->ctx.addr.naddrs);
+                       break;
+               case NAND_OP_DATA_OUT_INSTR:
+                       buf_write = instr->ctx.data.buf.out;
+                       buf_len = instr->ctx.data.len;
+                       nbufs++;
+
+                       desc = gpmi_chain_data_write(this, buf_write, buf_len);
+
+                       break;
+               case NAND_OP_DATA_IN_INSTR:
+                       if (!instr->ctx.data.len)
+                               break;
+                       buf_read = instr->ctx.data.buf.in;
+                       buf_len = instr->ctx.data.len;
+                       nbufs++;
+
+                       desc = gpmi_chain_data_read(this, buf_read, buf_len,
+                                                  &direct);
+                       break;
+               }
+
+               if (!desc) {
+                       ret = -ENXIO;
+                       goto unmap;
+               }
+       }
+
+       dev_dbg(this->dev, "%s setup done\n", __func__);
+
+       if (nbufs > 1) {
+               dev_err(this->dev, "Multiple data instructions not supported\n");
+               ret = -EINVAL;
+               goto unmap;
+       }
+
+       if (this->bch) {
+               writel(this->bch_flashlayout0,
+                      this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
+               writel(this->bch_flashlayout1,
+                      this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
+       }
+
+       if (this->bch && buf_read) {
+               writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+                      this->resources.bch_regs + HW_BCH_CTRL_SET);
+               completion = &this->bch_done;
+       } else {
+               desc->callback = dma_irq_callback;
+               desc->callback_param = this;
+               completion = &this->dma_done;
+       }
+
+       init_completion(completion);
+
+       dmaengine_submit(desc);
+       dma_async_issue_pending(get_dma_chan(this));
+
+       to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
+       if (!to) {
+               dev_err(this->dev, "DMA timeout, last DMA\n");
+               gpmi_dump_info(this);
+               ret = -ETIMEDOUT;
+               goto unmap;
+       }
+
+       writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+              this->resources.bch_regs + HW_BCH_CTRL_CLR);
+       gpmi_clear_bch(this);
+
+       ret = 0;
+
+unmap:
+       for (i = 0; i < this->ntransfers; i++) {
+               struct gpmi_transfer *transfer = &this->transfers[i];
+
+               if (transfer->direction != DMA_NONE)
+                       dma_unmap_sg(this->dev, &transfer->sgl, 1,
+                                    transfer->direction);
+       }
+
+       if (!ret && buf_read && !direct)
+               memcpy(buf_read, this->data_buffer_dma,
+                      gpmi_raw_len_to_len(this, buf_len));
+
+       this->bch = false;
+
+       pm_runtime_mark_last_busy(this->dev);
+       pm_runtime_put_autosuspend(this->dev);
+
+       return ret;
+}
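
For context, a minimal sketch (not part of the patch; the opcode and timeout values are purely illustrative) of the kind of instruction list the NAND core hands to ->exec_op(). The instruction types and ctx fields are the ones handled in the switch above: the CMD instruction would be chained via gpmi_chain_command() and the WAITRDY instruction via gpmi_chain_wait_ready().

    /* Illustrative only: a RESET followed by a ready wait. */
    static const struct nand_op_instr example_reset_instrs[] = {
            { .type = NAND_OP_CMD_INSTR,     .ctx.cmd.opcode = 0xff /* RESET */ },
            { .type = NAND_OP_WAITRDY_INSTR, .ctx.waitrdy.timeout_ms = 200 },
    };
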
+
 static const struct nand_controller_ops gpmi_nand_controller_ops = {
        .attach_chip = gpmi_nand_attach_chip,
        .setup_data_interface = gpmi_setup_data_interface,
+       .exec_op = gpmi_nfc_exec_op,
 };
 
 static int gpmi_nand_init(struct gpmi_nand_data *this)
@@ -1901,9 +2553,6 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
        struct mtd_info  *mtd = nand_to_mtd(chip);
        int ret;
 
-       /* init current chip */
-       this->current_chip      = -1;
-
        /* init the MTD data structures */
        mtd->name               = "gpmi-nand";
        mtd->dev.parent         = this->dev;
@@ -1911,14 +2560,8 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
        /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
        nand_set_controller_data(chip, this);
        nand_set_flash_node(chip, this->pdev->dev.of_node);
-       chip->legacy.select_chip        = gpmi_select_chip;
-       chip->legacy.cmd_ctrl   = gpmi_cmd_ctrl;
-       chip->legacy.dev_ready  = gpmi_dev_ready;
-       chip->legacy.read_byte  = gpmi_read_byte;
-       chip->legacy.read_buf   = gpmi_read_buf;
-       chip->legacy.write_buf  = gpmi_write_buf;
-       chip->badblock_pattern  = &gpmi_bbt_descr;
        chip->legacy.block_markbad = gpmi_block_markbad;
+       chip->badblock_pattern  = &gpmi_bbt_descr;
        chip->options           |= NAND_NO_SUBPAGE_WRITE;
 
        /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
@@ -1934,7 +2577,10 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
        if (ret)
                goto err_out;
 
-       chip->legacy.dummy_controller.ops = &gpmi_nand_controller_ops;
+       nand_controller_init(&this->base);
+       this->base.ops = &gpmi_nand_controller_ops;
+       chip->controller = &this->base;
+
        ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
        if (ret)
                goto err_out;
@@ -2004,6 +2650,16 @@ static int gpmi_nand_probe(struct platform_device *pdev)
        if (ret)
                goto exit_acquire_resources;
 
+       ret = __gpmi_enable_clk(this, true);
+       if (ret)
+               goto exit_nfc_init;
+
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+
        ret = gpmi_init(this);
        if (ret)
                goto exit_nfc_init;
@@ -2012,11 +2668,16 @@ static int gpmi_nand_probe(struct platform_device *pdev)
        if (ret)
                goto exit_nfc_init;
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
        dev_info(this->dev, "driver registered.\n");
 
        return 0;
 
 exit_nfc_init:
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
        release_resources(this);
 exit_acquire_resources:
 
@@ -2027,6 +2688,9 @@ static int gpmi_nand_remove(struct platform_device *pdev)
 {
        struct gpmi_nand_data *this = platform_get_drvdata(pdev);
 
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
        nand_release(&this->nand);
        gpmi_free_dma_buffer(this);
        release_resources(this);
@@ -2069,8 +2733,23 @@ static int gpmi_pm_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
+{
+       struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+       return __gpmi_enable_clk(this, false);
+}
+
+static int __maybe_unused gpmi_runtime_resume(struct device *dev)
+{
+       struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+       return __gpmi_enable_clk(this, true);
+}
+
 static const struct dev_pm_ops gpmi_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+       SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
 };
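
As a rough usage sketch (a generic access path written for illustration, not code from this patch), the runtime-PM hooks above let every controller access follow the usual get/put bracket, with gpmi_runtime_resume()/gpmi_runtime_suspend() toggling the clocks and the 500 ms autosuspend delay set in probe() deciding when they actually go off:

    static int example_controller_access(struct gpmi_nand_data *this)
    {
            int ret;

            ret = pm_runtime_get_sync(this->dev);   /* clocks on, if they were off */
            if (ret < 0) {
                    pm_runtime_put_noidle(this->dev);
                    return ret;
            }

            /* ... program the GPMI/BCH blocks ... */

            pm_runtime_mark_last_busy(this->dev);
            pm_runtime_put_autosuspend(this->dev);  /* clocks off after the delay */
            return 0;
    }
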
 
 static struct platform_driver gpmi_nand_driver = {
index a804a4a..fdc5ed7 100644 (file)
@@ -103,6 +103,14 @@ struct gpmi_nfc_hardware_timing {
        u32 ctrl1n;
 };
 
+#define GPMI_MAX_TRANSFERS     8
+
+struct gpmi_transfer {
+       u8 cmdbuf[8];
+       struct scatterlist sgl;
+       enum dma_data_direction direction;
+};
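
A rough sketch (illustrative; this is not the body of the patch's gpmi_chain_*() helpers) of how each chained instruction is expected to claim one of these slots, which is exactly what the unmap loop at the end of gpmi_nfc_exec_op() undoes afterwards:

    static bool example_map_transfer(struct gpmi_nand_data *this, void *buf,
                                     int len, enum dma_data_direction dir)
    {
            struct gpmi_transfer *transfer = &this->transfers[this->ntransfers++];

            sg_init_one(&transfer->sgl, buf, len);  /* one sg entry per instruction */
            transfer->direction = dir;
            return dma_map_sg(this->dev, &transfer->sgl, 1, dir) == 1;
    }
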
+
 struct gpmi_nand_data {
        /* Devdata */
        const struct gpmi_devdata *devdata;
@@ -126,25 +134,18 @@ struct gpmi_nand_data {
        struct boot_rom_geometry rom_geometry;
 
        /* MTD / NAND */
+       struct nand_controller  base;
        struct nand_chip        nand;
 
-       /* General-use Variables */
-       int                     current_chip;
-       unsigned int            command_length;
+       struct gpmi_transfer    transfers[GPMI_MAX_TRANSFERS];
+       int                     ntransfers;
 
-       struct scatterlist      cmd_sgl;
-       char                    *cmd_buffer;
+       bool                    bch;
+       uint32_t                bch_flashlayout0;
+       uint32_t                bch_flashlayout1;
 
-       struct scatterlist      data_sgl;
        char                    *data_buffer_dma;
 
-       void                    *page_buffer_virt;
-       dma_addr_t              page_buffer_phys;
-       unsigned int            page_buffer_size;
-
-       void                    *payload_virt;
-       dma_addr_t              payload_phys;
-
        void                    *auxiliary_virt;
        dma_addr_t              auxiliary_phys;
 
@@ -154,45 +155,8 @@ struct gpmi_nand_data {
 #define DMA_CHANS              8
        struct dma_chan         *dma_chans[DMA_CHANS];
        struct completion       dma_done;
-
-       /* private */
-       void                    *private;
 };
 
-/* Common Services */
-int common_nfc_set_geometry(struct gpmi_nand_data *);
-struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-bool prepare_data_dma(struct gpmi_nand_data *, const void *buf, int len,
-                     enum dma_data_direction dr);
-int start_dma_without_bch_irq(struct gpmi_nand_data *,
-                             struct dma_async_tx_descriptor *);
-int start_dma_with_bch_irq(struct gpmi_nand_data *,
-                          struct dma_async_tx_descriptor *);
-
-/* GPMI-NAND helper function library */
-int gpmi_init(struct gpmi_nand_data *);
-void gpmi_clear_bch(struct gpmi_nand_data *);
-void gpmi_dump_info(struct gpmi_nand_data *);
-int bch_set_geometry(struct gpmi_nand_data *);
-int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
-int gpmi_send_command(struct gpmi_nand_data *);
-int gpmi_enable_clk(struct gpmi_nand_data *this);
-int gpmi_disable_clk(struct gpmi_nand_data *this);
-int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
-                             const struct nand_data_interface *conf);
-void gpmi_nfc_apply_timings(struct gpmi_nand_data *this);
-int gpmi_read_data(struct gpmi_nand_data *, void *buf, int len);
-int gpmi_send_data(struct gpmi_nand_data *, const void *buf, int len);
-
-int gpmi_send_page(struct gpmi_nand_data *,
-                  dma_addr_t payload, dma_addr_t auxiliary);
-int gpmi_read_page(struct gpmi_nand_data *,
-                  dma_addr_t payload, dma_addr_t auxiliary);
-
-void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
-                   const u8 *src, size_t src_bit_off,
-                   size_t nbits);
-
 /* BCH : Status Block Completion Codes */
 #define STATUS_GOOD            0x00
 #define STATUS_ERASED          0xff
index 0f90e06..74595b6 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
  * MTK ECC controller driver.
  * Copyright (C) 2016  MediaTek Inc.
@@ -596,4 +596,4 @@ module_platform_driver(mtk_ecc_driver);
 
 MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
 MODULE_DESCRIPTION("MTK Nand ECC Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual MIT/GPL");
index aa52e94..0e48c36 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /*
  * MTK SDG1 ECC controller
  *
index dceff28..373d47d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
  * MTK NAND Flash controller driver.
  * Copyright (C) 2016 MediaTek Inc.
 #define NFI_FDMM(x)            (0xA4 + (x) * sizeof(u32) * 2)
 #define NFI_FDM_MAX_SIZE       (8)
 #define NFI_FDM_MIN_SIZE       (1)
+#define NFI_DEBUG_CON1         (0x220)
+#define                STROBE_MASK             GENMASK(4, 3)
+#define                STROBE_SHIFT            (3)
+#define                MAX_STROBE_DLY          (3)
 #define NFI_MASTER_STA         (0x224)
 #define                MASTER_STA_MASK         (0x0FFF)
 #define NFI_EMPTY_THRESH       (0x23C)
@@ -150,6 +154,8 @@ struct mtk_nfc {
        struct list_head chips;
 
        u8 *buffer;
+
+       unsigned long assigned_cs;
 };
 
 /*
@@ -500,7 +506,8 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
 {
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        const struct nand_sdr_timings *timings;
-       u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
+       u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
+       u32 temp, tsel = 0;
 
        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
@@ -536,14 +543,53 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
        twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
        twh &= 0xf;
 
-       twst = timings->tWP_min / 1000;
+       /* Calculate the real WE#/RE# hold time in nanoseconds */
+       temp = (twh + 1) * 1000000 / rate;
+       /* Convert nanoseconds to picoseconds */
+       temp *= 1000;
+
+       /*
+        * The WE# low-level time should be expanded so that both the WE#
+        * pulse time and the WE# cycle time are satisfied.
+        */
+       if (temp < timings->tWC_min)
+               twst = timings->tWC_min - temp;
+       twst = max(timings->tWP_min, twst) / 1000;
        twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
        twst &= 0xf;
 
-       trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
+       /*
+        * The RE# low-level time should be expanded so that both the RE#
+        * pulse time and the RE# cycle time are satisfied.
+        */
+       if (temp < timings->tRC_min)
+               trlt = timings->tRC_min - temp;
+       trlt = max(trlt, timings->tRP_min) / 1000;
        trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
        trlt &= 0xf;
 
+       /* Calculate the RE# pulse time in nanoseconds. */
+       temp = (trlt + 1) * 1000000 / rate;
+       /* Convert nanoseconds to picoseconds */
+       temp *= 1000;
+       /*
+        * If the RE# access time is longer than the RE# pulse time,
+        * delay the data sampling point accordingly.
+        */
+       if (temp < timings->tREA_max) {
+               tsel = timings->tREA_max / 1000;
+               tsel = DIV_ROUND_UP(tsel * rate, 1000000);
+               tsel -= (trlt + 1);
+               if (tsel > MAX_STROBE_DLY) {
+                       trlt += tsel - MAX_STROBE_DLY;
+                       tsel = MAX_STROBE_DLY;
+               }
+       }
+       temp = nfi_readl(nfc, NFI_DEBUG_CON1);
+       temp &= ~STROBE_MASK;
+       temp |= tsel << STROBE_SHIFT;
+       nfi_writel(nfc, temp, NFI_DEBUG_CON1);
+
        /*
         * ACCON: access timing control register
         * -------------------------------------
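
A worked example of the WE# side of the calculation above (the clock rate and timing values are illustrative, not taken from a specific chip): with an NFI clock of 125000 kHz (8 ns per cycle), tWH_min = 10000 ps, tWP_min = 15000 ps and tWC_min = 30000 ps:

    twh  = DIV_ROUND_UP(10 * 125000, 1000000) - 1 = 1      /* hold = 2 cycles = 16 ns */
    temp = (1 + 1) * 1000000 / 125000 = 16 ns = 16000 ps   /* real WE# hold time      */
    twst = 30000 - 16000 = 14000 ps                        /* stretch to meet tWC_min */
    twst = max(15000, 14000) / 1000 = 15 ns
    twst = DIV_ROUND_UP(15 * 125000, 1000000) - 1 = 1      /* low = 2 cycles = 16 ns  */

The resulting WE# cycle is 16 + 16 = 32 ns, which satisfies tWC_min; the removed code only looked at tWP_min and could end up with a shorter cycle.
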
@@ -835,19 +881,21 @@ static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
        return mtk_nfc_write_page_raw(chip, NULL, 1, page);
 }
 
-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
+                                   u32 sectors)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_ecc_stats stats;
+       u32 reg_size = mtk_nand->fdm.reg_size;
        int rc, i;
 
        rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
        if (rc) {
                memset(buf, 0xff, sectors * chip->ecc.size);
                for (i = 0; i < sectors; i++)
-                       memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+                       memset(oob_ptr(chip, start + i), 0xff, reg_size);
                return 0;
        }
 
@@ -867,7 +915,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
        u32 spare = mtk_nand->spare_per_sector;
        u32 column, sectors, start, end, reg;
        dma_addr_t addr;
-       int bitflips;
+       int bitflips = 0;
        size_t len;
        u8 *buf;
        int rc;
@@ -934,14 +982,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
        if (rc < 0) {
                dev_err(nfc->dev, "subpage done timeout\n");
                bitflips = -EIO;
-       } else {
-               bitflips = 0;
-               if (!raw) {
-                       rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
-                       bitflips = rc < 0 ? -ETIMEDOUT :
-                               mtk_nfc_update_ecc_stats(mtd, buf, sectors);
-                       mtk_nfc_read_fdm(chip, start, sectors);
-               }
+       } else if (!raw) {
+               rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+               bitflips = rc < 0 ? -ETIMEDOUT :
+                       mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
+               mtk_nfc_read_fdm(chip, start, sectors);
        }
 
        dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
@@ -1315,6 +1360,17 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
                        dev_err(dev, "reg property failure : %d\n", ret);
                        return ret;
                }
+
+               if (tmp >= MTK_NAND_MAX_NSELS) {
+                       dev_err(dev, "invalid CS: %u\n", tmp);
+                       return -EINVAL;
+               }
+
+               if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+                       dev_err(dev, "CS %u already assigned\n", tmp);
+                       return -EINVAL;
+               }
+
                chip->sels[i] = tmp;
        }
 
@@ -1589,6 +1645,6 @@ static struct platform_driver mtk_nfc_driver = {
 
 module_platform_driver(mtk_nfc_driver);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual MIT/GPL");
 MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
 MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
index 6eb1312..91f046d 100644 (file)
@@ -2111,35 +2111,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
                if (instr == &ctx->subop.instrs[0])
                        prefix = "    ->";
 
-               switch (instr->type) {
-               case NAND_OP_CMD_INSTR:
-                       pr_debug("%sCMD      [0x%02x]\n", prefix,
-                                instr->ctx.cmd.opcode);
-                       break;
-               case NAND_OP_ADDR_INSTR:
-                       pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
-                                instr->ctx.addr.naddrs,
-                                instr->ctx.addr.naddrs < 64 ?
-                                instr->ctx.addr.naddrs : 64,
-                                instr->ctx.addr.addrs);
-                       break;
-               case NAND_OP_DATA_IN_INSTR:
-                       pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
-                                instr->ctx.data.len,
-                                instr->ctx.data.force_8bit ?
-                                ", force 8-bit" : "");
-                       break;
-               case NAND_OP_DATA_OUT_INSTR:
-                       pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
-                                instr->ctx.data.len,
-                                instr->ctx.data.force_8bit ?
-                                ", force 8-bit" : "");
-                       break;
-               case NAND_OP_WAITRDY_INSTR:
-                       pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
-                                instr->ctx.waitrdy.timeout_ms);
-                       break;
-               }
+               nand_op_trace(prefix, instr);
 
                if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
                        prefix = "      ";
@@ -2152,6 +2124,22 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
 }
 #endif
 
+static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
+                                 const struct nand_op_parser_ctx *b)
+{
+       if (a->subop.ninstrs < b->subop.ninstrs)
+               return -1;
+       else if (a->subop.ninstrs > b->subop.ninstrs)
+               return 1;
+
+       if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
+               return -1;
+       else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
+               return 1;
+
+       return 0;
+}
+
 /**
  * nand_op_parser_exec_op - exec_op parser
  * @chip: the NAND chip
@@ -2186,32 +2174,40 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
        unsigned int i;
 
        while (ctx.subop.instrs < op->instrs + op->ninstrs) {
-               int ret;
+               const struct nand_op_parser_pattern *pattern;
+               struct nand_op_parser_ctx best_ctx;
+               int ret, best_pattern = -1;
 
                for (i = 0; i < parser->npatterns; i++) {
-                       const struct nand_op_parser_pattern *pattern;
+                       struct nand_op_parser_ctx test_ctx = ctx;
 
                        pattern = &parser->patterns[i];
-                       if (!nand_op_parser_match_pat(pattern, &ctx))
+                       if (!nand_op_parser_match_pat(pattern, &test_ctx))
                                continue;
 
-                       nand_op_parser_trace(&ctx);
-
-                       if (check_only)
-                               break;
-
-                       ret = pattern->exec(chip, &ctx.subop);
-                       if (ret)
-                               return ret;
+                       if (best_pattern >= 0 &&
+                           nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
+                               continue;
 
-                       break;
+                       best_pattern = i;
+                       best_ctx = test_ctx;
                }
 
-               if (i == parser->npatterns) {
+               if (best_pattern < 0) {
                        pr_debug("->exec_op() parser: pattern not found!\n");
                        return -ENOTSUPP;
                }
 
+               ctx = best_ctx;
+               nand_op_parser_trace(&ctx);
+
+               if (!check_only) {
+                       pattern = &parser->patterns[best_pattern];
+                       ret = pattern->exec(chip, &ctx.subop);
+                       if (ret)
+                               return ret;
+               }
+
                /*
                 * Update the context structure by pointing to the start of the
                 * next subop.
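
To illustrate the effect of preferring the longest match (a sketch only; the pattern set and the example_exec_*() callbacks are hypothetical, built from the standard NAND_OP_PARSER_* helpers): with the patterns below, an operation consisting of CMD + ADDR + DATA_IN instructions is now dispatched to the second, more complete pattern, whereas the removed code simply took the first pattern that matched, executed only the CMD + ADDR part, and then failed on the leftover DATA_IN instruction.

    static const struct nand_op_parser example_parser = NAND_OP_PARSER(
            NAND_OP_PARSER_PATTERN(example_exec_cmd_addr,
                                   NAND_OP_PARSER_PAT_CMD_ELEM(false),
                                   NAND_OP_PARSER_PAT_ADDR_ELEM(false, 5)),
            NAND_OP_PARSER_PATTERN(example_exec_read,
                                   NAND_OP_PARSER_PAT_CMD_ELEM(false),
                                   NAND_OP_PARSER_PAT_ADDR_ELEM(false, 5),
                                   NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 4096)));
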
index 55aa4c1..1752731 100644 (file)
@@ -170,7 +170,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
                goto fail;
        }
 
-       nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
+       nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL);
        nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
        if (!nbc->eccmask || !nbc->errloc)
                goto fail;
@@ -182,7 +182,6 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
                goto fail;
 
        memset(erased_page, 0xff, eccsize);
-       memset(nbc->eccmask, 0, eccbytes);
        encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
        kfree(erased_page);
 
index fad57c3..58511ae 100644 (file)
@@ -8,6 +8,50 @@
 
 #include "internals.h"
 
+#define MACRONIX_READ_RETRY_BIT BIT(0)
+#define MACRONIX_NUM_READ_RETRY_MODES 6
+
+struct nand_onfi_vendor_macronix {
+       u8 reserved;
+       u8 reliability_func;
+} __packed;
+
+static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
+{
+       u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+
+       if (!chip->parameters.supports_set_get_features ||
+           !test_bit(ONFI_FEATURE_ADDR_READ_RETRY,
+                     chip->parameters.set_feature_list))
+               return -ENOTSUPP;
+
+       feature[0] = mode;
+       return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+
+static void macronix_nand_onfi_init(struct nand_chip *chip)
+{
+       struct nand_parameters *p = &chip->parameters;
+       struct nand_onfi_vendor_macronix *mxic;
+
+       if (!p->onfi)
+               return;
+
+       mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+       if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
+               return;
+
+       chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
+       chip->setup_read_retry = macronix_nand_setup_read_retry;
+
+       if (p->supports_set_get_features) {
+               bitmap_set(p->set_feature_list,
+                          ONFI_FEATURE_ADDR_READ_RETRY, 1);
+               bitmap_set(p->get_feature_list,
+                          ONFI_FEATURE_ADDR_READ_RETRY, 1);
+       }
+}
+
 /*
  * Macronix AC series does not support using SET/GET_FEATURES to change
  * the timings unlike what is declared in the parameter page. Unflag
@@ -56,6 +100,7 @@ static int macronix_nand_init(struct nand_chip *chip)
                chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
 
        macronix_nand_fix_broken_get_timings(chip);
+       macronix_nand_onfi_init(chip);
 
        return 0;
 }
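
A simplified sketch of how the values installed above are consumed (this paraphrases the raw NAND core's retry handling in broad strokes; example_reread_page() is a hypothetical helper, not a real function): when a page read comes back uncorrectable, each retry mode is tried in turn and the chip is put back into mode 0 afterwards.

    static int example_read_with_retries(struct nand_chip *chip, int page)
    {
            int mode, ret = -EBADMSG;

            for (mode = 1; mode < chip->read_retries && ret == -EBADMSG; mode++) {
                    if (chip->setup_read_retry(chip, mode))
                            break;
                    ret = example_reread_page(chip, page);
            }

            /* Always return to the default mode once the page has been handled. */
            chip->setup_read_retry(chip, 0);
            return ret;
    }
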
index 999ca6a..e63acc0 100644 (file)
@@ -37,6 +37,8 @@
 /* Max ECC buffer length */
 #define FMC2_MAX_ECC_BUF_LEN           (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
 
+#define FMC2_TIMEOUT_MS                        1000
+
 /* Timings */
 #define FMC2_THIZ                      1
 #define FMC2_TIO                       8000
@@ -530,7 +532,8 @@ static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
        int ret;
 
        ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
-                                        sr, sr & FMC2_SR_NWRF, 10, 1000);
+                                        sr, sr & FMC2_SR_NWRF, 10,
+                                        FMC2_TIMEOUT_MS);
        if (ret) {
                dev_err(fmc2->dev, "ham timeout\n");
                return ret;
@@ -611,7 +614,7 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
 
        /* Wait until the BCH code is ready */
        if (!wait_for_completion_timeout(&fmc2->complete,
-                                        msecs_to_jiffies(1000))) {
+                                        msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
                dev_err(fmc2->dev, "bch timeout\n");
                stm32_fmc2_disable_bch_irq(fmc2);
                return -ETIMEDOUT;
@@ -696,7 +699,7 @@ static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
 
        /* Wait until the decoding error is ready */
        if (!wait_for_completion_timeout(&fmc2->complete,
-                                        msecs_to_jiffies(1000))) {
+                                        msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
                dev_err(fmc2->dev, "bch timeout\n");
                stm32_fmc2_disable_bch_irq(fmc2);
                return -ETIMEDOUT;
@@ -969,7 +972,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
 
        /* Wait end of sequencer transfer */
        if (!wait_for_completion_timeout(&fmc2->complete,
-                                        msecs_to_jiffies(1000))) {
+                                        msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
                dev_err(fmc2->dev, "seq timeout\n");
                stm32_fmc2_disable_seq_irq(fmc2);
                dmaengine_terminate_all(dma_ch);
@@ -981,7 +984,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
 
        /* Wait DMA data transfer completion */
        if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
-                                        msecs_to_jiffies(100))) {
+                                        msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
                dev_err(fmc2->dev, "data DMA timeout\n");
                dmaengine_terminate_all(dma_ch);
                ret = -ETIMEDOUT;
@@ -990,7 +993,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
        /* Wait DMA ECC transfer completion */
        if (!write_data && !raw) {
                if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
-                                                msecs_to_jiffies(100))) {
+                                       msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
                        dev_err(fmc2->dev, "ECC DMA timeout\n");
                        dmaengine_terminate_all(fmc2->dma_ecc_ch);
                        ret = -ETIMEDOUT;
@@ -1909,6 +1912,12 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               if (irq != -EPROBE_DEFER)
+                       dev_err(dev, "IRQ missing or invalid\n");
+               return irq;
+       }
+
        ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
                               dev_name(dev), fmc2);
        if (ret) {
index 7531250..9662b9c 100644 (file)
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o gigadevice.o macronix.o micron.o toshiba.o winbond.o
+spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o
 obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
index 4c15bb5..89f6bee 100644 (file)
@@ -511,12 +511,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
                if (ret == -EBADMSG) {
                        ecc_failed = true;
                        mtd->ecc_stats.failed++;
-                       ret = 0;
                } else {
                        mtd->ecc_stats.corrected += ret;
                        max_bitflips = max_t(unsigned int, max_bitflips, ret);
                }
 
+               ret = 0;
                ops->retlen += iter.req.datalen;
                ops->oobretlen += iter.req.ooblen;
        }
@@ -757,6 +757,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
        &gigadevice_spinand_manufacturer,
        &macronix_spinand_manufacturer,
        &micron_spinand_manufacturer,
+       &paragon_spinand_manufacturer,
        &toshiba_spinand_manufacturer,
        &winbond_spinand_manufacturer,
 };
@@ -845,7 +846,7 @@ spinand_select_op_variant(struct spinand_device *spinand,
  */
 int spinand_match_and_init(struct spinand_device *spinand,
                           const struct spinand_info *table,
-                          unsigned int table_size, u8 devid)
+                          unsigned int table_size, u16 devid)
 {
        struct nand_device *nand = spinand_to_nand(spinand);
        unsigned int i;
index e6c6460..e99d425 100644 (file)
@@ -9,11 +9,17 @@
 #include <linux/mtd/spinand.h>
 
 #define SPINAND_MFR_GIGADEVICE                 0xC8
+
 #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS     (1 << 4)
 #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS       (3 << 4)
 
 #define GD5FXGQ4UEXXG_REG_STATUS2              0xf0
 
+#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK         (7 << 4)
+#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS  (0 << 4)
+#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4)
+#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR  (7 << 4)
+
 static SPINAND_OP_VARIANTS(read_cache_variants,
                SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
                SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -22,6 +28,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
                SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
                SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
 
+static SPINAND_OP_VARIANTS(read_cache_variants_f,
+               SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+
 static SPINAND_OP_VARIANTS(write_cache_variants,
                SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
                SPINAND_PROG_LOAD(true, 0, NULL, 0));
@@ -59,6 +73,11 @@ static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
        return 0;
 }
 
+static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
+       .ecc = gd5fxgq4xa_ooblayout_ecc,
+       .free = gd5fxgq4xa_ooblayout_free,
+};
+
 static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
                                         u8 status)
 {
@@ -83,7 +102,7 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
        return -EINVAL;
 }
 
-static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
+static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
                                       struct mtd_oob_region *region)
 {
        if (section)
@@ -95,7 +114,7 @@ static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
        return 0;
 }
 
-static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
+static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
                                        struct mtd_oob_region *region)
 {
        if (section)
@@ -108,6 +127,11 @@ static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
        return 0;
 }
 
+static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = {
+       .ecc = gd5fxgq4_variant2_ooblayout_ecc,
+       .free = gd5fxgq4_variant2_ooblayout_free,
+};
+
 static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
                                        u8 status)
 {
@@ -150,15 +174,25 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
        return -EINVAL;
 }
 
-static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
-       .ecc = gd5fxgq4xa_ooblayout_ecc,
-       .free = gd5fxgq4xa_ooblayout_free,
-};
+static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
+                                       u8 status)
+{
+       switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) {
+       case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS:
+               return 0;
 
-static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = {
-       .ecc = gd5fxgq4uexxg_ooblayout_ecc,
-       .free = gd5fxgq4uexxg_ooblayout_free,
-};
+       case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS:
+               return 3;
+
+       case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR:
+               return -EBADMSG;
+
+       default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */
+               return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2;
+       }
+
+       return -EINVAL;
+}
 
 static const struct spinand_info gigadevice_spinand_table[] = {
        SPINAND_INFO("GD5F1GQ4xA", 0xF1,
@@ -195,25 +229,40 @@ static const struct spinand_info gigadevice_spinand_table[] = {
                                              &write_cache_variants,
                                              &update_cache_variants),
                     0,
-                    SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout,
+                    SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
                                     gd5fxgq4uexxg_ecc_get_status)),
+       SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
+                    NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
+                                    gd5fxgq4ufxxg_ecc_get_status)),
 };
 
 static int gigadevice_spinand_detect(struct spinand_device *spinand)
 {
        u8 *id = spinand->id.data;
+       u16 did;
        int ret;
 
        /*
-        * For GD NANDs, There is an address byte needed to shift in before IDs
-        * are read out, so the first byte in raw_id is dummy.
+        * Earlier GD5F-series devices (A, E) return [0][MID][DID].
+        * Later (F) devices return [MID][DID1][DID2].
         */
-       if (id[1] != SPINAND_MFR_GIGADEVICE)
+
+       if (id[0] == SPINAND_MFR_GIGADEVICE)
+               did = (id[1] << 8) + id[2];
+       else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
+               did = id[2];
+       else
                return 0;
 
        ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
                                     ARRAY_SIZE(gigadevice_spinand_table),
-                                    id[2]);
+                                    did);
        if (ret)
                return ret;
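
For example (illustrative byte values; only 0xb148 is taken from the table above):

    /*
     *   F-series:   id[] = { 0xc8, 0xb1, 0x48 } -> did = 0xb148 (GD5F1GQ4UFxxG)
     *   A/E-series: id[] = { 0x00, 0xc8, 0xXX } -> did = 0xXX (one-byte ID path)
     */
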
 
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
new file mode 100644 (file)
index 0000000..5230768
--- /dev/null
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Jeff Kletsky
+ *
+ * Author: Jeff Kletsky <git-commits@allycomm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+
+#define SPINAND_MFR_PARAGON    0xa1
+
+
+#define PN26G0XA_STATUS_ECC_BITMASK            (3 << 4)
+
+#define PN26G0XA_STATUS_ECC_NONE_DETECTED      (0 << 4)
+#define PN26G0XA_STATUS_ECC_1_7_CORRECTED      (1 << 4)
+#define PN26G0XA_STATUS_ECC_ERRORED            (2 << 4)
+#define PN26G0XA_STATUS_ECC_8_CORRECTED                (3 << 4)
+
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+               SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+               SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+               SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+               SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+               SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+
+static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
+                                  struct mtd_oob_region *region)
+{
+       if (section > 3)
+               return -ERANGE;
+
+       region->offset = 6 + (15 * section); /* 4 BBM + 2 user bytes */
+       region->length = 13;
+
+       return 0;
+}
+
+static int pn26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
+                                  struct mtd_oob_region *region)
+{
+       if (section > 4)
+               return -ERANGE;
+
+       if (section == 4) {
+               region->offset = 64;
+               region->length = 64;
+       } else {
+               region->offset = 4 + (15 * section);
+               region->length = 2;
+       }
+
+       return 0;
+}
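
Worked out from the offsets and lengths above, the resulting 128-byte OOB layout for the PN26G0xA parts looks like this (shown purely as an illustration):

    /*
     *   bytes   0-3                       : bad-block marker area
     *   bytes   4-5, 19-20, 34-35, 49-50  : free (user) bytes, 2 per section
     *   bytes   6-18, 21-33, 36-48, 51-63 : ECC, 13 bytes per 512-byte step
     *   bytes  64-127                     : free
     */
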
+
+static int pn26g0xa_ecc_get_status(struct spinand_device *spinand,
+                                  u8 status)
+{
+       switch (status & PN26G0XA_STATUS_ECC_BITMASK) {
+       case PN26G0XA_STATUS_ECC_NONE_DETECTED:
+               return 0;
+
+       case PN26G0XA_STATUS_ECC_1_7_CORRECTED:
+               return 7;       /* Return upper limit by convention */
+
+       case PN26G0XA_STATUS_ECC_8_CORRECTED:
+               return 8;
+
+       case PN26G0XA_STATUS_ECC_ERRORED:
+               return -EBADMSG;
+
+       default:
+               break;
+       }
+
+       return -EINVAL;
+}
+
+static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
+       .ecc = pn26g0xa_ooblayout_ecc,
+       .free = pn26g0xa_ooblayout_free,
+};
+
+
+static const struct spinand_info paragon_spinand_table[] = {
+       SPINAND_INFO("PN26G01A", 0xe1,
+                    NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&pn26g0xa_ooblayout,
+                                    pn26g0xa_ecc_get_status)),
+       SPINAND_INFO("PN26G02A", 0xe2,
+                    NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&pn26g0xa_ooblayout,
+                                    pn26g0xa_ecc_get_status)),
+};
+
+static int paragon_spinand_detect(struct spinand_device *spinand)
+{
+       u8 *id = spinand->id.data;
+       int ret;
+
+       /* Read ID returns [0][MID][DID] */
+
+       if (id[1] != SPINAND_MFR_PARAGON)
+               return 0;
+
+       ret = spinand_match_and_init(spinand, paragon_spinand_table,
+                                    ARRAY_SIZE(paragon_spinand_table),
+                                    id[2]);
+       if (ret)
+               return ret;
+
+       return 1;
+}
+
+static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
+       .detect = paragon_spinand_detect,
+};
+
+const struct spinand_manufacturer paragon_spinand_manufacturer = {
+       .id = SPINAND_MFR_PARAGON,
+       .name = "Paragon",
+       .ops = &paragon_spinand_manuf_ops,
+};
index f24d768..752b6cf 100644 (file)
@@ -371,8 +371,7 @@ static int parse_afs_partitions(struct mtd_info *mtd,
 
 out_free_parts:
        while (i >= 0) {
-               if (parts[i].name)
-                       kfree(parts[i].name);
+               kfree(parts[i].name);
                i--;
        }
        kfree(parts);
index 8e14248..6de8327 100644 (file)
@@ -105,11 +105,4 @@ config SPI_INTEL_SPI_PLATFORM
          To compile this driver as a module, choose M here: the module
          will be called intel-spi-platform.
 
-config SPI_STM32_QUADSPI
-       tristate "STM32 Quad SPI controller"
-       depends on ARCH_STM32 || COMPILE_TEST
-       help
-         This enables support for the STM32 Quad SPI controller.
-         We only connect the NOR to this controller.
-
 endif # MTD_SPI_NOR
index 189a15c..9c5ed03 100644 (file)
@@ -8,4 +8,3 @@ obj-$(CONFIG_SPI_NXP_SPIFI)     += nxp-spifi.o
 obj-$(CONFIG_SPI_INTEL_SPI)    += intel-spi.o
 obj-$(CONFIG_SPI_INTEL_SPI_PCI)        += intel-spi-pci.o
 obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM)   += intel-spi-platform.o
-obj-$(CONFIG_SPI_STM32_QUADSPI)        += stm32-quadspi.o
index 67ade2c..67f15a1 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/sched.h>
 #include <linux/spi/spi.h>
 #include <linux/timer.h>
@@ -1325,6 +1326,7 @@ static int cqspi_probe(struct platform_device *pdev)
        struct cqspi_st *cqspi;
        struct resource *res;
        struct resource *res_ahb;
+       struct reset_control *rstc, *rstc_ocp;
        const struct cqspi_driver_platdata *ddata;
        int ret;
        int irq;
@@ -1391,6 +1393,25 @@ static int cqspi_probe(struct platform_device *pdev)
                goto probe_clk_failed;
        }
 
+       /* Obtain QSPI reset control */
+       rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
+       if (IS_ERR(rstc)) {
+               dev_err(dev, "Cannot get QSPI reset.\n");
+               return PTR_ERR(rstc);
+       }
+
+       rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
+       if (IS_ERR(rstc_ocp)) {
+               dev_err(dev, "Cannot get QSPI OCP reset.\n");
+               return PTR_ERR(rstc_ocp);
+       }
+
+       reset_control_assert(rstc);
+       reset_control_deassert(rstc);
+
+       reset_control_assert(rstc_ocp);
+       reset_control_deassert(rstc_ocp);
+
        cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
        ddata  = of_device_get_match_data(dev);
        if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
index 5e23447..b83c4ab 100644 (file)
@@ -64,6 +64,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
        { },
index 0c2ec1c..03cc788 100644 (file)
@@ -200,7 +200,7 @@ struct sfdp_header {
  *         register does not modify status register 2.
  * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
  *         Read Status instruction 05h. Status register2 is read using
- *         instruction 35h. QE is set via Writ Status instruction 01h with
+ *         instruction 35h. QE is set via Write Status instruction 01h with
  *         two data bytes where bit 1 of the second byte is one.
  *         [...]
  */
@@ -1776,6 +1776,28 @@ static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
                .flags = SPI_NOR_NO_FR | SPI_S3AN,
 
 static int
+is25lp256_post_bfpt_fixups(struct spi_nor *nor,
+                          const struct sfdp_parameter_header *bfpt_header,
+                          const struct sfdp_bfpt *bfpt,
+                          struct spi_nor_flash_parameter *params)
+{
+       /*
+        * IS25LP256 supports 4B opcodes, but the BFPT advertises a
+        * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
+        * Overwrite the address width advertised by the BFPT.
+        */
+       if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
+               BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
+               nor->addr_width = 4;
+
+       return 0;
+}
+
+static struct spi_nor_fixups is25lp256_fixups = {
+       .post_bfpt = is25lp256_post_bfpt_fixups,
+};
+
+static int
 mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
                            const struct sfdp_parameter_header *bfpt_header,
                            const struct sfdp_bfpt *bfpt,
@@ -1916,7 +1938,8 @@ static const struct flash_info spi_nor_ids[] = {
                        SECT_4K | SPI_NOR_DUAL_READ) },
        { "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-                       SPI_NOR_4B_OPCODES) },
+                       SPI_NOR_4B_OPCODES)
+                       .fixups = &is25lp256_fixups },
        { "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
@@ -1969,6 +1992,9 @@ static const struct flash_info spi_nor_ids[] = {
        { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
        { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
        { "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+       { "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
+                             SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+                             NO_CHIP_ERASE) },
        { "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
 
        /* Micron */
@@ -2085,6 +2111,11 @@ static const struct flash_info spi_nor_ids[] = {
                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
        },
        { "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
+       {
+               "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
        { "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
        { "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
        { "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
@@ -2151,7 +2182,7 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
 
        tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
        if (tmp < 0) {
-               dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
+               dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
                return ERR_PTR(tmp);
        }
 
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
deleted file mode 100644 (file)
index 33534f9..0000000
+++ /dev/null
@@ -1,707 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for stm32 quadspi controller
- *
- * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
- * Author(s): Ludovic Barre author <ludovic.barre@st.com>.
- */
-#include <linux/clk.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/sizes.h>
-
-#define QUADSPI_CR             0x00
-#define CR_EN                  BIT(0)
-#define CR_ABORT               BIT(1)
-#define CR_DMAEN               BIT(2)
-#define CR_TCEN                        BIT(3)
-#define CR_SSHIFT              BIT(4)
-#define CR_DFM                 BIT(6)
-#define CR_FSEL                        BIT(7)
-#define CR_FTHRES_SHIFT                8
-#define CR_FTHRES_MASK         GENMASK(12, 8)
-#define CR_FTHRES(n)           (((n) << CR_FTHRES_SHIFT) & CR_FTHRES_MASK)
-#define CR_TEIE                        BIT(16)
-#define CR_TCIE                        BIT(17)
-#define CR_FTIE                        BIT(18)
-#define CR_SMIE                        BIT(19)
-#define CR_TOIE                        BIT(20)
-#define CR_PRESC_SHIFT         24
-#define CR_PRESC_MASK          GENMASK(31, 24)
-#define CR_PRESC(n)            (((n) << CR_PRESC_SHIFT) & CR_PRESC_MASK)
-
-#define QUADSPI_DCR            0x04
-#define DCR_CSHT_SHIFT         8
-#define DCR_CSHT_MASK          GENMASK(10, 8)
-#define DCR_CSHT(n)            (((n) << DCR_CSHT_SHIFT) & DCR_CSHT_MASK)
-#define DCR_FSIZE_SHIFT                16
-#define DCR_FSIZE_MASK         GENMASK(20, 16)
-#define DCR_FSIZE(n)           (((n) << DCR_FSIZE_SHIFT) & DCR_FSIZE_MASK)
-
-#define QUADSPI_SR             0x08
-#define SR_TEF                 BIT(0)
-#define SR_TCF                 BIT(1)
-#define SR_FTF                 BIT(2)
-#define SR_SMF                 BIT(3)
-#define SR_TOF                 BIT(4)
-#define SR_BUSY                        BIT(5)
-#define SR_FLEVEL_SHIFT                8
-#define SR_FLEVEL_MASK         GENMASK(13, 8)
-
-#define QUADSPI_FCR            0x0c
-#define FCR_CTCF               BIT(1)
-
-#define QUADSPI_DLR            0x10
-
-#define QUADSPI_CCR            0x14
-#define CCR_INST_SHIFT         0
-#define CCR_INST_MASK          GENMASK(7, 0)
-#define CCR_INST(n)            (((n) << CCR_INST_SHIFT) & CCR_INST_MASK)
-#define CCR_IMODE_NONE         (0U << 8)
-#define CCR_IMODE_1            (1U << 8)
-#define CCR_IMODE_2            (2U << 8)
-#define CCR_IMODE_4            (3U << 8)
-#define CCR_ADMODE_NONE                (0U << 10)
-#define CCR_ADMODE_1           (1U << 10)
-#define CCR_ADMODE_2           (2U << 10)
-#define CCR_ADMODE_4           (3U << 10)
-#define CCR_ADSIZE_SHIFT       12
-#define CCR_ADSIZE_MASK                GENMASK(13, 12)
-#define CCR_ADSIZE(n)          (((n) << CCR_ADSIZE_SHIFT) & CCR_ADSIZE_MASK)
-#define CCR_ABMODE_NONE                (0U << 14)
-#define CCR_ABMODE_1           (1U << 14)
-#define CCR_ABMODE_2           (2U << 14)
-#define CCR_ABMODE_4           (3U << 14)
-#define CCR_ABSIZE_8           (0U << 16)
-#define CCR_ABSIZE_16          (1U << 16)
-#define CCR_ABSIZE_24          (2U << 16)
-#define CCR_ABSIZE_32          (3U << 16)
-#define CCR_DCYC_SHIFT         18
-#define CCR_DCYC_MASK          GENMASK(22, 18)
-#define CCR_DCYC(n)            (((n) << CCR_DCYC_SHIFT) & CCR_DCYC_MASK)
-#define CCR_DMODE_NONE         (0U << 24)
-#define CCR_DMODE_1            (1U << 24)
-#define CCR_DMODE_2            (2U << 24)
-#define CCR_DMODE_4            (3U << 24)
-#define CCR_FMODE_INDW         (0U << 26)
-#define CCR_FMODE_INDR         (1U << 26)
-#define CCR_FMODE_APM          (2U << 26)
-#define CCR_FMODE_MM           (3U << 26)
-
-#define QUADSPI_AR             0x18
-#define QUADSPI_ABR            0x1c
-#define QUADSPI_DR             0x20
-#define QUADSPI_PSMKR          0x24
-#define QUADSPI_PSMAR          0x28
-#define QUADSPI_PIR            0x2c
-#define QUADSPI_LPTR           0x30
-#define LPTR_DFT_TIMEOUT       0x10
-
-#define FSIZE_VAL(size)                (__fls(size) - 1)
-
-#define STM32_MAX_MMAP_SZ      SZ_256M
-#define STM32_MAX_NORCHIP      2
-
-#define STM32_QSPI_FIFO_SZ     32
-#define STM32_QSPI_FIFO_TIMEOUT_US 30000
-#define STM32_QSPI_BUSY_TIMEOUT_US 100000
-
-struct stm32_qspi_flash {
-       struct spi_nor nor;
-       struct stm32_qspi *qspi;
-       u32 cs;
-       u32 fsize;
-       u32 presc;
-       u32 read_mode;
-       bool registered;
-       u32 prefetch_limit;
-};
-
-struct stm32_qspi {
-       struct device *dev;
-       void __iomem *io_base;
-       void __iomem *mm_base;
-       resource_size_t mm_size;
-       u32 nor_num;
-       struct clk *clk;
-       u32 clk_rate;
-       struct stm32_qspi_flash flash[STM32_MAX_NORCHIP];
-       struct completion cmd_completion;
-
-       /*
-        * to protect device configuration, could be different between
-        * 2 flash access (bk1, bk2)
-        */
-       struct mutex lock;
-};
-
-struct stm32_qspi_cmd {
-       u8 addr_width;
-       u8 dummy;
-       bool tx_data;
-       u8 opcode;
-       u32 framemode;
-       u32 qspimode;
-       u32 addr;
-       size_t len;
-       void *buf;
-};
-
-static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
-{
-       u32 cr;
-       int err = 0;
-
-       if (readl_relaxed(qspi->io_base + QUADSPI_SR) & SR_TCF)
-               return 0;
-
-       reinit_completion(&qspi->cmd_completion);
-       cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
-       writel_relaxed(cr | CR_TCIE, qspi->io_base + QUADSPI_CR);
-
-       if (!wait_for_completion_interruptible_timeout(&qspi->cmd_completion,
-                                                      msecs_to_jiffies(1000)))
-               err = -ETIMEDOUT;
-
-       writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
-       return err;
-}
-
-static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
-{
-       u32 sr;
-
-       return readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, sr,
-                                         !(sr & SR_BUSY), 10,
-                                         STM32_QSPI_BUSY_TIMEOUT_US);
-}
-
-static void stm32_qspi_set_framemode(struct spi_nor *nor,
-                                    struct stm32_qspi_cmd *cmd, bool read)
-{
-       u32 dmode = CCR_DMODE_1;
-
-       cmd->framemode = CCR_IMODE_1;
-
-       if (read) {
-               switch (nor->read_proto) {
-               default:
-               case SNOR_PROTO_1_1_1:
-                       dmode = CCR_DMODE_1;
-                       break;
-               case SNOR_PROTO_1_1_2:
-                       dmode = CCR_DMODE_2;
-                       break;
-               case SNOR_PROTO_1_1_4:
-                       dmode = CCR_DMODE_4;
-                       break;
-               }
-       }
-
-       cmd->framemode |= cmd->tx_data ? dmode : 0;
-       cmd->framemode |= cmd->addr_width ? CCR_ADMODE_1 : 0;
-}
-
-static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
-{
-       *val = readb_relaxed(addr);
-}
-
-static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
-{
-       writeb_relaxed(*val, addr);
-}
-
-static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
-                             const struct stm32_qspi_cmd *cmd)
-{
-       void (*tx_fifo)(u8 *, void __iomem *);
-       u32 len = cmd->len, sr;
-       u8 *buf = cmd->buf;
-       int ret;
-
-       if (cmd->qspimode == CCR_FMODE_INDW)
-               tx_fifo = stm32_qspi_write_fifo;
-       else
-               tx_fifo = stm32_qspi_read_fifo;
-
-       while (len--) {
-               ret = readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR,
-                                                sr, (sr & SR_FTF), 10,
-                                                STM32_QSPI_FIFO_TIMEOUT_US);
-               if (ret) {
-                       dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
-                       return ret;
-               }
-               tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
-       }
-
-       return 0;
-}
-
-static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
-                           const struct stm32_qspi_cmd *cmd)
-{
-       memcpy_fromio(cmd->buf, qspi->mm_base + cmd->addr, cmd->len);
-       return 0;
-}
-
-static int stm32_qspi_tx(struct stm32_qspi *qspi,
-                        const struct stm32_qspi_cmd *cmd)
-{
-       if (!cmd->tx_data)
-               return 0;
-
-       if (cmd->qspimode == CCR_FMODE_MM)
-               return stm32_qspi_tx_mm(qspi, cmd);
-
-       return stm32_qspi_tx_poll(qspi, cmd);
-}
-
-static int stm32_qspi_send(struct stm32_qspi_flash *flash,
-                          const struct stm32_qspi_cmd *cmd)
-{
-       struct stm32_qspi *qspi = flash->qspi;
-       u32 ccr, dcr, cr;
-       u32 last_byte;
-       int err;
-
-       err = stm32_qspi_wait_nobusy(qspi);
-       if (err)
-               goto abort;
-
-       dcr = readl_relaxed(qspi->io_base + QUADSPI_DCR) & ~DCR_FSIZE_MASK;
-       dcr |= DCR_FSIZE(flash->fsize);
-       writel_relaxed(dcr, qspi->io_base + QUADSPI_DCR);
-
-       cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
-       cr &= ~CR_PRESC_MASK & ~CR_FSEL;
-       cr |= CR_PRESC(flash->presc);
-       cr |= flash->cs ? CR_FSEL : 0;
-       writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
-
-       if (cmd->tx_data)
-               writel_relaxed(cmd->len - 1, qspi->io_base + QUADSPI_DLR);
-
-       ccr = cmd->framemode | cmd->qspimode;
-
-       if (cmd->dummy)
-               ccr |= CCR_DCYC(cmd->dummy);
-
-       if (cmd->addr_width)
-               ccr |= CCR_ADSIZE(cmd->addr_width - 1);
-
-       ccr |= CCR_INST(cmd->opcode);
-       writel_relaxed(ccr, qspi->io_base + QUADSPI_CCR);
-
-       if (cmd->addr_width && cmd->qspimode != CCR_FMODE_MM)
-               writel_relaxed(cmd->addr, qspi->io_base + QUADSPI_AR);
-
-       err = stm32_qspi_tx(qspi, cmd);
-       if (err)
-               goto abort;
-
-       if (cmd->qspimode != CCR_FMODE_MM) {
-               err = stm32_qspi_wait_cmd(qspi);
-               if (err)
-                       goto abort;
-               writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
-       } else {
-               last_byte = cmd->addr + cmd->len;
-               if (last_byte > flash->prefetch_limit)
-                       goto abort;
-       }
-
-       return err;
-
-abort:
-       cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
-       writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
-
-       if (err)
-               dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
-
-       return err;
-}
-
-static int stm32_qspi_read_reg(struct spi_nor *nor,
-                              u8 opcode, u8 *buf, int len)
-{
-       struct stm32_qspi_flash *flash = nor->priv;
-       struct device *dev = flash->qspi->dev;
-       struct stm32_qspi_cmd cmd;
-
-       dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
-
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.opcode = opcode;
-       cmd.tx_data = true;
-       cmd.len = len;
-       cmd.buf = buf;
-       cmd.qspimode = CCR_FMODE_INDR;
-
-       stm32_qspi_set_framemode(nor, &cmd, false);
-
-       return stm32_qspi_send(flash, &cmd);
-}
-
-static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode,
-                               u8 *buf, int len)
-{
-       struct stm32_qspi_flash *flash = nor->priv;
-       struct device *dev = flash->qspi->dev;
-       struct stm32_qspi_cmd cmd;
-
-       dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
-
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.opcode = opcode;
-       cmd.tx_data = !!(buf && len > 0);
-       cmd.len = len;
-       cmd.buf = buf;
-       cmd.qspimode = CCR_FMODE_INDW;
-
-       stm32_qspi_set_framemode(nor, &cmd, false);
-
-       return stm32_qspi_send(flash, &cmd);
-}
-
-static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
-                              u_char *buf)
-{
-       struct stm32_qspi_flash *flash = nor->priv;
-       struct stm32_qspi *qspi = flash->qspi;
-       struct stm32_qspi_cmd cmd;
-       int err;
-
-       dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n",
-               nor->read_opcode, buf, (u32)from, len);
-
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.opcode = nor->read_opcode;
-       cmd.addr_width = nor->addr_width;
-       cmd.addr = (u32)from;
-       cmd.tx_data = true;
-       cmd.dummy = nor->read_dummy;
-       cmd.len = len;
-       cmd.buf = buf;
-       cmd.qspimode = flash->read_mode;
-
-       stm32_qspi_set_framemode(nor, &cmd, true);
-       err = stm32_qspi_send(flash, &cmd);
-
-       return err ? err : len;
-}
-
-static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
-                               const u_char *buf)
-{
-       struct stm32_qspi_flash *flash = nor->priv;
-       struct device *dev = flash->qspi->dev;
-       struct stm32_qspi_cmd cmd;
-       int err;
-
-       dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n",
-               nor->program_opcode, buf, (u32)to, len);
-
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.opcode = nor->program_opcode;
-       cmd.addr_width = nor->addr_width;
-       cmd.addr = (u32)to;
-       cmd.tx_data = true;
-       cmd.len = len;
-       cmd.buf = (void *)buf;
-       cmd.qspimode = CCR_FMODE_INDW;
-
-       stm32_qspi_set_framemode(nor, &cmd, false);
-       err = stm32_qspi_send(flash, &cmd);
-
-       return err ? err : len;
-}
-
-static int stm32_qspi_erase(struct spi_nor *nor, loff_t offs)
-{
-       struct stm32_qspi_flash *flash = nor->priv;
-       struct device *dev = flash->qspi->dev;
-       struct stm32_qspi_cmd cmd;
-
-       dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs);
-
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.opcode = nor->erase_opcode;
-       cmd.addr_width = nor->addr_width;
-       cmd.addr = (u32)offs;
-       cmd.qspimode = CCR_FMODE_INDW;
-
-       stm32_qspi_set_framemode(nor, &cmd, false);
-
-       return stm32_qspi_send(flash, &cmd);
-}
-
-static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
-{
-       struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
-       u32 cr, sr, fcr = 0;
-
-       cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
-       sr = readl_relaxed(qspi->io_base + QUADSPI_SR);
-
-       if ((cr & CR_TCIE) && (sr & SR_TCF)) {
-               /* tx complete */
-               fcr |= FCR_CTCF;
-               complete(&qspi->cmd_completion);
-       } else {
-               dev_info_ratelimited(qspi->dev, "spurious interrupt\n");
-       }
-
-       writel_relaxed(fcr, qspi->io_base + QUADSPI_FCR);
-
-       return IRQ_HANDLED;
-}
-
-static int stm32_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
-       struct stm32_qspi_flash *flash = nor->priv;
-       struct stm32_qspi *qspi = flash->qspi;
-
-       mutex_lock(&qspi->lock);
-       return 0;
-}
-
-static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
-       struct stm32_qspi_flash *flash = nor->priv;
-       struct stm32_qspi *qspi = flash->qspi;
-
-       mutex_unlock(&qspi->lock);
-}
-
-static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
-                                 struct device_node *np)
-{
-       struct spi_nor_hwcaps hwcaps = {
-               .mask = SNOR_HWCAPS_READ |
-                       SNOR_HWCAPS_READ_FAST |
-                       SNOR_HWCAPS_PP,
-       };
-       u32 width, presc, cs_num, max_rate = 0;
-       struct stm32_qspi_flash *flash;
-       struct mtd_info *mtd;
-       int ret;
-
-       of_property_read_u32(np, "reg", &cs_num);
-       if (cs_num >= STM32_MAX_NORCHIP)
-               return -EINVAL;
-
-       of_property_read_u32(np, "spi-max-frequency", &max_rate);
-       if (!max_rate)
-               return -EINVAL;
-
-       presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1;
-
-       if (of_property_read_u32(np, "spi-rx-bus-width", &width))
-               width = 1;
-
-       if (width == 4)
-               hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
-       else if (width == 2)
-               hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
-       else if (width != 1)
-               return -EINVAL;
-
-       flash = &qspi->flash[cs_num];
-       flash->qspi = qspi;
-       flash->cs = cs_num;
-       flash->presc = presc;
-
-       flash->nor.dev = qspi->dev;
-       spi_nor_set_flash_node(&flash->nor, np);
-       flash->nor.priv = flash;
-       mtd = &flash->nor.mtd;
-
-       flash->nor.read = stm32_qspi_read;
-       flash->nor.write = stm32_qspi_write;
-       flash->nor.erase = stm32_qspi_erase;
-       flash->nor.read_reg = stm32_qspi_read_reg;
-       flash->nor.write_reg = stm32_qspi_write_reg;
-       flash->nor.prepare = stm32_qspi_prep;
-       flash->nor.unprepare = stm32_qspi_unprep;
-
-       writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QUADSPI_LPTR);
-
-       writel_relaxed(CR_PRESC(presc) | CR_FTHRES(3) | CR_TCEN | CR_SSHIFT
-                      | CR_EN, qspi->io_base + QUADSPI_CR);
-
-       /*
-        * in stm32 qspi controller, QUADSPI_DCR register has a fsize field
-        * which define the size of nor flash.
-        * if fsize is NULL, the controller can't sent spi-nor command.
-        * set a temporary value just to discover the nor flash with
-        * "spi_nor_scan". After, the right value (mtd->size) can be set.
-        */
-       flash->fsize = FSIZE_VAL(SZ_1K);
-
-       ret = spi_nor_scan(&flash->nor, NULL, &hwcaps);
-       if (ret) {
-               dev_err(qspi->dev, "device scan failed\n");
-               return ret;
-       }
-
-       flash->fsize = FSIZE_VAL(mtd->size);
-       flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
-
-       flash->read_mode = CCR_FMODE_MM;
-       if (mtd->size > qspi->mm_size)
-               flash->read_mode = CCR_FMODE_INDR;
-
-       writel_relaxed(DCR_CSHT(1), qspi->io_base + QUADSPI_DCR);
-
-       ret = mtd_device_register(mtd, NULL, 0);
-       if (ret) {
-               dev_err(qspi->dev, "mtd device parse failed\n");
-               return ret;
-       }
-
-       flash->registered = true;
-
-       dev_dbg(qspi->dev, "read mm:%s cs:%d bus:%d\n",
-               flash->read_mode == CCR_FMODE_MM ? "yes" : "no", cs_num, width);
-
-       return 0;
-}
-
-static void stm32_qspi_mtd_free(struct stm32_qspi *qspi)
-{
-       int i;
-
-       for (i = 0; i < STM32_MAX_NORCHIP; i++)
-               if (qspi->flash[i].registered)
-                       mtd_device_unregister(&qspi->flash[i].nor.mtd);
-}
-
-static int stm32_qspi_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct device_node *flash_np;
-       struct reset_control *rstc;
-       struct stm32_qspi *qspi;
-       struct resource *res;
-       int ret, irq;
-
-       qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
-       if (!qspi)
-               return -ENOMEM;
-
-       qspi->nor_num = of_get_child_count(dev->of_node);
-       if (!qspi->nor_num || qspi->nor_num > STM32_MAX_NORCHIP)
-               return -ENODEV;
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
-       qspi->io_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(qspi->io_base))
-               return PTR_ERR(qspi->io_base);
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
-       qspi->mm_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(qspi->mm_base))
-               return PTR_ERR(qspi->mm_base);
-
-       qspi->mm_size = resource_size(res);
-
-       irq = platform_get_irq(pdev, 0);
-       ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
-                              dev_name(dev), qspi);
-       if (ret) {
-               dev_err(dev, "failed to request irq\n");
-               return ret;
-       }
-
-       init_completion(&qspi->cmd_completion);
-
-       qspi->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(qspi->clk))
-               return PTR_ERR(qspi->clk);
-
-       qspi->clk_rate = clk_get_rate(qspi->clk);
-       if (!qspi->clk_rate)
-               return -EINVAL;
-
-       ret = clk_prepare_enable(qspi->clk);
-       if (ret) {
-               dev_err(dev, "can not enable the clock\n");
-               return ret;
-       }
-
-       rstc = devm_reset_control_get_exclusive(dev, NULL);
-       if (!IS_ERR(rstc)) {
-               reset_control_assert(rstc);
-               udelay(2);
-               reset_control_deassert(rstc);
-       }
-
-       qspi->dev = dev;
-       platform_set_drvdata(pdev, qspi);
-       mutex_init(&qspi->lock);
-
-       for_each_available_child_of_node(dev->of_node, flash_np) {
-               ret = stm32_qspi_flash_setup(qspi, flash_np);
-               if (ret) {
-                       dev_err(dev, "unable to setup flash chip\n");
-                       goto err_flash;
-               }
-       }
-
-       return 0;
-
-err_flash:
-       mutex_destroy(&qspi->lock);
-       stm32_qspi_mtd_free(qspi);
-
-       clk_disable_unprepare(qspi->clk);
-       return ret;
-}
-
-static int stm32_qspi_remove(struct platform_device *pdev)
-{
-       struct stm32_qspi *qspi = platform_get_drvdata(pdev);
-
-       /* disable qspi */
-       writel_relaxed(0, qspi->io_base + QUADSPI_CR);
-
-       stm32_qspi_mtd_free(qspi);
-       mutex_destroy(&qspi->lock);
-
-       clk_disable_unprepare(qspi->clk);
-       return 0;
-}
-
-static const struct of_device_id stm32_qspi_match[] = {
-       {.compatible = "st,stm32f469-qspi"},
-       {}
-};
-MODULE_DEVICE_TABLE(of, stm32_qspi_match);
-
-static struct platform_driver stm32_qspi_driver = {
-       .probe  = stm32_qspi_probe,
-       .remove = stm32_qspi_remove,
-       .driver = {
-               .name = "stm32-quadspi",
-               .of_match_table = stm32_qspi_match,
-       },
-};
-module_platform_driver(stm32_qspi_driver);
-
-MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
-MODULE_LICENSE("GPL v2");
diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h
new file mode 100644 (file)
index 0000000..069d9f5
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MXS_DMA_H_
+#define _MXS_DMA_H_
+
+#include <linux/dmaengine.h>
+
+#define MXS_DMA_CTRL_WAIT4END  BIT(31)
+#define MXS_DMA_CTRL_WAIT4RDY  BIT(30)
+
+/*
+ * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words
+ * in the second argument to dmaengine_prep_slave_sg when the direction is
+ * set to DMA_TRANS_NONE. To make this clear and to prevent users from doing
+ * the error-prone casting themselves, we provide this wrapper function.
+ */
+static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio(
+        struct dma_chan *chan, u32 *pio, unsigned int npio,
+        enum dma_transfer_direction dir, unsigned long flags)
+{
+       return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio,
+                                      dir, flags);
+}
+
+#endif /* _MXS_DMA_H_ */
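
A minimal usage sketch of the new wrapper (not part of the patch; the function name and the PIO word values are hypothetical, only mxs_dmaengine_prep_pio() and the WAIT4* flags come from the header above):

#include <linux/kernel.h>
#include <linux/dma/mxs-dma.h>

static int example_send_pio_words(struct dma_chan *chan)
{
	struct dma_async_tx_descriptor *desc;
	/* Controller-specific PIO words; the values here are placeholders */
	u32 pio[2] = { 0x12345678, 0x9abcdef0 };

	/* DMA_TRANS_NONE tells the mxs driver these are PIO words, not an sg list */
	desc = mxs_dmaengine_prep_pio(chan, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}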
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 208c87c..c98a211 100644 (file)
@@ -219,6 +219,13 @@ struct cfi_pri_amdstd {
        uint8_t  VppMin;
        uint8_t  VppMax;
        uint8_t  TopBottom;
+       /* The fields below were added in version 1.5 */
+       uint8_t  ProgramSuspend;
+       uint8_t  UnlockBypass;
+       uint8_t  SecureSiliconSector;
+       uint8_t  SoftwareFeatures;
+#define CFI_POLL_STATUS_REG    BIT(0)
+#define CFI_POLL_DQ            BIT(1)
 } __packed;
 
 /* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
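
A hedged sketch of how a chip driver could consume the new fields (the function is illustrative, not from the patch; it assumes the MajorVersion/MinorVersion members that appear earlier in struct cfi_pri_amdstd):

static bool example_poll_via_status_reg(const struct cfi_pri_amdstd *extp)
{
	/* The new fields are only valid for a primary extended query >= 1.5 */
	if (extp->MajorVersion > '1' ||
	    (extp->MajorVersion == '1' && extp->MinorVersion >= '5'))
		return extp->SoftwareFeatures & CFI_POLL_STATUS_REG;

	/* Older tables: fall back to DQ polling */
	return false;
}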
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
new file mode 100644 (file)
index 0000000..2dfe659
--- /dev/null
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef __LINUX_MTD_HYPERBUS_H__
+#define __LINUX_MTD_HYPERBUS_H__
+
+#include <linux/mtd/map.h>
+
+enum hyperbus_memtype {
+       HYPERFLASH,
+       HYPERRAM,
+};
+
+/**
+ * struct hyperbus_device - struct representing HyperBus slave device
+ * @map: map_info struct for accessing MMIO HyperBus flash memory
+ * @np: pointer to HyperBus slave device node
+ * @mtd: pointer to MTD struct
+ * @ctlr: pointer to HyperBus controller struct
+ * @memtype: type of memory device: HyperFlash or HyperRAM
+ */
+
+struct hyperbus_device {
+       struct map_info map;
+       struct device_node *np;
+       struct mtd_info *mtd;
+       struct hyperbus_ctlr *ctlr;
+       enum hyperbus_memtype memtype;
+};
+
+/**
+ * struct hyperbus_ops - struct representing custom HyperBus operations
+ * @read16: read 16 bits of data from flash in a single burst. Used to read
+ *          from a non-default address space, such as the ID/CFI space
+ * @write16: write 16 bits of data to flash in a single burst. Used to send
+ *           a command to the flash or to write a single 16-bit word at a time.
+ * @copy_from: copy data from flash memory
+ * @copy_to: copy data to flash memory
+ * @calibrate: calibrate HyperBus controller
+ */
+
+struct hyperbus_ops {
+       u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr);
+       void (*write16)(struct hyperbus_device *hbdev,
+                       unsigned long addr, u16 val);
+       void (*copy_from)(struct hyperbus_device *hbdev, void *to,
+                         unsigned long from, ssize_t len);
+       void (*copy_to)(struct hyperbus_device *dev, unsigned long to,
+                       const void *from, ssize_t len);
+       int (*calibrate)(struct hyperbus_device *dev);
+};
+
+/**
+ * struct hyperbus_ctlr - struct representing HyperBus controller
+ * @dev: pointer to HyperBus controller device
+ * @calibrated: flag to indicate ctlr calibration sequence is complete
+ * @ops: HyperBus controller ops
+ */
+struct hyperbus_ctlr {
+       struct device *dev;
+       bool calibrated;
+
+       const struct hyperbus_ops *ops;
+};
+
+/**
+ * hyperbus_register_device - probe and register a HyperBus slave memory device
+ * @hbdev: hyperbus_device struct with dev, np and ctlr field populated
+ *
+ * Return: 0 for success, others for failure.
+ */
+int hyperbus_register_device(struct hyperbus_device *hbdev);
+
+/**
+ * hyperbus_unregister_device - deregister HyperBus slave memory device
+ * @hbdev: hyperbus_device to be unregistered
+ *
+ * Return: 0 for success, others for failure.
+ */
+int hyperbus_unregister_device(struct hyperbus_device *hbdev);
+
+#endif /* __LINUX_MTD_HYPERBUS_H__ */
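
A minimal sketch of a controller driver built on this API (everything prefixed example_ is hypothetical and the MMIO accessors are only one plausible implementation; the structs and hyperbus_register_device() are taken from the header above):

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mtd/hyperbus.h>

struct example_hbmc {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
};

/* Single 16-bit accesses, e.g. for ID/CFI queries; assumes the HyperBus
 * core has ioremapped the device window into hbdev->map.virt.
 */
static u16 example_hbmc_read16(struct hyperbus_device *hbdev, unsigned long addr)
{
	return readw(hbdev->map.virt + addr);
}

static void example_hbmc_write16(struct hyperbus_device *hbdev,
				 unsigned long addr, u16 val)
{
	writew(val, hbdev->map.virt + addr);
}

static const struct hyperbus_ops example_hbmc_ops = {
	.read16 = example_hbmc_read16,
	.write16 = example_hbmc_write16,
};

static int example_hbmc_probe(struct platform_device *pdev)
{
	struct example_hbmc *hbmc;

	hbmc = devm_kzalloc(&pdev->dev, sizeof(*hbmc), GFP_KERNEL);
	if (!hbmc)
		return -ENOMEM;

	hbmc->ctlr.dev = &pdev->dev;
	hbmc->ctlr.ops = &example_hbmc_ops;

	/* The HyperFlash slave is described by the controller's child node */
	hbmc->hbdev.np = of_get_next_child(pdev->dev.of_node, NULL);
	hbmc->hbdev.ctlr = &hbmc->ctlr;
	hbmc->hbdev.memtype = HYPERFLASH;

	platform_set_drvdata(pdev, hbmc);

	return hyperbus_register_device(&hbmc->hbdev);
}

The split this illustrates is that the core is expected to own the map and MTD handling, while the controller driver only supplies bus accessors and the device description.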
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 936a3fd..4ca8c1c 100644 (file)
@@ -316,6 +316,12 @@ struct mtd_info {
        int (*_get_device) (struct mtd_info *mtd);
        void (*_put_device) (struct mtd_info *mtd);
 
+       /*
+        * This flag indicates a panic write; low-level drivers can take
+        * appropriate action if required to ensure writes go through.
+        */
+       bool oops_panic_write;
+
        struct notifier_block reboot_notifier;  /* default mode before reboot */
 
        /* ECC status information */
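
A hypothetical low-level write path showing how a driver might honour the new flag (both example_ helpers are made up; only mtd->oops_panic_write comes from the patch):

#include <linux/mtd/mtd.h>

/* Hypothetical transfer helpers */
static int example_write_page_pio_polled(struct mtd_info *mtd, loff_t to, const u8 *buf);
static int example_write_page_irq(struct mtd_info *mtd, loff_t to, const u8 *buf);

static int example_write_page(struct mtd_info *mtd, loff_t to, const u8 *buf)
{
	/*
	 * In panic context interrupts may never arrive and sleeping is not
	 * allowed, so fall back to PIO with status polling.
	 */
	if (mtd->oops_panic_write)
		return example_write_page_pio_polled(mtd, to, buf);

	/* Normal path: interrupt-driven or DMA-based transfer */
	return example_write_page_irq(mtd, to, buf);
}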
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index 2d12a1b..5f72840 100644 (file)
@@ -77,6 +77,7 @@
 #define ONENAND_DEVICE_DENSITY_1Gb     (0x003)
 #define ONENAND_DEVICE_DENSITY_2Gb     (0x004)
 #define ONENAND_DEVICE_DENSITY_4Gb     (0x005)
+#define ONENAND_DEVICE_DENSITY_8Gb     (0x006)
 
 /*
  * Version ID Register F002h (R)
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index ac3884a..4ab9bcc 100644 (file)
@@ -874,6 +874,42 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
                           const struct nand_op_parser *parser,
                           const struct nand_operation *op, bool check_only);
 
+static inline void nand_op_trace(const char *prefix,
+                                const struct nand_op_instr *instr)
+{
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               pr_debug("%sCMD      [0x%02x]\n", prefix,
+                        instr->ctx.cmd.opcode);
+               break;
+       case NAND_OP_ADDR_INSTR:
+               pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
+                        instr->ctx.addr.naddrs,
+                        instr->ctx.addr.naddrs < 64 ?
+                        instr->ctx.addr.naddrs : 64,
+                        instr->ctx.addr.addrs);
+               break;
+       case NAND_OP_DATA_IN_INSTR:
+               pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
+                        instr->ctx.data.len,
+                        instr->ctx.data.force_8bit ?
+                        ", force 8-bit" : "");
+               break;
+       case NAND_OP_DATA_OUT_INSTR:
+               pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+                        instr->ctx.data.len,
+                        instr->ctx.data.force_8bit ?
+                        ", force 8-bit" : "");
+               break;
+       case NAND_OP_WAITRDY_INSTR:
+               pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
+                        instr->ctx.waitrdy.timeout_ms);
+               break;
+       }
+#endif
+}
+
 /**
  * struct nand_controller_ops - Controller operations
  *
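
The helper above is intended to be called from a controller's ->exec_op() path; a hypothetical sketch (the function name is illustrative, and the op->instrs/op->ninstrs layout of struct nand_operation is assumed from this header):

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	unsigned int i;

	if (check_only)
		return 0;

	for (i = 0; i < op->ninstrs; i++) {
		/* One pr_debug() line per instruction, gated on dynamic debug */
		nand_op_trace("example: ", &op->instrs[i]);
		/* ... issue the instruction to the controller here ... */
	}

	return 0;
}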
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 507f7e2..4ea558b 100644 (file)
                   SPI_MEM_OP_DUMMY(ndummy, 1),                         \
                   SPI_MEM_OP_DATA_IN(len, buf, 1))
 
+#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
+       SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1),               \
+                  SPI_MEM_OP_ADDR(3, addr, 1),                         \
+                  SPI_MEM_OP_DUMMY(ndummy, 1),                         \
+                  SPI_MEM_OP_DATA_IN(len, buf, 1))
+
 #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len)     \
        SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1),                             \
                   SPI_MEM_OP_ADDR(2, addr, 1),                         \
                   SPI_MEM_OP_DUMMY(ndummy, 1),                         \
                   SPI_MEM_OP_DATA_IN(len, buf, 2))
 
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len)  \
+       SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1),                             \
+                  SPI_MEM_OP_ADDR(3, addr, 1),                         \
+                  SPI_MEM_OP_DUMMY(ndummy, 1),                         \
+                  SPI_MEM_OP_DATA_IN(len, buf, 2))
+
 #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len)     \
        SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),                             \
                   SPI_MEM_OP_ADDR(2, addr, 1),                         \
                   SPI_MEM_OP_DUMMY(ndummy, 1),                         \
                   SPI_MEM_OP_DATA_IN(len, buf, 4))
 
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len)  \
+       SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),                             \
+                  SPI_MEM_OP_ADDR(3, addr, 1),                         \
+                  SPI_MEM_OP_DUMMY(ndummy, 1),                         \
+                  SPI_MEM_OP_DATA_IN(len, buf, 4))
+
 #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
        SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1),                             \
                   SPI_MEM_OP_ADDR(2, addr, 2),                         \
                   SPI_MEM_OP_DUMMY(ndummy, 2),                         \
                   SPI_MEM_OP_DATA_IN(len, buf, 2))
 
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
+       SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1),                             \
+                  SPI_MEM_OP_ADDR(3, addr, 2),                         \
+                  SPI_MEM_OP_DUMMY(ndummy, 2),                         \
+                  SPI_MEM_OP_DATA_IN(len, buf, 2))
+
 #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
        SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),                             \
                   SPI_MEM_OP_ADDR(2, addr, 4),                         \
                   SPI_MEM_OP_DUMMY(ndummy, 4),                         \
                   SPI_MEM_OP_DATA_IN(len, buf, 4))
 
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
+       SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),                             \
+                  SPI_MEM_OP_ADDR(3, addr, 4),                         \
+                  SPI_MEM_OP_DUMMY(ndummy, 4),                         \
+                  SPI_MEM_OP_DATA_IN(len, buf, 4))
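
The new _3A variants slot into a chip driver's read-cache variant table; an illustrative sketch for a part that uses three-byte addressing (the table name and dummy-cycle counts are assumptions, and SPINAND_OP_VARIANTS() is assumed from elsewhere in this header):

static SPINAND_OP_VARIANTS(example_read_cache_variants_3a,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));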
+
 #define SPINAND_PROG_EXEC_OP(addr)                                     \
        SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1),                             \
                   SPI_MEM_OP_ADDR(3, addr, 1),                         \
@@ -197,6 +227,7 @@ struct spinand_manufacturer {
 extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
 extern const struct spinand_manufacturer macronix_spinand_manufacturer;
 extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer paragon_spinand_manufacturer;
 extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
 extern const struct spinand_manufacturer winbond_spinand_manufacturer;
 
@@ -260,7 +291,7 @@ struct spinand_ecc_info {
  */
 struct spinand_info {
        const char *model;
-       u8 devid;
+       u16 devid;
        u32 flags;
        struct nand_memory_organization memorg;
        struct nand_ecc_req eccreq;
@@ -422,7 +453,7 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
 
 int spinand_match_and_init(struct spinand_device *dev,
                           const struct spinand_info *table,
-                          unsigned int table_size, u8 devid);
+                          unsigned int table_size, u16 devid);
 
 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
 int spinand_select_target(struct spinand_device *spinand, unsigned int target);
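
With the device ID widened to u16, a manufacturer ->detect() hook can match on two ID bytes; a hedged sketch (example_spinand_table is a hypothetical struct spinand_info array declared elsewhere, and the id.data byte layout is an assumption):

static int example_spinand_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	u16 devid;
	int ret;

	/* Assumes id[0] is the manufacturer byte and the next two bytes form the device ID */
	devid = (id[1] << 8) | id[2];

	ret = spinand_match_and_init(spinand, example_spinand_table,
				     ARRAY_SIZE(example_spinand_table), devid);
	if (ret)
		return ret;

	return 1;	/* device recognized */
}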
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index aff5b5e..47ffe32 100644 (file)
@@ -113,11 +113,11 @@ struct mtd_write_req {
 #define MTD_CAP_NVRAM          (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE)
 
 /* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */
-#define MTD_NANDECC_OFF                0       // Switch off ECC (Not recommended)
-#define MTD_NANDECC_PLACE      1       // Use the given placement in the structure (YAFFS1 legacy mode)
-#define MTD_NANDECC_AUTOPLACE  2       // Use the default placement scheme
-#define MTD_NANDECC_PLACEONLY  3       // Use the given placement in the structure (Do not store ecc result on read)
-#define MTD_NANDECC_AUTOPL_USR         4       // Use the given autoplacement scheme rather than using the default
+#define MTD_NANDECC_OFF                0       /* Switch off ECC (Not recommended) */
+#define MTD_NANDECC_PLACE      1       /* Use the given placement in the structure (YAFFS1 legacy mode) */
+#define MTD_NANDECC_AUTOPLACE  2       /* Use the default placement scheme */
+#define MTD_NANDECC_PLACEONLY  3       /* Use the given placement in the structure (Do not store ecc result on read) */
+#define MTD_NANDECC_AUTOPL_USR         4       /* Use the given autoplacement scheme rather than using the default */
 
 /* OTP mode selection */
 #define MTD_OTP_OFF            0