1 // SPDX-License-Identifier: GPL-2.0
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
9 * Synced from Linux v4.19
13 #include <display_options.h>
17 #include <dm/device_compat.h>
18 #include <dm/devres.h>
19 #include <linux/bitops.h>
20 #include <linux/err.h>
21 #include <linux/errno.h>
22 #include <linux/log2.h>
23 #include <linux/math64.h>
24 #include <linux/sizes.h>
25 #include <linux/bitfield.h>
26 #include <linux/delay.h>
28 #include <linux/mtd/mtd.h>
29 #include <linux/mtd/spi-nor.h>
30 #include <mtd/cfi_flash.h>
34 #include "sf_internal.h"
36 /* Define max times to check status register before we give up. */
39 * For everything but full-chip erase; probably could be much smaller, but kept
40 * around for safety for now
/* HZ: system tick rate; scales the default ready-wait timeout below. */
43 #define HZ CONFIG_SYS_HZ
/* Default timeout handed to spi_nor_wait_till_ready_with_timeout(). */
45 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
/* Round x up to the nearest multiple of y (y must be non-zero). */
47 #define ROUND_UP_TO(x, y) (((x) + (y) - 1) / (y) * (y))
/*
 * SFDP (Serial Flash Discoverable Parameters, JESD216) on-flash data
 * structures and Basic Flash Parameter Table (BFPT) field definitions.
 * NOTE(review): the embedded numbering skips lines here — some struct
 * members are not visible in this listing.
 */
49 struct sfdp_parameter_header {
53 u8 length; /* in double words */
54 u8 parameter_table_pointer[3]; /* byte address */
/* Reassemble the 16-bit table ID / 24-bit table pointer from the header. */
58 #define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
59 #define SFDP_PARAM_HEADER_PTP(p) \
60 (((p)->parameter_table_pointer[2] << 16) | \
61 ((p)->parameter_table_pointer[1] << 8) | \
62 ((p)->parameter_table_pointer[0] << 0))
64 #define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
65 #define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
66 #define SFDP_SST_ID 0x01bf /* Manufacturer specific Table */
67 #define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 Table */
68 #define SFDP_SCCR_MAP_ID 0xff87 /*
69 * Status, Control and Configuration
/* "SFDP" in little-endian ASCII. */
73 #define SFDP_SIGNATURE 0x50444653U
74 #define SFDP_JESD216_MAJOR 1
75 #define SFDP_JESD216_MINOR 0
76 #define SFDP_JESD216A_MINOR 5
77 #define SFDP_JESD216B_MINOR 6
80 u32 signature; /* 0x50444653U <=> "SFDP" */
83 u8 nph; /* 0-base number of parameter headers */
86 /* Basic Flash Parameter Table. */
87 struct sfdp_parameter_header bfpt_header;
90 /* Basic Flash Parameter Table */
93 * JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs.
94 * They are indexed from 1 but C arrays are indexed from 0.
96 #define BFPT_DWORD(i) ((i) - 1)
97 #define BFPT_DWORD_MAX 20
99 /* The first version of JESD216 defined only 9 DWORDs. */
100 #define BFPT_DWORD_MAX_JESD216 9
101 #define BFPT_DWORD_MAX_JESD216B 16
104 #define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
105 #define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
106 #define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
107 #define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
108 #define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
109 #define BFPT_DWORD1_DTR BIT(19)
110 #define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
111 #define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
112 #define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
115 #define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
116 #define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
119 #define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
120 #define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
125 * (from JESD216 rev B)
126 * Quad Enable Requirements (QER):
127 * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
128 * reads based on instruction. DQ3/HOLD# functions are hold during
130 * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
131 * two data bytes where bit 1 of the second byte is one.
133 * Writing only one byte to the status register has the side-effect of
134 * clearing status register 2, including the QE bit. The 100b code is
135 * used if writing one byte to the status register does not modify
137 * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
138 * one data byte where bit 6 is one.
140 * - 011b: QE is bit 7 of status register 2. It is set via Write status
141 * register 2 instruction 3Eh with one data byte where bit 7 is one.
143 * The status register 2 is read using instruction 3Fh.
144 * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
145 * two data bytes where bit 1 of the second byte is one.
147 * In contrast to the 001b code, writing one byte to the status
148 * register does not modify status register 2.
149 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
150 * Read Status instruction 05h. Status register2 is read using
151 * instruction 35h. QE is set via Write Status instruction 01h with
152 * two data bytes where bit 1 of the second byte is one.
155 #define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
156 #define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
157 #define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
158 #define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
159 #define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
160 #define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
161 #define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
163 #define BFPT_DWORD16_SOFT_RST BIT(12)
/* How the second byte of a 2-byte DTR opcode is derived (see BFPT DWORD 18). */
165 #define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29)
166 #define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */
167 #define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */
168 #define BFPT_DWORD18_CMD_EXT_RES (0x2UL << 29) /* Reserved */
169 #define BFPT_DWORD18_CMD_EXT_16B (0x3UL << 29) /* 16-bit opcode */
171 /* xSPI Profile 1.0 table (from JESD216D.01). */
172 #define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8)
173 #define PROFILE1_DWORD1_RDSR_DUMMY BIT(28)
174 #define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29)
175 #define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7)
176 #define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27)
177 #define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17)
178 #define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7)
179 #define PROFILE1_DUMMY_DEFAULT 20
181 /* Status, Control and Configuration Register Map(SCCR) */
182 #define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31)
/* NOTE(review): member of struct sfdp_bfpt; the struct header is not
 * visible in this listing. */
185 u32 dwords[BFPT_DWORD_MAX];
189 * struct spi_nor_fixups - SPI NOR fixup hooks
190 * @default_init: called after default flash parameters init. Used to tweak
191 * flash parameters when information provided by the flash_info
192 * table is incomplete or wrong.
193 * @post_bfpt: called after the BFPT table has been parsed
194 * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
195 * that do not support RDSFDP). Typically used to tweak various
196 * parameters that could not be extracted by other means (i.e.
197 * when information provided by the SFDP/flash_info tables are
198 * incomplete or wrong).
200 * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
201 * table is broken or not available.
203 struct spi_nor_fixups {
204 void (*default_init)(struct spi_nor *nor);
205 int (*post_bfpt)(struct spi_nor *nor,
206 const struct sfdp_parameter_header *bfpt_header,
207 const struct sfdp_bfpt *bfpt,
208 struct spi_nor_flash_parameter *params);
209 void (*post_sfdp)(struct spi_nor *nor,
210 struct spi_nor_flash_parameter *params);
/*
 * Sleep length applied after issuing a software reset sequence.
 * NOTE(review): units depend on the delay helper at the call site (not
 * visible in this listing) — confirm against the full source.
 */
213 #define SPI_NOR_SRST_SLEEP_LEN 200
216 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
218 * @nor: pointer to a 'struct spi_nor'
219 * @op: pointer to the 'struct spi_mem_op' whose properties
220 * need to be initialized.
222 * Right now, only "repeat" and "invert" are supported.
224 * Return: The opcode extension.
226 static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
227 const struct spi_mem_op *op)
229 switch (nor->cmd_ext_type) {
230 case SPI_NOR_EXT_INVERT:
/* Invert: extension byte is the bitwise complement of the opcode. */
231 return ~op->cmd.opcode;
233 case SPI_NOR_EXT_REPEAT:
/* Repeat: extension byte repeats the opcode itself. */
234 return op->cmd.opcode;
/* NOTE(review): default case is not visible in this listing. */
237 dev_dbg(nor->dev, "Unknown command extension type\n");
243 * spi_nor_setup_op() - Set up common properties of a spi-mem op.
244 * @nor: pointer to a 'struct spi_nor'
245 * @op: pointer to the 'struct spi_mem_op' whose properties
246 * need to be initialized.
247 * @proto: the protocol from which the properties need to be set.
249 void spi_nor_setup_op(const struct spi_nor *nor,
250 struct spi_mem_op *op,
251 const enum spi_nor_protocol proto)
/* Derive per-phase bus widths (cmd/addr/dummy/data) from the protocol. */
255 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
258 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
260 if (op->dummy.nbytes)
261 op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
264 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
266 if (spi_nor_protocol_is_dtr(proto)) {
268 * spi-mem supports mixed DTR modes, but right now we can only
269 * have all phases either DTR or STR. IOW, spi-mem can have
270 * something like 4S-4D-4D, but spi-nor can't. So, set all 4
271 * phases to either DTR or STR.
273 op->cmd.dtr = op->addr.dtr = op->dummy.dtr =
276 /* 2 bytes per clock cycle in DTR mode. */
277 op->dummy.nbytes *= 2;
/* In DTR mode the opcode is 2 bytes: opcode followed by its extension. */
279 ext = spi_nor_get_cmd_ext(nor, op);
280 op->cmd.opcode = (op->cmd.opcode << 8) | ext;
/*
 * Bind the caller's buffer to the op's data phase (direction-dependent)
 * and execute it on the controller.
 */
285 static int spi_nor_read_write_reg(struct spi_nor *nor, struct spi_mem_op
288 if (op->data.dir == SPI_MEM_DATA_IN)
289 op->data.buf.in = buf;
291 op->data.buf.out = buf;
292 return spi_mem_exec_op(nor->spi, op);
/* Read @len bytes of register @code into @val using the register protocol. */
295 static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
297 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 0),
300 SPI_MEM_OP_DATA_IN(len, NULL, 0));
303 spi_nor_setup_op(nor, &op, nor->reg_proto);
305 ret = spi_nor_read_write_reg(nor, &op, val);
307 dev_dbg(nor->dev, "error %d reading %x\n", ret, code);
/* Write @len bytes from @buf with register opcode @opcode. */
312 static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
314 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0),
317 SPI_MEM_OP_DATA_OUT(len, NULL, 0));
319 spi_nor_setup_op(nor, &op, nor->reg_proto);
/* Zero-length writes (e.g. WREN/WRDI) carry no data phase at all. */
322 op.data.dir = SPI_MEM_NO_DATA;
324 return spi_nor_read_write_reg(nor, &op, buf);
327 #ifdef CONFIG_SPI_FLASH_SPANSION
/* Read one byte of any addressed register (RDAR), @dummy in clock cycles. */
328 static int spansion_read_any_reg(struct spi_nor *nor, u32 addr, u8 dummy,
331 struct spi_mem_op op =
332 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDAR, 1),
333 SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
334 SPI_MEM_OP_DUMMY(dummy / 8, 1),
335 SPI_MEM_OP_DATA_IN(1, NULL, 1));
337 return spi_nor_read_write_reg(nor, &op, val);
/* Write one byte @val to any addressed register (WRAR). */
340 static int spansion_write_any_reg(struct spi_nor *nor, u32 addr, u8 val)
342 struct spi_mem_op op =
343 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRAR, 1),
344 SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
346 SPI_MEM_OP_DATA_OUT(1, NULL, 1));
348 return spi_nor_read_write_reg(nor, &op, &val);
/*
 * Read @len bytes at flash offset @from into @buf, chunking as the
 * controller allows. Prefers the dirmap path when available.
 * NOTE(review): loop structure and returns are partly missing from this
 * listing (numbering skips).
 */
352 static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
355 struct spi_mem_op op =
356 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
357 SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
358 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
359 SPI_MEM_OP_DATA_IN(len, buf, 0));
360 size_t remaining = len;
363 spi_nor_setup_op(nor, &op, nor->read_proto);
365 /* convert the dummy cycles to the number of bytes */
366 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
/* DTR transfers 2 bits per cycle per line, so dummy bytes double. */
367 if (spi_nor_protocol_is_dtr(nor->read_proto))
368 op.dummy.nbytes *= 2;
370 op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
373 if (CONFIG_IS_ENABLED(SPI_DIRMAP) && nor->dirmap.rdesc) {
375 * Record current operation information which may be used
376 * when the address or data length exceeds address mapping.
378 memcpy(&nor->dirmap.rdesc->info.op_tmpl, &op,
379 sizeof(struct spi_mem_op));
380 ret = spi_mem_dirmap_read(nor->dirmap.rdesc,
381 op.addr.val, op.data.nbytes,
385 op.data.nbytes = ret;
387 ret = spi_mem_adjust_op_size(nor->spi, &op);
391 ret = spi_mem_exec_op(nor->spi, &op);
/* Advance address, destination pointer and remaining count by what
 * was actually transferred this iteration. */
396 op.addr.val += op.data.nbytes;
397 remaining -= op.data.nbytes;
398 op.data.buf.in += op.data.nbytes;
/*
 * Write @len bytes from @buf to flash offset @to. Returns the number of
 * bytes queued in the data phase.
 */
404 static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
407 struct spi_mem_op op =
408 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
409 SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
411 SPI_MEM_OP_DATA_OUT(len, buf, 0));
/* SST AAI word program: second and later writes omit the address phase
 * (handling not fully visible in this listing). */
414 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
417 spi_nor_setup_op(nor, &op, nor->write_proto);
419 if (CONFIG_IS_ENABLED(SPI_DIRMAP) && nor->dirmap.wdesc) {
420 memcpy(&nor->dirmap.wdesc->info.op_tmpl, &op,
421 sizeof(struct spi_mem_op));
422 op.data.nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
423 op.data.nbytes, op.data.buf.out);
425 ret = spi_mem_adjust_op_size(nor->spi, &op);
428 op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes;
430 ret = spi_mem_exec_op(nor->spi, &op);
435 return op.data.nbytes;
439 * Read the status register, returning its value in the location
440 * Return the status register value.
441 * Returns negative if error occurred.
443 static int read_sr(struct spi_nor *nor)
445 struct spi_mem_op op;
448 u8 addr_nbytes, dummy;
/* Octal DTR parts need address + dummy cycles on RDSR (per Profile 1.0). */
450 if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
451 addr_nbytes = nor->rdsr_addr_nbytes;
452 dummy = nor->rdsr_dummy;
458 op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
459 SPI_MEM_OP_ADDR(addr_nbytes, 0, 0),
460 SPI_MEM_OP_DUMMY(dummy, 0),
461 SPI_MEM_OP_DATA_IN(1, NULL, 0));
463 spi_nor_setup_op(nor, &op, nor->reg_proto);
466 * We don't want to read only one byte in DTR mode. So, read 2 and then
467 * discard the second byte.
469 if (spi_nor_protocol_is_dtr(nor->reg_proto))
472 ret = spi_nor_read_write_reg(nor, &op, val);
474 pr_debug("error %d reading SR\n", (int)ret);
482 * Read the flag status register, returning its value in the location
483 * Return the status register value.
484 * Returns negative if error occurred.
486 static int read_fsr(struct spi_nor *nor)
488 struct spi_mem_op op;
491 u8 addr_nbytes, dummy;
493 if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
494 addr_nbytes = nor->rdsr_addr_nbytes;
495 dummy = nor->rdsr_dummy;
501 op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
502 SPI_MEM_OP_ADDR(addr_nbytes, 0, 0),
503 SPI_MEM_OP_DUMMY(dummy, 0),
504 SPI_MEM_OP_DATA_IN(1, NULL, 0));
506 spi_nor_setup_op(nor, &op, nor->reg_proto);
509 * We don't want to read only one byte in DTR mode. So, read 2 and then
510 * discard the second byte.
512 if (spi_nor_protocol_is_dtr(nor->reg_proto))
515 ret = spi_nor_read_write_reg(nor, &op, val);
517 pr_debug("error %d reading FSR\n", ret);
525 * Read configuration register, returning its value in the
526 * location. Return the configuration register value.
527 * Returns negative if error occurred.
529 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
530 static int read_cr(struct spi_nor *nor)
535 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
537 dev_dbg(nor->dev, "error %d reading CR\n", ret);
546 * Write status register 1 byte
547 * Returns negative if error occurred.
549 static int write_sr(struct spi_nor *nor, u8 val)
551 nor->cmd_buf[0] = val;
552 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
556 * Set write enable latch with Write Enable command.
557 * Returns negative if error occurred.
559 static int write_enable(struct spi_nor *nor)
561 return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
565 * Send write disable instruction to the chip.
567 static int write_disable(struct spi_nor *nor)
569 return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
/* Recover the spi_nor from its embedded mtd_info. */
572 static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
577 #ifndef CONFIG_SPI_FLASH_BAR
/*
 * Look up @opcode in a {3-byte, 4-byte} opcode pair table; return the
 * 4-byte equivalent, or the input unchanged if no mapping exists.
 */
578 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
582 for (i = 0; i < size; i++)
583 if (table[i][0] == opcode)
586 /* No conversion found, keep input op code. */
/* Map a 3-byte-address read opcode to its 4-byte-address variant. */
590 static u8 spi_nor_convert_3to4_read(u8 opcode)
592 static const u8 spi_nor_3to4_read[][2] = {
593 { SPINOR_OP_READ, SPINOR_OP_READ_4B },
594 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
595 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
596 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
597 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
598 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
599 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
600 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
602 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
603 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
604 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
607 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
608 ARRAY_SIZE(spi_nor_3to4_read));
/* Map a 3-byte-address program opcode to its 4-byte-address variant. */
611 static u8 spi_nor_convert_3to4_program(u8 opcode)
613 static const u8 spi_nor_3to4_program[][2] = {
614 { SPINOR_OP_PP, SPINOR_OP_PP_4B },
615 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
616 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
617 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
618 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
621 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
622 ARRAY_SIZE(spi_nor_3to4_program));
/* Map a 3-byte-address erase opcode to its 4-byte-address variant. */
625 static u8 spi_nor_convert_3to4_erase(u8 opcode)
627 static const u8 spi_nor_3to4_erase[][2] = {
628 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
629 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
630 { SPINOR_OP_SE, SPINOR_OP_SE_4B },
633 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
634 ARRAY_SIZE(spi_nor_3to4_erase));
/* Switch the nor's read/program/erase opcodes to the 4-byte command set. */
637 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
638 const struct flash_info *info)
640 /* Do some manufacturer fixups first */
641 switch (JEDEC_MFR(info)) {
642 case SNOR_MFR_SPANSION:
643 /* No small sector erase for 4-byte command set */
644 nor->erase_opcode = SPINOR_OP_SE;
645 nor->mtd.erasesize = info->sector_size;
652 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
653 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
654 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
656 #endif /* !CONFIG_SPI_FLASH_BAR */
658 /* Enable/disable 4-byte addressing mode. */
/*
 * The enter/exit sequence is manufacturer specific: EN4B/EX4B for most
 * vendors, a BRWR write for the default case.
 * NOTE(review): several case labels/returns are missing from this listing.
 */
659 static int set_4byte(struct spi_nor *nor, const struct flash_info *info,
663 bool need_wren = false;
666 switch (JEDEC_MFR(info)) {
668 case SNOR_MFR_MICRON:
669 /* Some Micron need WREN command; all will accept it */
672 case SNOR_MFR_MACRONIX:
673 case SNOR_MFR_WINBOND:
677 cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
678 status = nor->write_reg(nor, cmd, NULL, 0);
682 if (!status && !enable &&
683 JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
685 * On Winbond W25Q256FV, leaving 4byte mode causes
686 * the Extended Address Register to be set to 1, so all
687 * 3-byte-address reads come from the second 16M.
688 * We must clear the register to enable normal behavior.
692 nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
697 case SNOR_MFR_CYPRESS:
/* Cypress uses a different exit-4-byte opcode than the common EX4B. */
698 cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B_CYPRESS;
699 return nor->write_reg(nor, cmd, NULL, 0);
/* Default: program the Bank Register; bit 7 selects 4-byte mode. */
702 nor->cmd_buf[0] = enable << 7;
703 return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
707 #ifdef CONFIG_SPI_FLASH_SPANSION
709 * Read status register 1 by using Read Any Register command to support multi
/* Returns 1 when WIP is clear (ready), 0 when busy, negative on error. */
712 static int spansion_sr_ready(struct spi_nor *nor, u32 addr_base, u8 dummy)
714 u32 reg_addr = addr_base + SPINOR_REG_ADDR_STR1V;
718 ret = spansion_read_any_reg(nor, reg_addr, dummy, &sr);
722 if (sr & (SR_E_ERR | SR_P_ERR)) {
724 dev_dbg(nor->dev, "Erase Error occurred\n");
726 dev_dbg(nor->dev, "Programming Error occurred\n");
/* Clear the sticky error bits so subsequent operations can proceed. */
728 nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
732 return !(sr & SR_WIP);
/* Generic ready check via SR; clears errors on flashes using CLSR. */
736 static int spi_nor_sr_ready(struct spi_nor *nor)
738 int sr = read_sr(nor);
743 if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
745 dev_dbg(nor->dev, "Erase Error occurred\n");
747 dev_dbg(nor->dev, "Programming Error occurred\n");
749 nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
753 return !(sr & SR_WIP);
/* Ready check via the Flag Status Register (Micron-style). */
756 static int spi_nor_fsr_ready(struct spi_nor *nor)
758 int fsr = read_fsr(nor);
763 if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
765 dev_err(nor->dev, "Erase operation failed.\n");
767 dev_err(nor->dev, "Program operation failed.\n");
769 if (fsr & FSR_PT_ERR)
771 "Attempted to modify a protected sector.\n");
773 nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
777 return fsr & FSR_READY;
/* Combine SR and (when flagged) FSR readiness. */
780 static int spi_nor_default_ready(struct spi_nor *nor)
784 sr = spi_nor_sr_ready(nor);
787 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
/* Dispatch to a flash-specific ready hook when one is installed. */
793 static int spi_nor_ready(struct spi_nor *nor)
796 return nor->ready(nor);
798 return spi_nor_default_ready(nor);
802 * Service routine to read status register until ready, or timeout occurs.
803 * Returns non-zero if error.
805 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
806 unsigned long timeout)
808 unsigned long timebase;
811 timebase = get_timer(0);
813 while (get_timer(timebase) < timeout) {
814 ret = spi_nor_ready(nor);
821 dev_err(nor->dev, "flash operation timed out\n");
/* Convenience wrapper with the default 40-second timeout. */
826 static int spi_nor_wait_till_ready(struct spi_nor *nor)
828 return spi_nor_wait_till_ready_with_timeout(nor,
829 DEFAULT_READY_WAIT_JIFFIES);
832 #ifdef CONFIG_SPI_FLASH_BAR
834 * This "clean_bar" is necessary in a situation when one was accessing
835 * spi flash memory > 16 MiB by using Bank Address Register's BA24 bit.
837 * After it the BA24 bit shall be cleared to allow access to correct
838 * memory region after SW reset (by calling "reset" command).
840 * Otherwise, the BA24 bit may be left set and then after reset, the
841 * ROM would read/write/erase SPL from 16 MiB * bank_sel address.
843 static int clean_bar(struct spi_nor *nor)
845 u8 cmd, bank_sel = 0;
/* Already in bank 0: nothing to clear. */
847 if (nor->bank_curr == 0)
849 cmd = nor->bank_write_cmd;
853 return nor->write_reg(nor, cmd, &bank_sel, 1);
/* Select the 16 MiB bank containing @offset; no-op if already selected. */
856 static int write_bar(struct spi_nor *nor, u32 offset)
861 bank_sel = offset / SZ_16M;
862 if (bank_sel == nor->bank_curr)
865 cmd = nor->bank_write_cmd;
867 ret = nor->write_reg(nor, cmd, &bank_sel, 1);
869 debug("SF: fail to write bank register\n");
874 nor->bank_curr = bank_sel;
875 return nor->bank_curr;
/* Pick vendor BAR opcodes and read back the currently selected bank. */
878 static int read_bar(struct spi_nor *nor, const struct flash_info *info)
883 switch (JEDEC_MFR(info)) {
884 case SNOR_MFR_SPANSION:
885 nor->bank_read_cmd = SPINOR_OP_BRRD;
886 nor->bank_write_cmd = SPINOR_OP_BRWR;
889 nor->bank_read_cmd = SPINOR_OP_RDEAR;
890 nor->bank_write_cmd = SPINOR_OP_WREAR;
893 ret = nor->read_reg(nor, nor->bank_read_cmd,
896 debug("SF: fail to read bank addr register\n");
899 nor->bank_curr = curr_bank;
906 * Initiate the erasure of a single sector. Returns the number of bytes erased
907 * on success, a negative error code on error.
909 static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
911 struct spi_mem_op op =
912 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
913 SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
918 spi_nor_setup_op(nor, &op, nor->write_proto);
/* Prefer a flash-specific erase hook when one is installed. */
921 return nor->erase(nor, addr);
924 * Default implementation, if driver doesn't have a specialized HW
927 ret = spi_mem_exec_op(nor->spi, &op);
931 return nor->mtd.erasesize;
935 * Erase an address range on the nor chip. The address range may extend
936 * one or more erase sectors. Return an error if there is a problem erasing.
938 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
940 struct spi_nor *nor = mtd_to_spi_nor(mtd);
941 bool addr_known = false;
945 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
946 (long long)instr->len);
/* Length must be a whole number of erase sectors. */
948 div_u64_rem(instr->len, mtd->erasesize, &rem);
957 instr->state = MTD_ERASING;
/* Allow the user to interrupt a long erase with Ctrl-C (not in SPL). */
962 if (!IS_ENABLED(CONFIG_SPL_BUILD) && ctrlc()) {
967 #ifdef CONFIG_SPI_FLASH_BAR
968 ret = write_bar(nor, addr);
972 ret = write_enable(nor);
976 ret = spi_nor_erase_sector(nor, addr);
983 ret = spi_nor_wait_till_ready(nor);
990 #ifdef CONFIG_SPI_FLASH_BAR
991 err = clean_bar(nor);
995 err = write_disable(nor);
1001 instr->fail_addr = addr_known ? addr : MTD_FAIL_ADDR_UNKNOWN;
1002 instr->state = MTD_ERASE_FAILED;
1004 instr->state = MTD_ERASE_DONE;
1010 #ifdef CONFIG_SPI_FLASH_SPANSION
1012 * spansion_erase_non_uniform() - erase non-uniform sectors for Spansion/Cypress
1014 * @nor: pointer to a 'struct spi_nor'
1015 * @addr: address of the sector to erase
1016 * @opcode_4k: opcode for 4K sector erase
1017 * @ovlsz_top: size of overlaid portion at the top address
1018 * @ovlsz_btm: size of overlaid portion at the bottom address
1020 * Erase an address range on the nor chip that can contain 4KB sectors overlaid
1021 * on top and/or bottom. The appropriate erase opcode and size are chosen by
1022 * address to erase and size of overlaid portion.
1024 * Return: number of bytes erased on success, -errno otherwise.
1026 static int spansion_erase_non_uniform(struct spi_nor *nor, u32 addr,
1027 u8 opcode_4k, u32 ovlsz_top,
1030 struct spi_mem_op op =
1031 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
1032 SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
1033 SPI_MEM_OP_NO_DUMMY,
1034 SPI_MEM_OP_NO_DATA);
1035 struct mtd_info *mtd = &nor->mtd;
/* Address falls inside an overlaid 4K region: use the 4K erase opcode. */
1040 if (op.addr.val < ovlsz_btm ||
1041 op.addr.val >= mtd->size - ovlsz_top) {
1042 op.cmd.opcode = opcode_4k;
1045 /* Non-overlaid portion in the normal sector at the bottom */
1046 } else if (op.addr.val == ovlsz_btm) {
1047 op.cmd.opcode = nor->erase_opcode;
1048 erasesize = mtd->erasesize - ovlsz_btm;
1050 /* Non-overlaid portion in the normal sector at the top */
1051 } else if (op.addr.val == mtd->size - mtd->erasesize) {
1052 op.cmd.opcode = nor->erase_opcode;
1053 erasesize = mtd->erasesize - ovlsz_top;
1055 /* Normal sectors */
1057 op.cmd.opcode = nor->erase_opcode;
1058 erasesize = mtd->erasesize;
1061 spi_nor_setup_op(nor, &op, nor->write_proto);
1063 ret = spi_mem_exec_op(nor->spi, &op);
1071 #if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
1072 /* Write status register and ensure bits in mask match written values */
1073 static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
1078 ret = write_sr(nor, status_new);
1082 ret = spi_nor_wait_till_ready(nor);
/* Read back and verify only the bits covered by @mask took effect. */
1090 return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
/* Decode BP2..BP0 (and optionally TB) in @sr into a locked [ofs, ofs+len) range. */
1093 static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
1096 struct mtd_info *mtd = &nor->mtd;
1097 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1098 int shift = ffs(mask) - 1;
1106 pow = ((sr & mask) ^ mask) >> shift;
1107 *len = mtd->size >> pow;
/* TB bit set => protection grows from the bottom; otherwise from the top. */
1108 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
1111 *ofs = mtd->size - *len;
1116 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1117 * @locked is false); 0 otherwise
1119 static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, u64 len,
1128 stm_get_locked_range(nor, sr, &lock_offs, &lock_len)
1131 /* Requested range is a sub-range of locked range */
1132 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1134 /* Requested range does not overlap with locked range */
1135 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
/* Thin predicates over stm_check_lock_status_sr(). */
1138 static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1141 return stm_check_lock_status_sr(nor, ofs, len, sr, true);
1144 static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1147 return stm_check_lock_status_sr(nor, ofs, len, sr, false);
1151 * Lock a region of the flash. Compatible with ST Micro and similar flash.
1152 * Supports the block protection bits BP{0,1,2} in the status register
1153 * (SR). Does not support these features found in newer SR bitfields:
1154 * - SEC: sector/block protect - only handle SEC=0 (block protect)
1155 * - CMP: complement protect - only support CMP=0 (range is not complemented)
1157 * Support for the following is provided conditionally for some flash:
1158 * - TB: top/bottom protect
1160 * Sample table portion for 8MB flash (Winbond w25q64fw):
1162 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
1163 * --------------------------------------------------------------------------
1164 * X | X | 0 | 0 | 0 | NONE | NONE
1165 * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
1166 * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
1167 * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
1168 * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
1169 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
1170 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
1171 * X | X | 1 | 1 | 1 | 8 MB | ALL
1172 * ------|-------|-------|-------|-------|---------------|-------------------
1173 * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
1174 * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
1175 * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
1176 * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
1177 * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
1178 * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
1180 * Returns negative on errors, 0 on success.
1182 static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1184 struct mtd_info *mtd = &nor->mtd;
1185 int status_old, status_new;
1186 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1187 u8 shift = ffs(mask) - 1, pow, val;
1189 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1192 status_old = read_sr(nor);
1196 /* If nothing in our range is unlocked, we don't need to do anything */
1197 if (stm_is_locked_sr(nor, ofs, len, status_old))
1200 /* If anything below us is unlocked, we can't use 'bottom' protection */
1201 if (!stm_is_locked_sr(nor, 0, ofs, status_old))
1202 can_be_bottom = false;
1204 /* If anything above us is unlocked, we can't use 'top' protection */
1205 if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
1209 if (!can_be_bottom && !can_be_top)
1212 /* Prefer top, if both are valid */
1213 use_top = can_be_top;
1215 /* lock_len: length of region that should end up locked */
1217 lock_len = mtd->size - ofs;
1219 lock_len = ofs + len;
1222 * Need smallest pow such that:
1224 * 1 / (2^pow) <= (len / size)
1226 * so (assuming power-of-2 size) we do:
1228 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
1230 pow = ilog2(mtd->size) - ilog2(lock_len);
1231 val = mask - (pow << shift);
1234 /* Don't "lock" with no region! */
1238 status_new = (status_old & ~mask & ~SR_TB) | val;
1240 /* Disallow further writes if WP pin is asserted */
1241 status_new |= SR_SRWD;
1244 status_new |= SR_TB;
1246 /* Don't bother if they're the same */
1247 if (status_new == status_old)
1250 /* Only modify protection if it will not unlock other areas */
1251 if ((status_new & mask) < (status_old & mask))
1254 return write_sr_and_check(nor, status_new, mask);
1258 * Unlock a region of the flash. See stm_lock() for more info
1260 * Returns negative on errors, 0 on success.
1262 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1264 struct mtd_info *mtd = &nor->mtd;
1265 int status_old, status_new;
1266 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1267 u8 shift = ffs(mask) - 1, pow, val;
1269 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1272 status_old = read_sr(nor);
1276 /* If nothing in our range is locked, we don't need to do anything */
1277 if (stm_is_unlocked_sr(nor, ofs, len, status_old))
1280 /* If anything below us is locked, we can't use 'top' protection */
1281 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
1284 /* If anything above us is locked, we can't use 'bottom' protection */
1285 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
1287 can_be_bottom = false;
1289 if (!can_be_bottom && !can_be_top)
1292 /* Prefer top, if both are valid */
1293 use_top = can_be_top;
1295 /* lock_len: length of region that should remain locked */
1297 lock_len = mtd->size - (ofs + len);
1302 * Need largest pow such that:
1304 * 1 / (2^pow) >= (len / size)
1306 * so (assuming power-of-2 size) we do:
1308 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
1310 pow = ilog2(mtd->size) - order_base_2(lock_len);
1311 if (lock_len == 0) {
1312 val = 0; /* fully unlocked */
1314 val = mask - (pow << shift);
1315 /* Some power-of-two sizes are not supported */
1320 status_new = (status_old & ~mask & ~SR_TB) | val;
1322 /* Don't protect status register if we're fully unlocked */
1324 status_new &= ~SR_SRWD;
1327 status_new |= SR_TB;
1329 /* Don't bother if they're the same */
1330 if (status_new == status_old)
1333 /* Only modify protection if it will not lock other areas */
1334 if ((status_new & mask) > (status_old & mask))
1337 return write_sr_and_check(nor, status_new, mask);
1341 * Check if a region of the flash is (completely) unlocked. See stm_lock() for
1344 * Returns 1 if entire region is unlocked, 0 if any portion is locked, and
1345 * negative on errors.
1347 static int stm_is_unlocked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1351 status = read_sr(nor);
1355 return stm_is_unlocked_sr(nor, ofs, len, status);
1357 #endif /* CONFIG_SPI_FLASH_STMICRO || CONFIG_SPI_FLASH_SST */
/*
 * spi_nor_read_id() - read the JEDEC ID (RDID) and match it against the
 * flash_info table. Returns the matching entry or ERR_PTR on failure.
 */
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
	u8 id[SPI_NOR_MAX_ID_LEN];
	const struct flash_info *info;
	tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
		return ERR_PTR(tmp);
	/* Compare only the first info->id_len bytes of each table entry. */
	for (; info->name; info++) {
		if (!memcmp(info->id, id, info->id_len))
	dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
		id[0], id[1], id[2]);
	return ERR_PTR(-ENODEV);
/*
 * mtd read() callback: read 'len' bytes at 'from' into 'buf'.
 * With CONFIG_SPI_FLASH_BAR, reads are clamped so that a single nor->read()
 * never crosses a 16MiB bank boundary.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
		size_t read_len = len;
#ifdef CONFIG_SPI_FLASH_BAR
		ret = write_bar(nor, addr);
			return log_ret(ret);
		/* Bytes left in the currently selected 16MiB bank. */
		remain_len = (SZ_16M * (nor->bank_curr + 1)) - addr;
		if (len < remain_len)
			read_len = remain_len;
		ret = nor->read(nor, addr, read_len, buf);
		/* We shouldn't see 0-length reads */
#ifdef CONFIG_SPI_FLASH_BAR
	ret = clean_bar(nor);
1433 #ifdef CONFIG_SPI_FLASH_SST
1435 * sst26 flash series has its own block protection implementation:
1436 * 4x - 8 KByte blocks - read & write protection bits - upper addresses
1437 * 1x - 32 KByte blocks - write protection bits
1438 * rest - 64 KByte blocks - write protection bits
1439 * 1x - 32 KByte blocks - write protection bits
1440 * 4x - 8 KByte blocks - read & write protection bits - lower addresses
1442 * We'll support only per 64k lock/unlock so lower and upper 64 KByte region
1443 * will be treated as single block.
1445 #define SST26_BPR_8K_NUM 4
1446 #define SST26_MAX_BPR_REG_LEN (18 + 1)
1447 #define SST26_BOUND_REG_SIZE ((32 + SST26_BPR_8K_NUM * 8) * SZ_1K)
/*
 * Set, clear or test one bit in the block-protection register image.
 * The register is sent MSB first, so bit 0 lives in the LAST byte of 'cmd';
 * hence the cmd[bpr_size - (bit / 8) - 1] indexing.
 */
static bool sst26_process_bpr(u32 bpr_size, u8 *cmd, u32 bit, enum lock_ctl ctl)
	case SST26_CTL_LOCK:
		cmd[bpr_size - (bit / 8) - 1] |= BIT(bit % 8);
	case SST26_CTL_UNLOCK:
		cmd[bpr_size - (bit / 8) - 1] &= ~BIT(bit % 8);
	case SST26_CTL_CHECK:
		return !!(cmd[bpr_size - (bit / 8) - 1] & BIT(bit % 8));
1472 * Lock, unlock or check lock status of the flash region of the flash (depending
1473 * on the lock_ctl value)
1475 static int sst26_lock_ctl(struct spi_nor *nor, loff_t ofs, uint64_t len, enum lock_ctl ctl)
1477 struct mtd_info *mtd = &nor->mtd;
1478 u32 i, bpr_ptr, rptr_64k, lptr_64k, bpr_size;
1479 bool lower_64k = false, upper_64k = false;
1480 u8 bpr_buff[SST26_MAX_BPR_REG_LEN] = {};
1483 /* Check length and offset for 64k alignment */
1484 if ((ofs & (SZ_64K - 1)) || (len & (SZ_64K - 1))) {
1485 dev_err(nor->dev, "length or offset is not 64KiB allighned\n");
1489 if (ofs + len > mtd->size) {
1490 dev_err(nor->dev, "range is more than device size: %#llx + %#llx > %#llx\n",
1491 ofs, len, mtd->size);
1495 /* SST26 family has only 16 Mbit, 32 Mbit and 64 Mbit IC */
1496 if (mtd->size != SZ_2M &&
1497 mtd->size != SZ_4M &&
1501 bpr_size = 2 + (mtd->size / SZ_64K / 8);
1503 ret = nor->read_reg(nor, SPINOR_OP_READ_BPR, bpr_buff, bpr_size);
1505 dev_err(nor->dev, "fail to read block-protection register\n");
1509 rptr_64k = min_t(u32, ofs + len, mtd->size - SST26_BOUND_REG_SIZE);
1510 lptr_64k = max_t(u32, ofs, SST26_BOUND_REG_SIZE);
1512 upper_64k = ((ofs + len) > (mtd->size - SST26_BOUND_REG_SIZE));
1513 lower_64k = (ofs < SST26_BOUND_REG_SIZE);
1515 /* Lower bits in block-protection register are about 64k region */
1516 bpr_ptr = lptr_64k / SZ_64K - 1;
1518 /* Process 64K blocks region */
1519 while (lptr_64k < rptr_64k) {
1520 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1527 /* 32K and 8K region bits in BPR are after 64k region bits */
1528 bpr_ptr = (mtd->size - 2 * SST26_BOUND_REG_SIZE) / SZ_64K;
1530 /* Process lower 32K block region */
1532 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1537 /* Process upper 32K block region */
1539 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1544 /* Process lower 8K block regions */
1545 for (i = 0; i < SST26_BPR_8K_NUM; i++) {
1547 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1550 /* In 8K area BPR has both read and write protection bits */
1554 /* Process upper 8K block regions */
1555 for (i = 0; i < SST26_BPR_8K_NUM; i++) {
1557 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1560 /* In 8K area BPR has both read and write protection bits */
1564 /* If we check region status we don't need to write BPR back */
1565 if (ctl == SST26_CTL_CHECK)
1568 ret = nor->write_reg(nor, SPINOR_OP_WRITE_BPR, bpr_buff, bpr_size);
1570 dev_err(nor->dev, "fail to write block-protection register\n");
/* Clear write-protection bits covering the given 64KiB-aligned range. */
static int sst26_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
	return sst26_lock_ctl(nor, ofs, len, SST26_CTL_UNLOCK);
/* Set write-protection bits covering the given 64KiB-aligned range. */
static int sst26_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
	return sst26_lock_ctl(nor, ofs, len, SST26_CTL_LOCK);
/*
 * Returns EACCES (positive value) if region is (partially) locked, 0 if region
 * is completely unlocked, and negative on errors.
 */
static int sst26_is_unlocked(struct spi_nor *nor, loff_t ofs, uint64_t len)
	/*
	 * is_unlocked function is used for check before reading or erasing
	 * flash region, so offset and length might be not 64k aligned, so
	 * adjust them to be 64k aligned as sst26_lock_ctl works only with 64k
	 * aligned regions: round ofs down and len up to whole 64KiB blocks.
	 */
	ofs -= ofs & (SZ_64K - 1);
	len = len & (SZ_64K - 1) ? (len & ~(SZ_64K - 1)) + SZ_64K : len;
	return sst26_lock_ctl(nor, ofs, len, SST26_CTL_CHECK);
/*
 * SST byte-program path: program the buffer one byte at a time with the
 * Byte Program (BP) opcode, waiting for the flash to become ready after
 * each byte.
 */
static int sst_write_byteprogram(struct spi_nor *nor, loff_t to, size_t len,
				 size_t *retlen, const u_char *buf)
	for (actual = 0; actual < len; actual++) {
		nor->program_opcode = SPINOR_OP_BP;
		/* write one byte. */
		ret = nor->write(nor, to, 1, buf + actual);
		ret = spi_nor_wait_till_ready(nor);
/*
 * sst_write() - SST Auto Address Increment (AAI) write path.
 * Writes an optional leading byte with Byte Program, then word pairs with
 * AAI_WP, then an optional trailing byte.
 */
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		     size_t *retlen, const u_char *buf)
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	struct spi_slave *spi = nor->spi;
	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
	/* Byte-only controllers cannot issue AAI word writes. */
	if (spi->mode & SPI_TX_BYTE)
		return sst_write_byteprogram(nor, to, len, retlen, buf);
	nor->sst_write_second = false;
	/* Start write from odd address. */
		nor->program_opcode = SPINOR_OP_BP;
		/* write one byte. */
		ret = nor->write(nor, to, 1, buf);
		ret = spi_nor_wait_till_ready(nor);
	/* Write out most of the data here. */
	for (; actual < len - 1; actual += 2) {
		nor->program_opcode = SPINOR_OP_AAI_WP;
		/* write two bytes. */
		ret = nor->write(nor, to, 2, buf + actual);
		ret = spi_nor_wait_till_ready(nor);
		/* After the first AAI_WP, subsequent writes skip the address. */
		nor->sst_write_second = true;
	nor->sst_write_second = false;
	ret = spi_nor_wait_till_ready(nor);
	/* Write out trailing byte if it exists. */
	if (actual != len) {
		nor->program_opcode = SPINOR_OP_BP;
		ret = nor->write(nor, to, 1, buf + actual);
		ret = spi_nor_wait_till_ready(nor);
/*
 * Write an address range to the nor chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
#ifdef CONFIG_SPI_FLASH_SST
	/* sst nor chips use AAI word program */
	if (nor->info->flags & SST_WRITE)
		return sst_write(mtd, to, len, retlen, buf);
	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
	for (i = 0; i < len; ) {
		loff_t addr = to + i;
		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 */
		if (is_power_of_2(nor->page_size)) {
			page_offset = addr & (nor->page_size - 1);
			page_offset = do_div(aux, nor->page_size);
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);
#ifdef CONFIG_SPI_FLASH_BAR
		ret = write_bar(nor, addr);
		ret = nor->write(nor, addr, page_remain, buf + i);
		ret = spi_nor_wait_till_ready(nor);
#ifdef CONFIG_SPI_FLASH_BAR
	ret = clean_bar(nor);
1767 #if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
/*
 * macronix_quad_enable() - set QE bit in Status Register.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register.
 *
 * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int macronix_quad_enable(struct spi_nor *nor)
	/* Nothing to do if QE is already set. */
	if (val & SR_QUAD_EN_MX)
	write_sr(nor, val | SR_QUAD_EN_MX);
	ret = spi_nor_wait_till_ready(nor);
	/*
	 * NOTE(review): 'ret' below holds a re-read SR value; the read_sr()
	 * call is not visible in this excerpt.
	 */
	if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
		dev_err(nor->dev, "Macronix Quad bit not set\n");
1806 #ifdef CONFIG_SPI_FLASH_SPANSION
/*
 * spansion_quad_enable_volatile() - enable Quad I/O mode in volatile register.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr_base:	base address of register (can be >0 in multi-die parts)
 * @dummy:	number of dummy cycles for register read
 *
 * It is recommended to update volatile registers in the field application due
 * to a risk of the non-volatile registers corruption by power interrupt. This
 * function sets Quad Enable bit in CFR1 volatile.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_quad_enable_volatile(struct spi_nor *nor, u32 addr_base,
	u32 addr = addr_base + SPINOR_REG_ADDR_CFR1V;
	/* Check current Quad Enable bit value. */
	ret = spansion_read_any_reg(nor, addr, dummy, &cr);
			"error while reading configuration register\n");
	/* Already enabled: nothing to do. */
	if (cr & CR_QUAD_EN_SPAN)
	cr |= CR_QUAD_EN_SPAN;
	ret = spansion_write_any_reg(nor, addr, cr);
			"error while writing configuration register\n");
	/* Read back and check it. */
	ret = spansion_read_any_reg(nor, addr, dummy, &cr);
	if (ret || !(cr & CR_QUAD_EN_SPAN)) {
		dev_dbg(nor->dev, "Spansion Quad bit not set\n");
1861 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
/*
 * Write status Register and configuration register with 2 bytes
 * The first byte will be written to the status register, while the
 * second byte will be written to the configuration register.
 * Return negative if error occurred.
 */
static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
	/* WRSR with two data bytes updates SR and CR in one operation. */
	ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
		"error while writing configuration register\n");
	ret = spi_nor_wait_till_ready(nor);
		"timeout while writing configuration register\n");
/*
 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Configuration Register.
 * This function should be used with QSPI memories supporting the Read
 * Configuration Register (35h) instruction.
 *
 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
 * memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_read_cr_quad_enable(struct spi_nor *nor)
	/* Check current Quad Enable bit value. */
		"error while reading configuration register\n");
	if (ret & CR_QUAD_EN_SPAN)
	sr_cr[1] = ret | CR_QUAD_EN_SPAN;
	/* Keep the current value of the Status Register. */
		dev_dbg(nor->dev, "error while reading status register\n");
	ret = write_sr_cr(nor, sr_cr);
	/* Read back and check it. */
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_dbg(nor->dev, "Spansion Quad bit not set\n");
1944 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
/*
 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Configuration Register.
 * This function should be used with QSPI memories not supporting the Read
 * Configuration Register (35h) instruction.
 *
 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
 * memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
	/* Keep the current value of the Status Register. */
		dev_dbg(nor->dev, "error while reading status register\n");
	/* CR cannot be read back on these parts, so write QE unconditionally. */
	sr_cr[1] = CR_QUAD_EN_SPAN;
	return write_sr_cr(nor, sr_cr);
1975 #endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */
1976 #endif /* CONFIG_SPI_FLASH_SPANSION */
/* Fill a read-command descriptor: mode clocks, wait states, opcode, protocol. */
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  enum spi_nor_protocol proto)
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
/* Fill a page-program descriptor with its opcode and protocol. */
spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
			enum spi_nor_protocol proto)
	pp->opcode = opcode;
2000 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
2002 * Serial Flash Discoverable Parameters (SFDP) parsing.
/*
 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
 *
 * Whatever the actual numbers of bytes for address and dummy cycles are
 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
 * followed by a 3-byte address and 8 dummy clock cycles.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
			     size_t len, void *buf)
	u8 addr_width, read_opcode, read_dummy;
	/* Save the current read settings so they can be restored below. */
	read_opcode = nor->read_opcode;
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	/* RDSFDP is fixed: 3 address bytes, 8 dummy cycles. */
	nor->read_opcode = SPINOR_OP_RDSFDP;
	nor->addr_width = 3;
	nor->read_dummy = 8;
	ret = nor->read(nor, addr, len, (u8 *)buf);
	/* nor->read() reports a byte count here; 0 or more than len is bogus. */
	if (!ret || ret > len) {
	/* Restore the caller's read settings. */
	nor->read_opcode = read_opcode;
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
2055 /* Fast Read settings. */
/*
 * Decode one BFPT half-word into a read-command descriptor:
 * bits [7:5] = mode clocks, bits [4:0] = wait states, bits [15:8] = opcode.
 */
spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
				    enum spi_nor_protocol proto)
	read->num_mode_clocks = (half >> 5) & 0x07;
	read->num_wait_states = (half >> 0) & 0x1f;
	read->opcode = (half >> 8) & 0xff;
	read->proto = proto;
/* Describes where one Fast Read x-y-z variant is encoded inside the BFPT. */
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	/*
	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
	 * whether the Fast Read x-y-z command is supported.
	 */
	u32 supported_dword;
	/*
	 * The half-word at offset <setting_shift> in <setting_dword> BFPT DWORD
	 * encodes the op code, the number of mode clocks and the number of wait
	 * states to be used by Fast Read x-y-z command.
	 */
	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol proto;
/* Table-driven decode of the BFPT Fast Read support bits and settings. */
static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
	/* Fast Read 1-2-2 */
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
	/* Fast Read 2-2-2 */
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5), BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
	/* Fast Read 1-1-4 */
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
	/* Fast Read 1-4-4 */
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
	/* Fast Read 4-4-4 */
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * op code and erase sector size to be used by Sector Erase commands.
	 */
/* BFPT DWORD/shift locations of the four Sector Erase Type descriptors. */
static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in DWORD8 bits[15:0] */
	/* Erase Type 2 in DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},
	/* Erase Type 3 in DWORD9 bits[15:0] */
	/* Erase Type 4 in DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
2164 static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
/* Invoke the flash-specific post-BFPT fixup hook, when one is registered. */
spi_nor_post_bfpt_fixups(struct spi_nor *nor,
			 const struct sfdp_parameter_header *bfpt_header,
			 const struct sfdp_bfpt *bfpt,
			 struct spi_nor_flash_parameter *params)
	if (nor->fixups && nor->fixups->post_bfpt)
		return nor->fixups->post_bfpt(nor, bfpt_header, bfpt, params);
/*
 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
 * @nor:		pointer to a 'struct spi_nor'
 * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Basic Flash Parameter Table length and version
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled
 *
 * The Basic Flash Parameter Table is the main and only mandatory table as
 * defined by the SFDP (JESD216) specification.
 * It provides us with the total size (memory density) of the data array and
 * the number of address bytes for Fast Read, Page Program and Sector Erase
 * commands.
 * For Fast READ commands, it also gives the number of mode clock cycles and
 * wait states (regrouped in the number of dummy clock cycles) for each
 * supported instruction op code.
 * For Page Program, the page size is now available since JESD216 rev A, however
 * the supported instruction op codes are still not provided.
 * For Sector Erase commands, this table stores the supported instruction op
 * codes and the associated sector sizes.
 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
 * rev A. The QER bits encode the manufacturer dependent procedure to be
 * executed to set the Quad Enable (QE) bit in some internal register of the
 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
 * sending any Quad SPI command to the memory. Actually, setting the QE bit
 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
 * and IO3 hence enabling 4 (Quad) I/O lines.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header,
			      struct spi_nor_flash_parameter *params)
	struct mtd_info *mtd = &nor->mtd;
	struct sfdp_bfpt bfpt;
	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
	/* Read the Basic Flash Parameter Table. */
	len = min_t(size_t, sizeof(bfpt),
		    bfpt_header->length * sizeof(u32));
	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
	memset(&bfpt, 0, sizeof(bfpt));
	err = spi_nor_read_sfdp(nor, addr, len, &bfpt);
	/* Fix endianness of the BFPT DWORDs. */
	for (i = 0; i < BFPT_DWORD_MAX; i++)
		bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
	/* Number of address bytes. */
	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
	case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
		nor->addr_width = 3;
		nor->addr_mode_nbytes = 3;
	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
		nor->addr_width = 4;
		nor->addr_mode_nbytes = 4;
	/* Flash Memory Density (in bits). */
	params->size = bfpt.dwords[BFPT_DWORD(2)];
	/* BIT(31) set means the field encodes log2 of the density. */
	if (params->size & BIT(31)) {
		params->size &= ~BIT(31);
		/*
		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
		 * bits is unlikely to exist so this error probably means
		 * the BFPT we are reading is corrupted/wrong.
		 */
		if (params->size > 63)
		params->size = 1ULL << params->size;
	params->size >>= 3; /* Convert to bytes. */
	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;
		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			params->hwcaps.mask &= ~rd->hwcaps;
		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	/* Sector Erase settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;
		/* erasesize == 0 means this Erase Type is not supported. */
		/* Erase size is encoded as 2^N bytes. */
		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
		/* Prefer the 4KiB erase type when the option is enabled. */
		if (erasesize == SZ_4K) {
			nor->erase_opcode = opcode;
			mtd->erasesize = erasesize;
		if (!mtd->erasesize || mtd->erasesize < erasesize) {
			nor->erase_opcode = opcode;
			mtd->erasesize = erasesize;
	/* Stop here if not JESD216 rev A or later. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	params->page_size = bfpt.dwords[BFPT_DWORD(11)];
	params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
	params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << params->page_size;
	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		params->quad_enable = spansion_no_read_cr_quad_enable;
#if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
	case BFPT_DWORD15_QER_SR1_BIT6:
		params->quad_enable = macronix_quad_enable;
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
	case BFPT_DWORD15_QER_SR2_BIT1:
		params->quad_enable = spansion_read_cr_quad_enable;
		dev_dbg(nor->dev, "BFPT QER reserved value used\n");
	/* Soft Reset support. */
	if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SOFT_RST)
		nor->flags |= SNOR_F_SOFT_RESET;
	/* Stop here if JESD216 rev B. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
	/* 8D-8D-8D command extension. */
	switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
	case BFPT_DWORD18_CMD_EXT_REP:
		nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
	case BFPT_DWORD18_CMD_EXT_INV:
		nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
	case BFPT_DWORD18_CMD_EXT_RES:
	case BFPT_DWORD18_CMD_EXT_16B:
		dev_err(nor->dev, "16-bit opcodes not supported\n");
	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
/*
 * spi_nor_parse_microchip_sfdp() - parse the Microchip manufacturer specific
 * SFDP table.
 * @nor:		pointer to a 'struct spi_nor'.
 * @param_header:	pointer to the SFDP parameter header.
 *
 * Return: 0 on success, -errno otherwise.
 */
spi_nor_parse_microchip_sfdp(struct spi_nor *nor,
			     const struct sfdp_parameter_header *param_header)
	size = param_header->length * sizeof(u32);
	addr = SFDP_PARAM_HEADER_PTP(param_header);
	/* Keep a device-managed copy of the raw table for later fixups. */
	nor->manufacturer_sfdp = devm_kmalloc(nor->dev, size, GFP_KERNEL);
	if (!nor->manufacturer_sfdp)
	ret = spi_nor_read_sfdp(nor, addr, size, nor->manufacturer_sfdp);
/*
 * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
 * @nor:		pointer to a 'struct spi_nor'
 * @profile1_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the xSPI Profile 1.0 table length and version.
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_profile1(struct spi_nor *nor,
				  const struct sfdp_parameter_header *profile1_header,
				  struct spi_nor_flash_parameter *params)
	u32 *table, opcode, addr;
	len = profile1_header->length * sizeof(*table);
	table = kmalloc(len, GFP_KERNEL);
	addr = SFDP_PARAM_HEADER_PTP(profile1_header);
	ret = spi_nor_read_sfdp(nor, addr, len, table);
	/* Fix endianness of the table DWORDs. */
	for (i = 0; i < profile1_header->length; i++)
		table[i] = le32_to_cpu(table[i]);
	/* Get 8D-8D-8D fast read opcode and dummy cycles. */
	opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, table[0]);
	/*
	 * We don't know what speed the controller is running at. Find the
	 * dummy cycles for the fastest frequency the flash can run at to be
	 * sure we are never short of dummy cycles. A value of 0 means the
	 * frequency is not supported.
	 *
	 * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let
	 * flashes set the correct value if needed in their fixup hooks.
	 */
	dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, table[3]);
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, table[4]);
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, table[4]);
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, table[4]);
		dummy = PROFILE1_DUMMY_DEFAULT;
	/* Round up to an even value to avoid tripping controllers up. */
	dummy = ROUND_UP_TO(dummy, 2);
	/* Update the fast read settings. */
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
				  SNOR_PROTO_8_8_8_DTR);
	/*
	 * Set the Read Status Register dummy cycles and dummy address bytes.
	 */
	if (table[0] & PROFILE1_DWORD1_RDSR_DUMMY)
		params->rdsr_dummy = 8;
		params->rdsr_dummy = 4;
	if (table[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
		params->rdsr_addr_nbytes = 4;
		params->rdsr_addr_nbytes = 0;
2493 * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
2495 * @nor: pointer to a 'struct spi_nor'
2496 * @sccr_header: pointer to the 'struct sfdp_parameter_header' describing
2497 * the SCCR Map table length and version.
2499 * Return: 0 on success, -errno otherwise.
2501 static int spi_nor_parse_sccr(struct spi_nor *nor,
2502 const struct sfdp_parameter_header *sccr_header)
2508 len = sccr_header->length * sizeof(*table);
2509 table = kmalloc(len, GFP_KERNEL);
2513 addr = SFDP_PARAM_HEADER_PTP(sccr_header);
2514 ret = spi_nor_read_sfdp(nor, addr, len, table);
2518 /* Fix endianness of the table DWORDs. */
2519 for (i = 0; i < sccr_header->length; i++)
2520 table[i] = le32_to_cpu(table[i]);
2522 if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, table[22]))
2523 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
/*
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter' to be filled
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. This is a standard which tends to supported by almost all
 * (Q)SPI memory manufacturers. Those hard-coded tables allow us to learn at
 * runtime the main parameters needed to perform basic SPI flash operations such
 * as Fast Read, Page Program or Sector Erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	/* Get the SFDP header. */
	err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
	/* Check the SFDP header version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
	psize = header.nph * sizeof(*param_headers);
	param_headers = kmalloc(psize, GFP_KERNEL);
	err = spi_nor_read_sfdp(nor, sizeof(header),
				psize, param_headers);
		"failed to read SFDP parameter headers\n");
	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];
		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	/* Parse other parameter headers. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];
		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			"non-uniform erase sector maps are not supported yet.\n");
			err = spi_nor_parse_microchip_sfdp(nor, param_header);
		case SFDP_PROFILE1_ID:
			err = spi_nor_parse_profile1(nor, param_header, params);
		case SFDP_SCCR_MAP_ID:
			err = spi_nor_parse_sccr(nor, param_header);
			"Failed to parse optional parameter table: %04x\n",
			SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Let's not drop all information we extracted so far
			 * if optional table parsers fail. In case of failing,
			 * each optional parser is responsible to roll back to
			 * the previously known spi_nor data.
			 */
	kfree(param_headers);
/* SFDP support compiled out: stub that performs no parsing. */
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
2668 #endif /* SPI_FLASH_SFDP_SUPPORT */
/*
 * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
 * after SFDP has been parsed (is also called for SPI NORs that do not
 * support SFDP).
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Typically used to tweak various parameters that could not be extracted by
 * other means (i.e. when information provided by the SFDP/flash_info tables
 * are incomplete or wrong).
 */
static void spi_nor_post_sfdp_fixups(struct spi_nor *nor,
				     struct spi_nor_flash_parameter *params)
	if (nor->fixups && nor->fixups->post_sfdp)
		nor->fixups->post_sfdp(nor, params);
/* Run the flash-specific default_init fixup hook, when one is registered. */
static void spi_nor_default_init_fixups(struct spi_nor *nor)
	if (nor->fixups && nor->fixups->default_init)
		nor->fixups->default_init(nor);
/*
 * spi_nor_init_params() - Build the flash parameter/settings structure.
 * @nor:	pointer to a 'struct spi_nor'
 * @info:	matched flash_info table entry
 * @params:	parameter structure to initialize
 *
 * Starts from legacy flash_info defaults (size, page size, read/program
 * settings derived from info->flags), runs the default_init fixup, then
 * overrides the defaults with data parsed from the SFDP tables.
 */
static int spi_nor_init_params(struct spi_nor *nor,
			       const struct flash_info *info,
			       struct spi_nor_flash_parameter *params)
	/* Set legacy flash parameters as default. */
	memset(params, 0, sizeof(*params));

	/* Set SPI NOR sizes. */
	params->size = info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
#if CONFIG_IS_ENABLED(DM_SPI)
		if (!ofnode_read_bool(dev_ofnode(nor->spi->dev),
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,

	/* Dual/Quad/Octal capabilities advertised by the flash_info entry. */
	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,

	if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		/* 8D-8D-8D Fast Read: 20 dummy cycles, legacy Fast Read opcode. */
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	/*
	 * Since xSPI Page Program opcode is backward compatible with
	 * Legacy SPI, use Legacy SPI opcode there as well.
	 */
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
				SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);

	/* Select the procedure to set the Quad Enable bit. */
	if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
				   SNOR_HWCAPS_PP_QUAD)) {
		switch (JEDEC_MFR(info)) {
#if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
		case SNOR_MFR_MACRONIX:
			params->quad_enable = macronix_quad_enable;
		case SNOR_MFR_MICRON:
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
			/* Kept only for backward compatibility purpose. */
			params->quad_enable = spansion_read_cr_quad_enable;

	/* Give the manufacturer fixup a chance to amend the defaults. */
	spi_nor_default_init_fixups(nor);

	/* Override the parameters with data read from SFDP tables. */
	nor->addr_width = 0;
	nor->mtd.erasesize = 0;
	if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			    SPI_NOR_OCTAL_DTR_READ)) &&
	    !(info->flags & SPI_NOR_SKIP_SFDP)) {
		struct spi_nor_flash_parameter sfdp_params;

		/* Parse into a copy so a failed parse can't corrupt params. */
		memcpy(&sfdp_params, params, sizeof(sfdp_params));
		if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
			/* Parse failed: keep legacy defaults, reset geometry. */
			nor->addr_width = 0;
			nor->mtd.erasesize = 0;
			/* NOTE(review): else-path — adopt SFDP-derived values. */
			memcpy(params, &sfdp_params, sizeof(*params));

	spi_nor_post_sfdp_fixups(nor, params);
2820 static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2824 for (i = 0; i < size; i++)
2825 if (table[i][0] == (int)hwcaps)
2831 static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2833 static const int hwcaps_read2cmd[][2] = {
2834 { SNOR_HWCAPS_READ, SNOR_CMD_READ },
2835 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
2836 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
2837 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
2838 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
2839 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
2840 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
2841 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
2842 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
2843 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
2844 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
2845 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
2846 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
2847 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
2848 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
2849 { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
2852 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2853 ARRAY_SIZE(hwcaps_read2cmd));
2856 static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2858 static const int hwcaps_pp2cmd[][2] = {
2859 { SNOR_HWCAPS_PP, SNOR_CMD_PP },
2860 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
2861 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
2862 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
2863 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
2864 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
2865 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
2866 { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
2869 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2870 ARRAY_SIZE(hwcaps_pp2cmd));
#ifdef CONFIG_SPI_FLASH_SMART_HWCAPS
/*
 * spi_nor_check_op - check if the operation is supported by controller
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to op template to be checked (op->addr.nbytes is
 *		modified while probing)
 *
 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_check_op(struct spi_nor *nor,
			    struct spi_mem_op *op)
	/*
	 * First test with 4 address bytes. The opcode itself might be a 3B
	 * addressing opcode but we don't care, because SPI controller
	 * implementation should not check the opcode, but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spi, op)) {
		/* Flashes above 16 MiB require 4-byte addressing. */
		if (nor->mtd.size > SZ_16M)
		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spi, op))
/*
 * spi_nor_check_readop - check if the read op is supported by controller
 * @nor:	pointer to a 'struct spi_nor'
 * @read:	pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_check_readop(struct spi_nor *nor,
				const struct spi_nor_read_command *read)
	/* Placeholder lengths: only the op sequence is being probed. */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_DUMMY(1, 0),
					  SPI_MEM_OP_DATA_IN(2, NULL, 0));

	spi_nor_setup_op(nor, &op, read->proto);

	/* Convert mode + wait-state clocks into dummy bytes on the bus. */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;
	/* Double the byte count for DTR protocols. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_check_op(nor, &op);
/*
 * spi_nor_check_pp - check if the page program op is supported by controller
 * @nor:	pointer to a 'struct spi_nor'
 * @pp:		pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_check_pp(struct spi_nor *nor,
			    const struct spi_nor_pp_command *pp)
	/* Placeholder lengths: only the op sequence is being probed. */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(2, NULL, 0));

	spi_nor_setup_op(nor, &op, pp->proto);

	return spi_nor_check_op(nor, &op);
/*
 * spi_nor_adjust_hwcaps - Find optimal Read/Write protocol based on SPI
 *                         controller capabilities
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter'
 *		representing SPI NOR flash capabilities
 * @hwcaps:	pointer to resulting capabilities after adjusting
 *		according to controller and flash's capability
 *
 * Discard caps based on what the SPI controller actually supports (using
 * spi_mem_supports_op()).
 */
spi_nor_adjust_hwcaps(struct spi_nor *nor,
		      const struct spi_nor_flash_parameter *params,
	/*
	 * Start by assuming the controller supports every capability.
	 * We will mask them after checking what's really supported
	 * using spi_mem_supports_op().
	 */
	*hwcaps = SNOR_HWCAPS_ALL & params->hwcaps.mask;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, we do not want to enter a stateful
	 * mode the flash could remain stuck in after an unexpected reboot.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	/* Probe each remaining capability bit against the controller. */
	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		if (!(*hwcaps & BIT(cap)))

		/* Read capability: drop it if the probe op is rejected. */
		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		    spi_nor_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		/* Page-program capability: same treatment. */
		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));

		if (spi_nor_check_pp(nor, &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
/*
 * spi_nor_adjust_hwcaps - Find optimal Read/Write protocol based on SPI
 *                         controller capabilities
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter'
 *		representing SPI NOR flash capabilities
 * @hwcaps:	pointer to resulting capabilities after adjusting
 *		according to controller and flash's capability
 *
 * Select caps based on what the SPI controller and SPI flash both support.
 */
spi_nor_adjust_hwcaps(struct spi_nor *nor,
		      const struct spi_nor_flash_parameter *params,
	struct spi_slave *spi = nor->spi;
	/* n-n-n "command" modes are recognized but deliberately dropped. */
	u32 ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
			    SNOR_HWCAPS_READ_4_4_4 |
			    SNOR_HWCAPS_READ_8_8_8 |
			    SNOR_HWCAPS_PP_4_4_4 |
			    SNOR_HWCAPS_PP_8_8_8);
	u32 spi_hwcaps = (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |

	/* Get the hardware capabilities the SPI controller supports. */
	if (spi->mode & SPI_RX_OCTAL) {
		spi_hwcaps |= SNOR_HWCAPS_READ_1_1_8;

		if (spi->mode & SPI_TX_OCTAL)
			spi_hwcaps |= (SNOR_HWCAPS_READ_1_8_8 |
				       SNOR_HWCAPS_PP_1_1_8 |
				       SNOR_HWCAPS_PP_1_8_8);
	} else if (spi->mode & SPI_RX_QUAD) {
		spi_hwcaps |= SNOR_HWCAPS_READ_1_1_4;

		if (spi->mode & SPI_TX_QUAD)
			spi_hwcaps |= (SNOR_HWCAPS_READ_1_4_4 |
				       SNOR_HWCAPS_PP_1_1_4 |
				       SNOR_HWCAPS_PP_1_4_4);
	} else if (spi->mode & SPI_RX_DUAL) {
		spi_hwcaps |= SNOR_HWCAPS_READ_1_1_2;

		if (spi->mode & SPI_TX_DUAL)
			spi_hwcaps |= SNOR_HWCAPS_READ_1_2_2;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	*hwcaps = spi_hwcaps & params->hwcaps.mask;
	if (*hwcaps & ignored_mask) {
			"SPI n-n-n protocols are not supported yet.\n");
		*hwcaps &= ~ignored_mask;
#endif /* CONFIG_SPI_FLASH_SMART_HWCAPS */
3064 static int spi_nor_select_read(struct spi_nor *nor,
3065 const struct spi_nor_flash_parameter *params,
3068 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
3069 const struct spi_nor_read_command *read;
3074 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
3078 read = ¶ms->reads[cmd];
3079 nor->read_opcode = read->opcode;
3080 nor->read_proto = read->proto;
3083 * In the spi-nor framework, we don't need to make the difference
3084 * between mode clock cycles and wait state clock cycles.
3085 * Indeed, the value of the mode clock cycles is used by a QSPI
3086 * flash memory to know whether it should enter or leave its 0-4-4
3087 * (Continuous Read / XIP) mode.
3088 * eXecution In Place is out of the scope of the mtd sub-system.
3089 * Hence we choose to merge both mode and wait state clock cycles
3090 * into the so called dummy clock cycles.
3092 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
3096 static int spi_nor_select_pp(struct spi_nor *nor,
3097 const struct spi_nor_flash_parameter *params,
3100 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
3101 const struct spi_nor_pp_command *pp;
3106 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
3110 pp = ¶ms->page_programs[cmd];
3111 nor->program_opcode = pp->opcode;
3112 nor->write_proto = pp->proto;
/*
 * spi_nor_select_erase() - Choose the erase opcode and erase size;
 * SFDP-derived settings take precedence over flash_info values.
 */
static int spi_nor_select_erase(struct spi_nor *nor,
				const struct flash_info *info)
	struct mtd_info *mtd = &nor->mtd;

	/* Do nothing if already configured from SFDP. */
#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;
		mtd->erasesize = 4096;
	} else if (info->flags & SECT_4K_PMC) {
		/* PMC parts use a different 4K block-erase opcode. */
		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
		mtd->erasesize = 4096;
	/* Default: uniform Sector Erase using the flash_info sector size. */
	nor->erase_opcode = SPINOR_OP_SE;
	mtd->erasesize = info->sector_size;
/*
 * spi_nor_default_setup() - Default selection of read, page-program and erase
 * settings after negotiating capabilities between controller and flash.
 */
static int spi_nor_default_setup(struct spi_nor *nor,
				 const struct flash_info *info,
				 const struct spi_nor_flash_parameter *params)
	bool enable_quad_io;

	spi_nor_adjust_hwcaps(nor, params, &shared_mask);

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, params, shared_mask);
			"can't select read settings supported by both the SPI controller and memory.\n");

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, params, shared_mask);
			"can't select write settings supported by both the SPI controller and memory.\n");

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor, info);
			"can't select erase settings supported by both the SPI controller and memory.\n");

	/* Enable Quad I/O if needed. */
	enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
			  spi_nor_get_protocol_width(nor->write_proto) == 4);
	/* Install the quad_enable hook only when a quad protocol was chosen. */
	if (enable_quad_io && params->quad_enable)
		nor->quad_enable = params->quad_enable;
		nor->quad_enable = NULL;
3187 static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
3188 const struct spi_nor_flash_parameter *params)
3193 return nor->setup(nor, info, params);
#ifdef CONFIG_SPI_FLASH_SPANSION
/*
 * s25hx_t_mdp_ready() - ready() hook for multi-die package parts: poll the
 * status of each die (one per 128 MiB of address space).
 */
static int s25hx_t_mdp_ready(struct spi_nor *nor)
	for (addr = 0; addr < nor->mtd.size; addr += SZ_128M) {
		ret = spansion_sr_ready(nor, addr, 0);
/* Set the volatile Quad Enable bit on every die of the package. */
static int s25hx_t_quad_enable(struct spi_nor *nor)
	for (addr = 0; addr < nor->mtd.size; addr += SZ_128M) {
		ret = spansion_quad_enable_volatile(nor, addr, 0);
/* Erase hook for the non-uniform (hybrid) sector layout. */
static int s25hx_t_erase_non_uniform(struct spi_nor *nor, loff_t addr)
	/* Support 32 x 4KB sectors at bottom */
	return spansion_erase_non_uniform(nor, addr, SPINOR_OP_BE_4K_4B, 0,
/*
 * s25hx_t_setup() - S25HL/S25HS-specific setup: pick the erase and ready()
 * hooks matching the configured sector layout and package, then fall through
 * to the default setup.
 */
static int s25hx_t_setup(struct spi_nor *nor, const struct flash_info *info,
			 const struct spi_nor_flash_parameter *params)
#ifdef CONFIG_SPI_FLASH_BAR
	return -ENOTSUPP; /* Bank Address Register is not supported */
	/*
	 * Read CFR3V to check if uniform sector is selected. If not, assign an
	 * erase hook that supports non-uniform erase.
	 */
	ret = spansion_read_any_reg(nor, SPINOR_REG_ADDR_CFR3V, 0, &cfr3v);

	if (!(cfr3v & CFR3V_UNHYSA))
		nor->erase = s25hx_t_erase_non_uniform;

	/*
	 * For the multi-die package parts, the ready() hook is needed to check
	 * all dies' status via read any register.
	 */
	if (nor->mtd.size > SZ_128M)
		nor->ready = s25hx_t_mdp_ready;

	return spi_nor_default_setup(nor, info, params);
/* default_init fixup: route setup through the S25HL/S25HS variant. */
static void s25hx_t_default_init(struct spi_nor *nor)
	nor->setup = s25hx_t_setup;
/*
 * s25hx_t_post_bfpt_fixup() - correct BFPT-derived values: restore the
 * flash_info sector erase with 4-byte addressing, and derive the real page
 * size from CFR3V (the BFPT always advertises 512B).
 */
static int s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
				   const struct sfdp_parameter_header *header,
				   const struct sfdp_bfpt *bfpt,
				   struct spi_nor_flash_parameter *params)
	/* erase size in case it is set to 4K from BFPT */
	nor->erase_opcode = SPINOR_OP_SE_4B;
	nor->mtd.erasesize = nor->info->sector_size;

	/* Switch to 4-byte addressing and record it. */
	ret = set_4byte(nor, nor->info, 1);
	nor->addr_width = 4;

	/*
	 * The page_size is set to 512B from BFPT, but it actually depends on
	 * the configuration register. Look up the CFR3V and determine the
	 * page_size. For multi-die package parts, use 512B only when the all
	 * dies are configured to 512B buffer.
	 */
	for (addr = 0; addr < params->size; addr += SZ_128M) {
		ret = spansion_read_any_reg(nor, addr + SPINOR_REG_ADDR_CFR3V,

		/* Any die with a 256B buffer forces 256B for the whole part. */
		if (!(cfr3v & CFR3V_PGMBUF)) {
			params->page_size = 256;
	params->page_size = 512;
3306 static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor,
3307 struct spi_nor_flash_parameter *params)
3309 /* READ_FAST_4B (0Ch) requires mode cycles*/
3310 params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
3311 /* PP_1_1_4 is not supported */
3312 params->hwcaps.mask &= ~SNOR_HWCAPS_PP_1_1_4;
3313 /* Use volatile register to enable quad */
3314 params->quad_enable = s25hx_t_quad_enable;
/* Fixup hooks for the Spansion/Cypress S25HL and S25HS families. */
static struct spi_nor_fixups s25hx_t_fixups = {
	.default_init = s25hx_t_default_init,
	.post_bfpt = s25hx_t_post_bfpt_fixup,
	.post_sfdp = s25hx_t_post_sfdp_fixup,
/*
 * s25fl256l setup hook. Only the BAR-unsupported branch is visible here;
 * NOTE(review): confirm the non-BAR path in the full source.
 */
static int s25fl256l_setup(struct spi_nor *nor, const struct flash_info *info,
			   const struct spi_nor_flash_parameter *params)
	return -ENOTSUPP; /* Bank Address Register is not supported */
/* default_init fixup: use the s25fl256l-specific setup hook. */
static void s25fl256l_default_init(struct spi_nor *nor)
	nor->setup = s25fl256l_setup;
/* Fixup hooks for the s25fl256l. */
static struct spi_nor_fixups s25fl256l_fixups = {
	.default_init = s25fl256l_default_init,
#ifdef CONFIG_SPI_FLASH_S28HX_T
/**
 * spi_nor_cypress_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * This also sets the memory access latency cycles to 24 to allow the flash to
 * run at up to 200MHz.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor)
	struct spi_mem_op op;

	/* Use 24 dummy cycles for memory array reads. */
	ret = write_enable(nor);

	/* Program the memory-latency field of volatile CFR2V. */
	buf = SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24;
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
					   SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_CYPRESS_CFR2V, 1),
					   SPI_MEM_OP_NO_DUMMY,
					   SPI_MEM_OP_DATA_OUT(1, &buf, 1));
	ret = spi_mem_exec_op(nor->spi, &op);
			"failed to set default memory latency value: %d\n",

	ret = spi_nor_wait_till_ready(nor);

	/* Subsequent reads must use the new latency. */
	nor->read_dummy = 24;

	/* Set the octal and DTR enable bits. */
	ret = write_enable(nor);

	buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
					   SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_CYPRESS_CFR5V, 1),
					   SPI_MEM_OP_NO_DUMMY,
					   SPI_MEM_OP_DATA_OUT(1, &buf, 1));
	ret = spi_mem_exec_op(nor->spi, &op);
		dev_warn(nor->dev, "Failed to enable octal DTR mode\n");
/* Erase hook for the factory-default hybrid sector layout. */
static int s28hx_t_erase_non_uniform(struct spi_nor *nor, loff_t addr)
	/* Factory default configuration: 32 x 4 KiB sectors at bottom. */
	return spansion_erase_non_uniform(nor, addr, SPINOR_OP_S28_SE_4K,
/*
 * s28hx_t_setup() - install the non-uniform erase hook when CFR3V reports a
 * hybrid sector layout, then run the default setup.
 */
static int s28hx_t_setup(struct spi_nor *nor, const struct flash_info *info,
			 const struct spi_nor_flash_parameter *params)
	struct spi_mem_op op;

	ret = spi_nor_wait_till_ready(nor);

	/*
	 * Check CFR3V to check if non-uniform sector mode is selected. If it
	 * is, set the erase hook to the non-uniform erase procedure.
	 */
	op = (struct spi_mem_op)
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
			   SPI_MEM_OP_ADDR(addr_width,
					   SPINOR_REG_CYPRESS_CFR3V, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, &buf, 1));

	ret = spi_mem_exec_op(nor->spi, &op);

	if (!(buf & SPINOR_REG_CYPRESS_CFR3V_UNISECT))
		nor->erase = s28hx_t_erase_non_uniform;

	return spi_nor_default_setup(nor, info, params);
/* default_init fixup: octal DTR enable hook plus variant-specific setup. */
static void s28hx_t_default_init(struct spi_nor *nor)
	nor->octal_dtr_enable = spi_nor_cypress_octal_dtr_enable;
	nor->setup = s28hx_t_setup;
/* SFDP post-processing quirks for the S28HL/S28HS family. */
static void s28hx_t_post_sfdp_fixup(struct spi_nor *nor,
				    struct spi_nor_flash_parameter *params)
	/*
	 * On older versions of the flash the xSPI Profile 1.0 table has the
	 * 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
	 */
	if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
		params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
			SPINOR_OP_CYPRESS_RD_FAST;

	params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;

	/* This flash is also missing the 4-byte Page Program opcode bit. */
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
	/*
	 * Since xSPI Page Program opcode is backward compatible with
	 * Legacy SPI, use Legacy SPI opcode there as well.
	 */
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
				SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);

	/*
	 * The xSPI Profile 1.0 table advertises the number of additional
	 * address bytes needed for Read Status Register command as 0 but the
	 * actual value for that is 4.
	 */
	params->rdsr_addr_nbytes = 4;
/*
 * s28hx_t_post_bfpt_fixup() - derive the real page size from CFR3V and
 * override the erase geometry advertised by the BFPT.
 */
static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
				   const struct sfdp_parameter_header *bfpt_header,
				   const struct sfdp_bfpt *bfpt,
				   struct spi_nor_flash_parameter *params)
	struct spi_mem_op op;

	/*
	 * The BFPT table advertises a 512B page size but the page size is
	 * actually configurable (with the default being 256B). Read from
	 * CFR3V[4] and set the correct size.
	 */
	op = (struct spi_mem_op)
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
			   SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_CYPRESS_CFR3V, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, &buf, 1));
	ret = spi_mem_exec_op(nor->spi, &op);

	if (buf & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
		params->page_size = 512;
		params->page_size = 256;

	/*
	 * The BFPT advertises that it supports 4k erases, and the datasheet
	 * says the same. But 4k erases did not work when testing. So, use 256k
	 * erases (SE 4B opcode, 0x40000 bytes) instead.
	 */
	nor->erase_opcode = SPINOR_OP_SE_4B;
	nor->mtd.erasesize = 0x40000;
/* Fixup hooks for the Cypress S28HL/S28HS (octal) families. */
static struct spi_nor_fixups s28hx_t_fixups = {
	.default_init = s28hx_t_default_init,
	.post_sfdp = s28hx_t_post_sfdp_fixup,
	.post_bfpt = s28hx_t_post_bfpt_fixup,
#endif /* CONFIG_SPI_FLASH_S28HX_T */
#ifdef CONFIG_SPI_FLASH_MT35XU
/*
 * spi_nor_micron_octal_dtr_enable() - switch a Micron MT35XU part to octal
 * DTR: program the dummy-cycle count in CFR1V, then the mode bits in CFR0V.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor)
	struct spi_mem_op op;

	/* Set dummy cycles for Fast Read to the default of 20. */
	ret = write_enable(nor);

	op = (struct spi_mem_op)
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
			   SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_MT_CFR1V, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(1, &buf, 1));
	ret = spi_mem_exec_op(nor->spi, &op);

	ret = spi_nor_wait_till_ready(nor);

	/* Subsequent reads must use the new latency. */
	nor->read_dummy = 20;

	ret = write_enable(nor);

	/* Now switch the device into octal DTR mode via CFR0V. */
	buf = SPINOR_MT_OCT_DTR;
	op = (struct spi_mem_op)
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
			   SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_MT_CFR0V, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(1, &buf, 1));
	ret = spi_mem_exec_op(nor->spi, &op);
		dev_err(nor->dev, "Failed to enable octal DTR mode\n");
/* default_init fixup: install the Micron octal DTR enable hook. */
static void mt35xu512aba_default_init(struct spi_nor *nor)
	nor->octal_dtr_enable = spi_nor_micron_octal_dtr_enable;
/* SFDP post-processing quirks for the mt35xu512aba. */
static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor,
					 struct spi_nor_flash_parameter *params)
	/* Set the Fast Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
				  0, 20, SPINOR_OP_MT_DTR_RD,
				  SNOR_PROTO_8_8_8_DTR);

	params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;

	/* Command extension repeats the opcode; RDSR needs 8 dummies, no addr. */
	nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
	params->rdsr_dummy = 8;
	params->rdsr_addr_nbytes = 0;

	/*
	 * The BFPT quad enable field is set to a reserved value so the quad
	 * enable function is ignored by spi_nor_parse_bfpt(). Make sure no
	 * stale quad_enable hook is left installed.
	 */
	params->quad_enable = NULL;
/* Fixup hooks for the Micron mt35xu512aba. */
static struct spi_nor_fixups mt35xu512aba_fixups = {
	.default_init = mt35xu512aba_default_init,
	.post_sfdp = mt35xu512aba_post_sfdp_fixup,
#endif /* CONFIG_SPI_FLASH_MT35XU */
#if CONFIG_IS_ENABLED(SPI_FLASH_MACRONIX)
/**
 * spi_nor_macronix_octal_dtr_enable() - Enable octal DTR on Macronix flashes.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set Macronix max dummy cycles 20 to allow the flash to run at fastest
 * frequency.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_macronix_octal_dtr_enable(struct spi_nor *nor)
	struct spi_mem_op op;

	ret = write_enable(nor);

	/* Program the dummy-cycle setting in Configuration Register 2. */
	buf = SPINOR_REG_MXIC_DC_20;
	op = (struct spi_mem_op)
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_CR2, 1),
			   SPI_MEM_OP_ADDR(4, SPINOR_REG_MXIC_CR2_DC, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(1, &buf, 1));

	ret = spi_mem_exec_op(nor->spi, &op);

	ret = spi_nor_wait_till_ready(nor);

	/* Subsequent reads must use the maximum dummy count. */
	nor->read_dummy = MXIC_MAX_DC;
	ret = write_enable(nor);

	/* Switch the device into octal DTR (OPI DTR) mode. */
	buf = SPINOR_REG_MXIC_OPI_DTR_EN;
	op = (struct spi_mem_op)
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_CR2, 1),
			   SPI_MEM_OP_ADDR(4, SPINOR_REG_MXIC_CR2_MODE, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(1, &buf, 1));

	ret = spi_mem_exec_op(nor->spi, &op);
		dev_err(nor->dev, "Failed to enable octal DTR mode\n");

	/* Register accesses now use 8D-8D-8D framing. */
	nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
/* default_init fixup: install the Macronix octal DTR enable hook. */
static void macronix_octal_default_init(struct spi_nor *nor)
	nor->octal_dtr_enable = spi_nor_macronix_octal_dtr_enable;
3665 static void macronix_octal_post_sfdp_fixup(struct spi_nor *nor,
3666 struct spi_nor_flash_parameter *params)
3669 * Adding SNOR_HWCAPS_PP_8_8_8_DTR in hwcaps.mask when
3670 * SPI_NOR_OCTAL_DTR_READ flag exists.
3672 if (params->hwcaps.mask & SNOR_HWCAPS_READ_8_8_8_DTR)
3673 params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
/* Fixup hooks for Macronix octal DTR capable flashes. */
static struct spi_nor_fixups macronix_octal_fixups = {
	.default_init = macronix_octal_default_init,
	.post_sfdp = macronix_octal_post_sfdp_fixup,
#endif /* CONFIG_SPI_FLASH_MACRONIX */
3682 /** spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
3683 * @nor: pointer to a 'struct spi_nor'
3685 * Return: 0 on success, -errno otherwise.
3687 static int spi_nor_octal_dtr_enable(struct spi_nor *nor)
3691 if (!nor->octal_dtr_enable)
3694 if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
3695 nor->write_proto == SNOR_PROTO_8_8_8_DTR))
3698 if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
3701 ret = nor->octal_dtr_enable(nor);
3705 nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
/* One-time flash initialization performed at the end of probe. */
static int spi_nor_init(struct spi_nor *nor)
	err = spi_nor_octal_dtr_enable(nor);
		dev_dbg(nor->dev, "Octal DTR mode not supported\n");

	/*
	 * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
	 * with the software protection bits set
	 */
	if (IS_ENABLED(CONFIG_SPI_FLASH_UNLOCK_ALL) &&
	    (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
	     JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
	     JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
	     nor->info->flags & SPI_NOR_HAS_LOCK)) {
		/* Clear the protection bits, then wait for completion. */
		spi_nor_wait_till_ready(nor);

	/* Run the quad-enable hook selected during parameter init. */
	if (nor->quad_enable) {
		err = nor->quad_enable(nor);
			dev_dbg(nor->dev, "quad mode not supported\n");

	if (nor->addr_width == 4 &&
	    !(nor->info->flags & SPI_NOR_OCTAL_DTR_READ) &&
	    (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
	    !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		if (nor->flags & SNOR_F_BROKEN_RESET)
			debug("enabling reset hack; may not recover from unexpected reboots\n");
		/* Enter stateful 4-byte addressing mode (see warning above). */
		set_4byte(nor, nor->info, 1);
#ifdef CONFIG_SPI_FLASH_SOFT_RESET
/**
 * spi_nor_soft_reset() - perform the JEDEC Software Reset sequence
 * @nor:	the spi_nor structure
 *
 * This function can be used to switch from Octal DTR mode to legacy mode on a
 * flash that supports it. The soft reset is executed in Octal DTR mode.
 *
 * Return: 0 for success, -errno for failure.
 */
static int spi_nor_soft_reset(struct spi_nor *nor)
	struct spi_mem_op op;
	enum spi_nor_cmd_ext ext;

	/* Remember the extension type so it can be restored afterwards. */
	ext = nor->cmd_ext_type;
	if (nor->cmd_ext_type == SPI_NOR_EXT_NONE) {
		nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
#if CONFIG_IS_ENABLED(SPI_NOR_BOOT_SOFT_RESET_EXT_INVERT)
		nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
#endif /* SPI_NOR_BOOT_SOFT_RESET_EXT_INVERT */

	/* Software Reset Enable. */
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
					   SPI_MEM_OP_NO_DUMMY,
					   SPI_MEM_OP_NO_DATA);
	spi_nor_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
	ret = spi_mem_exec_op(nor->spi, &op);
		dev_warn(nor->dev, "Software reset enable failed: %d\n", ret);

	/* Software Reset proper. */
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
					   SPI_MEM_OP_NO_DUMMY,
					   SPI_MEM_OP_NO_DATA);
	spi_nor_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
	ret = spi_mem_exec_op(nor->spi, &op);
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, wait for 200ms just to be sure.
	 */
	udelay(SPI_NOR_SRST_SLEEP_LEN);

	/* Restore the saved command extension type. */
	nor->cmd_ext_type = ext;
#endif /* CONFIG_SPI_FLASH_SOFT_RESET */
/*
 * spi_nor_remove() - tear down the flash before handing it off.
 *
 * When the part was used in octal DTR mode and SNOR_F_SOFT_RESET is set,
 * issue the JEDEC Software Reset so the next user finds it in legacy mode.
 *
 * Return: 0 for success, -errno for failure.
 */
int spi_nor_remove(struct spi_nor *nor)
{
#ifdef CONFIG_SPI_FLASH_SOFT_RESET
	if ((nor->info->flags & SPI_NOR_OCTAL_DTR_READ) &&
	    (nor->flags & SNOR_F_SOFT_RESET))
		return spi_nor_soft_reset(nor);
#endif

	return 0;
}
/*
 * spi_nor_set_fixups() - install manufacturer/part-specific fixup hooks,
 * selected by JEDEC ID bytes or by flash_info name.
 */
void spi_nor_set_fixups(struct spi_nor *nor)
#ifdef CONFIG_SPI_FLASH_SPANSION
	if (JEDEC_MFR(nor->info) == SNOR_MFR_CYPRESS) {
		/* The device family is encoded in the second ID byte. */
		switch (nor->info->id[1]) {
		case 0x2a: /* S25HL (QSPI, 3.3V) */
		case 0x2b: /* S25HS (QSPI, 1.8V) */
			nor->fixups = &s25hx_t_fixups;

#ifdef CONFIG_SPI_FLASH_S28HX_T
		case 0x5a: /* S28HL (Octal, 3.3V) */
		case 0x5b: /* S28HS (Octal, 1.8V) */
			nor->fixups = &s28hx_t_fixups;

	if (CONFIG_IS_ENABLED(SPI_FLASH_BAR) &&
	    !strcmp(nor->info->name, "s25fl256l"))
		nor->fixups = &s25fl256l_fixups;

#ifdef CONFIG_SPI_FLASH_MT35XU
	if (!strcmp(nor->info->name, "mt35xu512aba"))
		nor->fixups = &mt35xu512aba_fixups;

#if CONFIG_IS_ENABLED(SPI_FLASH_MACRONIX)
	nor->fixups = &macronix_octal_fixups;
#endif /* SPI_FLASH_MACRONIX */
/*
 * spi_nor_scan() - detect a SPI NOR flash and initialize @nor for use.
 *
 * Sequence (as visible here): reset host-side protocol bookkeeping to
 * 1-1-1, optionally soft-reset the flash, read the JEDEC ID, parse the
 * flash parameters (SFDP and/or static table), register the MTD
 * callbacks and vendor lock/unlock hooks, pick the address width and
 * opcodes, run spi_nor_init(), and print a summary banner.
 *
 * @nor: SPI NOR descriptor; @nor->spi and @nor->dev must already be set
 *       by the caller.
 *
 * Return: presumably 0 on success and a negative error code on failure;
 * the error-return lines are elided in this view — confirm against the
 * full source.
 */
3868 int spi_nor_scan(struct spi_nor *nor)
3870 	struct spi_nor_flash_parameter params;
3871 	const struct flash_info *info = NULL;
3872 	struct mtd_info *mtd = &nor->mtd;
3873 	struct spi_slave *spi = nor->spi;
	/*
	 * When CFI NOR flash is also present, offset our MTD device
	 * numbering past the CFI banks (used below when composing
	 * nor->mtd_name).
	 */
3877 #ifdef CONFIG_FLASH_CFI_MTD
3878 	cfi_mtd_nb = CFI_FLASH_BANKS;
3881 	/* Reset SPI protocol for all commands. */
3882 	nor->reg_proto = SNOR_PROTO_1_1_1;
3883 	nor->read_proto = SNOR_PROTO_1_1_1;
3884 	nor->write_proto = SNOR_PROTO_1_1_1;
	/* Default data/register accessors; may be overridden later. */
3885 	nor->read = spi_nor_read_data;
3886 	nor->write = spi_nor_write_data;
3887 	nor->read_reg = spi_nor_read_reg;
3888 	nor->write_reg = spi_nor_write_reg;
3890 	nor->setup = spi_nor_default_setup;
3892 #ifdef CONFIG_SPI_FLASH_SOFT_RESET_ON_BOOT
3894 	 * When the flash is handed to us in a stateful mode like 8D-8D-8D, it
3895 	 * is difficult to detect the mode the flash is in. One option is to
3896 	 * read SFDP in all modes and see which one gives the correct "SFDP"
3897 	 * signature, but not all flashes support SFDP in 8D-8D-8D mode.
3899 	 * Further, even if you detect the mode of the flash via SFDP, you
3900 	 * still have the problem of actually reading the ID. The Read ID
3901 	 * command is not standardized across flash vendors. Flashes can have
3902 	 * different dummy cycles needed for reading the ID. Some flashes even
3903 	 * expect a 4-byte dummy address with the Read ID command. All this
3904 	 * information cannot be obtained from the SFDP table.
3906 	 * So, perform a Software Reset sequence before reading the ID and
3907 	 * initializing the flash. A Soft Reset will bring back the flash in
3908 	 * its default protocol mode assuming no non-volatile configuration was
3909 	 * set. This will let us detect the flash even if ROM hands it to us in
3912 	 * To accommodate cases where there is more than one flash on a board,
3913 	 * and only one of them needs a soft reset, failure to reset is not
3914 	 * made fatal, and we still try to read ID if possible.
3916 	spi_nor_soft_reset(nor);
3917 #endif /* CONFIG_SPI_FLASH_SOFT_RESET_ON_BOOT */
	/* Identify the chip from its JEDEC ID (error path elided here). */
3919 	info = spi_nor_read_id(nor);
3920 	if (IS_ERR_OR_NULL(info))
	/* Install manufacturer-specific quirk handlers before param parsing. */
3924 	spi_nor_set_fixups(nor);
3926 	/* Parse the Serial Flash Discoverable Parameters table. */
3927 	ret = spi_nor_init_params(nor, info, &params);
	/* Build a unique MTD name, e.g. "nor0", offset past any CFI banks. */
3932 	sprintf(nor->mtd_name, "%s%d",
3933 		MTD_DEV_TYPE(MTD_DEV_TYPE_NOR),
3934 		cfi_mtd_nb + dev_seq(nor->dev));
3935 	mtd->name = nor->mtd_name;
3937 	mtd->dev = nor->dev;
3939 	mtd->type = MTD_NORFLASH;
3941 	mtd->flags = MTD_CAP_NORFLASH;
3942 	mtd->size = params.size;
	/* Hook up the generic MTD operations implemented by this driver. */
3943 	mtd->_erase = spi_nor_erase;
3944 	mtd->_read = spi_nor_read;
3945 	mtd->_write = spi_nor_write;
3947 #if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
3948 	/* NOR protection support for STmicro/Micron chips and similar */
3949 	if (JEDEC_MFR(info) == SNOR_MFR_ST ||
3950 	    JEDEC_MFR(info) == SNOR_MFR_MICRON ||
3951 	    JEDEC_MFR(info) == SNOR_MFR_SST ||
3952 	    info->flags & SPI_NOR_HAS_LOCK) {
3953 		nor->flash_lock = stm_lock;
3954 		nor->flash_unlock = stm_unlock;
3955 		nor->flash_is_unlocked = stm_is_unlocked;
3959 #ifdef CONFIG_SPI_FLASH_SST
3961 	 * sst26 series block protection implementation differs from other
	/* SST26 parts get dedicated lock hooks, overriding the ones above. */
3964 	if (info->flags & SPI_NOR_HAS_SST26LOCK) {
3965 		nor->flash_lock = sst26_lock;
3966 		nor->flash_unlock = sst26_unlock;
3967 		nor->flash_is_unlocked = sst26_is_unlocked;
	/* Translate static flash_info flags into runtime SNOR_F_* flags. */
3971 	if (info->flags & USE_FSR)
3972 		nor->flags |= SNOR_F_USE_FSR;
3973 	if (info->flags & SPI_NOR_HAS_TB)
3974 		nor->flags |= SNOR_F_HAS_SR_TB;
3975 	if (info->flags & NO_CHIP_ERASE)
3976 		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
3977 	if (info->flags & USE_CLSR)
3978 		nor->flags |= SNOR_F_USE_CLSR;
3980 	if (info->flags & SPI_NOR_NO_ERASE)
3981 		mtd->flags |= MTD_NO_ERASE;
3983 	nor->page_size = params.page_size;
3984 	mtd->writebufsize = nor->page_size;
3986 	/* Some devices cannot do fast-read, no matter what DT tells us */
3987 	if ((info->flags & SPI_NOR_NO_FR) || (spi->mode & SPI_RX_SLOW))
3988 		params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
3991 	 * Configure the SPI memory:
3992 	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
3993 	 * - set the number of dummy cycles (mode cycles + wait states).
3994 	 * - set the SPI protocols for register and memory accesses.
3995 	 * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
3997 	ret = spi_nor_setup(nor, info, &params);
	/*
	 * Pick the address width, in decreasing priority: DTR forces 4
	 * bytes, then a value already set (from SFDP), then the static
	 * table, then the 3-byte default.
	 */
4001 	if (spi_nor_protocol_is_dtr(nor->read_proto)) {
4002 		/* Always use 4-byte addresses in DTR mode. */
4003 		nor->addr_width = 4;
4004 	} else if (nor->addr_width) {
4005 		/* already configured from SFDP */
4006 	} else if (info->addr_width) {
4007 		nor->addr_width = info->addr_width;
4009 		nor->addr_width = 3;
4012 	if (nor->addr_width == 3 && mtd->size > SZ_16M) {
4013 #ifndef CONFIG_SPI_FLASH_BAR
4014 		/* enable 4-byte addressing if the device exceeds 16MiB */
4015 		nor->addr_width = 4;
4016 		if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
4017 		    info->flags & SPI_NOR_4B_OPCODES)
4018 			spi_nor_set_4byte_opcodes(nor, info);
4020 		/* Configure the BAR - discover bank cmds and read current bank */
4021 		nor->addr_width = 3;
4022 		ret = read_bar(nor, info);
	/* Sanity-check against the driver's supported maximum. */
4028 	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4029 		dev_dbg(nor->dev, "address width is too large: %u\n",
4034 	/* Send all the required SPI flash commands to initialize device */
4035 	ret = spi_nor_init(nor);
	/* Cache the final geometry/parameters on the spi_nor itself. */
4039 	nor->rdsr_dummy = params.rdsr_dummy;
4040 	nor->rdsr_addr_nbytes = params.rdsr_addr_nbytes;
4041 	nor->name = info->name;
4042 	nor->size = mtd->size;
4043 	nor->erase_size = mtd->erasesize;
4044 	nor->sector_size = mtd->erasesize;
	/* Banner is suppressed in SPL to keep the binary small. */
4046 #ifndef CONFIG_SPL_BUILD
4047 	printf("SF: Detected %s with page size ", nor->name);
4048 	print_size(nor->page_size, ", erase size ");
4049 	print_size(nor->erase_size, ", total ");
4050 	print_size(nor->size, "");
4057 /* U-Boot specific functions, need to extend MTD to support these */
/*
 * spi_flash_cmd_get_sw_write_prot() - read the software write-protect bits.
 *
 * Reads the flash Status Register and returns bits [4:2] — presumably the
 * BP0-BP2 block-protect field; confirm against the specific flash datasheet.
 *
 * @nor: SPI NOR descriptor to query.
 *
 * Return: the 3-bit protection field (0 means unprotected); the negative
 * error path for a failed read_sr() is elided in this view — confirm
 * against the full source.
 */
4058 int spi_flash_cmd_get_sw_write_prot(struct spi_nor *nor)
4060 	int sr = read_sr(nor);
4065 	return (sr >> 2) & 7;