// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2011-12 The Chromium OS Authors.
 *
 * This file is derived from the flashrom project.
 */

#define LOG_CATEGORY	UCLASS_SPI

#include <bootstage.h>
#include <div64.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <pch.h>
#include <pci.h>
#include <spi.h>
#include <spl.h>
#include <spi_flash.h>
#include <spi-mem.h>
#include <asm/fast_spi.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <dm/uclass-internal.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/sizes.h>

#include "ich.h"

#ifdef DEBUG_TRACE
#define debug_trace(fmt, args...)	debug(fmt, ##args)
#else
#define debug_trace(x, args...)
#endif

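/*
 * Memory-mapped register accessors for the ICH SPI controller; each access
 * is logged via debug_trace() when DEBUG_TRACE is defined.
 */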
static u8 ich_readb(struct ich_spi_priv *priv, int reg)
{
	u8 value = readb(priv->base + reg);

	debug_trace("read %2.2x from %4.4x\n", value, reg);

	return value;
}

static u16 ich_readw(struct ich_spi_priv *priv, int reg)
{
	u16 value = readw(priv->base + reg);

	debug_trace("read %4.4x from %4.4x\n", value, reg);

	return value;
}

static u32 ich_readl(struct ich_spi_priv *priv, int reg)
{
	u32 value = readl(priv->base + reg);

	debug_trace("read %8.8x from %4.4x\n", value, reg);

	return value;
}

static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
{
	writeb(value, priv->base + reg);
	debug_trace("wrote %2.2x to %4.4x\n", value, reg);
}

static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
{
	writew(value, priv->base + reg);
	debug_trace("wrote %4.4x to %4.4x\n", value, reg);
}

static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
{
	writel(value, priv->base + reg);
	debug_trace("wrote %8.8x to %4.4x\n", value, reg);
}

static void write_reg(struct ich_spi_priv *priv, const void *value,
		      int dest_reg, uint32_t size)
{
	memcpy_toio(priv->base + dest_reg, value, size);
}

static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
		     uint32_t size)
{
	memcpy_fromio(value, priv->base + src_reg, size);
}

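/*
 * ich_set_bbar() - Program the BIOS Base Address Register (BBAR)
 *
 * This sets the lowest flash address the controller allows; only bits 23:8
 * are programmable, so the requested address is masked with bbar_mask
 * before being merged into the current register value.
 */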
static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
{
	const uint32_t bbar_mask = 0x00ffff00;
	uint32_t ichspi_bbar;

	minaddr &= bbar_mask;
	ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
	ichspi_bbar |= minaddr;
	ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
}

/* @return true if the SPI flash supports the 33MHz speed */
static bool ich9_can_do_33mhz(struct udevice *dev)
{
	struct ich_spi_priv *priv = dev_get_priv(dev);
	u32 fdod, speed;

	if (!CONFIG_IS_ENABLED(PCI) || !priv->pch)
		return false;

	/* Observe SPI Descriptor Component Section 0 */
	dm_pci_write_config32(priv->pch, 0xb0, 0x1000);

	/* Extract the Write/Erase SPI Frequency from descriptor */
	dm_pci_read_config32(priv->pch, 0xb4, &fdod);

	/* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
	speed = (fdod >> 21) & 7;

	return speed == 1;
}

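/*
 * spi_lock_down() - Lock the SPI controller configuration
 *
 * Sets SPIS_LOCK (ICH7) or HSFS_FLOCKDN (ICH9) so the opcode, prefix and
 * type registers can no longer be changed, typically until the next reset.
 */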
static void spi_lock_down(struct ich_spi_plat *plat, void *sbase)
{
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		setbits_le16(&ich7_spi->spis, SPIS_LOCK);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
	}
}

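/*
 * spi_lock_status() - Check whether the controller is locked down
 *
 * Return: true if the lock-down bit is set, so only pre-programmed opcodes
 * may be used
 */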
static bool spi_lock_status(struct ich_spi_plat *plat, void *sbase)
{
	int lock = 0;

	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		lock = readw(&ich7_spi->spis) & SPIS_LOCK;
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
	}

	return lock != 0;
}

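/*
 * spi_setup_opcode() - Prepare the opcode menu for a transaction
 *
 * When the controller is unlocked, the requested opcode is written into
 * menu slot 0 and its type programmed. When locked, the pre-programmed menu
 * is searched for the opcode and its type checked.
 *
 * Return: the opcode-menu index to use, or a negative error code if the
 * opcode is not available or its type does not match
 */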
static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans,
			    bool lock)
{
	uint16_t optypes;
	uint8_t opmenu[ctlr->menubytes];

	if (!lock) {
		/* The lock is off, so just use index 0. */
		ich_writeb(ctlr, trans->opcode, ctlr->opmenu);
		optypes = ich_readw(ctlr, ctlr->optype);
		optypes = (optypes & 0xfffc) | (trans->type & 0x3);
		ich_writew(ctlr, optypes, ctlr->optype);
		return 0;
	} else {
		/* The lock is on. See if what we need is on the menu. */
		uint8_t optype;
		uint16_t opcode_index;

		/* Write Enable is handled as atomic prefix */
		if (trans->opcode == SPI_OPCODE_WREN)
			return 0;

		read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu));
		for (opcode_index = 0; opcode_index < ctlr->menubytes;
				opcode_index++) {
			if (opmenu[opcode_index] == trans->opcode)
				break;
		}

		if (opcode_index == ctlr->menubytes) {
			debug("ICH SPI: Opcode %x not found\n", trans->opcode);
			return -EINVAL;
		}

		optypes = ich_readw(ctlr, ctlr->optype);
		optype = (optypes >> (opcode_index * 2)) & 0x3;

		if (optype != trans->type) {
			debug("ICH SPI: Transaction doesn't fit type %d\n",
			      optype);
			return -ENOSPC;
		}
		return opcode_index;
	}
}

/*
 * Wait for up to 6s til status register bit(s) turn 1 (in case wait_til_set
 * below is true) or 0. In case the wait was for the bit(s) to set - write
 * those bits back, which would cause resetting them.
 *
 * Return the last read status value on success or a negative error code on
 * failure.
 */
static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask,
			   int wait_til_set)
{
	int timeout = 600000; /* This will result in 6s */
	u16 status = 0;

	while (timeout--) {
		status = ich_readw(ctlr, ctlr->status);
		if (wait_til_set ^ ((status & bitmask) == 0)) {
			if (wait_til_set) {
				ich_writew(ctlr, status & bitmask,
					   ctlr->status);
			}
			return status;
		}
		udelay(10);
	}
	debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n",
	      status, bitmask, wait_til_set, status & bitmask);

	return -ETIMEDOUT;
}

static void ich_spi_config_opcode(struct udevice *dev)
{
	struct ich_spi_priv *ctlr = dev_get_priv(dev);

	/*
	 * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down
	 * to prevent accidental or intentional writes. Before they get
	 * locked down, these registers should be initialized properly.
	 */
	ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop);
	ich_writew(ctlr, SPI_OPTYPE, ctlr->optype);
	ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu);
	ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32));
}

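/*
 * ich_spi_exec_op_swseq() - Execute a spi-mem operation by software
 * sequencing (ICH7/ICH9)
 *
 * The opcode (and optional 24-bit address) is programmed first, then data is
 * moved through the FDATA registers in chunks of at most 'databytes' bytes,
 * polling the status register after each cycle.
 */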
static int ich_spi_exec_op_swseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	struct ich_spi_priv *ctlr = dev_get_priv(bus);
	uint16_t control;
	int16_t opcode_index;
	int with_address = 0;
	int status;
	struct spi_trans *trans = &ctlr->trans;
	bool lock = spi_lock_status(plat, ctlr->base);
	int ret = 0;

	trans->in = NULL;
	trans->out = NULL;
	trans->type = 0xFF;

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			trans->in = op->data.buf.in;
			trans->bytesin = op->data.nbytes;
		} else {
			trans->out = op->data.buf.out;
			trans->bytesout = op->data.nbytes;
		}
	}

	if (trans->opcode != op->cmd.opcode)
		trans->opcode = op->cmd.opcode;

	if (lock && trans->opcode == SPI_OPCODE_WRDIS)
		return 0;

	if (trans->opcode == SPI_OPCODE_WREN) {
		/*
		 * Treat Write Enable as Atomic Pre-Op if possible
		 * in order to prevent the Management Engine from
		 * issuing a transaction between WREN and DATA.
		 */
		if (!lock)
			ich_writew(ctlr, trans->opcode, ctlr->preop);
		return 0;
	}

	ret = ich_status_poll(ctlr, SPIS_SCIP, 0);
	if (ret < 0)
		return ret;

	if (plat->ich_version == ICHV_7)
		ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
	else
		ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);

	/* Try to guess spi transaction type */
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS;
	} else {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS;
	}
	/* Special erase case handling */
	if (op->addr.nbytes && !op->data.buswidth)
		trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;

	opcode_index = spi_setup_opcode(ctlr, trans, lock);
	if (opcode_index < 0)
		return -EINVAL;

	if (op->addr.nbytes) {
		trans->offset = op->addr.val;
		with_address = 1;
	}

	if (ctlr->speed && ctlr->max_speed >= 33000000) {
		int byte;

		byte = ich_readb(ctlr, ctlr->speed);
		if (ctlr->cur_speed >= 33000000)
			byte |= SSFC_SCF_33MHZ;
		else
			byte &= ~SSFC_SCF_33MHZ;
		ich_writeb(ctlr, byte, ctlr->speed);
	}

	/* Preset control fields */
	control = SPIC_SCGO | ((opcode_index & 0x07) << 4);

	/* Issue atomic preop cycle if needed */
	if (ich_readw(ctlr, ctlr->preop))
		control |= SPIC_ACS;

	if (!trans->bytesout && !trans->bytesin) {
		/* SPI addresses are 24 bit only */
		if (with_address)
			ich_writel(ctlr, trans->offset & 0x00FFFFFF,
				   ctlr->addr);

		/*
		 * This is a 'no data' command (like Write Enable), its
		 * bytesout size was 1, decremented to zero while executing
		 * spi_setup_opcode() above. Tell the chip to send the
		 * command.
		 */
		ich_writew(ctlr, control, ctlr->control);

		/* wait for the result */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Command transaction error\n");
			return -EIO;
		}

		return 0;
	}

	while (trans->bytesout || trans->bytesin) {
		uint32_t data_length;

		/* SPI addresses are 24 bit only */
		ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr);

		if (trans->bytesout)
			data_length = min(trans->bytesout, ctlr->databytes);
		else
			data_length = min(trans->bytesin, ctlr->databytes);

		/* Program data into FDATA0 to N */
		if (trans->bytesout) {
			write_reg(ctlr, trans->out, ctlr->data, data_length);
			trans->bytesout -= data_length;
		}

		/* Add proper control fields' values */
		control &= ~((ctlr->databytes - 1) << 8);
		control |= SPIC_DS;
		control |= (data_length - 1) << 8;
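
		/*
		 * Illustrative example (assuming a 64-byte data FIFO): the
		 * byte count is encoded as length minus one, so a 4-byte
		 * chunk clears bits 13:8 above and then programs the value 3.
		 */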
		ich_writew(ctlr, control, ctlr->control);

		/* Wait for Cycle Done Status or Flash Cycle Error */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Data transaction error %x\n", status);
			return -EIO;
		}

		if (trans->bytesin) {
			read_reg(ctlr, ctlr->data, trans->in, data_length);
			trans->bytesin -= data_length;
		}
	}

	/* Clear atomic preop now that xfer is done */
	if (!lock)
		ich_writew(ctlr, 0, ctlr->preop);

	return 0;
}

/*
 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
 * that the operation does not cross a page boundary.
 */
static uint get_xfer_len(u32 offset, int len, int page_size)
{
	uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
	uint bytes_left = ALIGN(offset, page_size) - offset;
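
	/*
	 * Illustrative example (assuming a 64-byte FIFO): offset 0x10fc,
	 * len 64 and page_size 256 give bytes_left = 4, so only 4 bytes are
	 * transferred and the next chunk starts at the 0x1100 page boundary.
	 */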
	if (bytes_left)
		xfer_len = min(xfer_len, bytes_left);

	return xfer_len;
}

/* Fill FDATAn FIFO in preparation for a write transaction */
static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data,
			   uint len)
{
	memcpy(regs->fdata, data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data */
static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len)
{
	memcpy(dest, regs->fdata, len);
}

/* Fire up a transfer using the hardware sequencer */
static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
			     uint offset, uint len)
{
	/* Make sure all W1C status bits get cleared */
	u32 hsfsts;

	hsfsts = readl(&regs->hsfsts_ctl);
	hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK);
	hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE;

	/* Set up transaction parameters */
	hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT;
	hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK;
	hsfsts |= HSFSTS_FGO;

	writel(offset, &regs->faddr);
	writel(hsfsts, &regs->hsfsts_ctl);
}

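/*
 * wait_for_hwseq_xfer() - Poll the hardware sequencer until the cycle ends
 *
 * Return: 0 when FDONE is set, -EIO on a flash cycle error, -EPERM on an
 * access error, -ETIMEDOUT if nothing completes within
 * SPIBAR_HWSEQ_XFER_TIMEOUT_MS
 */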
static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset)
{
	ulong start;
	u32 hsfsts;

	start = get_timer(0);
	do {
		hsfsts = readl(&regs->hsfsts_ctl);
		if (hsfsts & HSFSTS_FCERR) {
			debug("SPI transaction error at offset %x HSFSTS = %08x\n",
			      offset, hsfsts);
			return -EIO;
		}
		if (hsfsts & HSFSTS_AEL)
			return -EPERM;

		if (hsfsts & HSFSTS_FDONE)
			return 0;
	} while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS);

	debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n",
	      offset, hsfsts, (uint)get_timer(start));

	return -ETIMEDOUT;
}

/**
 * exec_sync_hwseq_xfer() - Execute flash transfer by hardware sequencing
 *
 * This waits until the transfer completes or times out
 *
 * @regs: SPI registers
 * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
 * @offset: Offset to access
 * @len: Number of bytes to transfer (can be 0)
 * Return: 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
 *	(AEL), -ETIMEDOUT on timeout
 */
static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
				uint offset, uint len)
{
	start_hwseq_xfer(regs, hsfsts_cycle, offset, len);

	return wait_for_hwseq_xfer(regs, offset);
}

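/*
 * ich_spi_exec_op_hwseq() - Execute a spi-mem operation by hardware
 * sequencing
 *
 * The SPI-NOR opcode is translated into an HSFSTS cycle type and the data is
 * moved through the FDATA FIFO in chunks limited by get_xfer_len().
 */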
static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct fast_spi_regs *regs = priv->base;
	uint page_size;
	uint offset;
	int cycle;
	uint len;
	bool out;
	int ret;
	u8 *buf;

	offset = op->addr.val;
	len = op->data.nbytes;

	switch (op->cmd.opcode) {
	case SPINOR_OP_RDID:
		cycle = HSFSTS_CYCLE_RDID;
		break;
	case SPINOR_OP_READ_FAST:
		cycle = HSFSTS_CYCLE_READ;
		break;
	case SPINOR_OP_PP:
		cycle = HSFSTS_CYCLE_WRITE;
		break;
	case SPINOR_OP_WREN:
		/* Nothing needs to be done */
		return 0;
	case SPINOR_OP_WRSR:
		cycle = HSFSTS_CYCLE_WR_STATUS;
		break;
	case SPINOR_OP_RDSR:
		cycle = HSFSTS_CYCLE_RD_STATUS;
		break;
	case SPINOR_OP_WRDI:
		return 0; /* ignore */
	case SPINOR_OP_BE_4K:
		cycle = HSFSTS_CYCLE_4K_ERASE;
		ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);

		return ret;
	default:
		debug("Unknown cycle %x\n", op->cmd.opcode);
		return -EINVAL;
	}

	out = op->data.dir == SPI_MEM_DATA_OUT;
	buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;
	page_size = flash->page_size ? : 256;

	while (len) {
		uint xfer_len = get_xfer_len(offset, len, page_size);

		if (out)
			fill_xfer_fifo(regs, buf, xfer_len);

		ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
		if (ret)
			return ret;

		if (!out)
			drain_xfer_fifo(regs, buf, xfer_len);

		offset += xfer_len;
		buf += xfer_len;
		len -= xfer_len;
	}

	return 0;
}

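/*
 * ich_spi_exec_op() - spi-mem exec_op() hook
 *
 * Dispatches to the hardware or software sequencer depending on plat->hwseq
 * and accounts the time under BOOTSTAGE_ID_ACCUM_SPI.
 */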
static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	int ret;

	bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
	if (plat->hwseq)
		ret = ich_spi_exec_op_hwseq(slave, op);
	else
		ret = ich_spi_exec_op_swseq(slave, op);
	bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);

	return ret;
}

#if CONFIG_IS_ENABLED(OF_REAL)
/**
 * ich_spi_get_basics() - Get basic information about the ICH device
 *
 * This works without probing any devices if requested.
 *
 * @bus: SPI controller to use
 * @can_probe: true if this function is allowed to probe the PCH
 * @pchp: Returns a pointer to the pch, or NULL if not found
 * @ich_versionp: Returns ICH version detected on success
 * @mmio_basep: Returns the address of the SPI registers on success
 * Return: 0 if OK, -EPROTOTYPE if the PCH could not be found, -EAGAIN if
 *	the function cannot succeed without probing, possibly another error if
 *	pch_get_spi_base() fails
 */
static int ich_spi_get_basics(struct udevice *bus, bool can_probe,
			      struct udevice **pchp,
			      enum ich_version *ich_versionp, ulong *mmio_basep)
{
	struct udevice *pch = NULL;
	int ret = 0;

	/* Find a PCH if there is one */
	if (can_probe) {
		pch = dev_get_parent(bus);
		if (device_get_uclass_id(pch) != UCLASS_PCH) {
			uclass_first_device(UCLASS_PCH, &pch);
			if (!pch)
				; /* ignore this error since we don't need it */
		}
	}

	*ich_versionp = dev_get_driver_data(bus);
	if (*ich_versionp == ICHV_APL)
		*mmio_basep = dm_pci_read_bar32(bus, 0);
	else if (pch)
		ret = pch_get_spi_base(pch, mmio_basep);
	else
		return -EPROTOTYPE;
	*pchp = pch;

	return ret;
}

/**
 * ich_get_mmap_bus() - Handle the get_mmap() method for a bus
 *
 * There are several cases to consider:
 * 1. Using of-platdata, in which case we have the BDF and can access the
 *    registers by reading the BAR
 * 2. Not using of-platdata, but still with a SPI controller that is on its own
 *    PCI BDF. In this case we read the BDF from the parent plat and again get
 *    the registers by reading the BAR
 * 3. Using a SPI controller that is a child of the PCH, in which case we try
 *    to find the registers by asking the PCH. This only works if the PCH has
 *    been probed (which it will be if the bus is probed since parents are
 *    probed before children), since the PCH may not have a PCI address until
 *    its parent (the PCI bus itself) has been probed. If you are using this
 *    method then you should make sure the SPI bus is probed.
 *
 * The first two cases are useful in early init. The last one is more useful
 * afterwards.
 */
static int ich_get_mmap_bus(struct udevice *bus, ulong *map_basep,
			    uint *map_sizep, uint *offsetp)
{
	pci_dev_t spi_bdf;
#if CONFIG_IS_ENABLED(OF_REAL)
	if (device_is_on_pci_bus(bus)) {
		struct pci_child_plat *pplat;

		pplat = dev_get_parent_plat(bus);
		spi_bdf = pplat->devfn;
	} else {
		enum ich_version ich_version;
		struct fast_spi_regs *regs;
		struct udevice *pch;
		ulong mmio_base;
		int ret;

		ret = ich_spi_get_basics(bus, device_active(bus), &pch,
					 &ich_version, &mmio_base);
		if (ret)
			return log_msg_ret("basics", ret);
		regs = (struct fast_spi_regs *)mmio_base;

		return fast_spi_get_bios_mmap_regs(regs, map_basep, map_sizep,
						   offsetp);
	}
#else
	struct ich_spi_plat *plat = dev_get_plat(bus);

	/*
	 * We cannot rely on plat->bdf being set up yet since this method can
	 * be called before the device is probed. Use the of-platdata directly
	 * instead.
	 */
	spi_bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
#endif

	return fast_spi_get_bios_mmap(spi_bdf, map_basep, map_sizep, offsetp);
}

static int ich_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
			uint *offsetp)
{
	struct udevice *bus = dev_get_parent(dev);

	return ich_get_mmap_bus(bus, map_basep, map_sizep, offsetp);
}

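/*
 * ich_spi_adjust_size() - spi-mem adjust_op_size() hook
 *
 * Clamp op->data.nbytes so that a single operation never crosses an
 * ICH_BOUNDARY-aligned window and never exceeds the slave's maximum
 * read/write size.
 */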
static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	unsigned int page_offset;
	int addr = op->addr.val;
	unsigned int byte_count = op->data.nbytes;

	if (hweight32(ICH_BOUNDARY) == 1) {
		page_offset = addr & (ICH_BOUNDARY - 1);
	} else {
		u64 aux = addr;

		page_offset = do_div(aux, ICH_BOUNDARY);
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (slave->max_read_size) {
			op->data.nbytes = min(ICH_BOUNDARY - page_offset,
					      slave->max_read_size);
		}
	} else if (slave->max_write_size) {
		op->data.nbytes = min(ICH_BOUNDARY - page_offset,
				      slave->max_write_size);
	}

	op->data.nbytes = min(op->data.nbytes, byte_count);

	return 0;
}

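/*
 * ich_protect_lockdown() - Disable write-protect and optionally lock down
 *
 * BIOS write-protection is disabled via the PCH where possible; if the PCH
 * does not implement this, EISS is cleared and WPD set directly in the BIOS
 * control register. If the "intel,spi-lock-down" property is set, the opcode
 * registers are then programmed and the configuration locked.
 */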
static int ich_protect_lockdown(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret = -ENOSYS;

	/* Disable the BIOS write protect so write commands are allowed */
	if (priv->pch)
		ret = pch_set_spi_protect(priv->pch, false);
	if (ret == -ENOSYS) {
		u8 bios_cntl;

		bios_cntl = ich_readb(priv, priv->bcr);
		bios_cntl &= ~BIT(5);	/* clear Enable InSMM_STS (EISS) */
		bios_cntl |= 1;		/* Write Protect Disable (WPD) */
		ich_writeb(priv, bios_cntl, priv->bcr);
	} else if (ret) {
		debug("%s: Failed to disable write-protect: err=%d\n",
		      __func__, ret);
		return ret;
	}

	/* Lock down SPI controller settings if required */
	if (plat->lockdown) {
		ich_spi_config_opcode(dev);
		spi_lock_down(plat, priv->base);
	}

	return 0;
}

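/*
 * ich_init_controller() - Set up the register layout for this controller
 *
 * Fill in the ich_spi_priv register offsets for ICH7 or ICH9 (no offsets are
 * needed for ICHV_APL) and work out the maximum supported speed. In TPL the
 * fast-SPI BAR is programmed first so the registers are accessible.
 */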
static int ich_init_controller(struct udevice *dev,
			       struct ich_spi_plat *plat,
			       struct ich_spi_priv *ctlr)
{
	if (spl_phase() == PHASE_TPL) {
		struct ich_spi_plat *plat = dev_get_plat(dev);
		int ret;

		ret = fast_spi_early_init(plat->bdf, plat->mmio_base);
		if (ret)
			return ret;
	}

	ctlr->base = (void *)plat->mmio_base;
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich7_spi->opmenu);
		ctlr->optype = offsetof(struct ich7_spi_regs, optype);
		ctlr->addr = offsetof(struct ich7_spi_regs, spia);
		ctlr->data = offsetof(struct ich7_spi_regs, spid);
		ctlr->databytes = sizeof(ich7_spi->spid);
		ctlr->status = offsetof(struct ich7_spi_regs, spis);
		ctlr->control = offsetof(struct ich7_spi_regs, spic);
		ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich7_spi_regs, preop);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich9_spi->opmenu);
		ctlr->optype = offsetof(struct ich9_spi_regs, optype);
		ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
		ctlr->data = offsetof(struct ich9_spi_regs, fdata);
		ctlr->databytes = sizeof(ich9_spi->fdata);
		ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
		ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
		ctlr->speed = ctlr->control + 2;
		ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich9_spi_regs, preop);
		ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
		ctlr->pr = &ich9_spi->pr[0];
	} else if (plat->ich_version == ICHV_APL) {
		/* No register offsets are needed for the fast-SPI layout */
	} else {
		debug("ICH SPI: Unrecognised ICH version %d\n",
		      plat->ich_version);
		return -EINVAL;
	}

	/* Work out the maximum speed we can support */
	ctlr->max_speed = 20000000;
	if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
		ctlr->max_speed = 33000000;
	debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
	      plat->ich_version, plat->mmio_base, ctlr->max_speed);

	ich_set_bbar(ctlr, 0);

	return 0;
}

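/*
 * ich_cache_bios_region() - Cache the memory-mapped BIOS region
 *
 * Cover the mapped region, which ends just below 4GB, with a write-protect
 * MTRR so that reads are cached; write-back is avoided since the flash is
 * not meant to be written through this mapping.
 */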
static int ich_cache_bios_region(struct udevice *dev)
{
	ulong map_base;
	uint map_size;
	uint offset;
	ulong base;
	int ret;

	ret = ich_get_mmap_bus(dev, &map_base, &map_size, &offset);
	if (ret)
		return ret;

	/* Don't use WRBACK since we are not supposed to write to SPI flash */
	base = SZ_4G - map_size;
	mtrr_set_next_var(MTRR_TYPE_WRPROT, base, map_size);
	log_debug("BIOS cache base=%lx, size=%x\n", base, (uint)map_size);

	return 0;
}

static int ich_spi_probe(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_init_controller(dev, plat, priv);
	if (ret)
		return ret;

	if (spl_phase() == PHASE_TPL) {
		/* Cache the BIOS to speed things up */
		ret = ich_cache_bios_region(dev);
		if (ret)
			return ret;
	} else {
		ret = ich_protect_lockdown(dev);
		if (ret)
			return ret;
	}
	priv->cur_speed = priv->max_speed;

	return 0;
}

static int ich_spi_remove(struct udevice *bus)
{
	/*
	 * Configure SPI controller so that the Linux MTD driver can fully
	 * access the SPI NOR chip
	 */
	ich_spi_config_opcode(bus);

	return 0;
}

static int ich_spi_set_speed(struct udevice *bus, uint speed)
{
	struct ich_spi_priv *priv = dev_get_priv(bus);

	priv->cur_speed = speed;

	return 0;
}

static int ich_spi_set_mode(struct udevice *bus, uint mode)
{
	debug("%s: mode=%d\n", __func__, mode);

	return 0;
}

static int ich_spi_child_pre_probe(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct spi_slave *slave = dev_get_parent_priv(dev);

	/*
	 * Yes this controller can only transfer a small number of bytes at
	 * once! The limit is typically 64 bytes. For hardware sequencing a
	 * loop is used to get around this.
	 */
	if (!plat->hwseq) {
		slave->max_read_size = priv->databytes;
		slave->max_write_size = priv->databytes;
	}
	/*
	 * ICH 7 SPI controller only supports array read command
	 * and byte program command for SST flash
	 */
	if (plat->ich_version == ICHV_7)
		slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;

	return 0;
}

static int ich_spi_of_to_plat(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);

#if CONFIG_IS_ENABLED(OF_REAL)
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_spi_get_basics(dev, true, &priv->pch, &plat->ich_version,
				 &plat->mmio_base);
	if (ret)
		return log_msg_ret("basics", ret);
	plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
	/*
	 * Use an int so that the property is present in of-platdata even
	 * when it is false.
	 */
	plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
#else
	plat->ich_version = ICHV_APL;
	plat->mmio_base = plat->dtplat.early_regs[0];
	plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
	plat->hwseq = plat->dtplat.intel_hardware_seq;
#endif
	debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);

	return 0;
}

static const struct spi_controller_mem_ops ich_controller_mem_ops = {
	.adjust_op_size	= ich_spi_adjust_size,
	.supports_op	= NULL,
	.exec_op	= ich_spi_exec_op,
};

static const struct dm_spi_ops ich_spi_ops = {
	/* xfer is not supported */
	.set_speed	= ich_spi_set_speed,
	.set_mode	= ich_spi_set_mode,
	.mem_ops	= &ich_controller_mem_ops,
	.get_mmap	= ich_get_mmap,
	/*
	 * cs_info is not needed, since we require all chip selects to be
	 * in the device tree explicitly
	 */
};

static const struct udevice_id ich_spi_ids[] = {
	{ .compatible = "intel,ich7-spi", .data = ICHV_7 },
	{ .compatible = "intel,ich9-spi", .data = ICHV_9 },
	{ .compatible = "intel,fast-spi", .data = ICHV_APL },
	{ }
};

U_BOOT_DRIVER(intel_fast_spi) = {
	.name	= "intel_fast_spi",
	.id	= UCLASS_SPI,
	.of_match = ich_spi_ids,
	.ops	= &ich_spi_ops,
	.of_to_plat = ich_spi_of_to_plat,
	.plat_auto	= sizeof(struct ich_spi_plat),
	.priv_auto	= sizeof(struct ich_spi_priv),
	.child_pre_probe = ich_spi_child_pre_probe,
	.probe	= ich_spi_probe,
	.remove	= ich_spi_remove,
	.flags	= DM_FLAG_OS_PREPARE,
};