// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#ifndef __UBOOT__
#include <dm/devres.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include "internals.h"
#else
#include <dm/device_compat.h>
#include <spi.h>
#include <spi-mem.h>
#endif

#ifndef __UBOOT__
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
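
/*
 * Illustrative sketch (not from the original file): a controller driver's
 * exec_op() hook would typically pair the two helpers above around its DMA
 * transfer. my_ctlr_run_dma() is a hypothetical driver function.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_ctlr_run_dma(ctlr, op, &sgt);
 *
 *	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *	return ret;
 */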
#endif /* !__UBOOT__ */

static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;

	switch (buswidth) {
	case 1:
		return 0;
	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;
		break;
	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;
		break;
	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;
		break;
	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(slave, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(slave, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(slave, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
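
/*
 * Illustrative sketch (not from the original file): a controller driver with
 * extra restrictions would typically filter in its own ->supports_op() hook
 * and delegate the bus-width checks to the default helper above. The
 * my_ctlr_supports_op() name and the 4-byte address limit are hypothetical.
 *
 *	static bool my_ctlr_supports_op(struct spi_slave *slave,
 *					const struct spi_mem_op *op)
 *	{
 *		if (op->addr.nbytes > 4)
 *			return false;
 *
 *		return spi_mem_default_supports_op(slave, op);
 *	}
 */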

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @slave: the SPI device
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only support
 * specific opcodes. It can even be that the controller and device both
 * support Quad IOs but the hardware prevents you from using them because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_slave *slave,
			 const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->supports_op)
		return ops->mem_ops->supports_op(slave, op);

	return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
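
/*
 * Illustrative sketch (not from the original file): a flash driver can use
 * this check to decide whether a fast-read opcode is usable on the current
 * bus. Opcode 0x6b (READ 1-1-4), the 3-byte address and the single dummy
 * byte are example values only.
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *					  SPI_MEM_OP_ADDR(3, offset, 1),
 *					  SPI_MEM_OP_DUMMY(1, 1),
 *					  SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(slave, &op))
 *		...fall back to a plain single-bit read...
 */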

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @slave: the SPI device
 * @op: the memory operation to execute
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int op_len;
	u32 flag;
	int ret;
	int i;

	if (!spi_mem_supports_op(slave, op))
		return -ENOTSUPP;

	ret = spi_claim_bus(slave);
	if (ret < 0)
		return ret;

	if (ops->mem_ops && ops->mem_ops->exec_op) {
#ifndef __UBOOT__
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
#endif
		ret = ops->mem_ops->exec_op(slave, op);

#ifndef __UBOOT__
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);
#endif

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP) {
			spi_release_bus(slave);
			return ret;
		}
	}

#ifndef __UBOOT__
	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(slave, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;
#else

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Avoid using malloc() here so that we can use this code in SPL where
	 * simple malloc may be used. That implementation does not allow free()
	 * so repeated calls to this code can exhaust the space.
	 *
	 * The value of op_len is small, since it does not include the actual
	 * data being sent, only the op-code and address. In fact, it should be
	 * possible to just use a small fixed value here instead of op_len.
	 */
	u8 op_buf[op_len];

	op_buf[pos++] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			op_buf[pos + i] = op->addr.val >>
					  (8 * (op->addr.nbytes - i - 1));

		pos += op->addr.nbytes;
	}

	if (op->dummy.nbytes)
		memset(op_buf + pos, 0xff, op->dummy.nbytes);

	/* 1st transfer: opcode + address + dummy cycles */
	flag = SPI_XFER_BEGIN;
	/* Make sure to set END bit if no tx or rx data messages follow */
	if (!tx_buf && !rx_buf)
		flag |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
	if (ret)
		return ret;

	/* 2nd transfer: rx or tx data path */
	if (tx_buf || rx_buf) {
		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
			       rx_buf, SPI_XFER_END);
		if (ret)
			return ret;
	}

	spi_release_bus(slave);

	for (i = 0; i < pos; i++)
		debug("%02x ", op_buf[i]);
	debug("| [%dB %s] ",
	      tx_buf || rx_buf ? op->data.nbytes : 0,
	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
	for (i = 0; i < op->data.nbytes; i++)
		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
	debug("[ret %d]\n", ret);

	if (ret < 0)
		return ret;
#endif /* __UBOOT__ */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
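
/*
 * Illustrative sketch (not from the original file): reading a JEDEC ID with
 * this API. The 0x9f opcode and the 3-byte ID buffer are example values; the
 * SPI_MEM_OP_* helpers come from <spi-mem.h>.
 *
 *	u8 id[3];
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *	ret = spi_mem_exec_op(slave, &op);
 */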

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @slave: the SPI device
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->adjust_op_size)
		return ops->mem_ops->adjust_op_size(slave, op);

	if (!ops->mem_ops || !ops->mem_ops->exec_op) {
		unsigned int len;

		len = sizeof(op->cmd.opcode) + op->addr.nbytes +
			op->dummy.nbytes;
		if (slave->max_write_size && len > slave->max_write_size)
			return -EINVAL;

		if (op->data.dir == SPI_MEM_DATA_IN) {
			if (slave->max_read_size)
				op->data.nbytes = min(op->data.nbytes,
						      slave->max_read_size);
		} else if (slave->max_write_size) {
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_write_size - len);
		}

		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
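
/*
 * Illustrative sketch (not from the original file): callers typically shrink
 * the operation to what the controller accepts, execute it, then advance and
 * repeat. Opcode 0x03 (READ) is an example value; from, buf and len are
 * hypothetical caller state.
 *
 *	while (len) {
 *		struct spi_mem_op op =
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				   SPI_MEM_OP_ADDR(3, from, 1),
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(len, buf, 1));
 *
 *		ret = spi_mem_adjust_op_size(slave, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(slave, &op);
 *		if (ret)
 *			return ret;
 *
 *		from += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */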

#ifndef __UBOOT__
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;
	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
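
/*
 * Illustrative sketch (not from the original file): a Linux SPI memory driver
 * normally registers through the module_spi_mem_driver() convenience macro
 * from <linux/spi/spi-mem.h>, which expands to the register/unregister pair
 * in this file. The my_mem_* names are hypothetical.
 *
 *	static struct spi_mem_driver my_mem_driver = {
 *		.spidrv = {
 *			.driver = { .name = "my-mem" },
 *		},
 *		.probe = my_mem_probe,
 *		.remove = my_mem_remove,
 *	};
 *	module_spi_mem_driver(my_mem_driver);
 */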

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
#endif /* !__UBOOT__ */