struct device *dev;
void __iomem *base;
u8 *buffer;
+ dma_addr_t buffer_dma;
struct clk *spi_clk;
struct clk *ctlr_clk;
unsigned int spi_freq;
}
}
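+/*
+ * Reads whose in-buffer is not MTK_NOR_DMA_ALIGN-aligned cannot be handed
+ * to the DMA engine directly and are routed through the bounce buffer.
+ */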
+static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
+}
+
static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
int dummy = 0;
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
+ struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
+
if (!op->data.nbytes)
return 0;
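+	/*
+	 * Misaligned flash addresses and short reads are limited to a single
+	 * byte; aligned in-buffers are trimmed to a DMA_ALIGN multiple, and
+	 * bounce-buffer reads are capped at MTK_NOR_BOUNCE_BUF_SIZE.
+	 */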
if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
(op->data.nbytes < MTK_NOR_DMA_ALIGN))
op->data.nbytes = 1;
- else if (!((ulong)(op->data.buf.in) &
- MTK_NOR_DMA_ALIGN_MASK))
+ else if (!need_bounce(sp, op))
op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
-static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
- u8 *buffer)
+static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
+ dma_addr_t dma_addr)
{
int ret = 0;
ulong delay;
u32 reg;
- dma_addr_t dma_addr;
-
- dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
- if (dma_mapping_error(sp->dev, dma_addr)) {
- dev_err(sp->dev, "failed to map dma buffer.\n");
- return -EINVAL;
- }
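+	/*
+	 * dma_addr must already be usable by the device: either mapped by
+	 * the caller or the permanently mapped bounce buffer.
+	 */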
writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
(delay + 1) * 100);
}
- dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
if (ret < 0)
dev_err(sp->dev, "dma read timeout.\n");
return ret;
}
-static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
- unsigned int length, u8 *buffer)
+static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
unsigned int rdlen;
int ret;
- if (length & MTK_NOR_DMA_ALIGN_MASK)
- rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
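+	/* Round the DMA length up to MTK_NOR_DMA_ALIGN; the bounce buffer is
+	 * allocated with MTK_NOR_DMA_ALIGN bytes of slack, so the rounded-up
+	 * length always fits.
+	 */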
+ if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
+ rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
else
- rdlen = length;
+ rdlen = op->data.nbytes;
- ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
- if (ret)
- return ret;
+ ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);
- memcpy(buffer, sp->buffer, length);
- return 0;
+ if (!ret)
+ memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);
+
+ return ret;
+}
+
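+/*
+ * Map the caller's buffer for DMA when it is suitably aligned; otherwise
+ * read through the pre-mapped coherent bounce buffer.
+ */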
+static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ int ret;
+ dma_addr_t dma_addr;
+
+ if (need_bounce(sp, op))
+ return mtk_nor_read_bounce(sp, op);
+
+ dma_addr = dma_map_single(sp->dev, op->data.buf.in,
+ op->data.nbytes, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(sp->dev, dma_addr))
+ return -EINVAL;
+
+ ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);
+
+ dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);
+
+ return ret;
}
static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
if (op->data.nbytes == 1) {
mtk_nor_set_addr(sp, op);
return mtk_nor_read_pio(sp, op);
- } else if (((ulong)(op->data.buf.in) &
- MTK_NOR_DMA_ALIGN_MASK)) {
- return mtk_nor_read_bounce(sp, op->addr.val,
- op->data.nbytes,
- op->data.buf.in);
} else {
- return mtk_nor_read_dma(sp, op->addr.val,
- op->data.nbytes,
- op->data.buf.in);
+ return mtk_nor_read_dma(sp, op);
}
}
struct spi_controller *ctlr;
struct mtk_nor *sp;
void __iomem *base;
- u8 *buffer;
struct clk *spi_clk, *ctlr_clk;
int ret, irq;
if (IS_ERR(ctlr_clk))
return PTR_ERR(ctlr_clk);
- buffer = devm_kmalloc(&pdev->dev,
- MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
- GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
- buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
- ~MTK_NOR_DMA_ALIGN_MASK);
-
ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
if (!ctlr) {
dev_err(&pdev->dev, "failed to allocate spi controller\n");
sp = spi_controller_get_devdata(ctlr);
sp->base = base;
- sp->buffer = buffer;
sp->has_irq = false;
sp->wbuf_en = false;
sp->ctlr = ctlr;
sp->dev = &pdev->dev;
sp->spi_clk = spi_clk;
sp->ctlr_clk = ctlr_clk;
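+	/*
+	 * The bounce buffer is a managed coherent allocation: it is mapped
+	 * once here (sp->buffer_dma), needs no per-read dma_map/unmap, and
+	 * is released automatically with the device.
+	 */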
+ sp->buffer = dmam_alloc_coherent(&pdev->dev,
+ MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
+ &sp->buffer_dma, GFP_KERNEL);
+ if (!sp->buffer)
+ return -ENOMEM;
+
+ if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
+ dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
+ return -ENOMEM;
+ }
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {