struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct device *dev = sport->port.dev;
+ struct dma_chan *chan = sport->dma_tx_chan;
int ret;
if (sport->dma_tx_in_progress)
return;

sg_set_buf(sgl + 1, xmit->buf, xmit->head);
}
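+ /*
+ * Per the DMA-API, a buffer must be mapped with the device that
+ * actually performs the DMA, here the engine behind this channel;
+ * mapping with the UART's own device can yield addresses that are
+ * not valid for the DMA engine's bus or IOMMU.
+ */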
- ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
if (!ret) {
dev_err(dev, "DMA mapping error for TX.\n");
return;
}
- sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
+ sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl,
ret, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!sport->dma_tx_desc) {
- dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
dev_err(dev, "Cannot prepare TX slave DMA!\n");
return;
}
sport->dma_tx_desc->callback = lpuart_dma_tx_complete;
sport->dma_tx_desc->callback_param = sport;
sport->dma_tx_in_progress = true;
sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
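+ /* dmaengine_submit() only queues the descriptor; nothing moves until the channel is kicked. */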
- dma_async_issue_pending(sport->dma_tx_chan);
+ dma_async_issue_pending(chan);
}
static bool lpuart_stopped_or_empty(struct uart_port *port)

static void lpuart_dma_tx_complete(void *arg)
{
struct lpuart_port *sport = arg;
struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
+ struct dma_chan *chan = sport->dma_tx_chan;
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
- dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
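+ /* UART_XMIT_SIZE is a power of two, so the mask wraps the circular-buffer tail without a modulo. */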
xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);
static void lpuart_flush_buffer(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ struct dma_chan *chan = sport->dma_tx_chan;
u32 val;
if (sport->lpuart_dma_tx_use) {
if (sport->dma_tx_in_progress) {
- dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
+ dma_unmap_sg(chan->device->dev, &sport->tx_sgl[0],
sport->dma_tx_nents, DMA_TO_DEVICE);
sport->dma_tx_in_progress = false;
}
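+ /* Stop the channel too, so no in-flight descriptor keeps consuming the buffer being flushed. */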
- dmaengine_terminate_all(sport->dma_tx_chan);
+ dmaengine_terminate_all(chan);
}
if (lpuart_is_32(sport)) {

static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
struct tty_port *port = &sport->port.state->port;
struct dma_tx_state state;
enum dma_status dmastat;
+ struct dma_chan *chan = sport->dma_rx_chan;
struct circ_buf *ring = &sport->rx_ring;
unsigned long flags;
int count = 0;
spin_lock_irqsave(&sport->port.lock, flags);
- dmastat = dmaengine_tx_status(sport->dma_rx_chan,
- sport->dma_rx_cookie,
- &state);
-
+ dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
if (dmastat == DMA_ERROR) {
dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
spin_unlock_irqrestore(&sport->port.lock, flags);
return;
}
/* CPU claims ownership of RX DMA buffer */
- dma_sync_sg_for_cpu(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ dma_sync_sg_for_cpu(chan->device->dev, &sport->rx_sgl, 1,
+ DMA_FROM_DEVICE);
/*
* ring->head points to the end of data already written by the DMA.
* ring->tail points to the beginning of data to be read by the
* framework.
* The current transfer size should not be larger than the dma buffer
* length.
*/
sport->port.icount.rx += count;
}
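+ /* Hand the RX DMA buffer back to the device */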
- dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
+ dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
DMA_FROM_DEVICE);
spin_unlock_irqrestore(&sport->port.lock, flags);

static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
{
struct tty_port *port = &sport->port.state->port;
struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
+ struct dma_chan *chan = sport->dma_rx_chan;
baud = tty_get_baud_rate(tty);
if (!ring->buf)
return -ENOMEM;
sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
- nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1,
+ DMA_FROM_DEVICE);
if (!nent) {
dev_err(sport->port.dev, "DMA Rx mapping error\n");
dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_rx_sconfig.src_maxburst = 1;
dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
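+ /* UART data is consumed one byte at a time, hence the one-byte source width and maxburst of 1. */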
- ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
+ ret = dmaengine_slave_config(chan, &dma_rx_sconfig);
if (ret < 0) {
dev_err(sport->port.dev,
"DMA Rx slave config failed, err = %d\n", ret);
return ret;
}
- sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
+ sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan,
sg_dma_address(&sport->rx_sgl),
sport->rx_sgl.length,
sport->rx_sgl.length / 2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
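+ /*
+ * Cyclic transfer over the ring buffer with a period of half its
+ * size, so the completion callback fires twice per lap.
+ */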
sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
sport->dma_rx_desc->callback_param = sport;
sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
- dma_async_issue_pending(sport->dma_rx_chan);
+ dma_async_issue_pending(chan);
if (lpuart_is_32(sport)) {
unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
static void lpuart_dma_rx_free(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
+ struct dma_chan *chan = sport->dma_rx_chan;
- if (sport->dma_rx_chan)
- dmaengine_terminate_all(sport->dma_rx_chan);
+ dmaengine_terminate_all(chan);
- dma_unmap_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
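+ /* Unmapping must use the same device that mapped the scatterlist. */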
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
sport->rx_ring.head = 0;