1 // SPDX-License-Identifier: GPL-2.0+
3 * Driver for AMBA serial ports
5 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
7 * Copyright 1999 ARM Limited
8 * Copyright (C) 2000 Deep Blue Solutions Ltd.
9 * Copyright (C) 2010 ST-Ericsson SA
11 * This is a generic driver for ARM AMBA-type serial ports. They
12 * have a lot of 16550-like features, but are not register compatible.
13 * Note that although they do have CTS, DCD and DSR inputs, they do
14 * not have an RI input, nor do they have DTR or RTS outputs. If
15 * required, these have to be supplied via some other means (eg, GPIO)
16 * and hooked into this driver.
19 #include <linux/module.h>
20 #include <linux/ioport.h>
21 #include <linux/init.h>
22 #include <linux/console.h>
23 #include <linux/sysrq.h>
24 #include <linux/device.h>
25 #include <linux/tty.h>
26 #include <linux/tty_flip.h>
27 #include <linux/serial_core.h>
28 #include <linux/serial.h>
29 #include <linux/amba/bus.h>
30 #include <linux/amba/serial.h>
31 #include <linux/clk.h>
32 #include <linux/slab.h>
33 #include <linux/dmaengine.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/scatterlist.h>
36 #include <linux/delay.h>
37 #include <linux/types.h>
39 #include <linux/of_device.h>
40 #include <linux/pinctrl/consumer.h>
41 #include <linux/sizes.h>
43 #include <linux/acpi.h>
45 #include "amba-pl011.h"
49 #define SERIAL_AMBA_MAJOR 204
50 #define SERIAL_AMBA_MINOR 64
51 #define SERIAL_AMBA_NR UART_NR
53 #define AMBA_ISR_PASS_LIMIT 256
55 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
56 #define UART_DUMMY_DR_RX (1 << 16)
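/*
 * UART_DUMMY_DR_RX sits above the data and error bits of the DR register;
 * it is ORed into each received character so that, when CREAD is clear,
 * the whole character can be dropped via port->ignore_status_mask (see
 * pl011_setup_status_masks()).
 */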
58 static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
59 [REG_DR] = UART01x_DR,
60 [REG_FR] = UART01x_FR,
61 [REG_LCRH_RX] = UART011_LCRH,
62 [REG_LCRH_TX] = UART011_LCRH,
63 [REG_IBRD] = UART011_IBRD,
64 [REG_FBRD] = UART011_FBRD,
65 [REG_CR] = UART011_CR,
66 [REG_IFLS] = UART011_IFLS,
67 [REG_IMSC] = UART011_IMSC,
68 [REG_RIS] = UART011_RIS,
69 [REG_MIS] = UART011_MIS,
70 [REG_ICR] = UART011_ICR,
71 [REG_DMACR] = UART011_DMACR,
74 /* There is by now at least one vendor with differing details, so handle it */
76 const u16 *reg_offset;
86 bool cts_event_workaround;
90 unsigned int (*get_fifosize)(struct amba_device *dev);
93 static unsigned int get_fifosize_arm(struct amba_device *dev)
95 return amba_rev(dev) < 3 ? 16 : 32;
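/* ARM implementations below revision 3 have a 16-byte FIFO; revision 3 and later have 32 bytes. */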
98 static struct vendor_data vendor_arm = {
99 .reg_offset = pl011_std_offsets,
100 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
101 .fr_busy = UART01x_FR_BUSY,
102 .fr_dsr = UART01x_FR_DSR,
103 .fr_cts = UART01x_FR_CTS,
104 .fr_ri = UART011_FR_RI,
105 .oversampling = false,
106 .dma_threshold = false,
107 .cts_event_workaround = false,
108 .always_enabled = false,
109 .fixed_options = false,
110 .get_fifosize = get_fifosize_arm,
113 static const struct vendor_data vendor_sbsa = {
114 .reg_offset = pl011_std_offsets,
115 .fr_busy = UART01x_FR_BUSY,
116 .fr_dsr = UART01x_FR_DSR,
117 .fr_cts = UART01x_FR_CTS,
118 .fr_ri = UART011_FR_RI,
120 .oversampling = false,
121 .dma_threshold = false,
122 .cts_event_workaround = false,
123 .always_enabled = true,
124 .fixed_options = true,
127 #ifdef CONFIG_ACPI_SPCR_TABLE
128 static const struct vendor_data vendor_qdt_qdf2400_e44 = {
129 .reg_offset = pl011_std_offsets,
130 .fr_busy = UART011_FR_TXFE,
131 .fr_dsr = UART01x_FR_DSR,
132 .fr_cts = UART01x_FR_CTS,
133 .fr_ri = UART011_FR_RI,
134 .inv_fr = UART011_FR_TXFE,
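/*
 * This work-around does not rely on the BUSY flag: fr_busy points at TXFE
 * and inv_fr flips that bit before the busy tests in pl011_tx_empty() and
 * the console write path, so "busy" is reported while TXFE is clear.
 */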
136 .oversampling = false,
137 .dma_threshold = false,
138 .cts_event_workaround = false,
139 .always_enabled = true,
140 .fixed_options = true,
144 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
145 [REG_DR] = UART01x_DR,
146 [REG_ST_DMAWM] = ST_UART011_DMAWM,
147 [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
148 [REG_FR] = UART01x_FR,
149 [REG_LCRH_RX] = ST_UART011_LCRH_RX,
150 [REG_LCRH_TX] = ST_UART011_LCRH_TX,
151 [REG_IBRD] = UART011_IBRD,
152 [REG_FBRD] = UART011_FBRD,
153 [REG_CR] = UART011_CR,
154 [REG_IFLS] = UART011_IFLS,
155 [REG_IMSC] = UART011_IMSC,
156 [REG_RIS] = UART011_RIS,
157 [REG_MIS] = UART011_MIS,
158 [REG_ICR] = UART011_ICR,
159 [REG_DMACR] = UART011_DMACR,
160 [REG_ST_XFCR] = ST_UART011_XFCR,
161 [REG_ST_XON1] = ST_UART011_XON1,
162 [REG_ST_XON2] = ST_UART011_XON2,
163 [REG_ST_XOFF1] = ST_UART011_XOFF1,
164 [REG_ST_XOFF2] = ST_UART011_XOFF2,
165 [REG_ST_ITCR] = ST_UART011_ITCR,
166 [REG_ST_ITIP] = ST_UART011_ITIP,
167 [REG_ST_ABCR] = ST_UART011_ABCR,
168 [REG_ST_ABIMSC] = ST_UART011_ABIMSC,
171 static unsigned int get_fifosize_st(struct amba_device *dev)
176 static struct vendor_data vendor_st = {
177 .reg_offset = pl011_st_offsets,
178 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
179 .fr_busy = UART01x_FR_BUSY,
180 .fr_dsr = UART01x_FR_DSR,
181 .fr_cts = UART01x_FR_CTS,
182 .fr_ri = UART011_FR_RI,
183 .oversampling = true,
184 .dma_threshold = true,
185 .cts_event_workaround = true,
186 .always_enabled = false,
187 .fixed_options = false,
188 .get_fifosize = get_fifosize_st,
191 static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
192 [REG_DR] = ZX_UART011_DR,
193 [REG_FR] = ZX_UART011_FR,
194 [REG_LCRH_RX] = ZX_UART011_LCRH,
195 [REG_LCRH_TX] = ZX_UART011_LCRH,
196 [REG_IBRD] = ZX_UART011_IBRD,
197 [REG_FBRD] = ZX_UART011_FBRD,
198 [REG_CR] = ZX_UART011_CR,
199 [REG_IFLS] = ZX_UART011_IFLS,
200 [REG_IMSC] = ZX_UART011_IMSC,
201 [REG_RIS] = ZX_UART011_RIS,
202 [REG_MIS] = ZX_UART011_MIS,
203 [REG_ICR] = ZX_UART011_ICR,
204 [REG_DMACR] = ZX_UART011_DMACR,
207 static unsigned int get_fifosize_zte(struct amba_device *dev)
212 static struct vendor_data vendor_zte = {
213 .reg_offset = pl011_zte_offsets,
215 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
216 .fr_busy = ZX_UART01x_FR_BUSY,
217 .fr_dsr = ZX_UART01x_FR_DSR,
218 .fr_cts = ZX_UART01x_FR_CTS,
219 .fr_ri = ZX_UART011_FR_RI,
220 .get_fifosize = get_fifosize_zte,
223 /* Deals with DMA transactions */
226 struct scatterlist sg;
230 struct pl011_dmarx_data {
231 struct dma_chan *chan;
232 struct completion complete;
234 struct pl011_sgbuf sgbuf_a;
235 struct pl011_sgbuf sgbuf_b;
238 struct timer_list timer;
239 unsigned int last_residue;
240 unsigned long last_jiffies;
242 unsigned int poll_rate;
243 unsigned int poll_timeout;
246 struct pl011_dmatx_data {
247 struct dma_chan *chan;
248 struct scatterlist sg;
254 * We wrap our port structure around the generic uart_port.
256 struct uart_amba_port {
257 struct uart_port port;
258 const u16 *reg_offset;
260 const struct vendor_data *vendor;
261 unsigned int dmacr; /* dma control reg */
262 unsigned int im; /* interrupt mask */
263 unsigned int old_status;
264 unsigned int fifosize; /* vendor-specific */
265 unsigned int old_cr; /* state during shutdown */
266 unsigned int fixed_baud; /* vendor-set fixed baud rate */
268 bool rs485_tx_started;
269 unsigned int rs485_tx_drain_interval; /* usecs */
270 #ifdef CONFIG_DMA_ENGINE
274 struct pl011_dmarx_data dmarx;
275 struct pl011_dmatx_data dmatx;
280 static unsigned int pl011_tx_empty(struct uart_port *port);
282 static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
285 return uap->reg_offset[reg];
288 static unsigned int pl011_read(const struct uart_amba_port *uap,
291 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
293 return (uap->port.iotype == UPIO_MEM32) ?
294 readl_relaxed(addr) : readw_relaxed(addr);
297 static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
300 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
302 if (uap->port.iotype == UPIO_MEM32)
303 writel_relaxed(val, addr);
305 writew_relaxed(val, addr);
309 * Reads up to 256 characters from the FIFO or until it's empty and
310 * inserts them into the TTY layer. Returns the number of characters
311 * read from the FIFO.
313 static int pl011_fifo_to_tty(struct uart_amba_port *uap)
315 unsigned int ch, flag, fifotaken;
319 for (fifotaken = 0; fifotaken != 256; fifotaken++) {
320 status = pl011_read(uap, REG_FR);
321 if (status & UART01x_FR_RXFE)
324 /* Take chars from the FIFO and update status */
325 ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
327 uap->port.icount.rx++;
329 if (unlikely(ch & UART_DR_ERROR)) {
330 if (ch & UART011_DR_BE) {
331 ch &= ~(UART011_DR_FE | UART011_DR_PE);
332 uap->port.icount.brk++;
333 if (uart_handle_break(&uap->port))
335 } else if (ch & UART011_DR_PE)
336 uap->port.icount.parity++;
337 else if (ch & UART011_DR_FE)
338 uap->port.icount.frame++;
339 if (ch & UART011_DR_OE)
340 uap->port.icount.overrun++;
342 ch &= uap->port.read_status_mask;
344 if (ch & UART011_DR_BE)
346 else if (ch & UART011_DR_PE)
348 else if (ch & UART011_DR_FE)
352 spin_unlock(&uap->port.lock);
353 sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
354 spin_lock(&uap->port.lock);
357 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
365 * All the DMA operation mode stuff goes inside this ifdef.
366 * This assumes that you have a generic DMA device interface;
367 * no custom DMA interfaces are supported.
369 #ifdef CONFIG_DMA_ENGINE
371 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
373 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
374 enum dma_data_direction dir)
378 sg->buf = dma_alloc_coherent(chan->device->dev,
379 PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
383 sg_init_table(&sg->sg, 1);
384 sg_set_page(&sg->sg, phys_to_page(dma_addr),
385 PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
386 sg_dma_address(&sg->sg) = dma_addr;
387 sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
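/*
 * The coherent buffer is wrapped in a one-entry scatterlist so that it can
 * be handed to the generic dmaengine slave API (dmaengine_prep_slave_sg())
 * used by the TX and RX paths below.
 */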
392 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
393 enum dma_data_direction dir)
396 dma_free_coherent(chan->device->dev,
397 PL011_DMA_BUFFER_SIZE, sg->buf,
398 sg_dma_address(&sg->sg));
402 static void pl011_dma_probe(struct uart_amba_port *uap)
404 /* DMA is the sole user of the platform data right now */
405 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
406 struct device *dev = uap->port.dev;
407 struct dma_slave_config tx_conf = {
408 .dst_addr = uap->port.mapbase +
409 pl011_reg_to_offset(uap, REG_DR),
410 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
411 .direction = DMA_MEM_TO_DEV,
412 .dst_maxburst = uap->fifosize >> 1,
415 struct dma_chan *chan;
418 uap->dma_probed = true;
419 chan = dma_request_chan(dev, "tx");
421 if (PTR_ERR(chan) == -EPROBE_DEFER) {
422 uap->dma_probed = false;
426 /* We need platform data */
427 if (!plat || !plat->dma_filter) {
428 dev_info(uap->port.dev, "no DMA platform data\n");
432 /* Try to acquire a generic DMA engine slave TX channel */
434 dma_cap_set(DMA_SLAVE, mask);
436 chan = dma_request_channel(mask, plat->dma_filter,
439 dev_err(uap->port.dev, "no TX DMA channel!\n");
444 dmaengine_slave_config(chan, &tx_conf);
445 uap->dmatx.chan = chan;
447 dev_info(uap->port.dev, "DMA channel TX %s\n",
448 dma_chan_name(uap->dmatx.chan));
450 /* Optionally make use of an RX channel as well */
451 chan = dma_request_slave_channel(dev, "rx");
453 if (!chan && plat && plat->dma_rx_param) {
454 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
457 dev_err(uap->port.dev, "no RX DMA channel!\n");
463 struct dma_slave_config rx_conf = {
464 .src_addr = uap->port.mapbase +
465 pl011_reg_to_offset(uap, REG_DR),
466 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
467 .direction = DMA_DEV_TO_MEM,
468 .src_maxburst = uap->fifosize >> 2,
471 struct dma_slave_caps caps;
474 * Some DMA controllers provide information on their capabilities.
475 * If the controller does, check for suitable residue processing;
476 * otherwise assume all is well.
478 if (0 == dma_get_slave_caps(chan, &caps)) {
479 if (caps.residue_granularity ==
480 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
481 dma_release_channel(chan);
482 dev_info(uap->port.dev,
483 "RX DMA disabled - no residue processing\n");
487 dmaengine_slave_config(chan, &rx_conf);
488 uap->dmarx.chan = chan;
490 uap->dmarx.auto_poll_rate = false;
491 if (plat && plat->dma_rx_poll_enable) {
492 /* Set poll rate if specified. */
493 if (plat->dma_rx_poll_rate) {
494 uap->dmarx.auto_poll_rate = false;
495 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
498 * Default to a 100 ms poll rate if not
499 * specified. This will be adjusted with
500 * the baud rate in set_termios.
502 uap->dmarx.auto_poll_rate = true;
503 uap->dmarx.poll_rate = 100;
505 /* Default poll_timeout to 3 seconds if not specified. */
506 if (plat->dma_rx_poll_timeout)
507 uap->dmarx.poll_timeout =
508 plat->dma_rx_poll_timeout;
510 uap->dmarx.poll_timeout = 3000;
511 } else if (!plat && dev->of_node) {
512 uap->dmarx.auto_poll_rate = of_property_read_bool(
513 dev->of_node, "auto-poll");
514 if (uap->dmarx.auto_poll_rate) {
517 if (0 == of_property_read_u32(dev->of_node,
519 uap->dmarx.poll_rate = x;
521 uap->dmarx.poll_rate = 100;
522 if (0 == of_property_read_u32(dev->of_node,
523 "poll-timeout-ms", &x))
524 uap->dmarx.poll_timeout = x;
526 uap->dmarx.poll_timeout = 3000;
529 dev_info(uap->port.dev, "DMA channel RX %s\n",
530 dma_chan_name(uap->dmarx.chan));
534 static void pl011_dma_remove(struct uart_amba_port *uap)
537 dma_release_channel(uap->dmatx.chan);
539 dma_release_channel(uap->dmarx.chan);
542 /* Forward declare these for the refill routine */
543 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
544 static void pl011_start_tx_pio(struct uart_amba_port *uap);
547 * The current DMA TX buffer has been sent.
548 * Try to queue up another DMA buffer.
550 static void pl011_dma_tx_callback(void *data)
552 struct uart_amba_port *uap = data;
553 struct pl011_dmatx_data *dmatx = &uap->dmatx;
557 spin_lock_irqsave(&uap->port.lock, flags);
558 if (uap->dmatx.queued)
559 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
563 uap->dmacr = dmacr & ~UART011_TXDMAE;
564 pl011_write(uap->dmacr, uap, REG_DMACR);
567 * If TX DMA was disabled, it means that we've stopped the DMA for
568 * some reason (eg, XOFF received, or we want to send an X-char.)
570 * Note: we need to be careful here of a potential race between DMA
571 * and the rest of the driver - if the driver disables TX DMA while
572 * a TX buffer is completing, we must update the tx queued status to
573 * get further refills (hence we check dmacr).
575 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
576 uart_circ_empty(&uap->port.state->xmit)) {
577 uap->dmatx.queued = false;
578 spin_unlock_irqrestore(&uap->port.lock, flags);
582 if (pl011_dma_tx_refill(uap) <= 0)
584 * We didn't queue a DMA buffer for some reason, but we
585 * have data pending to be sent. Re-enable the TX IRQ.
587 pl011_start_tx_pio(uap);
589 spin_unlock_irqrestore(&uap->port.lock, flags);
593 * Try to refill the TX DMA buffer.
594 * Locking: called with port lock held and IRQs disabled.
596 * 1 if we queued up a TX DMA buffer.
597 * 0 if we didn't want to handle this by DMA
600 static int pl011_dma_tx_refill(struct uart_amba_port *uap)
602 struct pl011_dmatx_data *dmatx = &uap->dmatx;
603 struct dma_chan *chan = dmatx->chan;
604 struct dma_device *dma_dev = chan->device;
605 struct dma_async_tx_descriptor *desc;
606 struct circ_buf *xmit = &uap->port.state->xmit;
610 * Try to avoid the overhead involved in using DMA if the
611 * transaction fits in the first half of the FIFO, by using
612 * the standard interrupt handling. This ensures that we
613 * issue a uart_write_wakeup() at the appropriate time.
615 count = uart_circ_chars_pending(xmit);
616 if (count < (uap->fifosize >> 1)) {
617 uap->dmatx.queued = false;
622 * Bodge: don't send the last character by DMA, as this
623 * will prevent XON from notifying us to restart DMA.
627 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
628 if (count > PL011_DMA_BUFFER_SIZE)
629 count = PL011_DMA_BUFFER_SIZE;
631 if (xmit->tail < xmit->head)
632 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
634 size_t first = UART_XMIT_SIZE - xmit->tail;
639 second = count - first;
641 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
643 memcpy(&dmatx->buf[first], &xmit->buf[0], second);
646 dmatx->sg.length = count;
648 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
649 uap->dmatx.queued = false;
650 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
654 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
655 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
657 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
658 uap->dmatx.queued = false;
660 * If DMA cannot be used right now, we complete this
661 * transaction via IRQ and let the TTY layer retry.
663 dev_dbg(uap->port.dev, "TX DMA busy\n");
667 /* Some data to go along to the callback */
668 desc->callback = pl011_dma_tx_callback;
669 desc->callback_param = uap;
671 /* All errors should happen at prepare time */
672 dmaengine_submit(desc);
674 /* Fire the DMA transaction */
675 dma_dev->device_issue_pending(chan);
677 uap->dmacr |= UART011_TXDMAE;
678 pl011_write(uap->dmacr, uap, REG_DMACR);
679 uap->dmatx.queued = true;
682 * Now we know that DMA will fire, so advance the ring buffer
683 * with the stuff we just dispatched.
685 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
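/* UART_XMIT_SIZE is a power of two, so the mask wraps the circular buffer index. */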
686 uap->port.icount.tx += count;
688 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
689 uart_write_wakeup(&uap->port);
695 * We received a transmit interrupt without a pending X-char but with
696 * pending characters.
697 * Locking: called with port lock held and IRQs disabled.
699 * false if we want to use PIO to transmit
700 * true if we queued a DMA buffer
702 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
704 if (!uap->using_tx_dma)
708 * If we already have a TX buffer queued, but received a
709 * TX interrupt, it will be because we've just sent an X-char.
710 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
712 if (uap->dmatx.queued) {
713 uap->dmacr |= UART011_TXDMAE;
714 pl011_write(uap->dmacr, uap, REG_DMACR);
715 uap->im &= ~UART011_TXIM;
716 pl011_write(uap->im, uap, REG_IMSC);
721 * We don't have a TX buffer queued, so try to queue one.
722 * If we successfully queued a buffer, mask the TX IRQ.
724 if (pl011_dma_tx_refill(uap) > 0) {
725 uap->im &= ~UART011_TXIM;
726 pl011_write(uap->im, uap, REG_IMSC);
733 * Stop the DMA transmit (eg, due to received XOFF).
734 * Locking: called with port lock held and IRQs disabled.
736 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
738 if (uap->dmatx.queued) {
739 uap->dmacr &= ~UART011_TXDMAE;
740 pl011_write(uap->dmacr, uap, REG_DMACR);
745 * Try to start a DMA transmit, or in the case of an XON/OFF
746 * character queued for send, try to get that character out ASAP.
747 * Locking: called with port lock held and IRQs disabled.
749 * false if we want the TX IRQ to be enabled
750 * true if we have a buffer queued
752 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
756 if (!uap->using_tx_dma)
759 if (!uap->port.x_char) {
760 /* no X-char, try to push chars out in DMA mode */
763 if (!uap->dmatx.queued) {
764 if (pl011_dma_tx_refill(uap) > 0) {
765 uap->im &= ~UART011_TXIM;
766 pl011_write(uap->im, uap, REG_IMSC);
769 } else if (!(uap->dmacr & UART011_TXDMAE)) {
770 uap->dmacr |= UART011_TXDMAE;
771 pl011_write(uap->dmacr, uap, REG_DMACR);
777 * We have an X-char to send. Disable DMA to prevent it loading
778 * the TX fifo, and then see if we can stuff it into the FIFO.
781 uap->dmacr &= ~UART011_TXDMAE;
782 pl011_write(uap->dmacr, uap, REG_DMACR);
784 if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
786 * No space in the FIFO, so enable the transmit interrupt
787 * so we know when there is space. Note that once we've
788 * loaded the character, we should just re-enable DMA.
793 pl011_write(uap->port.x_char, uap, REG_DR);
794 uap->port.icount.tx++;
795 uap->port.x_char = 0;
797 /* Success - restore the DMA state */
799 pl011_write(dmacr, uap, REG_DMACR);
805 * Flush the transmit buffer.
806 * Locking: called with port lock held and IRQs disabled.
808 static void pl011_dma_flush_buffer(struct uart_port *port)
809 __releases(&uap->port.lock)
810 __acquires(&uap->port.lock)
812 struct uart_amba_port *uap =
813 container_of(port, struct uart_amba_port, port);
815 if (!uap->using_tx_dma)
818 dmaengine_terminate_async(uap->dmatx.chan);
820 if (uap->dmatx.queued) {
821 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
823 uap->dmatx.queued = false;
824 uap->dmacr &= ~UART011_TXDMAE;
825 pl011_write(uap->dmacr, uap, REG_DMACR);
829 static void pl011_dma_rx_callback(void *data);
831 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
833 struct dma_chan *rxchan = uap->dmarx.chan;
834 struct pl011_dmarx_data *dmarx = &uap->dmarx;
835 struct dma_async_tx_descriptor *desc;
836 struct pl011_sgbuf *sgbuf;
841 /* Start the RX DMA job */
842 sgbuf = uap->dmarx.use_buf_b ?
843 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
844 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
846 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
848 * If the DMA engine is busy and cannot prepare a
849 * channel, that's no big deal; the driver will fall back
850 * to interrupt mode as a result of this error code.
853 uap->dmarx.running = false;
854 dmaengine_terminate_all(rxchan);
858 /* Some data to go along to the callback */
859 desc->callback = pl011_dma_rx_callback;
860 desc->callback_param = uap;
861 dmarx->cookie = dmaengine_submit(desc);
862 dma_async_issue_pending(rxchan);
864 uap->dmacr |= UART011_RXDMAE;
865 pl011_write(uap->dmacr, uap, REG_DMACR);
866 uap->dmarx.running = true;
868 uap->im &= ~UART011_RXIM;
869 pl011_write(uap->im, uap, REG_IMSC);
875 * This is called when either the DMA job completes or
876 * the FIFO timeout interrupt occurs. This must be called
877 * with the port spinlock uap->port.lock held.
879 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
880 u32 pending, bool use_buf_b,
883 struct tty_port *port = &uap->port.state->port;
884 struct pl011_sgbuf *sgbuf = use_buf_b ?
885 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
887 u32 fifotaken = 0; /* only used for vdbg() */
889 struct pl011_dmarx_data *dmarx = &uap->dmarx;
892 if (uap->dmarx.poll_rate) {
893 /* The data can be taken by polling */
894 dmataken = sgbuf->sg.length - dmarx->last_residue;
895 /* Recalculate the pending size */
896 if (pending >= dmataken)
900 /* Pick up the remaining data from the DMA buffer */
904 * First take all chars in the DMA pipe, then look in the FIFO.
905 * Note that tty_insert_flip_string() tries to take as many chars
908 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
911 uap->port.icount.rx += dma_count;
912 if (dma_count < pending)
913 dev_warn(uap->port.dev,
914 "couldn't insert all characters (TTY is full?)\n");
917 /* Reset the last_residue for Rx DMA poll */
918 if (uap->dmarx.poll_rate)
919 dmarx->last_residue = sgbuf->sg.length;
922 * Only continue with trying to read the FIFO if all DMA chars have
925 if (dma_count == pending && readfifo) {
926 /* Clear any error flags */
927 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
928 UART011_FEIS, uap, REG_ICR);
931 * If we read all the DMA'd characters, and we had an
932 * incomplete buffer, that could be due to an rx error, or
933 * maybe we just timed out. Read any pending chars and check
936 * Error conditions will only occur in the FIFO, these will
937 * trigger an immediate interrupt and stop the DMA job, so we
938 * will always find the error in the FIFO, never in the DMA
941 fifotaken = pl011_fifo_to_tty(uap);
944 dev_vdbg(uap->port.dev,
945 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
946 dma_count, fifotaken);
947 tty_flip_buffer_push(port);
950 static void pl011_dma_rx_irq(struct uart_amba_port *uap)
952 struct pl011_dmarx_data *dmarx = &uap->dmarx;
953 struct dma_chan *rxchan = dmarx->chan;
954 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
955 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
957 struct dma_tx_state state;
958 enum dma_status dmastat;
961 * Pause the transfer so we can trust the current counter;
962 * do this before we pause the PL011 block, else we may
965 if (dmaengine_pause(rxchan))
966 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
967 dmastat = rxchan->device->device_tx_status(rxchan,
968 dmarx->cookie, &state);
969 if (dmastat != DMA_PAUSED)
970 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
972 /* Disable RX DMA - incoming data will wait in the FIFO */
973 uap->dmacr &= ~UART011_RXDMAE;
974 pl011_write(uap->dmacr, uap, REG_DMACR);
975 uap->dmarx.running = false;
977 pending = sgbuf->sg.length - state.residue;
978 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
979 /* Then we terminate the transfer - we now know our residue */
980 dmaengine_terminate_all(rxchan);
983 * This will take the chars we have so far and insert
984 * them into the framework.
986 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
988 /* Switch buffer & re-trigger DMA job */
989 dmarx->use_buf_b = !dmarx->use_buf_b;
990 if (pl011_dma_rx_trigger_dma(uap)) {
991 dev_dbg(uap->port.dev,
992 "could not retrigger RX DMA job, fall back to interrupt mode\n");
993 uap->im |= UART011_RXIM;
994 pl011_write(uap->im, uap, REG_IMSC);
998 static void pl011_dma_rx_callback(void *data)
1000 struct uart_amba_port *uap = data;
1001 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1002 struct dma_chan *rxchan = dmarx->chan;
1003 bool lastbuf = dmarx->use_buf_b;
1004 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
1005 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
1007 struct dma_tx_state state;
1011 * This completion interrupt occurs typically when the
1012 * RX buffer is totally stuffed but no timeout has yet
1013 * occurred. When that happens, we just want the RX
1014 * routine to flush out the secondary DMA buffer while
1015 * we immediately trigger the next DMA job.
1017 spin_lock_irq(&uap->port.lock);
1019 * Rx data can be taken by the UART interrupts during
1020 * the DMA irq handler. So we check the residue here.
1022 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1023 pending = sgbuf->sg.length - state.residue;
1024 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
1025 /* Then we terminate the transfer - we now know our residue */
1026 dmaengine_terminate_all(rxchan);
1028 uap->dmarx.running = false;
1029 dmarx->use_buf_b = !lastbuf;
1030 ret = pl011_dma_rx_trigger_dma(uap);
1032 pl011_dma_rx_chars(uap, pending, lastbuf, false);
1033 spin_unlock_irq(&uap->port.lock);
1035 * Do this check after we have picked up the DMA chars so we don't
1036 * get an IRQ immediately from RX.
1039 dev_dbg(uap->port.dev,
1040 "could not retrigger RX DMA job, fall back to interrupt mode\n");
1041 uap->im |= UART011_RXIM;
1042 pl011_write(uap->im, uap, REG_IMSC);
1047 * Stop accepting received characters, when we're shutting down or
1048 * suspending this port.
1049 * Locking: called with port lock held and IRQs disabled.
1051 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1053 /* FIXME. Just disable the DMA enable */
1054 uap->dmacr &= ~UART011_RXDMAE;
1055 pl011_write(uap->dmacr, uap, REG_DMACR);
1059 * Timer handler for Rx DMA polling.
1060 * On every poll, it checks the residue in the DMA buffer and transfers
1061 * data to the tty. Also, last_residue is updated for the next poll.
1063 static void pl011_dma_rx_poll(struct timer_list *t)
1065 struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
1066 struct tty_port *port = &uap->port.state->port;
1067 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1068 struct dma_chan *rxchan = uap->dmarx.chan;
1069 unsigned long flags;
1070 unsigned int dmataken = 0;
1071 unsigned int size = 0;
1072 struct pl011_sgbuf *sgbuf;
1074 struct dma_tx_state state;
1076 sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
1077 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1078 if (likely(state.residue < dmarx->last_residue)) {
1079 dmataken = sgbuf->sg.length - dmarx->last_residue;
1080 size = dmarx->last_residue - state.residue;
1081 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
1083 if (dma_count == size)
1084 dmarx->last_residue = state.residue;
1085 dmarx->last_jiffies = jiffies;
1087 tty_flip_buffer_push(port);
1090 * If no data is received in poll_timeout, the driver will fall back
1091 * to interrupt mode. We will retrigger DMA at the first interrupt.
1093 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
1094 > uap->dmarx.poll_timeout) {
1096 spin_lock_irqsave(&uap->port.lock, flags);
1097 pl011_dma_rx_stop(uap);
1098 uap->im |= UART011_RXIM;
1099 pl011_write(uap->im, uap, REG_IMSC);
1100 spin_unlock_irqrestore(&uap->port.lock, flags);
1102 uap->dmarx.running = false;
1103 dmaengine_terminate_all(rxchan);
1104 del_timer(&uap->dmarx.timer);
1106 mod_timer(&uap->dmarx.timer,
1107 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
1111 static void pl011_dma_startup(struct uart_amba_port *uap)
1115 if (!uap->dma_probed)
1116 pl011_dma_probe(uap);
1118 if (!uap->dmatx.chan)
1121 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
1122 if (!uap->dmatx.buf) {
1123 dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
1124 uap->port.fifosize = uap->fifosize;
1128 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
1130 /* The DMA buffer is now the FIFO the TTY subsystem can use */
1131 uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
1132 uap->using_tx_dma = true;
1134 if (!uap->dmarx.chan)
1137 /* Allocate and map DMA RX buffers */
1138 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1141 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1142 "RX buffer A", ret);
1146 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
1149 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1150 "RX buffer B", ret);
1151 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1156 uap->using_rx_dma = true;
1159 /* Turn on DMA error (RX/TX will be enabled on demand) */
1160 uap->dmacr |= UART011_DMAONERR;
1161 pl011_write(uap->dmacr, uap, REG_DMACR);
1164 * The ST Micro variants have a specific DMA burst threshold
1165 * compensation. Set this to 16 bytes, so bursts will only
1166 * be issued above/below 16 bytes.
1168 if (uap->vendor->dma_threshold)
1169 pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
1172 if (uap->using_rx_dma) {
1173 if (pl011_dma_rx_trigger_dma(uap))
1174 dev_dbg(uap->port.dev,
1175 "could not trigger initial RX DMA job, fall back to interrupt mode\n");
1176 if (uap->dmarx.poll_rate) {
1177 timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
1178 mod_timer(&uap->dmarx.timer,
1180 msecs_to_jiffies(uap->dmarx.poll_rate));
1181 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1182 uap->dmarx.last_jiffies = jiffies;
1187 static void pl011_dma_shutdown(struct uart_amba_port *uap)
1189 if (!(uap->using_tx_dma || uap->using_rx_dma))
1192 /* Disable RX and TX DMA */
1193 while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
1196 spin_lock_irq(&uap->port.lock);
1197 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1198 pl011_write(uap->dmacr, uap, REG_DMACR);
1199 spin_unlock_irq(&uap->port.lock);
1201 if (uap->using_tx_dma) {
1202 /* In theory, this should already be done by pl011_dma_flush_buffer */
1203 dmaengine_terminate_all(uap->dmatx.chan);
1204 if (uap->dmatx.queued) {
1205 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
1207 uap->dmatx.queued = false;
1210 kfree(uap->dmatx.buf);
1211 uap->using_tx_dma = false;
1214 if (uap->using_rx_dma) {
1215 dmaengine_terminate_all(uap->dmarx.chan);
1216 /* Clean up the RX DMA */
1217 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
1218 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
1219 if (uap->dmarx.poll_rate)
1220 del_timer_sync(&uap->dmarx.timer);
1221 uap->using_rx_dma = false;
1225 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1227 return uap->using_rx_dma;
1230 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1232 return uap->using_rx_dma && uap->dmarx.running;
1236 /* Blank functions if the DMA engine is not available */
1237 static inline void pl011_dma_remove(struct uart_amba_port *uap)
1241 static inline void pl011_dma_startup(struct uart_amba_port *uap)
1245 static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
1249 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1254 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1258 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1263 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1267 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1271 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1276 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1281 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1286 #define pl011_dma_flush_buffer NULL
1289 static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
1292 * To be on the safe side, only time out after twice as many iterations
1295 const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
1296 struct uart_port *port = &uap->port;
1300 /* Wait until hardware tx queue is empty */
1301 while (!pl011_tx_empty(port)) {
1302 if (i > MAX_TX_DRAIN_ITERS) {
1304 "timeout while draining hardware tx queue\n");
1308 udelay(uap->rs485_tx_drain_interval);
1312 if (port->rs485.delay_rts_after_send)
1313 mdelay(port->rs485.delay_rts_after_send);
1315 cr = pl011_read(uap, REG_CR);
1317 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
1318 cr &= ~UART011_CR_RTS;
1320 cr |= UART011_CR_RTS;
1322 /* Disable the transmitter and re-enable the receiver */
1323 cr &= ~UART011_CR_TXE;
1324 cr |= UART011_CR_RXE;
1325 pl011_write(cr, uap, REG_CR);
1327 uap->rs485_tx_started = false;
1330 static void pl011_stop_tx(struct uart_port *port)
1332 struct uart_amba_port *uap =
1333 container_of(port, struct uart_amba_port, port);
1335 uap->im &= ~UART011_TXIM;
1336 pl011_write(uap->im, uap, REG_IMSC);
1337 pl011_dma_tx_stop(uap);
1339 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
1340 pl011_rs485_tx_stop(uap);
1343 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
1345 /* Start TX with programmed I/O only (no DMA) */
1346 static void pl011_start_tx_pio(struct uart_amba_port *uap)
1348 if (pl011_tx_chars(uap, false)) {
1349 uap->im |= UART011_TXIM;
1350 pl011_write(uap->im, uap, REG_IMSC);
1354 static void pl011_start_tx(struct uart_port *port)
1356 struct uart_amba_port *uap =
1357 container_of(port, struct uart_amba_port, port);
1359 if (!pl011_dma_tx_start(uap))
1360 pl011_start_tx_pio(uap);
1363 static void pl011_stop_rx(struct uart_port *port)
1365 struct uart_amba_port *uap =
1366 container_of(port, struct uart_amba_port, port);
1368 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1369 UART011_PEIM|UART011_BEIM|UART011_OEIM);
1370 pl011_write(uap->im, uap, REG_IMSC);
1372 pl011_dma_rx_stop(uap);
1375 static void pl011_throttle_rx(struct uart_port *port)
1377 unsigned long flags;
1379 spin_lock_irqsave(&port->lock, flags);
1380 pl011_stop_rx(port);
1381 spin_unlock_irqrestore(&port->lock, flags);
1384 static void pl011_enable_ms(struct uart_port *port)
1386 struct uart_amba_port *uap =
1387 container_of(port, struct uart_amba_port, port);
1389 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1390 pl011_write(uap->im, uap, REG_IMSC);
1393 static void pl011_rx_chars(struct uart_amba_port *uap)
1394 __releases(&uap->port.lock)
1395 __acquires(&uap->port.lock)
1397 pl011_fifo_to_tty(uap);
1399 spin_unlock(&uap->port.lock);
1400 tty_flip_buffer_push(&uap->port.state->port);
1402 * If we were temporarily out of DMA mode for a while,
1403 * attempt to switch back to DMA mode again.
1405 if (pl011_dma_rx_available(uap)) {
1406 if (pl011_dma_rx_trigger_dma(uap)) {
1407 dev_dbg(uap->port.dev,
1408 "could not trigger RX DMA job, fall back to interrupt mode again\n");
1409 uap->im |= UART011_RXIM;
1410 pl011_write(uap->im, uap, REG_IMSC);
1412 #ifdef CONFIG_DMA_ENGINE
1413 /* Start Rx DMA poll */
1414 if (uap->dmarx.poll_rate) {
1415 uap->dmarx.last_jiffies = jiffies;
1416 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1417 mod_timer(&uap->dmarx.timer,
1419 msecs_to_jiffies(uap->dmarx.poll_rate));
1424 spin_lock(&uap->port.lock);
1427 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
1430 if (unlikely(!from_irq) &&
1431 pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1432 return false; /* unable to transmit character */
1434 pl011_write(c, uap, REG_DR);
1435 uap->port.icount.tx++;
1440 static void pl011_rs485_tx_start(struct uart_amba_port *uap)
1442 struct uart_port *port = &uap->port;
1445 /* Enable transmitter */
1446 cr = pl011_read(uap, REG_CR);
1447 cr |= UART011_CR_TXE;
1449 /* Disable receiver if half-duplex */
1450 if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
1451 cr &= ~UART011_CR_RXE;
1453 if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
1454 cr &= ~UART011_CR_RTS;
1456 cr |= UART011_CR_RTS;
1458 pl011_write(cr, uap, REG_CR);
1460 if (port->rs485.delay_rts_before_send)
1461 mdelay(port->rs485.delay_rts_before_send);
1463 uap->rs485_tx_started = true;
1466 /* Returns true if tx interrupts have to be (kept) enabled */
1467 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
1469 struct circ_buf *xmit = &uap->port.state->xmit;
1470 int count = uap->fifosize >> 1;
1472 if (uap->port.x_char) {
1473 if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
1475 uap->port.x_char = 0;
1478 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
1479 pl011_stop_tx(&uap->port);
1483 if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
1484 !uap->rs485_tx_started)
1485 pl011_rs485_tx_start(uap);
1487 /* If we are using DMA mode, try to send some characters. */
1488 if (pl011_dma_tx_irq(uap))
1492 if (likely(from_irq) && count-- == 0)
1495 if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
1498 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1499 } while (!uart_circ_empty(xmit));
1501 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1502 uart_write_wakeup(&uap->port);
1504 if (uart_circ_empty(xmit)) {
1505 pl011_stop_tx(&uap->port);
1511 static void pl011_modem_status(struct uart_amba_port *uap)
1513 unsigned int status, delta;
1515 status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1517 delta = status ^ uap->old_status;
1518 uap->old_status = status;
1523 if (delta & UART01x_FR_DCD)
1524 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1526 if (delta & uap->vendor->fr_dsr)
1527 uap->port.icount.dsr++;
1529 if (delta & uap->vendor->fr_cts)
1530 uart_handle_cts_change(&uap->port,
1531 status & uap->vendor->fr_cts);
1533 wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1536 static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
1538 if (!uap->vendor->cts_event_workaround)
1541 /* workaround to make sure that all bits are unlocked.. */
1542 pl011_write(0x00, uap, REG_ICR);
1545 * WA: introduce a 26 ns (1 UART clock) delay before W1C;
1546 * a single APB access will incur a 2 PCLK (133.12 MHz) delay,
1547 * so add 2 dummy reads.
1549 pl011_read(uap, REG_ICR);
1550 pl011_read(uap, REG_ICR);
1553 static irqreturn_t pl011_int(int irq, void *dev_id)
1555 struct uart_amba_port *uap = dev_id;
1556 unsigned long flags;
1557 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1560 spin_lock_irqsave(&uap->port.lock, flags);
1561 status = pl011_read(uap, REG_RIS) & uap->im;
1564 check_apply_cts_event_workaround(uap);
1566 pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
1570 if (status & (UART011_RTIS|UART011_RXIS)) {
1571 if (pl011_dma_rx_running(uap))
1572 pl011_dma_rx_irq(uap);
1574 pl011_rx_chars(uap);
1576 if (status & (UART011_DSRMIS|UART011_DCDMIS|
1577 UART011_CTSMIS|UART011_RIMIS))
1578 pl011_modem_status(uap);
1579 if (status & UART011_TXIS)
1580 pl011_tx_chars(uap, true);
1582 if (pass_counter-- == 0)
1585 status = pl011_read(uap, REG_RIS) & uap->im;
1586 } while (status != 0);
1590 spin_unlock_irqrestore(&uap->port.lock, flags);
1592 return IRQ_RETVAL(handled);
1595 static unsigned int pl011_tx_empty(struct uart_port *port)
1597 struct uart_amba_port *uap =
1598 container_of(port, struct uart_amba_port, port);
1600 /* Allow feature register bits to be inverted to work around errata */
1601 unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;
1603 return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
1607 static unsigned int pl011_get_mctrl(struct uart_port *port)
1609 struct uart_amba_port *uap =
1610 container_of(port, struct uart_amba_port, port);
1611 unsigned int result = 0;
1612 unsigned int status = pl011_read(uap, REG_FR);
1614 #define TIOCMBIT(uartbit, tiocmbit) \
1615 if (status & uartbit) \
1618 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1619 TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
1620 TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
1621 TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
1626 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1628 struct uart_amba_port *uap =
1629 container_of(port, struct uart_amba_port, port);
1632 cr = pl011_read(uap, REG_CR);
1634 #define TIOCMBIT(tiocmbit, uartbit) \
1635 if (mctrl & tiocmbit) \
1640 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1641 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1642 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1643 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1644 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1646 if (port->status & UPSTAT_AUTORTS) {
1647 /* We need to disable auto-RTS if we want to turn RTS off */
1648 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1652 pl011_write(cr, uap, REG_CR);
1655 static void pl011_break_ctl(struct uart_port *port, int break_state)
1657 struct uart_amba_port *uap =
1658 container_of(port, struct uart_amba_port, port);
1659 unsigned long flags;
1662 spin_lock_irqsave(&uap->port.lock, flags);
1663 lcr_h = pl011_read(uap, REG_LCRH_TX);
1664 if (break_state == -1)
1665 lcr_h |= UART01x_LCRH_BRK;
1667 lcr_h &= ~UART01x_LCRH_BRK;
1668 pl011_write(lcr_h, uap, REG_LCRH_TX);
1669 spin_unlock_irqrestore(&uap->port.lock, flags);
1672 #ifdef CONFIG_CONSOLE_POLL
1674 static void pl011_quiesce_irqs(struct uart_port *port)
1676 struct uart_amba_port *uap =
1677 container_of(port, struct uart_amba_port, port);
1679 pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
1681 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
1682 * we simply mask it. start_tx() will unmask it.
1684 * Note we can race with start_tx(), and if the race happens, the
1685 * polling user might get another interrupt just after we clear it.
1686 * But it should be OK and can happen even w/o the race, e.g.
1687 * controller immediately got some new data and raised the IRQ.
1689 * And whoever uses polling routines assumes that it manages the device
1690 * (including tx queue), so we're also fine with start_tx()'s caller
1693 pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
1697 static int pl011_get_poll_char(struct uart_port *port)
1699 struct uart_amba_port *uap =
1700 container_of(port, struct uart_amba_port, port);
1701 unsigned int status;
1704 * The caller might need IRQs lowered, e.g. if used with KDB NMI
1707 pl011_quiesce_irqs(port);
1709 status = pl011_read(uap, REG_FR);
1710 if (status & UART01x_FR_RXFE)
1711 return NO_POLL_CHAR;
1713 return pl011_read(uap, REG_DR);
1716 static void pl011_put_poll_char(struct uart_port *port,
1719 struct uart_amba_port *uap =
1720 container_of(port, struct uart_amba_port, port);
1722 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1725 pl011_write(ch, uap, REG_DR);
1728 #endif /* CONFIG_CONSOLE_POLL */
1730 static int pl011_hwinit(struct uart_port *port)
1732 struct uart_amba_port *uap =
1733 container_of(port, struct uart_amba_port, port);
1736 /* Optionally enable pins to be muxed in and configured */
1737 pinctrl_pm_select_default_state(port->dev);
1740 * Try to enable the clock producer.
1742 retval = clk_prepare_enable(uap->clk);
1746 uap->port.uartclk = clk_get_rate(uap->clk);
1748 /* Clear pending error and receive interrupts */
1749 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
1750 UART011_FEIS | UART011_RTIS | UART011_RXIS,
1754 * Save the interrupt enable mask, and enable RX interrupts in case
1755 * the interrupt is used for NMI entry.
1757 uap->im = pl011_read(uap, REG_IMSC);
1758 pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
1760 if (dev_get_platdata(uap->port.dev)) {
1761 struct amba_pl011_data *plat;
1763 plat = dev_get_platdata(uap->port.dev);
1770 static bool pl011_split_lcrh(const struct uart_amba_port *uap)
1772 return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
1773 pl011_reg_to_offset(uap, REG_LCRH_TX);
1776 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1778 pl011_write(lcr_h, uap, REG_LCRH_RX);
1779 if (pl011_split_lcrh(uap)) {
1782 * Wait 10 PCLKs before writing the LCRH_TX register;
1783 * to get this delay, write a read-only register 10 times.
1785 for (i = 0; i < 10; ++i)
1786 pl011_write(0xff, uap, REG_MIS);
1787 pl011_write(lcr_h, uap, REG_LCRH_TX);
1791 static int pl011_allocate_irq(struct uart_amba_port *uap)
1793 pl011_write(uap->im, uap, REG_IMSC);
1795 return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
1799 * Enable interrupts, only timeouts when using DMA.
1800 * If the initial RX DMA job failed, start in interrupt mode
1803 static void pl011_enable_interrupts(struct uart_amba_port *uap)
1805 unsigned long flags;
1808 spin_lock_irqsave(&uap->port.lock, flags);
1810 /* Clear out any spuriously appearing RX interrupts */
1811 pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1814 * RXIS is asserted only when the RX FIFO transitions from below
1815 * to above the trigger threshold. If the RX FIFO is already
1816 * full to the threshold this can't happen and RXIS will now be
1817 * stuck off. Drain the RX FIFO explicitly to fix this:
1819 for (i = 0; i < uap->fifosize * 2; ++i) {
1820 if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
1823 pl011_read(uap, REG_DR);
1826 uap->im = UART011_RTIM;
1827 if (!pl011_dma_rx_running(uap))
1828 uap->im |= UART011_RXIM;
1829 pl011_write(uap->im, uap, REG_IMSC);
1830 spin_unlock_irqrestore(&uap->port.lock, flags);
1833 static void pl011_unthrottle_rx(struct uart_port *port)
1835 struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
1837 pl011_enable_interrupts(uap);
1840 static int pl011_startup(struct uart_port *port)
1842 struct uart_amba_port *uap =
1843 container_of(port, struct uart_amba_port, port);
1847 retval = pl011_hwinit(port);
1851 retval = pl011_allocate_irq(uap);
1855 pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1857 spin_lock_irq(&uap->port.lock);
1859 /* restore RTS and DTR */
1860 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1861 cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
1863 if (!(port->rs485.flags & SER_RS485_ENABLED))
1864 cr |= UART011_CR_TXE;
1866 pl011_write(cr, uap, REG_CR);
1868 spin_unlock_irq(&uap->port.lock);
1871 * initialise the old status of the modem signals
1873 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1876 pl011_dma_startup(uap);
1878 pl011_enable_interrupts(uap);
1883 clk_disable_unprepare(uap->clk);
1887 static int sbsa_uart_startup(struct uart_port *port)
1889 struct uart_amba_port *uap =
1890 container_of(port, struct uart_amba_port, port);
1893 retval = pl011_hwinit(port);
1897 retval = pl011_allocate_irq(uap);
1901 /* The SBSA UART does not support any modem status lines. */
1902 uap->old_status = 0;
1904 pl011_enable_interrupts(uap);
1909 static void pl011_shutdown_channel(struct uart_amba_port *uap,
1914 val = pl011_read(uap, lcrh);
1915 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1916 pl011_write(val, uap, lcrh);
1920 * Disable the port. It should not disable RTS and DTR.
1921 * Also, the RTS and DTR state should be preserved so it can be
1922 * restored during startup().
1924 static void pl011_disable_uart(struct uart_amba_port *uap)
1928 uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1929 spin_lock_irq(&uap->port.lock);
1930 cr = pl011_read(uap, REG_CR);
1932 cr &= UART011_CR_RTS | UART011_CR_DTR;
1933 cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1934 pl011_write(cr, uap, REG_CR);
1935 spin_unlock_irq(&uap->port.lock);
1938 * disable break condition and fifos
1940 pl011_shutdown_channel(uap, REG_LCRH_RX);
1941 if (pl011_split_lcrh(uap))
1942 pl011_shutdown_channel(uap, REG_LCRH_TX);
1945 static void pl011_disable_interrupts(struct uart_amba_port *uap)
1947 spin_lock_irq(&uap->port.lock);
1949 /* mask all interrupts and clear all pending ones */
1951 pl011_write(uap->im, uap, REG_IMSC);
1952 pl011_write(0xffff, uap, REG_ICR);
1954 spin_unlock_irq(&uap->port.lock);
1957 static void pl011_shutdown(struct uart_port *port)
1959 struct uart_amba_port *uap =
1960 container_of(port, struct uart_amba_port, port);
1962 pl011_disable_interrupts(uap);
1964 pl011_dma_shutdown(uap);
1966 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
1967 pl011_rs485_tx_stop(uap);
1969 free_irq(uap->port.irq, uap);
1971 pl011_disable_uart(uap);
1974 * Shut down the clock producer
1976 clk_disable_unprepare(uap->clk);
1977 /* Optionally let pins go into sleep states */
1978 pinctrl_pm_select_sleep_state(port->dev);
1980 if (dev_get_platdata(uap->port.dev)) {
1981 struct amba_pl011_data *plat;
1983 plat = dev_get_platdata(uap->port.dev);
1988 if (uap->port.ops->flush_buffer)
1989 uap->port.ops->flush_buffer(port);
1992 static void sbsa_uart_shutdown(struct uart_port *port)
1994 struct uart_amba_port *uap =
1995 container_of(port, struct uart_amba_port, port);
1997 pl011_disable_interrupts(uap);
1999 free_irq(uap->port.irq, uap);
2001 if (uap->port.ops->flush_buffer)
2002 uap->port.ops->flush_buffer(port);
2006 pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
2008 port->read_status_mask = UART011_DR_OE | 255;
2009 if (termios->c_iflag & INPCK)
2010 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
2011 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2012 port->read_status_mask |= UART011_DR_BE;
2015 * Characters to ignore
2017 port->ignore_status_mask = 0;
2018 if (termios->c_iflag & IGNPAR)
2019 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
2020 if (termios->c_iflag & IGNBRK) {
2021 port->ignore_status_mask |= UART011_DR_BE;
2023 * If we're ignoring parity and break indicators,
2024 * ignore overruns too (for real raw support).
2026 if (termios->c_iflag & IGNPAR)
2027 port->ignore_status_mask |= UART011_DR_OE;
2031 * Ignore all characters if CREAD is not set.
2033 if ((termios->c_cflag & CREAD) == 0)
2034 port->ignore_status_mask |= UART_DUMMY_DR_RX;
2038 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
2039 struct ktermios *old)
2041 struct uart_amba_port *uap =
2042 container_of(port, struct uart_amba_port, port);
2043 unsigned int lcr_h, old_cr;
2044 unsigned long flags;
2045 unsigned int baud, quot, clkdiv;
2048 if (uap->vendor->oversampling)
2054 * Ask the core to calculate the divisor for us.
2056 baud = uart_get_baud_rate(port, termios, old, 0,
2057 port->uartclk / clkdiv);
2058 #ifdef CONFIG_DMA_ENGINE
2060 * Adjust RX DMA polling rate with baud rate if not specified.
2062 if (uap->dmarx.auto_poll_rate)
2063 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
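/* For example, at 115200 baud this gives DIV_ROUND_UP(10000000, 115200) = 87 ms. */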
2066 if (baud > port->uartclk/16)
2067 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
2069 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
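/*
 * quot is the baud divisor expressed in 64ths: the standard divisor is
 * uartclk / (16 * baud), and uartclk * 4 / baud is 64 times that value
 * (the oversampling branch above does the same with a divide-by-8 clock).
 * The fractional part (quot & 0x3f) goes to FBRD and the integer part
 * (quot >> 6) to IBRD further down.
 */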
2071 switch (termios->c_cflag & CSIZE) {
2073 lcr_h = UART01x_LCRH_WLEN_5;
2076 lcr_h = UART01x_LCRH_WLEN_6;
2079 lcr_h = UART01x_LCRH_WLEN_7;
2082 lcr_h = UART01x_LCRH_WLEN_8;
2085 if (termios->c_cflag & CSTOPB)
2086 lcr_h |= UART01x_LCRH_STP2;
2087 if (termios->c_cflag & PARENB) {
2088 lcr_h |= UART01x_LCRH_PEN;
2089 if (!(termios->c_cflag & PARODD))
2090 lcr_h |= UART01x_LCRH_EPS;
2091 if (termios->c_cflag & CMSPAR)
2092 lcr_h |= UART011_LCRH_SPS;
2094 if (uap->fifosize > 1)
2095 lcr_h |= UART01x_LCRH_FEN;
2097 bits = tty_get_frame_size(termios->c_cflag);
2099 spin_lock_irqsave(&port->lock, flags);
2102 * Update the per-port timeout.
2104 uart_update_timeout(port, termios->c_cflag, baud);
2107 * Calculate the approximate time it takes to transmit one character
2108 * with the given baud rate. We use this as the poll interval when we
2109 * wait for the tx queue to empty.
2111 uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
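/*
 * For example, a 10-bit frame (8N1) at 115200 baud gives
 * DIV_ROUND_UP(10 * 1000 * 1000, 115200) = 87 us per character.
 */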
2113 pl011_setup_status_masks(port, termios);
2115 if (UART_ENABLE_MS(port, termios->c_cflag))
2116 pl011_enable_ms(port);
2118 if (port->rs485.flags & SER_RS485_ENABLED)
2119 termios->c_cflag &= ~CRTSCTS;
2121 old_cr = pl011_read(uap, REG_CR);
2123 if (termios->c_cflag & CRTSCTS) {
2124 if (old_cr & UART011_CR_RTS)
2125 old_cr |= UART011_CR_RTSEN;
2127 old_cr |= UART011_CR_CTSEN;
2128 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
2130 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
2131 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
2134 if (uap->vendor->oversampling) {
2135 if (baud > port->uartclk / 16)
2136 old_cr |= ST_UART011_CR_OVSFACT;
2138 old_cr &= ~ST_UART011_CR_OVSFACT;
2142 * Workaround for the ST Micro oversampling variants to
2143 * increase the bitrate slightly, by lowering the divisor,
2144 * to avoid delayed sampling of the start bit at high speeds;
2145 * otherwise we see data corruption.
2147 if (uap->vendor->oversampling) {
2148 if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
2150 else if ((baud > 3250000) && (quot > 2))
2154 pl011_write(quot & 0x3f, uap, REG_FBRD);
2155 pl011_write(quot >> 6, uap, REG_IBRD);
2158 * ----------v----------v----------v----------v-----
2159 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
2160 * REG_FBRD & REG_IBRD.
2161 * ----------^----------^----------^----------^-----
2163 pl011_write_lcr_h(uap, lcr_h);
2164 pl011_write(old_cr, uap, REG_CR);
2166 spin_unlock_irqrestore(&port->lock, flags);
2170 sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
2171 struct ktermios *old)
2173 struct uart_amba_port *uap =
2174 container_of(port, struct uart_amba_port, port);
2175 unsigned long flags;
2177 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
2179 /* The SBSA UART only supports 8n1 without hardware flow control. */
2180 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
2181 termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2182 termios->c_cflag |= CS8 | CLOCAL;
2184 spin_lock_irqsave(&port->lock, flags);
2185 uart_update_timeout(port, CS8, uap->fixed_baud);
2186 pl011_setup_status_masks(port, termios);
2187 spin_unlock_irqrestore(&port->lock, flags);
2190 static const char *pl011_type(struct uart_port *port)
2192 struct uart_amba_port *uap =
2193 container_of(port, struct uart_amba_port, port);
2194 return uap->port.type == PORT_AMBA ? uap->type : NULL;
2198 * Configure/autoconfigure the port.
2200 static void pl011_config_port(struct uart_port *port, int flags)
2202 if (flags & UART_CONFIG_TYPE)
2203 port->type = PORT_AMBA;
2207 * verify the new serial_struct (for TIOCSSERIAL).
2209 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
2212 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
2214 if (ser->irq < 0 || ser->irq >= nr_irqs)
2216 if (ser->baud_base < 9600)
2218 if (port->mapbase != (unsigned long) ser->iomem_base)
2223 static int pl011_rs485_config(struct uart_port *port,
2224 struct serial_rs485 *rs485)
2226 struct uart_amba_port *uap =
2227 container_of(port, struct uart_amba_port, port);
2229 /* pick sane settings if the user hasn't */
2230 if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
2231 !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
2232 rs485->flags |= SER_RS485_RTS_ON_SEND;
2233 rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
2235 /* clamp the delays to [0, 100ms] */
2236 rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
2237 rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
2238 memset(rs485->padding, 0, sizeof(rs485->padding));
2240 if (port->rs485.flags & SER_RS485_ENABLED)
2241 pl011_rs485_tx_stop(uap);
2243 /* Set new configuration */
2244 port->rs485 = *rs485;
2246 /* Make sure auto RTS is disabled */
2247 if (port->rs485.flags & SER_RS485_ENABLED) {
2248 u32 cr = pl011_read(uap, REG_CR);
2250 cr &= ~UART011_CR_RTSEN;
2251 pl011_write(cr, uap, REG_CR);
2252 port->status &= ~UPSTAT_AUTORTS;
static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.throttle	= pl011_throttle_rx,
	.unthrottle	= pl011_unthrottle_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}

static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty and restore the
	 * TCR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
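/*
 * Note on the locking in pl011_console_write() above: the port lock is not
 * taken at all while sysrq handling owns the port, and is only trylock'd
 * while an oops is in progress, so a console print from a crashing context
 * cannot deadlock against its own port lock.
 */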
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
				      int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
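/*
 * Worked example for the baud recovery above: with a 24 MHz uartclk,
 * IBRD = 13 and FBRD = 1 give 24000000 * 4 / (64 * 13 + 1) = 115246,
 * i.e. roughly 115200 baud (divisor 13 + 1/64 ~= 13.02).
 */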
static int pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
/**
 * pl011_console_match - non-standard console matching
 * @co: registering console
 * @name: name from console command line
 * @idx: index from console command line
 * @options: ptr to option string from console command line
 *
 * Only attempts to match console command lines of the form:
 *	console=pl011,mmio|mmio32,<addr>[,<options>]
 *	console=pl011,0x<addr>[,<options>]
 * This form is used to register an initial earlycon boot console and
 * replace it with the amba_console at pl011 driver init.
 *
 * Performs console setup for a match (as required by interface).
 * If no <options> are specified, then assume the h/w is already setup.
 *
 * Returns 0 if console matches; otherwise non-zero to use default matching.
 */
static int pl011_console_match(struct console *co, char *name, int idx,
			       char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}
static struct uart_driver amba_reg;

static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}
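/*
 * Note on the erratum 44 path above: the affected UART does not report the
 * usual busy indication reliably, so qdf2400_e44_putc() instead waits for
 * TXFE after every character, i.e. it lets the TX FIFO drain completely
 * before returning.
 */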
static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

#ifdef CONFIG_CONSOLE_POLL
static int pl011_getc(struct uart_port *port)
{
	if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	if (port->iotype == UPIO_MEM32)
		return readl(port->membase + UART01x_DR);
	else
		return readb(port->membase + UART01x_DR);
}

static int pl011_early_read(struct console *con, char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;
	int ch, num_read = 0;

	while (num_read < n) {
		ch = pl011_getc(&dev->port);
		if (ch == NO_POLL_CHAR)
			break;

		s[num_read++] = ch;
	}

	return num_read;
}
#else
#define pl011_early_read NULL
#endif
/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	device->con->read = pl011_early_read;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
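/*
 * Example (illustrative only; the MMIO address is platform specific, here
 * the PL011 of QEMU's arm64 "virt" machine):
 *
 *	earlycon=pl011,0x09000000
 */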
/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;

	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif
static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
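/*
 * Illustrative only: a board DTS can pin the ttyAMA number with a serial
 * alias (uart0 here is a hypothetical PL011 node label), for example:
 *
 *	aliases {
 *		serial0 = &uart0;
 *	};
 *
 * Ports without an alias keep their probe-order index, hence the warning
 * above when aliased and non-aliased ports are mixed.
 */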
/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

static int pl011_get_rs485_mode(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	struct serial_rs485 *rs485 = &port->rs485;
	int ret;

	ret = uart_get_rs485_mode(port);
	if (ret)
		return ret;

	/* clamp the delays to [0, 100ms] */
	rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
	rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);

	return 0;
}
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;
	int ret;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	ret = pl011_get_rs485_mode(uap);
	if (ret)
		return ret;

	amba_ports[index] = uap;

	return 0;
}
static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret, i;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
				if (amba_ports[i] == uap)
					amba_ports[i] = NULL;
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}
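/*
 * Note on pl011_register_port() above: the uart driver itself is only
 * registered when the first port appears (amba_reg.state is still NULL at
 * that point), and pl011_unregister_port() drops it again once the last
 * port is gone.
 */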
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;
	uap->port.rs485_config = pl011_rs485_config;
	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}
static void pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}
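/*
 * Note on sbsa_uart_probe() above: the ACPI/SBSA "generic UART" exposes no
 * clock or divisor registers to the OS, so the baud rate is taken from the
 * DT "current-speed" property (or defaults to 115200 without a DT node)
 * and is then treated as fixed.
 */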
static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{ "ARMHB000", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.pm	= &pl011_dev_pm_ops,
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};
static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if builtin it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");