// SPDX-License-Identifier: GPL-2.0+
/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
19 #include <linux/module.h>
20 #include <linux/ioport.h>
21 #include <linux/init.h>
22 #include <linux/console.h>
23 #include <linux/platform_device.h>
24 #include <linux/sysrq.h>
25 #include <linux/device.h>
26 #include <linux/tty.h>
27 #include <linux/tty_flip.h>
28 #include <linux/serial_core.h>
29 #include <linux/serial.h>
30 #include <linux/amba/bus.h>
31 #include <linux/amba/serial.h>
32 #include <linux/clk.h>
33 #include <linux/slab.h>
34 #include <linux/dmaengine.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/scatterlist.h>
37 #include <linux/delay.h>
38 #include <linux/types.h>
40 #include <linux/pinctrl/consumer.h>
41 #include <linux/sizes.h>
43 #include <linux/acpi.h>
47 #define SERIAL_AMBA_MAJOR 204
48 #define SERIAL_AMBA_MINOR 64
49 #define SERIAL_AMBA_NR UART_NR
51 #define AMBA_ISR_PASS_LIMIT 256
53 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
54 #define UART_DUMMY_DR_RX (1 << 16)
82 /* The size of the array - must be last */
86 static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
87 [REG_DR] = UART01x_DR,
88 [REG_FR] = UART01x_FR,
89 [REG_LCRH_RX] = UART011_LCRH,
90 [REG_LCRH_TX] = UART011_LCRH,
91 [REG_IBRD] = UART011_IBRD,
92 [REG_FBRD] = UART011_FBRD,
93 [REG_CR] = UART011_CR,
94 [REG_IFLS] = UART011_IFLS,
95 [REG_IMSC] = UART011_IMSC,
96 [REG_RIS] = UART011_RIS,
97 [REG_MIS] = UART011_MIS,
98 [REG_ICR] = UART011_ICR,
99 [REG_DMACR] = UART011_DMACR,
102 /* There is by now at least one vendor with differing details, so handle it */
104 const u16 *reg_offset;
106 unsigned int fr_busy;
114 bool cts_event_workaround;
118 unsigned int (*get_fifosize)(struct amba_device *dev);
121 static unsigned int get_fifosize_arm(struct amba_device *dev)
123 return amba_rev(dev) < 3 ? 16 : 32;
126 static struct vendor_data vendor_arm = {
127 .reg_offset = pl011_std_offsets,
128 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
129 .fr_busy = UART01x_FR_BUSY,
130 .fr_dsr = UART01x_FR_DSR,
131 .fr_cts = UART01x_FR_CTS,
132 .fr_ri = UART011_FR_RI,
133 .oversampling = false,
134 .dma_threshold = false,
135 .cts_event_workaround = false,
136 .always_enabled = false,
137 .fixed_options = false,
138 .get_fifosize = get_fifosize_arm,
141 static const struct vendor_data vendor_sbsa = {
142 .reg_offset = pl011_std_offsets,
143 .fr_busy = UART01x_FR_BUSY,
144 .fr_dsr = UART01x_FR_DSR,
145 .fr_cts = UART01x_FR_CTS,
146 .fr_ri = UART011_FR_RI,
148 .oversampling = false,
149 .dma_threshold = false,
150 .cts_event_workaround = false,
151 .always_enabled = true,
152 .fixed_options = true,
155 static struct vendor_data vendor_arm_axi = {
156 .reg_offset = pl011_std_offsets,
157 .ifls = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8,
158 .fr_busy = UART01x_FR_BUSY,
159 .fr_dsr = UART01x_FR_DSR,
160 .fr_cts = UART01x_FR_CTS,
161 .fr_ri = UART011_FR_RI,
162 .oversampling = false,
163 .dma_threshold = false,
164 .cts_event_workaround = false,
165 .always_enabled = false,
166 .fixed_options = false,
169 #ifdef CONFIG_ACPI_SPCR_TABLE
170 static const struct vendor_data vendor_qdt_qdf2400_e44 = {
171 .reg_offset = pl011_std_offsets,
172 .fr_busy = UART011_FR_TXFE,
173 .fr_dsr = UART01x_FR_DSR,
174 .fr_cts = UART01x_FR_CTS,
175 .fr_ri = UART011_FR_RI,
176 .inv_fr = UART011_FR_TXFE,
178 .oversampling = false,
179 .dma_threshold = false,
180 .cts_event_workaround = false,
181 .always_enabled = true,
182 .fixed_options = true,
186 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
187 [REG_DR] = UART01x_DR,
188 [REG_ST_DMAWM] = ST_UART011_DMAWM,
189 [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
190 [REG_FR] = UART01x_FR,
191 [REG_LCRH_RX] = ST_UART011_LCRH_RX,
192 [REG_LCRH_TX] = ST_UART011_LCRH_TX,
193 [REG_IBRD] = UART011_IBRD,
194 [REG_FBRD] = UART011_FBRD,
195 [REG_CR] = UART011_CR,
196 [REG_IFLS] = UART011_IFLS,
197 [REG_IMSC] = UART011_IMSC,
198 [REG_RIS] = UART011_RIS,
199 [REG_MIS] = UART011_MIS,
200 [REG_ICR] = UART011_ICR,
201 [REG_DMACR] = UART011_DMACR,
202 [REG_ST_XFCR] = ST_UART011_XFCR,
203 [REG_ST_XON1] = ST_UART011_XON1,
204 [REG_ST_XON2] = ST_UART011_XON2,
205 [REG_ST_XOFF1] = ST_UART011_XOFF1,
206 [REG_ST_XOFF2] = ST_UART011_XOFF2,
207 [REG_ST_ITCR] = ST_UART011_ITCR,
208 [REG_ST_ITIP] = ST_UART011_ITIP,
209 [REG_ST_ABCR] = ST_UART011_ABCR,
210 [REG_ST_ABIMSC] = ST_UART011_ABIMSC,
213 static unsigned int get_fifosize_st(struct amba_device *dev)
218 static struct vendor_data vendor_st = {
219 .reg_offset = pl011_st_offsets,
220 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
221 .fr_busy = UART01x_FR_BUSY,
222 .fr_dsr = UART01x_FR_DSR,
223 .fr_cts = UART01x_FR_CTS,
224 .fr_ri = UART011_FR_RI,
225 .oversampling = true,
226 .dma_threshold = true,
227 .cts_event_workaround = true,
228 .always_enabled = false,
229 .fixed_options = false,
230 .get_fifosize = get_fifosize_st,
233 /* Deals with DMA transactions */
235 struct pl011_dmabuf {
241 struct pl011_dmarx_data {
242 struct dma_chan *chan;
243 struct completion complete;
245 struct pl011_dmabuf dbuf_a;
246 struct pl011_dmabuf dbuf_b;
249 struct timer_list timer;
250 unsigned int last_residue;
251 unsigned long last_jiffies;
253 unsigned int poll_rate;
254 unsigned int poll_timeout;
257 struct pl011_dmatx_data {
258 struct dma_chan *chan;
266 * We wrap our port structure around the generic uart_port.
268 struct uart_amba_port {
269 struct uart_port port;
270 const u16 *reg_offset;
272 const struct vendor_data *vendor;
273 unsigned int dmacr; /* dma control reg */
274 unsigned int im; /* interrupt mask */
275 unsigned int old_status;
276 unsigned int fifosize; /* vendor-specific */
277 unsigned int fixed_baud; /* vendor-set fixed baud rate */
279 bool rs485_tx_started;
280 unsigned int rs485_tx_drain_interval; /* usecs */
281 #ifdef CONFIG_DMA_ENGINE
285 struct pl011_dmarx_data dmarx;
286 struct pl011_dmatx_data dmatx;
291 static unsigned int pl011_tx_empty(struct uart_port *port);
293 static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
296 return uap->reg_offset[reg];
299 static unsigned int pl011_read(const struct uart_amba_port *uap,
302 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
304 return (uap->port.iotype == UPIO_MEM32) ?
305 readl_relaxed(addr) : readw_relaxed(addr);
308 static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
311 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
313 if (uap->port.iotype == UPIO_MEM32)
314 writel_relaxed(val, addr);
316 writew_relaxed(val, addr);
320 * Reads up to 256 characters from the FIFO or until it's empty and
321 * inserts them into the TTY layer. Returns the number of characters
322 * read from the FIFO.
324 static int pl011_fifo_to_tty(struct uart_amba_port *uap)
326 unsigned int ch, fifotaken;
331 for (fifotaken = 0; fifotaken != 256; fifotaken++) {
332 status = pl011_read(uap, REG_FR);
333 if (status & UART01x_FR_RXFE)
336 /* Take chars from the FIFO and update status */
337 ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
339 uap->port.icount.rx++;
341 if (unlikely(ch & UART_DR_ERROR)) {
342 if (ch & UART011_DR_BE) {
343 ch &= ~(UART011_DR_FE | UART011_DR_PE);
344 uap->port.icount.brk++;
345 if (uart_handle_break(&uap->port))
347 } else if (ch & UART011_DR_PE)
348 uap->port.icount.parity++;
349 else if (ch & UART011_DR_FE)
350 uap->port.icount.frame++;
351 if (ch & UART011_DR_OE)
352 uap->port.icount.overrun++;
354 ch &= uap->port.read_status_mask;
356 if (ch & UART011_DR_BE)
358 else if (ch & UART011_DR_PE)
360 else if (ch & UART011_DR_FE)
364 spin_unlock(&uap->port.lock);
365 sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
366 spin_lock(&uap->port.lock);
369 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
377 * All the DMA operation mode stuff goes inside this ifdef.
378 * This assumes that you have a generic DMA device interface,
379 * no custom DMA interfaces are supported.
381 #ifdef CONFIG_DMA_ENGINE
383 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
385 static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
386 enum dma_data_direction dir)
388 db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
389 &db->dma, GFP_KERNEL);
392 db->len = PL011_DMA_BUFFER_SIZE;
397 static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
398 enum dma_data_direction dir)
401 dma_free_coherent(chan->device->dev,
402 PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
406 static void pl011_dma_probe(struct uart_amba_port *uap)
408 /* DMA is the sole user of the platform data right now */
409 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
410 struct device *dev = uap->port.dev;
411 struct dma_slave_config tx_conf = {
412 .dst_addr = uap->port.mapbase +
413 pl011_reg_to_offset(uap, REG_DR),
414 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
415 .direction = DMA_MEM_TO_DEV,
416 .dst_maxburst = uap->fifosize >> 1,
419 struct dma_chan *chan;
422 uap->dma_probed = true;
423 chan = dma_request_chan(dev, "tx");
425 if (PTR_ERR(chan) == -EPROBE_DEFER) {
426 uap->dma_probed = false;
430 /* We need platform data */
431 if (!plat || !plat->dma_filter) {
432 dev_info(uap->port.dev, "no DMA platform data\n");
436 /* Try to acquire a generic DMA engine slave TX channel */
438 dma_cap_set(DMA_SLAVE, mask);
440 chan = dma_request_channel(mask, plat->dma_filter,
443 dev_err(uap->port.dev, "no TX DMA channel!\n");
448 dmaengine_slave_config(chan, &tx_conf);
449 uap->dmatx.chan = chan;
451 dev_info(uap->port.dev, "DMA channel TX %s\n",
452 dma_chan_name(uap->dmatx.chan));
454 /* Optionally make use of an RX channel as well */
455 chan = dma_request_slave_channel(dev, "rx");
457 if (!chan && plat && plat->dma_rx_param) {
458 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
461 dev_err(uap->port.dev, "no RX DMA channel!\n");
467 struct dma_slave_config rx_conf = {
468 .src_addr = uap->port.mapbase +
469 pl011_reg_to_offset(uap, REG_DR),
470 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
471 .direction = DMA_DEV_TO_MEM,
472 .src_maxburst = uap->fifosize >> 2,
475 struct dma_slave_caps caps;
478 * Some DMA controllers provide information on their capabilities.
479 * If the controller does, check for suitable residue processing
480 * otherwise assime all is well.
482 if (0 == dma_get_slave_caps(chan, &caps)) {
483 if (caps.residue_granularity ==
484 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
485 dma_release_channel(chan);
486 dev_info(uap->port.dev,
487 "RX DMA disabled - no residue processing\n");
491 dmaengine_slave_config(chan, &rx_conf);
492 uap->dmarx.chan = chan;
494 uap->dmarx.auto_poll_rate = false;
495 if (plat && plat->dma_rx_poll_enable) {
496 /* Set poll rate if specified. */
497 if (plat->dma_rx_poll_rate) {
498 uap->dmarx.auto_poll_rate = false;
499 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
502 * 100 ms defaults to poll rate if not
503 * specified. This will be adjusted with
504 * the baud rate at set_termios.
506 uap->dmarx.auto_poll_rate = true;
507 uap->dmarx.poll_rate = 100;
509 /* 3 secs defaults poll_timeout if not specified. */
510 if (plat->dma_rx_poll_timeout)
511 uap->dmarx.poll_timeout =
512 plat->dma_rx_poll_timeout;
514 uap->dmarx.poll_timeout = 3000;
515 } else if (!plat && dev->of_node) {
516 uap->dmarx.auto_poll_rate = of_property_read_bool(
517 dev->of_node, "auto-poll");
518 if (uap->dmarx.auto_poll_rate) {
521 if (0 == of_property_read_u32(dev->of_node,
523 uap->dmarx.poll_rate = x;
525 uap->dmarx.poll_rate = 100;
526 if (0 == of_property_read_u32(dev->of_node,
527 "poll-timeout-ms", &x))
528 uap->dmarx.poll_timeout = x;
530 uap->dmarx.poll_timeout = 3000;
533 dev_info(uap->port.dev, "DMA channel RX %s\n",
534 dma_chan_name(uap->dmarx.chan));
538 static void pl011_dma_remove(struct uart_amba_port *uap)
541 dma_release_channel(uap->dmatx.chan);
543 dma_release_channel(uap->dmarx.chan);
546 /* Forward declare these for the refill routine */
547 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
548 static void pl011_start_tx_pio(struct uart_amba_port *uap);
551 * The current DMA TX buffer has been sent.
552 * Try to queue up another DMA buffer.
554 static void pl011_dma_tx_callback(void *data)
556 struct uart_amba_port *uap = data;
557 struct pl011_dmatx_data *dmatx = &uap->dmatx;
561 spin_lock_irqsave(&uap->port.lock, flags);
562 if (uap->dmatx.queued)
563 dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
564 dmatx->len, DMA_TO_DEVICE);
567 uap->dmacr = dmacr & ~UART011_TXDMAE;
568 pl011_write(uap->dmacr, uap, REG_DMACR);
571 * If TX DMA was disabled, it means that we've stopped the DMA for
572 * some reason (eg, XOFF received, or we want to send an X-char.)
574 * Note: we need to be careful here of a potential race between DMA
575 * and the rest of the driver - if the driver disables TX DMA while
576 * a TX buffer completing, we must update the tx queued status to
577 * get further refills (hence we check dmacr).
579 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
580 uart_circ_empty(&uap->port.state->xmit)) {
581 uap->dmatx.queued = false;
582 spin_unlock_irqrestore(&uap->port.lock, flags);
586 if (pl011_dma_tx_refill(uap) <= 0)
588 * We didn't queue a DMA buffer for some reason, but we
589 * have data pending to be sent. Re-enable the TX IRQ.
591 pl011_start_tx_pio(uap);
593 spin_unlock_irqrestore(&uap->port.lock, flags);
597 * Try to refill the TX DMA buffer.
598 * Locking: called with port lock held and IRQs disabled.
600 * 1 if we queued up a TX DMA buffer.
601 * 0 if we didn't want to handle this by DMA
604 static int pl011_dma_tx_refill(struct uart_amba_port *uap)
606 struct pl011_dmatx_data *dmatx = &uap->dmatx;
607 struct dma_chan *chan = dmatx->chan;
608 struct dma_device *dma_dev = chan->device;
609 struct dma_async_tx_descriptor *desc;
610 struct circ_buf *xmit = &uap->port.state->xmit;
614 * Try to avoid the overhead involved in using DMA if the
615 * transaction fits in the first half of the FIFO, by using
616 * the standard interrupt handling. This ensures that we
617 * issue a uart_write_wakeup() at the appropriate time.
619 count = uart_circ_chars_pending(xmit);
620 if (count < (uap->fifosize >> 1)) {
621 uap->dmatx.queued = false;
626 * Bodge: don't send the last character by DMA, as this
627 * will prevent XON from notifying us to restart DMA.
631 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
632 if (count > PL011_DMA_BUFFER_SIZE)
633 count = PL011_DMA_BUFFER_SIZE;
635 if (xmit->tail < xmit->head)
636 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
638 size_t first = UART_XMIT_SIZE - xmit->tail;
643 second = count - first;
645 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
647 memcpy(&dmatx->buf[first], &xmit->buf[0], second);
651 dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
653 if (dmatx->dma == DMA_MAPPING_ERROR) {
654 uap->dmatx.queued = false;
655 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
659 desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
660 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
663 uap->dmatx.queued = false;
665 * If DMA cannot be used right now, we complete this
666 * transaction via IRQ and let the TTY layer retry.
668 dev_dbg(uap->port.dev, "TX DMA busy\n");
672 /* Some data to go along to the callback */
673 desc->callback = pl011_dma_tx_callback;
674 desc->callback_param = uap;
676 /* All errors should happen at prepare time */
677 dmaengine_submit(desc);
679 /* Fire the DMA transaction */
680 dma_dev->device_issue_pending(chan);
682 uap->dmacr |= UART011_TXDMAE;
683 pl011_write(uap->dmacr, uap, REG_DMACR);
684 uap->dmatx.queued = true;
687 * Now we know that DMA will fire, so advance the ring buffer
688 * with the stuff we just dispatched.
690 uart_xmit_advance(&uap->port, count);
692 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
693 uart_write_wakeup(&uap->port);
699 * We received a transmit interrupt without a pending X-char but with
700 * pending characters.
701 * Locking: called with port lock held and IRQs disabled.
703 * false if we want to use PIO to transmit
704 * true if we queued a DMA buffer
706 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
708 if (!uap->using_tx_dma)
712 * If we already have a TX buffer queued, but received a
713 * TX interrupt, it will be because we've just sent an X-char.
714 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
716 if (uap->dmatx.queued) {
717 uap->dmacr |= UART011_TXDMAE;
718 pl011_write(uap->dmacr, uap, REG_DMACR);
719 uap->im &= ~UART011_TXIM;
720 pl011_write(uap->im, uap, REG_IMSC);
725 * We don't have a TX buffer queued, so try to queue one.
726 * If we successfully queued a buffer, mask the TX IRQ.
728 if (pl011_dma_tx_refill(uap) > 0) {
729 uap->im &= ~UART011_TXIM;
730 pl011_write(uap->im, uap, REG_IMSC);
737 * Stop the DMA transmit (eg, due to received XOFF).
738 * Locking: called with port lock held and IRQs disabled.
740 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
742 if (uap->dmatx.queued) {
743 uap->dmacr &= ~UART011_TXDMAE;
744 pl011_write(uap->dmacr, uap, REG_DMACR);
749 * Try to start a DMA transmit, or in the case of an XON/OFF
750 * character queued for send, try to get that character out ASAP.
751 * Locking: called with port lock held and IRQs disabled.
753 * false if we want the TX IRQ to be enabled
754 * true if we have a buffer queued
756 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
760 if (!uap->using_tx_dma)
763 if (!uap->port.x_char) {
764 /* no X-char, try to push chars out in DMA mode */
767 if (!uap->dmatx.queued) {
768 if (pl011_dma_tx_refill(uap) > 0) {
769 uap->im &= ~UART011_TXIM;
770 pl011_write(uap->im, uap, REG_IMSC);
773 } else if (!(uap->dmacr & UART011_TXDMAE)) {
774 uap->dmacr |= UART011_TXDMAE;
775 pl011_write(uap->dmacr, uap, REG_DMACR);
781 * We have an X-char to send. Disable DMA to prevent it loading
782 * the TX fifo, and then see if we can stuff it into the FIFO.
785 uap->dmacr &= ~UART011_TXDMAE;
786 pl011_write(uap->dmacr, uap, REG_DMACR);
788 if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
790 * No space in the FIFO, so enable the transmit interrupt
791 * so we know when there is space. Note that once we've
792 * loaded the character, we should just re-enable DMA.
797 pl011_write(uap->port.x_char, uap, REG_DR);
798 uap->port.icount.tx++;
799 uap->port.x_char = 0;
801 /* Success - restore the DMA state */
803 pl011_write(dmacr, uap, REG_DMACR);
809 * Flush the transmit buffer.
810 * Locking: called with port lock held and IRQs disabled.
812 static void pl011_dma_flush_buffer(struct uart_port *port)
813 __releases(&uap->port.lock)
814 __acquires(&uap->port.lock)
816 struct uart_amba_port *uap =
817 container_of(port, struct uart_amba_port, port);
819 if (!uap->using_tx_dma)
822 dmaengine_terminate_async(uap->dmatx.chan);
824 if (uap->dmatx.queued) {
825 dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
826 uap->dmatx.len, DMA_TO_DEVICE);
827 uap->dmatx.queued = false;
828 uap->dmacr &= ~UART011_TXDMAE;
829 pl011_write(uap->dmacr, uap, REG_DMACR);
833 static void pl011_dma_rx_callback(void *data);
835 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
837 struct dma_chan *rxchan = uap->dmarx.chan;
838 struct pl011_dmarx_data *dmarx = &uap->dmarx;
839 struct dma_async_tx_descriptor *desc;
840 struct pl011_dmabuf *dbuf;
845 /* Start the RX DMA job */
846 dbuf = uap->dmarx.use_buf_b ?
847 &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
848 desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
850 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
852 * If the DMA engine is busy and cannot prepare a
853 * channel, no big deal, the driver will fall back
854 * to interrupt mode as a result of this error code.
857 uap->dmarx.running = false;
858 dmaengine_terminate_all(rxchan);
862 /* Some data to go along to the callback */
863 desc->callback = pl011_dma_rx_callback;
864 desc->callback_param = uap;
865 dmarx->cookie = dmaengine_submit(desc);
866 dma_async_issue_pending(rxchan);
868 uap->dmacr |= UART011_RXDMAE;
869 pl011_write(uap->dmacr, uap, REG_DMACR);
870 uap->dmarx.running = true;
872 uap->im &= ~UART011_RXIM;
873 pl011_write(uap->im, uap, REG_IMSC);
879 * This is called when either the DMA job is complete, or
880 * the FIFO timeout interrupt occurred. This must be called
881 * with the port spinlock uap->port.lock held.
883 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
884 u32 pending, bool use_buf_b,
887 struct tty_port *port = &uap->port.state->port;
888 struct pl011_dmabuf *dbuf = use_buf_b ?
889 &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
891 u32 fifotaken = 0; /* only used for vdbg() */
893 struct pl011_dmarx_data *dmarx = &uap->dmarx;
896 if (uap->dmarx.poll_rate) {
897 /* The data can be taken by polling */
898 dmataken = dbuf->len - dmarx->last_residue;
899 /* Recalculate the pending size */
900 if (pending >= dmataken)
904 /* Pick the remain data from the DMA */
908 * First take all chars in the DMA pipe, then look in the FIFO.
909 * Note that tty_insert_flip_buf() tries to take as many chars
912 dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
915 uap->port.icount.rx += dma_count;
916 if (dma_count < pending)
917 dev_warn(uap->port.dev,
918 "couldn't insert all characters (TTY is full?)\n");
921 /* Reset the last_residue for Rx DMA poll */
922 if (uap->dmarx.poll_rate)
923 dmarx->last_residue = dbuf->len;
926 * Only continue with trying to read the FIFO if all DMA chars have
929 if (dma_count == pending && readfifo) {
930 /* Clear any error flags */
931 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
932 UART011_FEIS, uap, REG_ICR);
935 * If we read all the DMA'd characters, and we had an
936 * incomplete buffer, that could be due to an rx error, or
937 * maybe we just timed out. Read any pending chars and check
940 * Error conditions will only occur in the FIFO, these will
941 * trigger an immediate interrupt and stop the DMA job, so we
942 * will always find the error in the FIFO, never in the DMA
945 fifotaken = pl011_fifo_to_tty(uap);
948 dev_vdbg(uap->port.dev,
949 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
950 dma_count, fifotaken);
951 tty_flip_buffer_push(port);
954 static void pl011_dma_rx_irq(struct uart_amba_port *uap)
956 struct pl011_dmarx_data *dmarx = &uap->dmarx;
957 struct dma_chan *rxchan = dmarx->chan;
958 struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
959 &dmarx->dbuf_b : &dmarx->dbuf_a;
961 struct dma_tx_state state;
962 enum dma_status dmastat;
965 * Pause the transfer so we can trust the current counter,
966 * do this before we pause the PL011 block, else we may
969 if (dmaengine_pause(rxchan))
970 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
971 dmastat = rxchan->device->device_tx_status(rxchan,
972 dmarx->cookie, &state);
973 if (dmastat != DMA_PAUSED)
974 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
976 /* Disable RX DMA - incoming data will wait in the FIFO */
977 uap->dmacr &= ~UART011_RXDMAE;
978 pl011_write(uap->dmacr, uap, REG_DMACR);
979 uap->dmarx.running = false;
981 pending = dbuf->len - state.residue;
982 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
983 /* Then we terminate the transfer - we now know our residue */
984 dmaengine_terminate_all(rxchan);
987 * This will take the chars we have so far and insert
988 * into the framework.
990 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
992 /* Switch buffer & re-trigger DMA job */
993 dmarx->use_buf_b = !dmarx->use_buf_b;
994 if (pl011_dma_rx_trigger_dma(uap)) {
995 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
996 "fall back to interrupt mode\n");
997 uap->im |= UART011_RXIM;
998 pl011_write(uap->im, uap, REG_IMSC);
1002 static void pl011_dma_rx_callback(void *data)
1004 struct uart_amba_port *uap = data;
1005 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1006 struct dma_chan *rxchan = dmarx->chan;
1007 bool lastbuf = dmarx->use_buf_b;
1008 struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
1009 &dmarx->dbuf_b : &dmarx->dbuf_a;
1011 struct dma_tx_state state;
1015 * This completion interrupt occurs typically when the
1016 * RX buffer is totally stuffed but no timeout has yet
1017 * occurred. When that happens, we just want the RX
1018 * routine to flush out the secondary DMA buffer while
1019 * we immediately trigger the next DMA job.
1021 spin_lock_irq(&uap->port.lock);
1023 * Rx data can be taken by the UART interrupts during
1024 * the DMA irq handler. So we check the residue here.
1026 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1027 pending = dbuf->len - state.residue;
1028 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
1029 /* Then we terminate the transfer - we now know our residue */
1030 dmaengine_terminate_all(rxchan);
1032 uap->dmarx.running = false;
1033 dmarx->use_buf_b = !lastbuf;
1034 ret = pl011_dma_rx_trigger_dma(uap);
1036 pl011_dma_rx_chars(uap, pending, lastbuf, false);
1037 spin_unlock_irq(&uap->port.lock);
1039 * Do this check after we picked the DMA chars so we don't
1040 * get some IRQ immediately from RX.
1043 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
1044 "fall back to interrupt mode\n");
1045 uap->im |= UART011_RXIM;
1046 pl011_write(uap->im, uap, REG_IMSC);
1051 * Stop accepting received characters, when we're shutting down or
1052 * suspending this port.
1053 * Locking: called with port lock held and IRQs disabled.
1055 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1057 if (!uap->using_rx_dma)
1060 /* FIXME. Just disable the DMA enable */
1061 uap->dmacr &= ~UART011_RXDMAE;
1062 pl011_write(uap->dmacr, uap, REG_DMACR);
1066 * Timer handler for Rx DMA polling.
1067 * Every polling, It checks the residue in the dma buffer and transfer
1068 * data to the tty. Also, last_residue is updated for the next polling.
1070 static void pl011_dma_rx_poll(struct timer_list *t)
1072 struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
1073 struct tty_port *port = &uap->port.state->port;
1074 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1075 struct dma_chan *rxchan = uap->dmarx.chan;
1076 unsigned long flags;
1077 unsigned int dmataken = 0;
1078 unsigned int size = 0;
1079 struct pl011_dmabuf *dbuf;
1081 struct dma_tx_state state;
1083 dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
1084 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1085 if (likely(state.residue < dmarx->last_residue)) {
1086 dmataken = dbuf->len - dmarx->last_residue;
1087 size = dmarx->last_residue - state.residue;
1088 dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
1090 if (dma_count == size)
1091 dmarx->last_residue = state.residue;
1092 dmarx->last_jiffies = jiffies;
1094 tty_flip_buffer_push(port);
1097 * If no data is received in poll_timeout, the driver will fall back
1098 * to interrupt mode. We will retrigger DMA at the first interrupt.
1100 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
1101 > uap->dmarx.poll_timeout) {
1103 spin_lock_irqsave(&uap->port.lock, flags);
1104 pl011_dma_rx_stop(uap);
1105 uap->im |= UART011_RXIM;
1106 pl011_write(uap->im, uap, REG_IMSC);
1107 spin_unlock_irqrestore(&uap->port.lock, flags);
1109 uap->dmarx.running = false;
1110 dmaengine_terminate_all(rxchan);
1111 del_timer(&uap->dmarx.timer);
1113 mod_timer(&uap->dmarx.timer,
1114 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
1118 static void pl011_dma_startup(struct uart_amba_port *uap)
1122 if (!uap->dma_probed)
1123 pl011_dma_probe(uap);
1125 if (!uap->dmatx.chan)
1128 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
1129 if (!uap->dmatx.buf) {
1130 dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
1131 uap->port.fifosize = uap->fifosize;
1135 uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
1137 /* The DMA buffer is now the FIFO the TTY subsystem can use */
1138 uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
1139 uap->using_tx_dma = true;
1141 if (!uap->dmarx.chan)
1144 /* Allocate and map DMA RX buffers */
1145 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
1148 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1149 "RX buffer A", ret);
1153 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
1156 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1157 "RX buffer B", ret);
1158 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
1163 uap->using_rx_dma = true;
1166 /* Turn on DMA error (RX/TX will be enabled on demand) */
1167 uap->dmacr |= UART011_DMAONERR;
1168 pl011_write(uap->dmacr, uap, REG_DMACR);
1171 * ST Micro variants has some specific dma burst threshold
1172 * compensation. Set this to 16 bytes, so burst will only
1173 * be issued above/below 16 bytes.
1175 if (uap->vendor->dma_threshold)
1176 pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
1179 if (uap->using_rx_dma) {
1180 if (pl011_dma_rx_trigger_dma(uap))
1181 dev_dbg(uap->port.dev, "could not trigger initial "
1182 "RX DMA job, fall back to interrupt mode\n");
1183 if (uap->dmarx.poll_rate) {
1184 timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
1185 mod_timer(&uap->dmarx.timer,
1187 msecs_to_jiffies(uap->dmarx.poll_rate));
1188 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1189 uap->dmarx.last_jiffies = jiffies;
1194 static void pl011_dma_shutdown(struct uart_amba_port *uap)
1196 if (!(uap->using_tx_dma || uap->using_rx_dma))
1199 /* Disable RX and TX DMA */
1200 while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
1203 spin_lock_irq(&uap->port.lock);
1204 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1205 pl011_write(uap->dmacr, uap, REG_DMACR);
1206 spin_unlock_irq(&uap->port.lock);
1208 if (uap->using_tx_dma) {
1209 /* In theory, this should already be done by pl011_dma_flush_buffer */
1210 dmaengine_terminate_all(uap->dmatx.chan);
1211 if (uap->dmatx.queued) {
1212 dma_unmap_single(uap->dmatx.chan->device->dev,
1213 uap->dmatx.dma, uap->dmatx.len,
1215 uap->dmatx.queued = false;
1218 kfree(uap->dmatx.buf);
1219 uap->using_tx_dma = false;
1222 if (uap->using_rx_dma) {
1223 dmaengine_terminate_all(uap->dmarx.chan);
1224 /* Clean up the RX DMA */
1225 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
1226 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
1227 if (uap->dmarx.poll_rate)
1228 del_timer_sync(&uap->dmarx.timer);
1229 uap->using_rx_dma = false;
1233 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1235 return uap->using_rx_dma;
1238 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1240 return uap->using_rx_dma && uap->dmarx.running;
1244 /* Blank functions if the DMA engine is not available */
1245 static inline void pl011_dma_remove(struct uart_amba_port *uap)
1249 static inline void pl011_dma_startup(struct uart_amba_port *uap)
1253 static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
1257 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1262 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1266 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1271 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1275 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1279 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1284 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1289 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1294 #define pl011_dma_flush_buffer NULL
1297 static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
1300 * To be on the safe side only time out after twice as many iterations
1303 const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
1304 struct uart_port *port = &uap->port;
1308 /* Wait until hardware tx queue is empty */
1309 while (!pl011_tx_empty(port)) {
1310 if (i > MAX_TX_DRAIN_ITERS) {
1312 "timeout while draining hardware tx queue\n");
1316 udelay(uap->rs485_tx_drain_interval);
1320 if (port->rs485.delay_rts_after_send)
1321 mdelay(port->rs485.delay_rts_after_send);
1323 cr = pl011_read(uap, REG_CR);
1325 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
1326 cr &= ~UART011_CR_RTS;
1328 cr |= UART011_CR_RTS;
1330 /* Disable the transmitter and reenable the transceiver */
1331 cr &= ~UART011_CR_TXE;
1332 cr |= UART011_CR_RXE;
1333 pl011_write(cr, uap, REG_CR);
1335 uap->rs485_tx_started = false;
1338 static void pl011_stop_tx(struct uart_port *port)
1340 struct uart_amba_port *uap =
1341 container_of(port, struct uart_amba_port, port);
1343 uap->im &= ~UART011_TXIM;
1344 pl011_write(uap->im, uap, REG_IMSC);
1345 pl011_dma_tx_stop(uap);
1347 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
1348 pl011_rs485_tx_stop(uap);
1351 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
1353 /* Start TX with programmed I/O only (no DMA) */
1354 static void pl011_start_tx_pio(struct uart_amba_port *uap)
1356 if (pl011_tx_chars(uap, false)) {
1357 uap->im |= UART011_TXIM;
1358 pl011_write(uap->im, uap, REG_IMSC);
1362 static void pl011_start_tx(struct uart_port *port)
1364 struct uart_amba_port *uap =
1365 container_of(port, struct uart_amba_port, port);
1367 if (!pl011_dma_tx_start(uap))
1368 pl011_start_tx_pio(uap);
1371 static void pl011_stop_rx(struct uart_port *port)
1373 struct uart_amba_port *uap =
1374 container_of(port, struct uart_amba_port, port);
1376 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1377 UART011_PEIM|UART011_BEIM|UART011_OEIM);
1378 pl011_write(uap->im, uap, REG_IMSC);
1380 pl011_dma_rx_stop(uap);
1383 static void pl011_throttle_rx(struct uart_port *port)
1385 unsigned long flags;
1387 spin_lock_irqsave(&port->lock, flags);
1388 pl011_stop_rx(port);
1389 spin_unlock_irqrestore(&port->lock, flags);
1392 static void pl011_enable_ms(struct uart_port *port)
1394 struct uart_amba_port *uap =
1395 container_of(port, struct uart_amba_port, port);
1397 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1398 pl011_write(uap->im, uap, REG_IMSC);
1401 static void pl011_rx_chars(struct uart_amba_port *uap)
1402 __releases(&uap->port.lock)
1403 __acquires(&uap->port.lock)
1405 pl011_fifo_to_tty(uap);
1407 spin_unlock(&uap->port.lock);
1408 tty_flip_buffer_push(&uap->port.state->port);
1410 * If we were temporarily out of DMA mode for a while,
1411 * attempt to switch back to DMA mode again.
1413 if (pl011_dma_rx_available(uap)) {
1414 if (pl011_dma_rx_trigger_dma(uap)) {
1415 dev_dbg(uap->port.dev, "could not trigger RX DMA job "
1416 "fall back to interrupt mode again\n");
1417 uap->im |= UART011_RXIM;
1418 pl011_write(uap->im, uap, REG_IMSC);
1420 #ifdef CONFIG_DMA_ENGINE
1421 /* Start Rx DMA poll */
1422 if (uap->dmarx.poll_rate) {
1423 uap->dmarx.last_jiffies = jiffies;
1424 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1425 mod_timer(&uap->dmarx.timer,
1427 msecs_to_jiffies(uap->dmarx.poll_rate));
1432 spin_lock(&uap->port.lock);
1435 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
1438 if (unlikely(!from_irq) &&
1439 pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1440 return false; /* unable to transmit character */
1442 pl011_write(c, uap, REG_DR);
1444 uap->port.icount.tx++;
1449 static void pl011_rs485_tx_start(struct uart_amba_port *uap)
1451 struct uart_port *port = &uap->port;
1454 /* Enable transmitter */
1455 cr = pl011_read(uap, REG_CR);
1456 cr |= UART011_CR_TXE;
1458 /* Disable receiver if half-duplex */
1459 if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
1460 cr &= ~UART011_CR_RXE;
1462 if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
1463 cr &= ~UART011_CR_RTS;
1465 cr |= UART011_CR_RTS;
1467 pl011_write(cr, uap, REG_CR);
1469 if (port->rs485.delay_rts_before_send)
1470 mdelay(port->rs485.delay_rts_before_send);
1472 uap->rs485_tx_started = true;
1475 /* Returns true if tx interrupts have to be (kept) enabled */
1476 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
1478 struct circ_buf *xmit = &uap->port.state->xmit;
1479 int count = uap->fifosize >> 1;
1481 if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
1482 !uap->rs485_tx_started)
1483 pl011_rs485_tx_start(uap);
1485 if (uap->port.x_char) {
1486 if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
1488 uap->port.x_char = 0;
1491 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
1492 pl011_stop_tx(&uap->port);
1496 /* If we are using DMA mode, try to send some characters. */
1497 if (pl011_dma_tx_irq(uap))
1501 if (likely(from_irq) && count-- == 0)
1504 if (likely(from_irq) && count == 0 &&
1505 pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1508 if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
1511 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1512 } while (!uart_circ_empty(xmit));
1514 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1515 uart_write_wakeup(&uap->port);
1517 if (uart_circ_empty(xmit)) {
1518 pl011_stop_tx(&uap->port);
1524 static void pl011_modem_status(struct uart_amba_port *uap)
1526 unsigned int status, delta;
1528 status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1530 delta = status ^ uap->old_status;
1531 uap->old_status = status;
1536 if (delta & UART01x_FR_DCD)
1537 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1539 if (delta & uap->vendor->fr_dsr)
1540 uap->port.icount.dsr++;
1542 if (delta & uap->vendor->fr_cts)
1543 uart_handle_cts_change(&uap->port,
1544 status & uap->vendor->fr_cts);
1546 wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1549 static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
1551 if (!uap->vendor->cts_event_workaround)
1554 /* workaround to make sure that all bits are unlocked.. */
1555 pl011_write(0x00, uap, REG_ICR);
1558 * WA: introduce 26ns(1 uart clk) delay before W1C;
1559 * single apb access will incur 2 pclk(133.12Mhz) delay,
1560 * so add 2 dummy reads
1562 pl011_read(uap, REG_ICR);
1563 pl011_read(uap, REG_ICR);
1566 static irqreturn_t pl011_int(int irq, void *dev_id)
1568 struct uart_amba_port *uap = dev_id;
1569 unsigned long flags;
1570 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1573 spin_lock_irqsave(&uap->port.lock, flags);
1574 status = pl011_read(uap, REG_RIS) & uap->im;
1577 check_apply_cts_event_workaround(uap);
1579 pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
1583 if (status & (UART011_RTIS|UART011_RXIS)) {
1584 if (pl011_dma_rx_running(uap))
1585 pl011_dma_rx_irq(uap);
1587 pl011_rx_chars(uap);
1589 if (status & (UART011_DSRMIS|UART011_DCDMIS|
1590 UART011_CTSMIS|UART011_RIMIS))
1591 pl011_modem_status(uap);
1592 if (status & UART011_TXIS)
1593 pl011_tx_chars(uap, true);
1595 if (pass_counter-- == 0)
1598 status = pl011_read(uap, REG_RIS) & uap->im;
1599 } while (status != 0);
1603 spin_unlock_irqrestore(&uap->port.lock, flags);
1605 return IRQ_RETVAL(handled);
1608 static unsigned int pl011_tx_empty(struct uart_port *port)
1610 struct uart_amba_port *uap =
1611 container_of(port, struct uart_amba_port, port);
1613 /* Allow feature register bits to be inverted to work around errata */
1614 unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;
1616 return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
1620 static unsigned int pl011_get_mctrl(struct uart_port *port)
1622 struct uart_amba_port *uap =
1623 container_of(port, struct uart_amba_port, port);
1624 unsigned int result = 0;
1625 unsigned int status = pl011_read(uap, REG_FR);
1627 #define TIOCMBIT(uartbit, tiocmbit) \
1628 if (status & uartbit) \
1631 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1632 TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
1633 TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
1634 TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
1639 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1641 struct uart_amba_port *uap =
1642 container_of(port, struct uart_amba_port, port);
1645 cr = pl011_read(uap, REG_CR);
1647 #define TIOCMBIT(tiocmbit, uartbit) \
1648 if (mctrl & tiocmbit) \
1653 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1654 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1655 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1656 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1657 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1659 if (port->status & UPSTAT_AUTORTS) {
1660 /* We need to disable auto-RTS if we want to turn RTS off */
1661 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1665 pl011_write(cr, uap, REG_CR);
1668 static void pl011_break_ctl(struct uart_port *port, int break_state)
1670 struct uart_amba_port *uap =
1671 container_of(port, struct uart_amba_port, port);
1672 unsigned long flags;
1675 spin_lock_irqsave(&uap->port.lock, flags);
1676 lcr_h = pl011_read(uap, REG_LCRH_TX);
1677 if (break_state == -1)
1678 lcr_h |= UART01x_LCRH_BRK;
1680 lcr_h &= ~UART01x_LCRH_BRK;
1681 pl011_write(lcr_h, uap, REG_LCRH_TX);
1682 spin_unlock_irqrestore(&uap->port.lock, flags);
1685 #ifdef CONFIG_CONSOLE_POLL
1687 static void pl011_quiesce_irqs(struct uart_port *port)
1689 struct uart_amba_port *uap =
1690 container_of(port, struct uart_amba_port, port);
1692 pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
1694 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
1695 * we simply mask it. start_tx() will unmask it.
1697 * Note we can race with start_tx(), and if the race happens, the
1698 * polling user might get another interrupt just after we clear it.
1699 * But it should be OK and can happen even w/o the race, e.g.
1700 * controller immediately got some new data and raised the IRQ.
1702 * And whoever uses polling routines assumes that it manages the device
1703 * (including tx queue), so we're also fine with start_tx()'s caller
1706 pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
1710 static int pl011_get_poll_char(struct uart_port *port)
1712 struct uart_amba_port *uap =
1713 container_of(port, struct uart_amba_port, port);
1714 unsigned int status;
1717 * The caller might need IRQs lowered, e.g. if used with KDB NMI
1720 pl011_quiesce_irqs(port);
1722 status = pl011_read(uap, REG_FR);
1723 if (status & UART01x_FR_RXFE)
1724 return NO_POLL_CHAR;
1726 return pl011_read(uap, REG_DR);
1729 static void pl011_put_poll_char(struct uart_port *port,
1732 struct uart_amba_port *uap =
1733 container_of(port, struct uart_amba_port, port);
1735 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1738 pl011_write(ch, uap, REG_DR);
1741 #endif /* CONFIG_CONSOLE_POLL */
1743 static int pl011_hwinit(struct uart_port *port)
1745 struct uart_amba_port *uap =
1746 container_of(port, struct uart_amba_port, port);
1749 /* Optionaly enable pins to be muxed in and configured */
1750 pinctrl_pm_select_default_state(port->dev);
1753 * Try to enable the clock producer.
1755 retval = clk_prepare_enable(uap->clk);
1759 uap->port.uartclk = clk_get_rate(uap->clk);
1761 /* Clear pending error and receive interrupts */
1762 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
1763 UART011_FEIS | UART011_RTIS | UART011_RXIS,
1767 * Save interrupts enable mask, and enable RX interrupts in case if
1768 * the interrupt is used for NMI entry.
1770 uap->im = pl011_read(uap, REG_IMSC);
1771 pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
1773 if (dev_get_platdata(uap->port.dev)) {
1774 struct amba_pl011_data *plat;
1776 plat = dev_get_platdata(uap->port.dev);
1783 static bool pl011_split_lcrh(const struct uart_amba_port *uap)
1785 return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
1786 pl011_reg_to_offset(uap, REG_LCRH_TX);
1789 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1791 pl011_write(lcr_h, uap, REG_LCRH_RX);
1792 if (pl011_split_lcrh(uap)) {
1795 * Wait 10 PCLKs before writing LCRH_TX register,
1796 * to get this delay write read only register 10 times
1798 for (i = 0; i < 10; ++i)
1799 pl011_write(0xff, uap, REG_MIS);
1800 pl011_write(lcr_h, uap, REG_LCRH_TX);
1804 static int pl011_allocate_irq(struct uart_amba_port *uap)
1806 pl011_write(uap->im, uap, REG_IMSC);
1808 return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
1812 * Enable interrupts, only timeouts when using DMA
1813 * if initial RX DMA job failed, start in interrupt mode
1816 static void pl011_enable_interrupts(struct uart_amba_port *uap)
1818 unsigned long flags;
1821 spin_lock_irqsave(&uap->port.lock, flags);
1823 /* Clear out any spuriously appearing RX interrupts */
1824 pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1827 * RXIS is asserted only when the RX FIFO transitions from below
1828 * to above the trigger threshold. If the RX FIFO is already
1829 * full to the threshold this can't happen and RXIS will now be
1830 * stuck off. Drain the RX FIFO explicitly to fix this:
1832 for (i = 0; i < uap->fifosize * 2; ++i) {
1833 if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
1836 pl011_read(uap, REG_DR);
1839 uap->im = UART011_RTIM;
1840 if (!pl011_dma_rx_running(uap))
1841 uap->im |= UART011_RXIM;
1842 pl011_write(uap->im, uap, REG_IMSC);
1843 spin_unlock_irqrestore(&uap->port.lock, flags);
1846 static void pl011_unthrottle_rx(struct uart_port *port)
1848 struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
1849 unsigned long flags;
1851 spin_lock_irqsave(&uap->port.lock, flags);
1853 uap->im = UART011_RTIM;
1854 if (!pl011_dma_rx_running(uap))
1855 uap->im |= UART011_RXIM;
1857 pl011_write(uap->im, uap, REG_IMSC);
1859 spin_unlock_irqrestore(&uap->port.lock, flags);
1862 static int pl011_startup(struct uart_port *port)
1864 struct uart_amba_port *uap =
1865 container_of(port, struct uart_amba_port, port);
1869 retval = pl011_hwinit(port);
1873 retval = pl011_allocate_irq(uap);
1877 pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1879 spin_lock_irq(&uap->port.lock);
1881 cr = pl011_read(uap, REG_CR);
1882 cr &= UART011_CR_RTS | UART011_CR_DTR;
1883 cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
1885 if (!(port->rs485.flags & SER_RS485_ENABLED))
1886 cr |= UART011_CR_TXE;
1888 pl011_write(cr, uap, REG_CR);
1890 spin_unlock_irq(&uap->port.lock);
1893 * initialise the old status of the modem signals
1895 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1898 pl011_dma_startup(uap);
1900 pl011_enable_interrupts(uap);
1905 clk_disable_unprepare(uap->clk);
1909 static int sbsa_uart_startup(struct uart_port *port)
1911 struct uart_amba_port *uap =
1912 container_of(port, struct uart_amba_port, port);
1915 retval = pl011_hwinit(port);
1919 retval = pl011_allocate_irq(uap);
1923 /* The SBSA UART does not support any modem status lines. */
1924 uap->old_status = 0;
1926 pl011_enable_interrupts(uap);
1931 static void pl011_shutdown_channel(struct uart_amba_port *uap,
1936 val = pl011_read(uap, lcrh);
1937 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1938 pl011_write(val, uap, lcrh);
1942 * disable the port. It should not disable RTS and DTR.
1943 * Also RTS and DTR state should be preserved to restore
1944 * it during startup().
1946 static void pl011_disable_uart(struct uart_amba_port *uap)
1950 uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1951 spin_lock_irq(&uap->port.lock);
1952 cr = pl011_read(uap, REG_CR);
1953 cr &= UART011_CR_RTS | UART011_CR_DTR;
1954 cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1955 pl011_write(cr, uap, REG_CR);
1956 spin_unlock_irq(&uap->port.lock);
1959 * disable break condition and fifos
1961 pl011_shutdown_channel(uap, REG_LCRH_RX);
1962 if (pl011_split_lcrh(uap))
1963 pl011_shutdown_channel(uap, REG_LCRH_TX);
1966 static void pl011_disable_interrupts(struct uart_amba_port *uap)
1968 spin_lock_irq(&uap->port.lock);
1970 /* mask all interrupts and clear all pending ones */
1972 pl011_write(uap->im, uap, REG_IMSC);
1973 pl011_write(0xffff, uap, REG_ICR);
1975 spin_unlock_irq(&uap->port.lock);
1978 static void pl011_shutdown(struct uart_port *port)
1980 struct uart_amba_port *uap =
1981 container_of(port, struct uart_amba_port, port);
1983 pl011_disable_interrupts(uap);
1985 pl011_dma_shutdown(uap);
1987 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
1988 pl011_rs485_tx_stop(uap);
1990 free_irq(uap->port.irq, uap);
1992 pl011_disable_uart(uap);
1995 * Shut down the clock producer
1997 clk_disable_unprepare(uap->clk);
1998 /* Optionally let pins go into sleep states */
1999 pinctrl_pm_select_sleep_state(port->dev);
2001 if (dev_get_platdata(uap->port.dev)) {
2002 struct amba_pl011_data *plat;
2004 plat = dev_get_platdata(uap->port.dev);
2009 if (uap->port.ops->flush_buffer)
2010 uap->port.ops->flush_buffer(port);
2013 static void sbsa_uart_shutdown(struct uart_port *port)
2015 struct uart_amba_port *uap =
2016 container_of(port, struct uart_amba_port, port);
2018 pl011_disable_interrupts(uap);
2020 free_irq(uap->port.irq, uap);
2022 if (uap->port.ops->flush_buffer)
2023 uap->port.ops->flush_buffer(port);
2027 pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
2029 port->read_status_mask = UART011_DR_OE | 255;
2030 if (termios->c_iflag & INPCK)
2031 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
2032 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2033 port->read_status_mask |= UART011_DR_BE;
2036 * Characters to ignore
2038 port->ignore_status_mask = 0;
2039 if (termios->c_iflag & IGNPAR)
2040 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
2041 if (termios->c_iflag & IGNBRK) {
2042 port->ignore_status_mask |= UART011_DR_BE;
2044 * If we're ignoring parity and break indicators,
2045 * ignore overruns too (for real raw support).
2047 if (termios->c_iflag & IGNPAR)
2048 port->ignore_status_mask |= UART011_DR_OE;
2052 * Ignore all characters if CREAD is not set.
2054 if ((termios->c_cflag & CREAD) == 0)
2055 port->ignore_status_mask |= UART_DUMMY_DR_RX;
2059 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
2060 const struct ktermios *old)
2062 struct uart_amba_port *uap =
2063 container_of(port, struct uart_amba_port, port);
2064 unsigned int lcr_h, old_cr;
2065 unsigned long flags;
2066 unsigned int baud, quot, clkdiv;
2069 if (uap->vendor->oversampling)
2075 * Ask the core to calculate the divisor for us.
2077 baud = uart_get_baud_rate(port, termios, old, 0,
2078 port->uartclk / clkdiv);
2079 #ifdef CONFIG_DMA_ENGINE
2081 * Adjust RX DMA polling rate with baud rate if not specified.
2083 if (uap->dmarx.auto_poll_rate)
2084 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
2087 if (baud > port->uartclk/16)
2088 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
2090 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
2092 switch (termios->c_cflag & CSIZE) {
2094 lcr_h = UART01x_LCRH_WLEN_5;
2097 lcr_h = UART01x_LCRH_WLEN_6;
2100 lcr_h = UART01x_LCRH_WLEN_7;
2103 lcr_h = UART01x_LCRH_WLEN_8;
2106 if (termios->c_cflag & CSTOPB)
2107 lcr_h |= UART01x_LCRH_STP2;
2108 if (termios->c_cflag & PARENB) {
2109 lcr_h |= UART01x_LCRH_PEN;
2110 if (!(termios->c_cflag & PARODD))
2111 lcr_h |= UART01x_LCRH_EPS;
2112 if (termios->c_cflag & CMSPAR)
2113 lcr_h |= UART011_LCRH_SPS;
2115 if (uap->fifosize > 1)
2116 lcr_h |= UART01x_LCRH_FEN;
2118 bits = tty_get_frame_size(termios->c_cflag);
2120 spin_lock_irqsave(&port->lock, flags);
2123 * Update the per-port timeout.
2125 uart_update_timeout(port, termios->c_cflag, baud);
2128 * Calculate the approximated time it takes to transmit one character
2129 * with the given baud rate. We use this as the poll interval when we
2130 * wait for the tx queue to empty.
2132 uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
2134 pl011_setup_status_masks(port, termios);
2136 if (UART_ENABLE_MS(port, termios->c_cflag))
2137 pl011_enable_ms(port);
2139 if (port->rs485.flags & SER_RS485_ENABLED)
2140 termios->c_cflag &= ~CRTSCTS;
2142 old_cr = pl011_read(uap, REG_CR);
2144 if (termios->c_cflag & CRTSCTS) {
2145 if (old_cr & UART011_CR_RTS)
2146 old_cr |= UART011_CR_RTSEN;
2148 old_cr |= UART011_CR_CTSEN;
2149 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
2151 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
2152 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
2155 if (uap->vendor->oversampling) {
2156 if (baud > port->uartclk / 16)
2157 old_cr |= ST_UART011_CR_OVSFACT;
2159 old_cr &= ~ST_UART011_CR_OVSFACT;
2163 * Workaround for the ST Micro oversampling variants to
2164 * increase the bitrate slightly, by lowering the divisor,
2165 * to avoid delayed sampling of start bit at high speeds,
2166 * else we see data corruption.
2168 if (uap->vendor->oversampling) {
2169 if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
2171 else if ((baud > 3250000) && (quot > 2))
2175 pl011_write(quot & 0x3f, uap, REG_FBRD);
2176 pl011_write(quot >> 6, uap, REG_IBRD);
2179 * ----------v----------v----------v----------v-----
2180 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
2181 * REG_FBRD & REG_IBRD.
2182 * ----------^----------^----------^----------^-----
2184 pl011_write_lcr_h(uap, lcr_h);
2187 * Receive was disabled by pl011_disable_uart during shutdown.
2188 * Need to reenable receive if you need to use a tty_driver
2189 * returns from tty_find_polling_driver() after a port shutdown.
2191 old_cr |= UART011_CR_RXE;
2192 pl011_write(old_cr, uap, REG_CR);
2194 spin_unlock_irqrestore(&port->lock, flags);
2198 sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
2199 const struct ktermios *old)
2201 struct uart_amba_port *uap =
2202 container_of(port, struct uart_amba_port, port);
2203 unsigned long flags;
2205 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
2207 /* The SBSA UART only supports 8n1 without hardware flow control. */
2208 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
2209 termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2210 termios->c_cflag |= CS8 | CLOCAL;
2212 spin_lock_irqsave(&port->lock, flags);
2213 uart_update_timeout(port, CS8, uap->fixed_baud);
2214 pl011_setup_status_masks(port, termios);
2215 spin_unlock_irqrestore(&port->lock, flags);
2218 static const char *pl011_type(struct uart_port *port)
2220 struct uart_amba_port *uap =
2221 container_of(port, struct uart_amba_port, port);
2222 return uap->port.type == PORT_AMBA ? uap->type : NULL;
2226 * Configure/autoconfigure the port.
2228 static void pl011_config_port(struct uart_port *port, int flags)
2230 if (flags & UART_CONFIG_TYPE)
2231 port->type = PORT_AMBA;
2235 * verify the new serial_struct (for TIOCSSERIAL).
2237 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
2240 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
2242 if (ser->irq < 0 || ser->irq >= nr_irqs)
2244 if (ser->baud_base < 9600)
2246 if (port->mapbase != (unsigned long) ser->iomem_base)
2251 static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
2252 struct serial_rs485 *rs485)
2254 struct uart_amba_port *uap =
2255 container_of(port, struct uart_amba_port, port);
2257 if (port->rs485.flags & SER_RS485_ENABLED)
2258 pl011_rs485_tx_stop(uap);
2260 /* Make sure auto RTS is disabled */
2261 if (rs485->flags & SER_RS485_ENABLED) {
2262 u32 cr = pl011_read(uap, REG_CR);
2264 cr &= ~UART011_CR_RTSEN;
2265 pl011_write(cr, uap, REG_CR);
2266 port->status &= ~UPSTAT_AUTORTS;
2272 static const struct uart_ops amba_pl011_pops = {
2273 .tx_empty = pl011_tx_empty,
2274 .set_mctrl = pl011_set_mctrl,
2275 .get_mctrl = pl011_get_mctrl,
2276 .stop_tx = pl011_stop_tx,
2277 .start_tx = pl011_start_tx,
2278 .stop_rx = pl011_stop_rx,
2279 .throttle = pl011_throttle_rx,
2280 .unthrottle = pl011_unthrottle_rx,
2281 .enable_ms = pl011_enable_ms,
2282 .break_ctl = pl011_break_ctl,
2283 .startup = pl011_startup,
2284 .shutdown = pl011_shutdown,
2285 .flush_buffer = pl011_dma_flush_buffer,
2286 .set_termios = pl011_set_termios,
2288 .config_port = pl011_config_port,
2289 .verify_port = pl011_verify_port,
2290 #ifdef CONFIG_CONSOLE_POLL
2291 .poll_init = pl011_hwinit,
2292 .poll_get_char = pl011_get_poll_char,
2293 .poll_put_char = pl011_put_poll_char,
/* The SBSA UART has no modem-control lines: setting is a no-op ... */
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

/* ... and reading reports none asserted. */
static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}
2306 static const struct uart_ops sbsa_uart_pops = {
2307 .tx_empty = pl011_tx_empty,
2308 .set_mctrl = sbsa_uart_set_mctrl,
2309 .get_mctrl = sbsa_uart_get_mctrl,
2310 .stop_tx = pl011_stop_tx,
2311 .start_tx = pl011_start_tx,
2312 .stop_rx = pl011_stop_rx,
2313 .startup = sbsa_uart_startup,
2314 .shutdown = sbsa_uart_shutdown,
2315 .set_termios = sbsa_uart_set_termios,
2317 .config_port = pl011_config_port,
2318 .verify_port = pl011_verify_port,
2319 #ifdef CONFIG_CONSOLE_POLL
2320 .poll_init = pl011_hwinit,
2321 .poll_get_char = pl011_get_poll_char,
2322 .poll_put_char = pl011_put_poll_char,
2326 static struct uart_amba_port *amba_ports[UART_NR];
2328 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
2330 static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
2332 struct uart_amba_port *uap =
2333 container_of(port, struct uart_amba_port, port);
2335 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
2337 pl011_write(ch, uap, REG_DR);
2341 pl011_console_write(struct console *co, const char *s, unsigned int count)
2343 struct uart_amba_port *uap = amba_ports[co->index];
2344 unsigned int old_cr = 0, new_cr;
2345 unsigned long flags;
2348 clk_enable(uap->clk);
2350 local_irq_save(flags);
2351 if (uap->port.sysrq)
2353 else if (oops_in_progress)
2354 locked = spin_trylock(&uap->port.lock);
2356 spin_lock(&uap->port.lock);
2359 * First save the CR then disable the interrupts
2361 if (!uap->vendor->always_enabled) {
2362 old_cr = pl011_read(uap, REG_CR);
2363 new_cr = old_cr & ~UART011_CR_CTSEN;
2364 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
2365 pl011_write(new_cr, uap, REG_CR);
2368 uart_console_write(&uap->port, s, count, pl011_console_putchar);
2371 * Finally, wait for transmitter to become empty and restore the
2372 * TCR. Allow feature register bits to be inverted to work around
2375 while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
2376 & uap->vendor->fr_busy)
2378 if (!uap->vendor->always_enabled)
2379 pl011_write(old_cr, uap, REG_CR);
2382 spin_unlock(&uap->port.lock);
2383 local_irq_restore(flags);
2385 clk_disable(uap->clk);
2388 static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
2389 int *parity, int *bits)
2391 if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
2392 unsigned int lcr_h, ibrd, fbrd;
2394 lcr_h = pl011_read(uap, REG_LCRH_TX);
2397 if (lcr_h & UART01x_LCRH_PEN) {
2398 if (lcr_h & UART01x_LCRH_EPS)
2404 if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
2409 ibrd = pl011_read(uap, REG_IBRD);
2410 fbrd = pl011_read(uap, REG_FBRD);
2412 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
2414 if (uap->vendor->oversampling) {
2415 if (pl011_read(uap, REG_CR)
2416 & ST_UART011_CR_OVSFACT)
2422 static int pl011_console_setup(struct console *co, char *options)
2424 struct uart_amba_port *uap;
2432 * Check whether an invalid uart number has been specified, and
2433 * if so, search for the first available port that does have
2436 if (co->index >= UART_NR)
2438 uap = amba_ports[co->index];
2442 /* Allow pins to be muxed in and configured */
2443 pinctrl_pm_select_default_state(uap->port.dev);
2445 ret = clk_prepare(uap->clk);
2449 if (dev_get_platdata(uap->port.dev)) {
2450 struct amba_pl011_data *plat;
2452 plat = dev_get_platdata(uap->port.dev);
2457 uap->port.uartclk = clk_get_rate(uap->clk);
2459 if (uap->vendor->fixed_options) {
2460 baud = uap->fixed_baud;
2463 uart_parse_options(options,
2464 &baud, &parity, &bits, &flow);
2466 pl011_console_get_options(uap, &baud, &parity, &bits);
2469 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
2473 * pl011_console_match - non-standard console matching
2474 * @co: registering console
2475 * @name: name from console command line
2476 * @idx: index from console command line
2477 * @options: ptr to option string from console command line
2479 * Only attempts to match console command lines of the form:
2480 * console=pl011,mmio|mmio32,<addr>[,<options>]
2481 * console=pl011,0x<addr>[,<options>]
2482 * This form is used to register an initial earlycon boot console and
2483 * replace it with the amba_console at pl011 driver init.
2485 * Performs console setup for a match (as required by interface)
2486 * If no <options> are specified, then assume the h/w is already setup.
2488 * Returns 0 if console matches; otherwise non-zero to use default matching
2490 static int pl011_console_match(struct console *co, char *name, int idx,
2493 unsigned char iotype;
2494 resource_size_t addr;
2498 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
2499 * have a distinct console name, so make sure we check for that.
2500 * The actual implementation of the erratum occurs in the probe
2503 if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
2506 if (uart_parse_earlycon(options, &iotype, &addr, &options))
2509 if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
2512 /* try to match the port specified on the command line */
2513 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
2514 struct uart_port *port;
2519 port = &amba_ports[i]->port;
2521 if (port->mapbase != addr)
2526 return pl011_console_setup(co, options);
2532 static struct uart_driver amba_reg;
2533 static struct console amba_console = {
2535 .write = pl011_console_write,
2536 .device = uart_console_device,
2537 .setup = pl011_console_setup,
2538 .match = pl011_console_match,
2539 .flags = CON_PRINTBUFFER | CON_ANYTIME,
2544 #define AMBA_CONSOLE (&amba_console)
2546 static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
2548 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2550 writel(c, port->membase + UART01x_DR);
2551 while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
2555 static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
2557 struct earlycon_device *dev = con->data;
2559 uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
2562 static void pl011_putc(struct uart_port *port, unsigned char c)
2564 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2566 if (port->iotype == UPIO_MEM32)
2567 writel(c, port->membase + UART01x_DR);
2569 writeb(c, port->membase + UART01x_DR);
2570 while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
2574 static void pl011_early_write(struct console *con, const char *s, unsigned n)
2576 struct earlycon_device *dev = con->data;
2578 uart_console_write(&dev->port, s, n, pl011_putc);
2581 #ifdef CONFIG_CONSOLE_POLL
2582 static int pl011_getc(struct uart_port *port)
2584 if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
2585 return NO_POLL_CHAR;
2587 if (port->iotype == UPIO_MEM32)
2588 return readl(port->membase + UART01x_DR);
2590 return readb(port->membase + UART01x_DR);
2593 static int pl011_early_read(struct console *con, char *s, unsigned int n)
2595 struct earlycon_device *dev = con->data;
2596 int ch, num_read = 0;
2598 while (num_read < n) {
2599 ch = pl011_getc(&dev->port);
2600 if (ch == NO_POLL_CHAR)
2609 #define pl011_early_read NULL
/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
2624 static int __init pl011_early_console_setup(struct earlycon_device *device,
2627 if (!device->port.membase)
2630 device->con->write = pl011_early_write;
2631 device->con->read = pl011_early_read;
2635 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2636 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
2649 qdf2400_e44_early_console_setup(struct earlycon_device *device,
2652 if (!device->port.membase)
2655 device->con->write = qdf2400_e44_early_write;
2658 EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
2661 #define AMBA_CONSOLE NULL
2664 static struct uart_driver amba_reg = {
2665 .owner = THIS_MODULE,
2666 .driver_name = "ttyAMA",
2667 .dev_name = "ttyAMA",
2668 .major = SERIAL_AMBA_MAJOR,
2669 .minor = SERIAL_AMBA_MINOR,
2671 .cons = AMBA_CONSOLE,
2674 static int pl011_probe_dt_alias(int index, struct device *dev)
2676 struct device_node *np;
2677 static bool seen_dev_with_alias = false;
2678 static bool seen_dev_without_alias = false;
2681 if (!IS_ENABLED(CONFIG_OF))
2688 ret = of_alias_get_id(np, "serial");
2690 seen_dev_without_alias = true;
2693 seen_dev_with_alias = true;
2694 if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2695 dev_warn(dev, "requested serial port %d not available.\n", ret);
2700 if (seen_dev_with_alias && seen_dev_without_alias)
2701 dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2706 /* unregisters the driver also if no more ports are left */
2707 static void pl011_unregister_port(struct uart_amba_port *uap)
2712 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
2713 if (amba_ports[i] == uap)
2714 amba_ports[i] = NULL;
2715 else if (amba_ports[i])
2718 pl011_dma_remove(uap);
2720 uart_unregister_driver(&amba_reg);
2723 static int pl011_find_free_port(void)
2727 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2728 if (amba_ports[i] == NULL)
2734 static int pl011_get_rs485_mode(struct uart_amba_port *uap)
2736 struct uart_port *port = &uap->port;
2739 ret = uart_get_rs485_mode(port);
2746 static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2747 struct resource *mmiobase, int index)
2752 base = devm_ioremap_resource(dev, mmiobase);
2754 return PTR_ERR(base);
2756 index = pl011_probe_dt_alias(index, dev);
2758 uap->port.dev = dev;
2759 uap->port.mapbase = mmiobase->start;
2760 uap->port.membase = base;
2761 uap->port.fifosize = uap->fifosize;
2762 uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
2763 uap->port.flags = UPF_BOOT_AUTOCONF;
2764 uap->port.line = index;
2766 ret = pl011_get_rs485_mode(uap);
2770 amba_ports[index] = uap;
2775 static int pl011_register_port(struct uart_amba_port *uap)
2779 /* Ensure interrupts from this UART are masked and cleared */
2780 pl011_write(0, uap, REG_IMSC);
2781 pl011_write(0xffff, uap, REG_ICR);
2783 if (!amba_reg.state) {
2784 ret = uart_register_driver(&amba_reg);
2786 dev_err(uap->port.dev,
2787 "Failed to register AMBA-PL011 driver\n");
2788 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2789 if (amba_ports[i] == uap)
2790 amba_ports[i] = NULL;
2795 ret = uart_add_one_port(&amba_reg, &uap->port);
2797 pl011_unregister_port(uap);
2802 static const struct serial_rs485 pl011_rs485_supported = {
2803 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
2804 SER_RS485_RX_DURING_TX,
2805 .delay_rts_before_send = 1,
2806 .delay_rts_after_send = 1,
2809 static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2811 struct uart_amba_port *uap;
2812 struct vendor_data *vendor = id->data;
2816 portnr = pl011_find_free_port();
2820 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2825 uap->clk = devm_clk_get(&dev->dev, NULL);
2826 if (IS_ERR(uap->clk))
2827 return PTR_ERR(uap->clk);
2829 if (of_property_read_bool(dev->dev.of_node, "cts-event-workaround")) {
2830 vendor->cts_event_workaround = true;
2831 dev_info(&dev->dev, "cts_event_workaround enabled\n");
2834 uap->reg_offset = vendor->reg_offset;
2835 uap->vendor = vendor;
2836 uap->fifosize = vendor->get_fifosize(dev);
2837 uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
2838 uap->port.irq = dev->irq[0];
2839 uap->port.ops = &amba_pl011_pops;
2840 uap->port.rs485_config = pl011_rs485_config;
2841 uap->port.rs485_supported = pl011_rs485_supported;
2842 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2844 if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {
2847 uap->port.iotype = UPIO_MEM;
2850 uap->port.iotype = UPIO_MEM32;
2853 dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
2859 ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
2863 amba_set_drvdata(dev, uap);
2865 return pl011_register_port(uap);
2868 static void pl011_remove(struct amba_device *dev)
2870 struct uart_amba_port *uap = amba_get_drvdata(dev);
2872 uart_remove_one_port(&amba_reg, &uap->port);
2873 pl011_unregister_port(uap);
#ifdef CONFIG_PM_SLEEP
/* System sleep: hand the port to the serial core for suspend. */
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

/* System resume: restore the port via the serial core. */
static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2900 static int sbsa_uart_probe(struct platform_device *pdev)
2902 struct uart_amba_port *uap;
2908 * Check the mandatory baud rate parameter in the DT node early
2909 * so that we can easily exit with the error.
2911 if (pdev->dev.of_node) {
2912 struct device_node *np = pdev->dev.of_node;
2914 ret = of_property_read_u32(np, "current-speed", &baudrate);
2921 portnr = pl011_find_free_port();
2925 uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
2930 ret = platform_get_irq(pdev, 0);
2933 uap->port.irq = ret;
2935 #ifdef CONFIG_ACPI_SPCR_TABLE
2936 if (qdf2400_e44_present) {
2937 dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
2938 uap->vendor = &vendor_qdt_qdf2400_e44;
2941 uap->vendor = &vendor_sbsa;
2943 uap->reg_offset = uap->vendor->reg_offset;
2945 uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
2946 uap->port.ops = &sbsa_uart_pops;
2947 uap->fixed_baud = baudrate;
2949 snprintf(uap->type, sizeof(uap->type), "SBSA");
2951 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2953 ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
2957 platform_set_drvdata(pdev, uap);
2959 return pl011_register_port(uap);
2962 static int sbsa_uart_remove(struct platform_device *pdev)
2964 struct uart_amba_port *uap = platform_get_drvdata(pdev);
2966 uart_remove_one_port(&amba_reg, &uap->port);
2967 pl011_unregister_port(uap);
2971 static const struct of_device_id sbsa_uart_of_match[] = {
2972 { .compatible = "arm,sbsa-uart", },
2975 MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
2977 static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
2982 MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
2984 static struct platform_driver arm_sbsa_uart_platform_driver = {
2985 .probe = sbsa_uart_probe,
2986 .remove = sbsa_uart_remove,
2988 .name = "sbsa-uart",
2989 .pm = &pl011_dev_pm_ops,
2990 .of_match_table = of_match_ptr(sbsa_uart_of_match),
2991 .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
2992 .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
2996 static int pl011_axi_probe(struct platform_device *pdev)
2998 struct uart_amba_port *uap;
2999 struct vendor_data *vendor = &vendor_arm_axi;
3001 unsigned int periphid;
3002 int portnr, ret, irq;
3004 portnr = pl011_find_free_port();
3008 uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
3013 uap->clk = devm_clk_get(&pdev->dev, NULL);
3014 if (IS_ERR(uap->clk))
3015 return PTR_ERR(uap->clk);
3017 if (of_property_read_bool(pdev->dev.of_node, "cts-event-workaround")) {
3018 vendor->cts_event_workaround = true;
3019 dev_info(&pdev->dev, "cts_event_workaround enabled\n");
3022 irq = platform_get_irq(pdev, 0);
3026 periphid = 0x00241011; /* A safe default */
3027 of_property_read_u32(pdev->dev.of_node, "arm,primecell-periphid",
3030 uap->reg_offset = vendor->reg_offset;
3031 uap->vendor = vendor;
3032 uap->fifosize = (AMBA_REV_BITS(periphid) < 3) ? 16 : 32;
3033 uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
3034 uap->port.irq = irq;
3035 uap->port.ops = &amba_pl011_pops;
3037 snprintf(uap->type, sizeof(uap->type), "PL011 AXI");
3039 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3041 ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
3045 platform_set_drvdata(pdev, uap);
3047 return pl011_register_port(uap);
3050 static int pl011_axi_remove(struct platform_device *pdev)
3052 struct uart_amba_port *uap = platform_get_drvdata(pdev);
3054 uart_remove_one_port(&amba_reg, &uap->port);
3055 pl011_unregister_port(uap);
3059 static const struct of_device_id pl011_axi_of_match[] = {
3060 { .compatible = "arm,pl011-axi" },
3063 MODULE_DEVICE_TABLE(of, pl011_axi_of_match);
3065 static struct platform_driver pl011_axi_platform_driver = {
3066 .probe = pl011_axi_probe,
3067 .remove = pl011_axi_remove,
3069 .name = "pl011-axi",
3070 .pm = &pl011_dev_pm_ops,
3071 .of_match_table = of_match_ptr(pl011_axi_of_match),
3072 .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
3076 static const struct amba_id pl011_ids[] = {
3080 .data = &vendor_arm,
3090 MODULE_DEVICE_TABLE(amba, pl011_ids);
3092 static struct amba_driver pl011_driver = {
3094 .name = "uart-pl011",
3095 .pm = &pl011_dev_pm_ops,
3096 .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
3098 .id_table = pl011_ids,
3099 .probe = pl011_probe,
3100 .remove = pl011_remove,
3103 static int __init pl011_init(void)
3105 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
3107 if (platform_driver_register(&arm_sbsa_uart_platform_driver))
3108 pr_warn("could not register SBSA UART platform driver\n");
3109 if (platform_driver_register(&pl011_axi_platform_driver))
3110 pr_warn("could not register PL011 AXI platform driver\n");
3111 return amba_driver_register(&pl011_driver);
3114 static void __exit pl011_exit(void)
3116 platform_driver_unregister(&arm_sbsa_uart_platform_driver);
3117 amba_driver_unregister(&pl011_driver);
3121 * While this can be a module, if builtin it's most likely the console
3122 * So let's leave module_exit but move module_init to an earlier place
3124 arch_initcall(pl011_init);
3125 module_exit(pl011_exit);
3127 MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
3128 MODULE_DESCRIPTION("ARM AMBA serial port driver");
3129 MODULE_LICENSE("GPL");