From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 drivers/tty/serial/21285.c | 539 +++
 drivers/tty/serial/8250/8250.h | 416 ++
 drivers/tty/serial/8250/8250_accent.c | 35 +
 drivers/tty/serial/8250/8250_acorn.c | 138 +
 drivers/tty/serial/8250/8250_alpha.c | 21 +
 drivers/tty/serial/8250/8250_aspeed_vuart.c | 604 +++
 drivers/tty/serial/8250/8250_bcm2835aux.c | 257 ++
 drivers/tty/serial/8250/8250_bcm7271.c | 1247 ++++++
 drivers/tty/serial/8250/8250_boca.c | 49 +
 drivers/tty/serial/8250/8250_core.c | 1300 ++++++
 drivers/tty/serial/8250/8250_dma.c | 295 ++
 drivers/tty/serial/8250/8250_dw.c | 819 ++++
 drivers/tty/serial/8250/8250_dwlib.c | 307 ++
 drivers/tty/serial/8250/8250_dwlib.h | 71 +
 drivers/tty/serial/8250/8250_early.c | 216 +
 drivers/tty/serial/8250/8250_em.c | 163 +
 drivers/tty/serial/8250/8250_exar.c | 916 ++++
 drivers/tty/serial/8250/8250_exar_st16c554.c | 40 +
 drivers/tty/serial/8250/8250_fintek.c | 464 ++
 drivers/tty/serial/8250/8250_fourport.c | 44 +
 drivers/tty/serial/8250/8250_fsl.c | 174 +
 drivers/tty/serial/8250/8250_hp300.c | 324 ++
 drivers/tty/serial/8250/8250_hub6.c | 53 +
 drivers/tty/serial/8250/8250_ingenic.c | 353 ++
 drivers/tty/serial/8250/8250_ioc3.c | 98 +
 drivers/tty/serial/8250/8250_lpc18xx.c | 214 +
 drivers/tty/serial/8250/8250_lpss.c | 437 ++
 drivers/tty/serial/8250/8250_men_mcb.c | 176 +
 drivers/tty/serial/8250/8250_mid.c | 401 ++
 drivers/tty/serial/8250/8250_mtk.c | 698 +++
 drivers/tty/serial/8250/8250_of.c | 357 ++
 drivers/tty/serial/8250/8250_omap.c | 1725 ++++++++
 drivers/tty/serial/8250/8250_parisc.c | 130 +
 drivers/tty/serial/8250/8250_pci.c | 6058 ++++++++++++++++++++++++++
 drivers/tty/serial/8250/8250_pericom.c | 214 +
 drivers/tty/serial/8250/8250_pnp.c | 540 +++
 drivers/tty/serial/8250/8250_port.c | 3534 +++++++++++++++
 drivers/tty/serial/8250/8250_pxa.c | 190 +
 drivers/tty/serial/8250/8250_tegra.c | 201 +
 drivers/tty/serial/8250/8250_uniphier.c | 307 ++
 drivers/tty/serial/8250/Kconfig | 549 +++
 drivers/tty/serial/8250/Makefile | 47 +
 drivers/tty/serial/8250/serial_cs.c | 873 ++++
 drivers/tty/serial/Kconfig | 1566 +++++++
 drivers/tty/serial/Makefile | 95 +
 drivers/tty/serial/altera_jtaguart.c | 530 +++
 drivers/tty/serial/altera_uart.c | 674 +++
 drivers/tty/serial/amba-pl010.c | 825 ++++
 drivers/tty/serial/amba-pl011.c | 3016 +++++++++++++
 drivers/tty/serial/apbuart.c | 688 +++
 drivers/tty/serial/apbuart.h | 65 +
 drivers/tty/serial/ar933x_uart.c | 890 ++++
 drivers/tty/serial/arc_uart.c | 684 +++
 drivers/tty/serial/atmel_serial.c | 3072 +++++++++++++
 drivers/tty/serial/atmel_serial.h | 171 +
 drivers/tty/serial/bcm63xx_uart.c | 921 ++++
 drivers/tty/serial/clps711x.c | 562 +++
 drivers/tty/serial/cpm_uart/Makefile | 12 +
 drivers/tty/serial/cpm_uart/cpm_uart.h | 142 +
 drivers/tty/serial/cpm_uart/cpm_uart_core.c | 1485 +++++++
 drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c | 122 +
 drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h | 33 +
 drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c | 156 +
 drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h | 33 +
 drivers/tty/serial/digicolor-usart.c | 562 +++
 drivers/tty/serial/dz.c | 948 ++++
 drivers/tty/serial/dz.h | 130 +
 drivers/tty/serial/earlycon-arm-semihost.c | 51 +
 drivers/tty/serial/earlycon-riscv-sbi.c | 31 +
 drivers/tty/serial/earlycon.c | 323 ++
 drivers/tty/serial/fsl_linflexuart.c | 921 ++++
 drivers/tty/serial/fsl_lpuart.c | 2961 +++++++
 drivers/tty/serial/icom.c | 1876 ++++++++
 drivers/tty/serial/imx.c | 2702 ++++++
 drivers/tty/serial/imx_earlycon.c | 50 +
 drivers/tty/serial/ip22zilog.c | 1223 ++++++
 drivers/tty/serial/ip22zilog.h | 282 ++
 drivers/tty/serial/jsm/Makefile | 9 +
 drivers/tty/serial/jsm/jsm.h | 438 ++
 drivers/tty/serial/jsm/jsm_cls.c | 949 ++++
 drivers/tty/serial/jsm/jsm_driver.c | 383 ++
 drivers/tty/serial/jsm/jsm_neo.c | 1387 ++++++
 drivers/tty/serial/jsm/jsm_tty.c | 828 ++++
 drivers/tty/serial/kgdb_nmi.c | 380 ++
 drivers/tty/serial/kgdboc.c | 604 +++
 drivers/tty/serial/lantiq.c | 978 +++++
 drivers/tty/serial/liteuart.c | 439 ++
 drivers/tty/serial/lpc32xx_hs.c | 766 ++++
 drivers/tty/serial/max3100.c | 904 ++++
 drivers/tty/serial/max310x.c | 1700 ++++++++
 drivers/tty/serial/mcf.c | 709 +++
 drivers/tty/serial/men_z135_uart.c | 932 ++++
 drivers/tty/serial/meson_uart.c | 847 ++++
 drivers/tty/serial/milbeaut_usio.c | 612 +++
 drivers/tty/serial/mpc52xx_uart.c | 1953 +++++++
 drivers/tty/serial/mps2-uart.c | 655 +++
 drivers/tty/serial/msm_serial.c | 1926 ++++
 drivers/tty/serial/mux.c | 602 +++
 drivers/tty/serial/mvebu-uart.c | 1565 +++++++
 drivers/tty/serial/mxs-auart.c | 1760 ++++++++
 drivers/tty/serial/omap-serial.c | 1874 ++++
 drivers/tty/serial/owl-uart.c | 796 ++++
 drivers/tty/serial/pch_uart.c | 1918 ++++
 drivers/tty/serial/pic32_uart.c | 1004 +++++
 drivers/tty/serial/pmac_zilog.c | 1996 +++++++
 drivers/tty/serial/pmac_zilog.h | 373 ++
 drivers/tty/serial/pxa.c | 931 ++++
 drivers/tty/serial/qcom_geni_serial.c | 1615 +++++++
 drivers/tty/serial/rda-uart.c | 829 ++++
 drivers/tty/serial/rp2.c | 863 ++++
 drivers/tty/serial/sa1100.c | 948 ++++
 drivers/tty/serial/samsung_tty.c | 3118 +++++++
 drivers/tty/serial/sb1250-duart.c | 964 ++++
 drivers/tty/serial/sc16is7xx.c | 1900 ++++
 drivers/tty/serial/sccnxp.c | 1069 +++++
 drivers/tty/serial/serial-tegra.c | 1710 ++++++++
 drivers/tty/serial/serial_core.c | 3494 +++++++++++++++
 drivers/tty/serial/serial_mctrl_gpio.c | 388 ++
 drivers/tty/serial/serial_mctrl_gpio.h | 165 +
 drivers/tty/serial/serial_txx9.c | 1291 ++++++
 drivers/tty/serial/sh-sci.c | 3498 +++++++++++++++
 drivers/tty/serial/sh-sci.h | 177 +
 drivers/tty/serial/sifive.c | 1093 +++++
 drivers/tty/serial/sprd_serial.c | 1297 ++++++
 drivers/tty/serial/st-asc.c | 1009 +++++
 drivers/tty/serial/stm32-usart.c | 2113 +++++++++
 drivers/tty/serial/stm32-usart.h | 216 +
 drivers/tty/serial/suncore.c | 244 ++
 drivers/tty/serial/sunhv.c | 652 +++
 drivers/tty/serial/sunplus-uart.c | 775 ++++
 drivers/tty/serial/sunsab.c | 1161 +++++
 drivers/tty/serial/sunsab.h | 323 ++
 drivers/tty/serial/sunsu.c | 1625 +++++++
 drivers/tty/serial/sunzilog.c | 1651 +++++++
 drivers/tty/serial/sunzilog.h | 290 ++
 drivers/tty/serial/tegra-tcu.c | 303 ++
 drivers/tty/serial/timbuart.c | 502 +++
 drivers/tty/serial/timbuart.h | 46 +
 drivers/tty/serial/uartlite.c | 950 ++++
 drivers/tty/serial/ucc_uart.c | 1539 +++++++
 drivers/tty/serial/vt8500_serial.c | 745 ++++
 drivers/tty/serial/xilinx_uartps.c | 1729 ++++++++
 drivers/tty/serial/zs.c | 1307 ++++++
 drivers/tty/serial/zs.h | 285 ++
 144 files changed, 125498 insertions(+)
 create mode 100644 drivers/tty/serial/21285.c
 create mode 100644 drivers/tty/serial/8250/8250.h
 create mode 100644 drivers/tty/serial/8250/8250_accent.c
 create mode 100644 drivers/tty/serial/8250/8250_acorn.c
 create mode 100644 drivers/tty/serial/8250/8250_alpha.c
 create mode 100644 drivers/tty/serial/8250/8250_aspeed_vuart.c
 create mode 100644 drivers/tty/serial/8250/8250_bcm2835aux.c
 create mode 100644 drivers/tty/serial/8250/8250_bcm7271.c
 create mode 100644 drivers/tty/serial/8250/8250_boca.c
 create mode 100644 drivers/tty/serial/8250/8250_core.c
 create mode 100644 drivers/tty/serial/8250/8250_dma.c
 create mode 100644 drivers/tty/serial/8250/8250_dw.c
 create mode 100644 drivers/tty/serial/8250/8250_dwlib.c
 create mode 100644 drivers/tty/serial/8250/8250_dwlib.h
 create mode 100644 drivers/tty/serial/8250/8250_early.c
 create mode 100644 drivers/tty/serial/8250/8250_em.c
 create mode 100644 drivers/tty/serial/8250/8250_exar.c
 create mode 100644 drivers/tty/serial/8250/8250_exar_st16c554.c
 create mode 100644 drivers/tty/serial/8250/8250_fintek.c
 create mode 100644 drivers/tty/serial/8250/8250_fourport.c
 create mode 100644 drivers/tty/serial/8250/8250_fsl.c
 create mode 100644 drivers/tty/serial/8250/8250_hp300.c
 create mode 100644 drivers/tty/serial/8250/8250_hub6.c
 create mode 100644 drivers/tty/serial/8250/8250_ingenic.c
 create mode 100644 drivers/tty/serial/8250/8250_ioc3.c
 create mode 100644 drivers/tty/serial/8250/8250_lpc18xx.c
 create mode 100644 drivers/tty/serial/8250/8250_lpss.c
 create mode 100644 drivers/tty/serial/8250/8250_men_mcb.c
 create mode 100644 drivers/tty/serial/8250/8250_mid.c
 create mode 100644 drivers/tty/serial/8250/8250_mtk.c
 create mode 100644 drivers/tty/serial/8250/8250_of.c
 create mode 100644 drivers/tty/serial/8250/8250_omap.c
 create mode 100644 drivers/tty/serial/8250/8250_parisc.c
 create mode 100644 drivers/tty/serial/8250/8250_pci.c
 create mode 100644 drivers/tty/serial/8250/8250_pericom.c
 create mode 100644 drivers/tty/serial/8250/8250_pnp.c
 create mode 100644 drivers/tty/serial/8250/8250_port.c
 create mode 100644 drivers/tty/serial/8250/8250_pxa.c
 create mode 100644 drivers/tty/serial/8250/8250_tegra.c
 create mode 100644 drivers/tty/serial/8250/8250_uniphier.c
 create mode 100644 drivers/tty/serial/8250/Kconfig
 create mode 100644 drivers/tty/serial/8250/Makefile
 create mode 100644 drivers/tty/serial/8250/serial_cs.c
 create mode 100644 drivers/tty/serial/Kconfig
 create mode 100644 drivers/tty/serial/Makefile
 create mode 100644 drivers/tty/serial/altera_jtaguart.c
 create mode 100644 drivers/tty/serial/altera_uart.c
 create mode 100644 drivers/tty/serial/amba-pl010.c
 create mode 100644 drivers/tty/serial/amba-pl011.c
 create mode 100644 drivers/tty/serial/apbuart.c
 create mode 100644 drivers/tty/serial/apbuart.h
 create mode 100644 drivers/tty/serial/ar933x_uart.c
 create mode 100644 drivers/tty/serial/arc_uart.c
 create mode 100644 drivers/tty/serial/atmel_serial.c
 create mode 100644 drivers/tty/serial/atmel_serial.h
 create mode 100644 drivers/tty/serial/bcm63xx_uart.c
 create mode 100644 drivers/tty/serial/clps711x.c
 create mode 100644 drivers/tty/serial/cpm_uart/Makefile
 create mode 100644 drivers/tty/serial/cpm_uart/cpm_uart.h
 create mode 100644 drivers/tty/serial/cpm_uart/cpm_uart_core.c
 create mode 100644 drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
 create mode 100644 drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
 create mode 100644 drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
 create mode 100644 drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
 create mode 100644 drivers/tty/serial/digicolor-usart.c
 create mode 100644 drivers/tty/serial/dz.c
 create mode 100644 drivers/tty/serial/dz.h
 create mode 100644 drivers/tty/serial/earlycon-arm-semihost.c
 create mode 100644 drivers/tty/serial/earlycon-riscv-sbi.c
 create mode 100644 drivers/tty/serial/earlycon.c
 create mode 100644 drivers/tty/serial/fsl_linflexuart.c
 create mode 100644 drivers/tty/serial/fsl_lpuart.c
 create mode 100644 drivers/tty/serial/icom.c
 create mode 100644 drivers/tty/serial/imx.c
 create mode 100644 drivers/tty/serial/imx_earlycon.c
 create mode 100644 drivers/tty/serial/ip22zilog.c
 create mode 100644 drivers/tty/serial/ip22zilog.h
 create mode 100644 drivers/tty/serial/jsm/Makefile
 create mode 100644 drivers/tty/serial/jsm/jsm.h
 create mode 100644 drivers/tty/serial/jsm/jsm_cls.c
 create mode 100644 drivers/tty/serial/jsm/jsm_driver.c
 create mode 100644 drivers/tty/serial/jsm/jsm_neo.c
 create mode 100644 drivers/tty/serial/jsm/jsm_tty.c
 create mode 100644 drivers/tty/serial/kgdb_nmi.c
 create mode 100644 drivers/tty/serial/kgdboc.c
 create mode 100644 drivers/tty/serial/lantiq.c
 create mode 100644 drivers/tty/serial/liteuart.c
 create mode 100644 drivers/tty/serial/lpc32xx_hs.c
 create mode 100644 drivers/tty/serial/max3100.c
 create mode 100644 drivers/tty/serial/max310x.c
 create mode 100644 drivers/tty/serial/mcf.c
 create mode 100644 drivers/tty/serial/men_z135_uart.c
 create mode 100644 drivers/tty/serial/meson_uart.c
 create mode 100644 drivers/tty/serial/milbeaut_usio.c
 create mode 100644 drivers/tty/serial/mpc52xx_uart.c
 create mode 100644 drivers/tty/serial/mps2-uart.c
 create mode 100644 drivers/tty/serial/msm_serial.c
 create mode 100644 drivers/tty/serial/mux.c
 create mode 100644 drivers/tty/serial/mvebu-uart.c
 create mode 100644 drivers/tty/serial/mxs-auart.c
 create mode 100644 drivers/tty/serial/omap-serial.c
 create mode 100644 drivers/tty/serial/owl-uart.c
 create mode 100644 drivers/tty/serial/pch_uart.c
 create mode 100644 drivers/tty/serial/pic32_uart.c
 create mode 100644 drivers/tty/serial/pmac_zilog.c
 create mode 100644 drivers/tty/serial/pmac_zilog.h
 create mode 100644 drivers/tty/serial/pxa.c
 create mode 100644 drivers/tty/serial/qcom_geni_serial.c
 create mode 100644 drivers/tty/serial/rda-uart.c
 create mode 100644 drivers/tty/serial/rp2.c
 create mode 100644 drivers/tty/serial/sa1100.c
 create mode 100644 drivers/tty/serial/samsung_tty.c
 create mode 100644 drivers/tty/serial/sb1250-duart.c
 create mode 100644 drivers/tty/serial/sc16is7xx.c
 create mode 100644 drivers/tty/serial/sccnxp.c
 create mode 100644 drivers/tty/serial/serial-tegra.c
 create mode 100644 drivers/tty/serial/serial_core.c
 create mode 100644 drivers/tty/serial/serial_mctrl_gpio.c
 create mode 100644 drivers/tty/serial/serial_mctrl_gpio.h
 create mode 100644 drivers/tty/serial/serial_txx9.c
 create mode 100644 drivers/tty/serial/sh-sci.c
 create mode 100644 drivers/tty/serial/sh-sci.h
 create mode 100644 drivers/tty/serial/sifive.c
 create mode 100644 drivers/tty/serial/sprd_serial.c
 create mode 100644 drivers/tty/serial/st-asc.c
 create mode 100644 drivers/tty/serial/stm32-usart.c
 create mode 100644 drivers/tty/serial/stm32-usart.h
 create mode 100644 drivers/tty/serial/suncore.c
 create mode 100644 drivers/tty/serial/sunhv.c
 create mode 100644 drivers/tty/serial/sunplus-uart.c
 create mode 100644 drivers/tty/serial/sunsab.c
 create mode 100644 drivers/tty/serial/sunsab.h
 create mode 100644 drivers/tty/serial/sunsu.c
 create mode 100644 drivers/tty/serial/sunzilog.c
 create mode 100644 drivers/tty/serial/sunzilog.h
 create mode 100644 drivers/tty/serial/tegra-tcu.c
 create mode 100644 drivers/tty/serial/timbuart.c
 create mode 100644 drivers/tty/serial/timbuart.h
 create mode 100644 drivers/tty/serial/uartlite.c
 create mode 100644 drivers/tty/serial/ucc_uart.c
 create mode 100644 drivers/tty/serial/vt8500_serial.c
 create mode 100644 drivers/tty/serial/xilinx_uartps.c
 create mode 100644 drivers/tty/serial/zs.c
 create mode 100644
drivers/tty/serial/zs.h (limited to 'drivers/tty/serial') diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c new file mode 100644 index 000000000..c7d34823f --- /dev/null +++ b/drivers/tty/serial/21285.c @@ -0,0 +1,539 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the serial port on the 21285 StrongArm-110 core logic chip. + * + * Based on drivers/char/serial.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define BAUD_BASE (mem_fclk_21285/64) + +#define SERIAL_21285_NAME "ttyFB" +#define SERIAL_21285_MAJOR 204 +#define SERIAL_21285_MINOR 4 + +#define RXSTAT_DUMMY_READ 0x80000000 +#define RXSTAT_FRAME (1 << 0) +#define RXSTAT_PARITY (1 << 1) +#define RXSTAT_OVERRUN (1 << 2) +#define RXSTAT_ANYERR (RXSTAT_FRAME|RXSTAT_PARITY|RXSTAT_OVERRUN) + +#define H_UBRLCR_BREAK (1 << 0) +#define H_UBRLCR_PARENB (1 << 1) +#define H_UBRLCR_PAREVN (1 << 2) +#define H_UBRLCR_STOPB (1 << 3) +#define H_UBRLCR_FIFO (1 << 4) + +static const char serial21285_name[] = "Footbridge UART"; + +/* + * We only need 2 bits of data, so instead of creating a whole structure for + * this, use bits of the private_data pointer of the uart port structure. + */ +#define tx_enabled_bit 0 +#define rx_enabled_bit 1 + +static bool is_enabled(struct uart_port *port, int bit) +{ + unsigned long *private_data = (unsigned long *)&port->private_data; + + if (test_bit(bit, private_data)) + return true; + return false; +} + +static void enable(struct uart_port *port, int bit) +{ + unsigned long *private_data = (unsigned long *)&port->private_data; + + set_bit(bit, private_data); +} + +static void disable(struct uart_port *port, int bit) +{ + unsigned long *private_data = (unsigned long *)&port->private_data; + + clear_bit(bit, private_data); +} + +#define is_tx_enabled(port) is_enabled(port, tx_enabled_bit) +#define tx_enable(port) enable(port, tx_enabled_bit) +#define tx_disable(port) disable(port, tx_enabled_bit) + +#define is_rx_enabled(port) is_enabled(port, rx_enabled_bit) +#define rx_enable(port) enable(port, rx_enabled_bit) +#define rx_disable(port) disable(port, rx_enabled_bit) + +/* + * The documented expression for selecting the divisor is: + * BAUD_BASE / baud - 1 + * However, typically BAUD_BASE is not divisible by baud, so + * we want to select the divisor that gives us the minimum + * error. 
Therefore, we want: + * int(BAUD_BASE / baud - 0.5) -> + * int(BAUD_BASE / baud - (baud >> 1) / baud) -> + * int((BAUD_BASE - (baud >> 1)) / baud) + */ + +static void serial21285_stop_tx(struct uart_port *port) +{ + if (is_tx_enabled(port)) { + disable_irq_nosync(IRQ_CONTX); + tx_disable(port); + } +} + +static void serial21285_start_tx(struct uart_port *port) +{ + if (!is_tx_enabled(port)) { + enable_irq(IRQ_CONTX); + tx_enable(port); + } +} + +static void serial21285_stop_rx(struct uart_port *port) +{ + if (is_rx_enabled(port)) { + disable_irq_nosync(IRQ_CONRX); + rx_disable(port); + } +} + +static irqreturn_t serial21285_rx_chars(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned int status, ch, flag, rxs, max_count = 256; + + status = *CSR_UARTFLG; + while (!(status & 0x10) && max_count--) { + ch = *CSR_UARTDR; + flag = TTY_NORMAL; + port->icount.rx++; + + rxs = *CSR_RXSTAT | RXSTAT_DUMMY_READ; + if (unlikely(rxs & RXSTAT_ANYERR)) { + if (rxs & RXSTAT_PARITY) + port->icount.parity++; + else if (rxs & RXSTAT_FRAME) + port->icount.frame++; + if (rxs & RXSTAT_OVERRUN) + port->icount.overrun++; + + rxs &= port->read_status_mask; + + if (rxs & RXSTAT_PARITY) + flag = TTY_PARITY; + else if (rxs & RXSTAT_FRAME) + flag = TTY_FRAME; + } + + uart_insert_char(port, rxs, RXSTAT_OVERRUN, ch, flag); + + status = *CSR_UARTFLG; + } + tty_flip_buffer_push(&port->state->port); + + return IRQ_HANDLED; +} + +static irqreturn_t serial21285_tx_chars(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct circ_buf *xmit = &port->state->xmit; + int count = 256; + + if (port->x_char) { + *CSR_UARTDR = port->x_char; + port->icount.tx++; + port->x_char = 0; + goto out; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + serial21285_stop_tx(port); + goto out; + } + + do { + *CSR_UARTDR = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0 && !(*CSR_UARTFLG & 0x20)); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + serial21285_stop_tx(port); + + out: + return IRQ_HANDLED; +} + +static unsigned int serial21285_tx_empty(struct uart_port *port) +{ + return (*CSR_UARTFLG & 8) ? 
0 : TIOCSER_TEMT; +} + +/* no modem control lines */ +static unsigned int serial21285_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +static void serial21285_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static void serial21285_break_ctl(struct uart_port *port, int break_state) +{ + unsigned long flags; + unsigned int h_lcr; + + spin_lock_irqsave(&port->lock, flags); + h_lcr = *CSR_H_UBRLCR; + if (break_state) + h_lcr |= H_UBRLCR_BREAK; + else + h_lcr &= ~H_UBRLCR_BREAK; + *CSR_H_UBRLCR = h_lcr; + spin_unlock_irqrestore(&port->lock, flags); +} + +static int serial21285_startup(struct uart_port *port) +{ + int ret; + + tx_enable(port); + rx_enable(port); + + ret = request_irq(IRQ_CONRX, serial21285_rx_chars, 0, + serial21285_name, port); + if (ret == 0) { + ret = request_irq(IRQ_CONTX, serial21285_tx_chars, 0, + serial21285_name, port); + if (ret) + free_irq(IRQ_CONRX, port); + } + + return ret; +} + +static void serial21285_shutdown(struct uart_port *port) +{ + free_irq(IRQ_CONTX, port); + free_irq(IRQ_CONRX, port); +} + +static void +serial21285_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + unsigned int baud, quot, h_lcr, b; + + /* + * We don't support modem control lines. + */ + termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR); + termios->c_cflag |= CLOCAL; + + /* + * We don't support BREAK character recognition. + */ + termios->c_iflag &= ~(IGNBRK | BRKINT); + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = uart_get_divisor(port, baud); + b = port->uartclk / (16 * quot); + tty_termios_encode_baud_rate(termios, b, b); + + switch (termios->c_cflag & CSIZE) { + case CS5: + h_lcr = 0x00; + break; + case CS6: + h_lcr = 0x20; + break; + case CS7: + h_lcr = 0x40; + break; + default: /* CS8 */ + h_lcr = 0x60; + break; + } + + if (termios->c_cflag & CSTOPB) + h_lcr |= H_UBRLCR_STOPB; + if (termios->c_cflag & PARENB) { + h_lcr |= H_UBRLCR_PARENB; + if (!(termios->c_cflag & PARODD)) + h_lcr |= H_UBRLCR_PAREVN; + } + + if (port->fifosize) + h_lcr |= H_UBRLCR_FIFO; + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* + * Which character status flags are we interested in? + */ + port->read_status_mask = RXSTAT_OVERRUN; + if (termios->c_iflag & INPCK) + port->read_status_mask |= RXSTAT_FRAME | RXSTAT_PARITY; + + /* + * Which character status flags should we ignore? + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= RXSTAT_FRAME | RXSTAT_PARITY; + if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR) + port->ignore_status_mask |= RXSTAT_OVERRUN; + + /* + * Ignore all characters if CREAD is not set. + */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= RXSTAT_DUMMY_READ; + + quot -= 1; + + *CSR_UARTCON = 0; + *CSR_L_UBRLCR = quot & 0xff; + *CSR_M_UBRLCR = (quot >> 8) & 0x0f; + *CSR_H_UBRLCR = h_lcr; + *CSR_UARTCON = 1; + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *serial21285_type(struct uart_port *port) +{ + return port->type == PORT_21285 ? 
"DC21285" : NULL; +} + +static void serial21285_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, 32); +} + +static int serial21285_request_port(struct uart_port *port) +{ + return request_mem_region(port->mapbase, 32, serial21285_name) + != NULL ? 0 : -EBUSY; +} + +static void serial21285_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE && serial21285_request_port(port) == 0) + port->type = PORT_21285; +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). + */ +static int serial21285_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_21285) + ret = -EINVAL; + if (ser->irq <= 0) + ret = -EINVAL; + if (ser->baud_base != port->uartclk / 16) + ret = -EINVAL; + return ret; +} + +static const struct uart_ops serial21285_ops = { + .tx_empty = serial21285_tx_empty, + .get_mctrl = serial21285_get_mctrl, + .set_mctrl = serial21285_set_mctrl, + .stop_tx = serial21285_stop_tx, + .start_tx = serial21285_start_tx, + .stop_rx = serial21285_stop_rx, + .break_ctl = serial21285_break_ctl, + .startup = serial21285_startup, + .shutdown = serial21285_shutdown, + .set_termios = serial21285_set_termios, + .type = serial21285_type, + .release_port = serial21285_release_port, + .request_port = serial21285_request_port, + .config_port = serial21285_config_port, + .verify_port = serial21285_verify_port, +}; + +static struct uart_port serial21285_port = { + .mapbase = 0x42000160, + .iotype = UPIO_MEM, + .irq = 0, + .fifosize = 16, + .ops = &serial21285_ops, + .flags = UPF_BOOT_AUTOCONF, +}; + +static void serial21285_setup_ports(void) +{ + serial21285_port.uartclk = mem_fclk_21285 / 4; +} + +#ifdef CONFIG_SERIAL_21285_CONSOLE +static void serial21285_console_putchar(struct uart_port *port, unsigned char ch) +{ + while (*CSR_UARTFLG & 0x20) + barrier(); + *CSR_UARTDR = ch; +} + +static void +serial21285_console_write(struct console *co, const char *s, + unsigned int count) +{ + uart_console_write(&serial21285_port, s, count, serial21285_console_putchar); +} + +static void __init +serial21285_get_options(struct uart_port *port, int *baud, + int *parity, int *bits) +{ + if (*CSR_UARTCON == 1) { + unsigned int tmp; + + tmp = *CSR_H_UBRLCR; + switch (tmp & 0x60) { + case 0x00: + *bits = 5; + break; + case 0x20: + *bits = 6; + break; + case 0x40: + *bits = 7; + break; + default: + case 0x60: + *bits = 8; + break; + } + + if (tmp & H_UBRLCR_PARENB) { + *parity = 'o'; + if (tmp & H_UBRLCR_PAREVN) + *parity = 'e'; + } + + tmp = *CSR_L_UBRLCR | (*CSR_M_UBRLCR << 8); + + *baud = port->uartclk / (16 * (tmp + 1)); + } +} + +static int __init serial21285_console_setup(struct console *co, char *options) +{ + struct uart_port *port = &serial21285_port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
+ */ + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + serial21285_get_options(port, &baud, &parity, &bits); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver serial21285_reg; + +static struct console serial21285_console = +{ + .name = SERIAL_21285_NAME, + .write = serial21285_console_write, + .device = uart_console_device, + .setup = serial21285_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &serial21285_reg, +}; + +static int __init rs285_console_init(void) +{ + serial21285_setup_ports(); + register_console(&serial21285_console); + return 0; +} +console_initcall(rs285_console_init); + +#define SERIAL_21285_CONSOLE &serial21285_console +#else +#define SERIAL_21285_CONSOLE NULL +#endif + +static struct uart_driver serial21285_reg = { + .owner = THIS_MODULE, + .driver_name = "ttyFB", + .dev_name = "ttyFB", + .major = SERIAL_21285_MAJOR, + .minor = SERIAL_21285_MINOR, + .nr = 1, + .cons = SERIAL_21285_CONSOLE, +}; + +static int __init serial21285_init(void) +{ + int ret; + + printk(KERN_INFO "Serial: 21285 driver\n"); + + serial21285_setup_ports(); + + ret = uart_register_driver(&serial21285_reg); + if (ret == 0) + uart_add_one_port(&serial21285_reg, &serial21285_port); + + return ret; +} + +static void __exit serial21285_exit(void) +{ + uart_remove_one_port(&serial21285_reg, &serial21285_port); + uart_unregister_driver(&serial21285_reg); +} + +module_init(serial21285_init); +module_exit(serial21285_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel Footbridge (21285) serial driver"); +MODULE_ALIAS_CHARDEV(SERIAL_21285_MAJOR, SERIAL_21285_MINOR); diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h new file mode 100644 index 000000000..eeb7b43eb --- /dev/null +++ b/drivers/tty/serial/8250/8250.h @@ -0,0 +1,416 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Driver for 8250/16550-type serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright (C) 2001 Russell King. 
+ */ + +#include +#include +#include +#include + +#include "../serial_mctrl_gpio.h" + +struct uart_8250_dma { + int (*tx_dma)(struct uart_8250_port *p); + int (*rx_dma)(struct uart_8250_port *p); + void (*prepare_tx_dma)(struct uart_8250_port *p); + void (*prepare_rx_dma)(struct uart_8250_port *p); + + /* Filter function */ + dma_filter_fn fn; + /* Parameter to the filter function */ + void *rx_param; + void *tx_param; + + struct dma_slave_config rxconf; + struct dma_slave_config txconf; + + struct dma_chan *rxchan; + struct dma_chan *txchan; + + /* Device address base for DMA operations */ + phys_addr_t rx_dma_addr; + phys_addr_t tx_dma_addr; + + /* DMA address of the buffer in memory */ + dma_addr_t rx_addr; + dma_addr_t tx_addr; + + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; + + void *rx_buf; + + size_t rx_size; + size_t tx_size; + + unsigned char tx_running; + unsigned char tx_err; + unsigned char rx_running; +}; + +struct old_serial_port { + unsigned int uart; + unsigned int baud_base; + unsigned int port; + unsigned int irq; + upf_t flags; + unsigned char io_type; + unsigned char __iomem *iomem_base; + unsigned short iomem_reg_shift; +}; + +struct serial8250_config { + const char *name; + unsigned short fifo_size; + unsigned short tx_loadsz; + unsigned char fcr; + unsigned char rxtrig_bytes[UART_FCR_R_TRIG_MAX_STATE]; + unsigned int flags; +}; + +#define UART_CAP_FIFO BIT(8) /* UART has FIFO */ +#define UART_CAP_EFR BIT(9) /* UART has EFR */ +#define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */ +#define UART_CAP_AFE BIT(11) /* MCR-based hw flow control */ +#define UART_CAP_UUE BIT(12) /* UART needs IER bit 6 set (Xscale) */ +#define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */ +#define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */ +#define UART_CAP_RPM BIT(15) /* Runtime PM is active while idle */ +#define UART_CAP_IRDA BIT(16) /* UART supports IrDA line discipline */ +#define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks: + * STOP PARITY EPAR SPAR WLEN5 WLEN6 + */ +#define UART_CAP_NOTEMT BIT(18) /* UART without interrupt on TEMT available */ + +#define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */ +#define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */ +#define UART_BUG_NOMSR BIT(2) /* UART has buggy MSR status bits (Au1x00) */ +#define UART_BUG_THRE BIT(3) /* UART has buggy THRE reassertion */ +#define UART_BUG_TXRACE BIT(5) /* UART Tx fails to set remote DR */ + + +#ifdef CONFIG_SERIAL_8250_SHARE_IRQ +#define SERIAL8250_SHARE_IRQS 1 +#else +#define SERIAL8250_SHARE_IRQS 0 +#endif + +#define SERIAL8250_PORT_FLAGS(_base, _irq, _flags) \ + { \ + .iobase = _base, \ + .irq = _irq, \ + .uartclk = 1843200, \ + .iotype = UPIO_PORT, \ + .flags = UPF_BOOT_AUTOCONF | (_flags), \ + } + +#define SERIAL8250_PORT(_base, _irq) SERIAL8250_PORT_FLAGS(_base, _irq, 0) + + +static inline int serial_in(struct uart_8250_port *up, int offset) +{ + return up->port.serial_in(&up->port, offset); +} + +static inline void serial_out(struct uart_8250_port *up, int offset, int value) +{ + up->port.serial_out(&up->port, offset, value); +} + +/** + * serial_lsr_in - Read LSR register and preserve flags across reads + * @up: uart 8250 port + * + * Read LSR register and handle saving non-preserved flags across reads. + * The flags that are not preserved across reads are stored into + * up->lsr_saved_flags. + * + * Returns LSR value or'ed with the preserved flags (if any). 
+ */ +static inline u16 serial_lsr_in(struct uart_8250_port *up) +{ + u16 lsr = up->lsr_saved_flags; + + lsr |= serial_in(up, UART_LSR); + up->lsr_saved_flags = lsr & up->lsr_save_mask; + + return lsr; +} + +/* + * For the 16C950 + */ +static void serial_icr_write(struct uart_8250_port *up, int offset, int value) +{ + serial_out(up, UART_SCR, offset); + serial_out(up, UART_ICR, value); +} + +static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up, + int offset) +{ + unsigned int value; + + serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD); + serial_out(up, UART_SCR, offset); + value = serial_in(up, UART_ICR); + serial_icr_write(up, UART_ACR, up->acr); + + return value; +} + +void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p); + +static inline int serial_dl_read(struct uart_8250_port *up) +{ + return up->dl_read(up); +} + +static inline void serial_dl_write(struct uart_8250_port *up, int value) +{ + up->dl_write(up, value); +} + +static inline bool serial8250_set_THRI(struct uart_8250_port *up) +{ + if (up->ier & UART_IER_THRI) + return false; + up->ier |= UART_IER_THRI; + serial_out(up, UART_IER, up->ier); + return true; +} + +static inline bool serial8250_clear_THRI(struct uart_8250_port *up) +{ + if (!(up->ier & UART_IER_THRI)) + return false; + up->ier &= ~UART_IER_THRI; + serial_out(up, UART_IER, up->ier); + return true; +} + +struct uart_8250_port *serial8250_get_port(int line); + +void serial8250_rpm_get(struct uart_8250_port *p); +void serial8250_rpm_put(struct uart_8250_port *p); + +void serial8250_rpm_get_tx(struct uart_8250_port *p); +void serial8250_rpm_put_tx(struct uart_8250_port *p); + +int serial8250_em485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485); +void serial8250_em485_start_tx(struct uart_8250_port *p); +void serial8250_em485_stop_tx(struct uart_8250_port *p); +void serial8250_em485_destroy(struct uart_8250_port *p); +extern struct serial_rs485 serial8250_em485_supported; + +/* MCR <-> TIOCM conversion */ +static inline int serial8250_TIOCM_to_MCR(int tiocm) +{ + int mcr = 0; + + if (tiocm & TIOCM_RTS) + mcr |= UART_MCR_RTS; + if (tiocm & TIOCM_DTR) + mcr |= UART_MCR_DTR; + if (tiocm & TIOCM_OUT1) + mcr |= UART_MCR_OUT1; + if (tiocm & TIOCM_OUT2) + mcr |= UART_MCR_OUT2; + if (tiocm & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + + return mcr; +} + +static inline int serial8250_MCR_to_TIOCM(int mcr) +{ + int tiocm = 0; + + if (mcr & UART_MCR_RTS) + tiocm |= TIOCM_RTS; + if (mcr & UART_MCR_DTR) + tiocm |= TIOCM_DTR; + if (mcr & UART_MCR_OUT1) + tiocm |= TIOCM_OUT1; + if (mcr & UART_MCR_OUT2) + tiocm |= TIOCM_OUT2; + if (mcr & UART_MCR_LOOP) + tiocm |= TIOCM_LOOP; + + return tiocm; +} + +/* MSR <-> TIOCM conversion */ +static inline int serial8250_MSR_to_TIOCM(int msr) +{ + int tiocm = 0; + + if (msr & UART_MSR_DCD) + tiocm |= TIOCM_CAR; + if (msr & UART_MSR_RI) + tiocm |= TIOCM_RNG; + if (msr & UART_MSR_DSR) + tiocm |= TIOCM_DSR; + if (msr & UART_MSR_CTS) + tiocm |= TIOCM_CTS; + + return tiocm; +} + +static inline void serial8250_out_MCR(struct uart_8250_port *up, int value) +{ + serial_out(up, UART_MCR, value); + + if (up->gpios) + mctrl_gpio_set(up->gpios, serial8250_MCR_to_TIOCM(value)); +} + +static inline int serial8250_in_MCR(struct uart_8250_port *up) +{ + int mctrl; + + mctrl = serial_in(up, UART_MCR); + + if (up->gpios) { + unsigned int mctrl_gpio = 0; + + mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio); + mctrl |= serial8250_TIOCM_to_MCR(mctrl_gpio); + } + + return mctrl; +} 
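An aside on the serial_lsr_in() helper defined above: the 16550 LSR error bits (overrun, break, and so on) are cleared by the hardware as a side effect of reading the register, so the helper ORs each raw read with the previously saved bits and re-latches whatever lsr_save_mask marks as non-preserved. The following is a minimal user-space sketch of that accumulation logic, not part of the patch: the register read is simulated, the bit values are the standard 16550 LSR definitions, and only the masking mirrors the kernel helper.

#include <stdio.h>

#define LSR_OE   0x02          /* overrun error  (cleared on read) */
#define LSR_BI   0x10          /* break detected (cleared on read) */
#define LSR_THRE 0x20          /* TX holding register empty        */

static unsigned int saved_flags;                       /* models up->lsr_saved_flags */
static const unsigned int save_mask = LSR_OE | LSR_BI; /* models up->lsr_save_mask   */

/* OR the raw value with the previously latched bits, then latch again. */
static unsigned int lsr_in(unsigned int raw_lsr)
{
	unsigned int lsr = saved_flags | raw_lsr;

	saved_flags = lsr & save_mask;
	return lsr;
}

int main(void)
{
	/* First read reports a break; the hardware then drops BI. */
	printf("0x%02x\n", lsr_in(LSR_BI | LSR_THRE));  /* prints 0x30 */
	/* A later read sees only THRE, yet BI is still reported.  */
	printf("0x%02x\n", lsr_in(LSR_THRE));           /* prints 0x30 */
	return 0;
}

The second read still shows the break bit, which is exactly why callers of serial_lsr_in() do not lose error conditions that the hardware has already cleared.
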
+ +bool alpha_jensen(void); +void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl); + +#ifdef CONFIG_SERIAL_8250_PNP +int serial8250_pnp_init(void); +void serial8250_pnp_exit(void); +#else +static inline int serial8250_pnp_init(void) { return 0; } +static inline void serial8250_pnp_exit(void) { } +#endif + +#ifdef CONFIG_SERIAL_8250_FINTEK +int fintek_8250_probe(struct uart_8250_port *uart); +#else +static inline int fintek_8250_probe(struct uart_8250_port *uart) { return 0; } +#endif + +#ifdef CONFIG_ARCH_OMAP1 +#include +static inline int is_omap1_8250(struct uart_8250_port *pt) +{ + int res; + + switch (pt->port.mapbase) { + case OMAP1_UART1_BASE: + case OMAP1_UART2_BASE: + case OMAP1_UART3_BASE: + res = 1; + break; + default: + res = 0; + break; + } + + return res; +} + +static inline int is_omap1510_8250(struct uart_8250_port *pt) +{ + if (!cpu_is_omap1510()) + return 0; + + return is_omap1_8250(pt); +} +#else +static inline int is_omap1_8250(struct uart_8250_port *pt) +{ + return 0; +} +static inline int is_omap1510_8250(struct uart_8250_port *pt) +{ + return 0; +} +#endif + +#ifdef CONFIG_SERIAL_8250_DMA +extern int serial8250_tx_dma(struct uart_8250_port *); +extern int serial8250_rx_dma(struct uart_8250_port *); +extern void serial8250_rx_dma_flush(struct uart_8250_port *); +extern int serial8250_request_dma(struct uart_8250_port *); +extern void serial8250_release_dma(struct uart_8250_port *); + +static inline void serial8250_do_prepare_tx_dma(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + if (dma->prepare_tx_dma) + dma->prepare_tx_dma(p); +} + +static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + if (dma->prepare_rx_dma) + dma->prepare_rx_dma(p); +} + +static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + return dma && dma->tx_running; +} +#else +static inline int serial8250_tx_dma(struct uart_8250_port *p) +{ + return -1; +} +static inline int serial8250_rx_dma(struct uart_8250_port *p) +{ + return -1; +} +static inline void serial8250_rx_dma_flush(struct uart_8250_port *p) { } +static inline int serial8250_request_dma(struct uart_8250_port *p) +{ + return -1; +} +static inline void serial8250_release_dma(struct uart_8250_port *p) { } + +static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) +{ + return false; +} +#endif + +static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) +{ + unsigned char status; + + status = serial_in(up, 0x04); /* EXCR2 */ +#define PRESL(x) ((x) & 0x30) + if (PRESL(status) == 0x10) { + /* already in high speed mode */ + return 0; + } else { + status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ + status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ + serial_out(up, 0x04, status); + } + return 1; +} + +static inline int serial_index(struct uart_port *port) +{ + return port->minor - 64; +} diff --git a/drivers/tty/serial/8250/8250_accent.c b/drivers/tty/serial/8250/8250_accent.c new file mode 100644 index 000000000..1691f1a57 --- /dev/null +++ b/drivers/tty/serial/8250/8250_accent.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005 Russell King. 
+ * Data taken from include/asm-i386/serial.h + */ +#include +#include +#include + +#include "8250.h" + +static struct plat_serial8250_port accent_data[] = { + SERIAL8250_PORT(0x330, 4), + SERIAL8250_PORT(0x338, 4), + { }, +}; + +static struct platform_device accent_device = { + .name = "serial8250", + .id = PLAT8250_DEV_ACCENT, + .dev = { + .platform_data = accent_data, + }, +}; + +static int __init accent_init(void) +{ + return platform_device_register(&accent_device); +} + +module_init(accent_init); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("8250 serial probe module for Accent Async cards"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_acorn.c b/drivers/tty/serial/8250/8250_acorn.c new file mode 100644 index 000000000..758c4aa20 --- /dev/null +++ b/drivers/tty/serial/8250/8250_acorn.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/drivers/serial/acorn.c + * + * Copyright (C) 1996-2003 Russell King. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "8250.h" + +#define MAX_PORTS 3 + +struct serial_card_type { + unsigned int num_ports; + unsigned int uartclk; + unsigned int type; + unsigned int offset[MAX_PORTS]; +}; + +struct serial_card_info { + unsigned int num_ports; + int ports[MAX_PORTS]; + void __iomem *vaddr; +}; + +static int +serial_card_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + struct serial_card_info *info; + struct serial_card_type *type = id->data; + struct uart_8250_port uart; + unsigned long bus_addr; + unsigned int i; + + info = kzalloc(sizeof(struct serial_card_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->num_ports = type->num_ports; + + bus_addr = ecard_resource_start(ec, type->type); + info->vaddr = ecardm_iomap(ec, type->type, 0, 0); + if (!info->vaddr) { + kfree(info); + return -ENOMEM; + } + + ecard_set_drvdata(ec, info); + + memset(&uart, 0, sizeof(struct uart_8250_port)); + uart.port.irq = ec->irq; + uart.port.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; + uart.port.uartclk = type->uartclk; + uart.port.iotype = UPIO_MEM; + uart.port.regshift = 2; + uart.port.dev = &ec->dev; + + for (i = 0; i < info->num_ports; i++) { + uart.port.membase = info->vaddr + type->offset[i]; + uart.port.mapbase = bus_addr + type->offset[i]; + + info->ports[i] = serial8250_register_8250_port(&uart); + } + + return 0; +} + +static void serial_card_remove(struct expansion_card *ec) +{ + struct serial_card_info *info = ecard_get_drvdata(ec); + int i; + + ecard_set_drvdata(ec, NULL); + + for (i = 0; i < info->num_ports; i++) + if (info->ports[i] > 0) + serial8250_unregister_port(info->ports[i]); + + kfree(info); +} + +static struct serial_card_type atomwide_type = { + .num_ports = 3, + .uartclk = 7372800, + .type = ECARD_RES_IOCSLOW, + .offset = { 0x2800, 0x2400, 0x2000 }, +}; + +static struct serial_card_type serport_type = { + .num_ports = 2, + .uartclk = 3686400, + .type = ECARD_RES_IOCSLOW, + .offset = { 0x2000, 0x2020 }, +}; + +static const struct ecard_id serial_cids[] = { + { MANU_ATOMWIDE, PROD_ATOMWIDE_3PSERIAL, &atomwide_type }, + { MANU_SERPORT, PROD_SERPORT_DSPORT, &serport_type }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver serial_card_driver = { + .probe = serial_card_probe, + .remove = serial_card_remove, + .id_table = serial_cids, + .drv = { + .name = "8250_acorn", + }, +}; + +static int __init serial_card_init(void) +{ + return ecard_register_driver(&serial_card_driver); +} + +static 
void __exit serial_card_exit(void) +{ + ecard_remove_driver(&serial_card_driver); +} + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("Acorn 8250-compatible serial port expansion card driver"); +MODULE_LICENSE("GPL"); + +module_init(serial_card_init); +module_exit(serial_card_exit); diff --git a/drivers/tty/serial/8250/8250_alpha.c b/drivers/tty/serial/8250/8250_alpha.c new file mode 100644 index 000000000..58e70328a --- /dev/null +++ b/drivers/tty/serial/8250/8250_alpha.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include "8250.h" + +bool alpha_jensen(void) +{ + return !strcmp(alpha_mv.vector_name, "Jensen"); +} + +void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* + * Digital did something really horribly wrong with the OUT1 and OUT2 + * lines on Alpha Jensen. The failure mode is that if either is + * cleared, the machine locks up with endless interrupts. + */ + mctrl |= TIOCM_OUT1 | TIOCM_OUT2; + + serial8250_do_set_mctrl(port, mctrl); +} diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c new file mode 100644 index 000000000..9d2a78567 --- /dev/null +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Serial Port driver for Aspeed VUART device + * + * Copyright (C) 2016 Jeremy Kerr , IBM Corp. + * Copyright (C) 2006 Arnd Bergmann , IBM Corp. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +#define ASPEED_VUART_GCRA 0x20 +#define ASPEED_VUART_GCRA_VUART_EN BIT(0) +#define ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY BIT(1) +#define ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5) +#define ASPEED_VUART_GCRB 0x24 +#define ASPEED_VUART_GCRB_HOST_SIRQ_MASK GENMASK(7, 4) +#define ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT 4 +#define ASPEED_VUART_ADDRL 0x28 +#define ASPEED_VUART_ADDRH 0x2c + +#define ASPEED_VUART_DEFAULT_LPC_ADDR 0x3f8 +#define ASPEED_VUART_DEFAULT_SIRQ 4 +#define ASPEED_VUART_DEFAULT_SIRQ_POLARITY IRQ_TYPE_LEVEL_LOW + +struct aspeed_vuart { + struct device *dev; + struct clk *clk; + int line; + struct timer_list unthrottle_timer; + struct uart_8250_port *port; +}; + +/* + * If we fill the tty flip buffers, we throttle the data ready interrupt + * to prevent dropped characters. This timeout defines how long we wait + * to (conditionally, depending on buffer state) unthrottle. + */ +static const int unthrottle_timeout = HZ/10; + +/* + * The VUART is basically two UART 'front ends' connected by their FIFO + * (no actual serial line in between). One is on the BMC side (management + * controller) and one is on the host CPU side. + * + * It allows the BMC to provide to the host a "UART" that pipes into + * the BMC itself and can then be turned by the BMC into a network console + * of some sort for example. + * + * This driver is for the BMC side. The sysfs files allow the BMC + * userspace which owns the system configuration policy, to specify + * at what IO port and interrupt number the host side will appear + * to the host on the Host <-> BMC LPC bus. It could be different on a + * different system (though most of them use 3f8/4). 
+ */ + +static inline u8 aspeed_vuart_readb(struct aspeed_vuart *vuart, u8 reg) +{ + return readb(vuart->port->port.membase + reg); +} + +static inline void aspeed_vuart_writeb(struct aspeed_vuart *vuart, u8 val, u8 reg) +{ + writeb(val, vuart->port->port.membase + reg); +} + +static ssize_t lpc_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct aspeed_vuart *vuart = dev_get_drvdata(dev); + u16 addr; + + addr = (aspeed_vuart_readb(vuart, ASPEED_VUART_ADDRH) << 8) | + (aspeed_vuart_readb(vuart, ASPEED_VUART_ADDRL)); + + return sysfs_emit(buf, "0x%x\n", addr); +} + +static int aspeed_vuart_set_lpc_address(struct aspeed_vuart *vuart, u32 addr) +{ + if (addr > U16_MAX) + return -EINVAL; + + aspeed_vuart_writeb(vuart, addr >> 8, ASPEED_VUART_ADDRH); + aspeed_vuart_writeb(vuart, addr >> 0, ASPEED_VUART_ADDRL); + + return 0; +} + +static ssize_t lpc_address_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct aspeed_vuart *vuart = dev_get_drvdata(dev); + u32 val; + int err; + + err = kstrtou32(buf, 0, &val); + if (err) + return err; + + err = aspeed_vuart_set_lpc_address(vuart, val); + return err ? : count; +} + +static DEVICE_ATTR_RW(lpc_address); + +static ssize_t sirq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct aspeed_vuart *vuart = dev_get_drvdata(dev); + u8 reg; + + reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRB); + reg &= ASPEED_VUART_GCRB_HOST_SIRQ_MASK; + reg >>= ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT; + + return sysfs_emit(buf, "%u\n", reg); +} + +static int aspeed_vuart_set_sirq(struct aspeed_vuart *vuart, u32 sirq) +{ + u8 reg; + + if (sirq > (ASPEED_VUART_GCRB_HOST_SIRQ_MASK >> ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT)) + return -EINVAL; + + sirq <<= ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT; + sirq &= ASPEED_VUART_GCRB_HOST_SIRQ_MASK; + + reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRB); + reg &= ~ASPEED_VUART_GCRB_HOST_SIRQ_MASK; + reg |= sirq; + aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRB); + + return 0; +} + +static ssize_t sirq_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct aspeed_vuart *vuart = dev_get_drvdata(dev); + unsigned long val; + int err; + + err = kstrtoul(buf, 0, &val); + if (err) + return err; + + err = aspeed_vuart_set_sirq(vuart, val); + return err ? : count; +} + +static DEVICE_ATTR_RW(sirq); + +static ssize_t sirq_polarity_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct aspeed_vuart *vuart = dev_get_drvdata(dev); + u8 reg; + + reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA); + reg &= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY; + + return sysfs_emit(buf, "%u\n", reg ? 
1 : 0); +} + +static void aspeed_vuart_set_sirq_polarity(struct aspeed_vuart *vuart, + bool polarity) +{ + u8 reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA); + + if (polarity) + reg |= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY; + else + reg &= ~ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY; + + aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRA); +} + +static ssize_t sirq_polarity_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct aspeed_vuart *vuart = dev_get_drvdata(dev); + unsigned long val; + int err; + + err = kstrtoul(buf, 0, &val); + if (err) + return err; + + aspeed_vuart_set_sirq_polarity(vuart, val != 0); + + return count; +} + +static DEVICE_ATTR_RW(sirq_polarity); + +static struct attribute *aspeed_vuart_attrs[] = { + &dev_attr_sirq.attr, + &dev_attr_sirq_polarity.attr, + &dev_attr_lpc_address.attr, + NULL, +}; + +static const struct attribute_group aspeed_vuart_attr_group = { + .attrs = aspeed_vuart_attrs, +}; + +static void aspeed_vuart_set_enabled(struct aspeed_vuart *vuart, bool enabled) +{ + u8 reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA); + + if (enabled) + reg |= ASPEED_VUART_GCRA_VUART_EN; + else + reg &= ~ASPEED_VUART_GCRA_VUART_EN; + + aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRA); +} + +static void aspeed_vuart_set_host_tx_discard(struct aspeed_vuart *vuart, + bool discard) +{ + u8 reg; + + reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA); + + /* If the DISABLE_HOST_TX_DISCARD bit is set, discard is disabled */ + if (!discard) + reg |= ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD; + else + reg &= ~ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD; + + aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRA); +} + +static int aspeed_vuart_startup(struct uart_port *uart_port) +{ + struct uart_8250_port *uart_8250_port = up_to_u8250p(uart_port); + struct aspeed_vuart *vuart = uart_8250_port->port.private_data; + int rc; + + rc = serial8250_do_startup(uart_port); + if (rc) + return rc; + + aspeed_vuart_set_host_tx_discard(vuart, false); + + return 0; +} + +static void aspeed_vuart_shutdown(struct uart_port *uart_port) +{ + struct uart_8250_port *uart_8250_port = up_to_u8250p(uart_port); + struct aspeed_vuart *vuart = uart_8250_port->port.private_data; + + aspeed_vuart_set_host_tx_discard(vuart, true); + + serial8250_do_shutdown(uart_port); +} + +static void __aspeed_vuart_set_throttle(struct uart_8250_port *up, + bool throttle) +{ + unsigned char irqs = UART_IER_RLSI | UART_IER_RDI; + + up->ier &= ~irqs; + if (!throttle) + up->ier |= irqs; + serial_out(up, UART_IER, up->ier); +} +static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + __aspeed_vuart_set_throttle(up, throttle); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void aspeed_vuart_throttle(struct uart_port *port) +{ + aspeed_vuart_set_throttle(port, true); +} + +static void aspeed_vuart_unthrottle(struct uart_port *port) +{ + aspeed_vuart_set_throttle(port, false); +} + +static void aspeed_vuart_unthrottle_exp(struct timer_list *timer) +{ + struct aspeed_vuart *vuart = from_timer(vuart, timer, unthrottle_timer); + struct uart_8250_port *up = vuart->port; + + if (!tty_buffer_space_avail(&up->port.state->port)) { + mod_timer(&vuart->unthrottle_timer, + jiffies + unthrottle_timeout); + return; + } + + aspeed_vuart_unthrottle(&up->port); +} + +/* + * Custom interrupt handler to manage finer-grained flow control. 
Although we + * have throttle/unthrottle callbacks, we've seen that the VUART device can + * deliver characters faster than the ldisc has a chance to check buffer space + * against the throttle threshold. This results in dropped characters before + * the throttle. + * + * We do this by checking for flip buffer space before RX. If we have no space, + * throttle now and schedule an unthrottle for later, once the ldisc has had + * a chance to drain the buffers. + */ +static int aspeed_vuart_handle_irq(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int iir, lsr; + unsigned long flags; + unsigned int space, count; + + iir = serial_port_in(port, UART_IIR); + + if (iir & UART_IIR_NO_INT) + return 0; + + spin_lock_irqsave(&port->lock, flags); + + lsr = serial_port_in(port, UART_LSR); + + if (lsr & (UART_LSR_DR | UART_LSR_BI)) { + space = tty_buffer_space_avail(&port->state->port); + + if (!space) { + /* throttle and schedule an unthrottle later */ + struct aspeed_vuart *vuart = port->private_data; + __aspeed_vuart_set_throttle(up, true); + + if (!timer_pending(&vuart->unthrottle_timer)) + mod_timer(&vuart->unthrottle_timer, + jiffies + unthrottle_timeout); + + } else { + count = min(space, 256U); + + do { + serial8250_read_char(up, lsr); + lsr = serial_in(up, UART_LSR); + if (--count == 0) + break; + } while (lsr & (UART_LSR_DR | UART_LSR_BI)); + + tty_flip_buffer_push(&port->state->port); + } + } + + serial8250_modem_status(up); + if (lsr & UART_LSR_THRE) + serial8250_tx_chars(up); + + uart_unlock_and_check_sysrq_irqrestore(port, flags); + + return 1; +} + +static void aspeed_vuart_auto_configure_sirq_polarity( + struct aspeed_vuart *vuart, struct device_node *syscon_np, + u32 reg_offset, u32 reg_mask) +{ + struct regmap *regmap; + u32 value; + + regmap = syscon_node_to_regmap(syscon_np); + if (IS_ERR(regmap)) { + dev_warn(vuart->dev, + "could not get regmap for aspeed,sirq-polarity-sense\n"); + return; + } + if (regmap_read(regmap, reg_offset, &value)) { + dev_warn(vuart->dev, "could not read hw strap table\n"); + return; + } + + aspeed_vuart_set_sirq_polarity(vuart, (value & reg_mask) == 0); +} + +static int aspeed_vuart_map_irq_polarity(u32 dt) +{ + switch (dt) { + case IRQ_TYPE_LEVEL_LOW: + return 0; + case IRQ_TYPE_LEVEL_HIGH: + return 1; + default: + return -EINVAL; + } +} + +static int aspeed_vuart_probe(struct platform_device *pdev) +{ + struct of_phandle_args sirq_polarity_sense_args; + struct uart_8250_port port; + struct aspeed_vuart *vuart; + struct device_node *np; + struct resource *res; + u32 clk, prop, sirq[2]; + int rc, sirq_polarity; + + np = pdev->dev.of_node; + + vuart = devm_kzalloc(&pdev->dev, sizeof(*vuart), GFP_KERNEL); + if (!vuart) + return -ENOMEM; + + vuart->dev = &pdev->dev; + timer_setup(&vuart->unthrottle_timer, aspeed_vuart_unthrottle_exp, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + memset(&port, 0, sizeof(port)); + port.port.private_data = vuart; + port.port.mapbase = res->start; + port.port.mapsize = resource_size(res); + port.port.startup = aspeed_vuart_startup; + port.port.shutdown = aspeed_vuart_shutdown; + port.port.throttle = aspeed_vuart_throttle; + port.port.unthrottle = aspeed_vuart_unthrottle; + port.port.status = UPSTAT_SYNC_FIFO; + port.port.dev = &pdev->dev; + port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); + port.bugs |= UART_BUG_TXRACE; + + rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group); + if (rc < 0) + return rc; + + if 
(of_property_read_u32(np, "clock-frequency", &clk)) { + vuart->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(vuart->clk)) { + dev_warn(&pdev->dev, + "clk or clock-frequency not defined\n"); + rc = PTR_ERR(vuart->clk); + goto err_sysfs_remove; + } + + rc = clk_prepare_enable(vuart->clk); + if (rc < 0) + goto err_sysfs_remove; + + clk = clk_get_rate(vuart->clk); + } + + /* If current-speed was set, then try not to change it. */ + if (of_property_read_u32(np, "current-speed", &prop) == 0) + port.port.custom_divisor = clk / (16 * prop); + + /* Check for shifted address mapping */ + if (of_property_read_u32(np, "reg-offset", &prop) == 0) + port.port.mapbase += prop; + + /* Check for registers offset within the devices address range */ + if (of_property_read_u32(np, "reg-shift", &prop) == 0) + port.port.regshift = prop; + + /* Check for fifo size */ + if (of_property_read_u32(np, "fifo-size", &prop) == 0) + port.port.fifosize = prop; + + /* Check for a fixed line number */ + rc = of_alias_get_id(np, "serial"); + if (rc >= 0) + port.port.line = rc; + + port.port.irq = irq_of_parse_and_map(np, 0); + port.port.handle_irq = aspeed_vuart_handle_irq; + port.port.iotype = UPIO_MEM; + port.port.type = PORT_ASPEED_VUART; + port.port.uartclk = clk; + port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP + | UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST; + + if (of_property_read_bool(np, "no-loopback-test")) + port.port.flags |= UPF_SKIP_TEST; + + if (port.port.fifosize) + port.capabilities = UART_CAP_FIFO; + + if (of_property_read_bool(np, "auto-flow-control")) + port.capabilities |= UART_CAP_AFE; + + rc = serial8250_register_8250_port(&port); + if (rc < 0) + goto err_clk_disable; + + vuart->line = rc; + vuart->port = serial8250_get_port(vuart->line); + + rc = of_parse_phandle_with_fixed_args( + np, "aspeed,sirq-polarity-sense", 2, 0, + &sirq_polarity_sense_args); + if (rc < 0) { + dev_dbg(&pdev->dev, + "aspeed,sirq-polarity-sense property not found\n"); + } else { + aspeed_vuart_auto_configure_sirq_polarity( + vuart, sirq_polarity_sense_args.np, + sirq_polarity_sense_args.args[0], + BIT(sirq_polarity_sense_args.args[1])); + of_node_put(sirq_polarity_sense_args.np); + } + + rc = of_property_read_u32(np, "aspeed,lpc-io-reg", &prop); + if (rc < 0) + prop = ASPEED_VUART_DEFAULT_LPC_ADDR; + + rc = aspeed_vuart_set_lpc_address(vuart, prop); + if (rc < 0) { + dev_err(&pdev->dev, "invalid value in aspeed,lpc-io-reg property\n"); + goto err_clk_disable; + } + + rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", sirq, 2); + if (rc < 0) { + sirq[0] = ASPEED_VUART_DEFAULT_SIRQ; + sirq[1] = ASPEED_VUART_DEFAULT_SIRQ_POLARITY; + } + + rc = aspeed_vuart_set_sirq(vuart, sirq[0]); + if (rc < 0) { + dev_err(&pdev->dev, "invalid sirq number in aspeed,lpc-interrupts property\n"); + goto err_clk_disable; + } + + sirq_polarity = aspeed_vuart_map_irq_polarity(sirq[1]); + if (sirq_polarity < 0) { + dev_err(&pdev->dev, "invalid sirq polarity in aspeed,lpc-interrupts property\n"); + rc = sirq_polarity; + goto err_clk_disable; + } + + aspeed_vuart_set_sirq_polarity(vuart, sirq_polarity); + + aspeed_vuart_set_enabled(vuart, true); + aspeed_vuart_set_host_tx_discard(vuart, true); + platform_set_drvdata(pdev, vuart); + + return 0; + +err_clk_disable: + clk_disable_unprepare(vuart->clk); + irq_dispose_mapping(port.port.irq); +err_sysfs_remove: + sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group); + return rc; +} + +static int aspeed_vuart_remove(struct platform_device *pdev) +{ + struct 
aspeed_vuart *vuart = platform_get_drvdata(pdev); + + del_timer_sync(&vuart->unthrottle_timer); + aspeed_vuart_set_enabled(vuart, false); + serial8250_unregister_port(vuart->line); + sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group); + clk_disable_unprepare(vuart->clk); + + return 0; +} + +static const struct of_device_id aspeed_vuart_table[] = { + { .compatible = "aspeed,ast2400-vuart" }, + { .compatible = "aspeed,ast2500-vuart" }, + { }, +}; + +static struct platform_driver aspeed_vuart_driver = { + .driver = { + .name = "aspeed-vuart", + .of_match_table = aspeed_vuart_table, + }, + .probe = aspeed_vuart_probe, + .remove = aspeed_vuart_remove, +}; + +module_platform_driver(aspeed_vuart_driver); + +MODULE_AUTHOR("Jeremy Kerr "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Driver for Aspeed VUART device"); diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c new file mode 100644 index 000000000..4f4502fb5 --- /dev/null +++ b/drivers/tty/serial/8250/8250_bcm2835aux.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Serial port driver for BCM2835AUX UART + * + * Copyright (C) 2016 Martin Sperl + * + * Based on 8250_lpc18xx.c: + * Copyright (C) 2015 Joachim Eastwood + * + * The bcm2835aux is capable of RTS auto flow-control, but this driver doesn't + * take advantage of it yet. When adding support, be sure not to enable it + * simultaneously to rs485. + */ + +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +#define BCM2835_AUX_UART_CNTL 8 +#define BCM2835_AUX_UART_CNTL_RXEN 0x01 /* Receiver enable */ +#define BCM2835_AUX_UART_CNTL_TXEN 0x02 /* Transmitter enable */ +#define BCM2835_AUX_UART_CNTL_AUTORTS 0x04 /* RTS set by RX fill level */ +#define BCM2835_AUX_UART_CNTL_AUTOCTS 0x08 /* CTS stops transmitter */ +#define BCM2835_AUX_UART_CNTL_RTS3 0x00 /* RTS set until 3 chars left */ +#define BCM2835_AUX_UART_CNTL_RTS2 0x10 /* RTS set until 2 chars left */ +#define BCM2835_AUX_UART_CNTL_RTS1 0x20 /* RTS set until 1 chars left */ +#define BCM2835_AUX_UART_CNTL_RTS4 0x30 /* RTS set until 4 chars left */ +#define BCM2835_AUX_UART_CNTL_RTSINV 0x40 /* Invert auto RTS polarity */ +#define BCM2835_AUX_UART_CNTL_CTSINV 0x80 /* Invert auto CTS polarity */ + +/** + * struct bcm2835aux_data - driver private data of BCM2835 auxiliary UART + * @clk: clock producer of the port's uartclk + * @line: index of the port's serial8250_ports[] entry + * @cntl: cached copy of CNTL register + */ +struct bcm2835aux_data { + struct clk *clk; + int line; + u32 cntl; +}; + +struct bcm2835_aux_serial_driver_data { + resource_size_t offset; +}; + +static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up) +{ + if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) { + struct bcm2835aux_data *data = dev_get_drvdata(up->port.dev); + + data->cntl &= ~BCM2835_AUX_UART_CNTL_RXEN; + serial_out(up, BCM2835_AUX_UART_CNTL, data->cntl); + } + + /* + * On the bcm2835aux, the MCR register contains no other + * flags besides RTS. So no need for a read-modify-write. 
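 * For contrast only (an illustrative sketch, not part of this driver):
 * on a UART whose MCR carries more than RTS, the same update would need
 * a read-modify-write along the lines of
 *
 *	u8 mcr = serial8250_in_MCR(up);
 *	mcr = rts ? (mcr | UART_MCR_RTS) : (mcr & ~UART_MCR_RTS);
 *	serial8250_out_MCR(up, mcr);
 *
 * which the mini-UART can safely skip.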
+ */ + if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND) + serial8250_out_MCR(up, 0); + else + serial8250_out_MCR(up, UART_MCR_RTS); +} + +static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up) +{ + if (up->port.rs485.flags & SER_RS485_RTS_AFTER_SEND) + serial8250_out_MCR(up, 0); + else + serial8250_out_MCR(up, UART_MCR_RTS); + + if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) { + struct bcm2835aux_data *data = dev_get_drvdata(up->port.dev); + + data->cntl |= BCM2835_AUX_UART_CNTL_RXEN; + serial_out(up, BCM2835_AUX_UART_CNTL, data->cntl); + } +} + +static int bcm2835aux_serial_probe(struct platform_device *pdev) +{ + const struct bcm2835_aux_serial_driver_data *bcm_data; + struct uart_8250_port up = { }; + struct bcm2835aux_data *data; + resource_size_t offset = 0; + struct resource *res; + unsigned int uartclk; + int ret; + + /* allocate the custom structure */ + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* initialize data */ + up.capabilities = UART_CAP_FIFO | UART_CAP_MINI; + up.port.dev = &pdev->dev; + up.port.regshift = 2; + up.port.type = PORT_16550; + up.port.iotype = UPIO_MEM; + up.port.fifosize = 8; + up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE | + UPF_SKIP_TEST | UPF_IOREMAP; + up.port.rs485_config = serial8250_em485_config; + up.port.rs485_supported = serial8250_em485_supported; + up.rs485_start_tx = bcm2835aux_rs485_start_tx; + up.rs485_stop_tx = bcm2835aux_rs485_stop_tx; + + /* initialize cached copy with power-on reset value */ + data->cntl = BCM2835_AUX_UART_CNTL_RXEN | BCM2835_AUX_UART_CNTL_TXEN; + + platform_set_drvdata(pdev, data); + + /* get the clock - this also enables the HW */ + data->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(data->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n"); + + /* get the interrupt */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + up.port.irq = ret; + + /* map the main registers */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "memory resource not found"); + return -EINVAL; + } + + bcm_data = device_get_match_data(&pdev->dev); + + /* Some UEFI implementations (e.g. tianocore/edk2 for the Raspberry Pi) + * describe the miniuart with a base address that encompasses the auxiliary + * registers shared between the miniuart and spi. + * + * This is due to historical reasons, see discussion here : + * https://edk2.groups.io/g/devel/topic/87501357#84349 + * + * We need to add the offset between the miniuart and auxiliary + * registers to get the real miniuart base address. + */ + if (bcm_data) + offset = bcm_data->offset; + + up.port.mapbase = res->start + offset; + up.port.mapsize = resource_size(res) - offset; + + /* Check for a fixed line number */ + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + if (ret >= 0) + up.port.line = ret; + + /* enable the clock as a last step */ + ret = clk_prepare_enable(data->clk); + if (ret) { + dev_err(&pdev->dev, "unable to enable uart clock - %d\n", + ret); + return ret; + } + + uartclk = clk_get_rate(data->clk); + if (!uartclk) { + ret = device_property_read_u32(&pdev->dev, "clock-frequency", &uartclk); + if (ret) { + dev_err_probe(&pdev->dev, ret, "could not get clk rate\n"); + goto dis_clk; + } + } + + /* the HW-clock divider for bcm2835aux is 8, + * but 8250 expects a divider of 16, + * so we have to multiply the actual clock by 2 + * to get identical baudrates. 
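 * Illustrative arithmetic (example figures, not taken from this file):
 * with a 48 MHz clock the hardware wants clk / (8 * baud), i.e. about 52
 * for 115200 baud. Reporting uartclk = 2 * clk makes the generic 8250
 * calculation uartclk / (16 * baud) = 96000000 / 1843200 come out at the
 * same ~52, so the two agree for any requested rate.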
+ */ + up.port.uartclk = uartclk * 2; + + /* register the port */ + ret = serial8250_register_8250_port(&up); + if (ret < 0) { + dev_err_probe(&pdev->dev, ret, "unable to register 8250 port\n"); + goto dis_clk; + } + data->line = ret; + + return 0; + +dis_clk: + clk_disable_unprepare(data->clk); + return ret; +} + +static int bcm2835aux_serial_remove(struct platform_device *pdev) +{ + struct bcm2835aux_data *data = platform_get_drvdata(pdev); + + serial8250_unregister_port(data->line); + clk_disable_unprepare(data->clk); + + return 0; +} + +static const struct bcm2835_aux_serial_driver_data bcm2835_acpi_data = { + .offset = 0x40, +}; + +static const struct of_device_id bcm2835aux_serial_match[] = { + { .compatible = "brcm,bcm2835-aux-uart" }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcm2835aux_serial_match); + +static const struct acpi_device_id bcm2835aux_serial_acpi_match[] = { + { "BCM2836", (kernel_ulong_t)&bcm2835_acpi_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, bcm2835aux_serial_acpi_match); + +static struct platform_driver bcm2835aux_serial_driver = { + .driver = { + .name = "bcm2835-aux-uart", + .of_match_table = bcm2835aux_serial_match, + .acpi_match_table = bcm2835aux_serial_acpi_match, + }, + .probe = bcm2835aux_serial_probe, + .remove = bcm2835aux_serial_remove, +}; +module_platform_driver(bcm2835aux_serial_driver); + +#ifdef CONFIG_SERIAL_8250_CONSOLE + +static int __init early_bcm2835aux_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + device->port.iotype = UPIO_MEM32; + device->port.regshift = 2; + + return early_serial8250_setup(device, NULL); +} + +OF_EARLYCON_DECLARE(bcm2835aux, "brcm,bcm2835-aux-uart", + early_bcm2835aux_setup); +#endif + +MODULE_DESCRIPTION("BCM2835 auxiliar UART driver"); +MODULE_AUTHOR("Martin Sperl "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c new file mode 100644 index 000000000..ffc7f67e2 --- /dev/null +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -0,0 +1,1247 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020, Broadcom */ +/* + * 8250-core based driver for Broadcom ns16550a UARTs + * + * This driver uses the standard 8250 driver core but adds additional + * optional features including the ability to use a baud rate clock + * mux for more accurate high speed baud rate selection and also + * an optional DMA engine. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +/* Register definitions for UART DMA block. Version 1.1 or later. 
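 * The probe code further down only enables the DMA paths when both
 * engines report a revision of at least 0x0101 (UDMA_*_REVISION_REQUIRED),
 * so the offsets below describe that generation of the block.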
*/ +#define UDMA_ARB_RX 0x00 +#define UDMA_ARB_TX 0x04 +#define UDMA_ARB_REQ 0x00000001 +#define UDMA_ARB_GRANT 0x00000002 + +#define UDMA_RX_REVISION 0x00 +#define UDMA_RX_REVISION_REQUIRED 0x00000101 +#define UDMA_RX_CTRL 0x04 +#define UDMA_RX_CTRL_BUF_CLOSE_MODE 0x00010000 +#define UDMA_RX_CTRL_MASK_WR_DONE 0x00008000 +#define UDMA_RX_CTRL_ENDIAN_OVERRIDE 0x00004000 +#define UDMA_RX_CTRL_ENDIAN 0x00002000 +#define UDMA_RX_CTRL_OE_IS_ERR 0x00001000 +#define UDMA_RX_CTRL_PE_IS_ERR 0x00000800 +#define UDMA_RX_CTRL_FE_IS_ERR 0x00000400 +#define UDMA_RX_CTRL_NUM_BUF_USED_MASK 0x000003c0 +#define UDMA_RX_CTRL_NUM_BUF_USED_SHIFT 6 +#define UDMA_RX_CTRL_BUF_CLOSE_CLK_SEL_SYS 0x00000020 +#define UDMA_RX_CTRL_BUF_CLOSE_ENA 0x00000010 +#define UDMA_RX_CTRL_TIMEOUT_CLK_SEL_SYS 0x00000008 +#define UDMA_RX_CTRL_TIMEOUT_ENA 0x00000004 +#define UDMA_RX_CTRL_ABORT 0x00000002 +#define UDMA_RX_CTRL_ENA 0x00000001 +#define UDMA_RX_STATUS 0x08 +#define UDMA_RX_STATUS_ACTIVE_BUF_MASK 0x0000000f +#define UDMA_RX_TRANSFER_LEN 0x0c +#define UDMA_RX_TRANSFER_TOTAL 0x10 +#define UDMA_RX_BUFFER_SIZE 0x14 +#define UDMA_RX_SRC_ADDR 0x18 +#define UDMA_RX_TIMEOUT 0x1c +#define UDMA_RX_BUFFER_CLOSE 0x20 +#define UDMA_RX_BLOCKOUT_COUNTER 0x24 +#define UDMA_RX_BUF0_PTR_LO 0x28 +#define UDMA_RX_BUF0_PTR_HI 0x2c +#define UDMA_RX_BUF0_STATUS 0x30 +#define UDMA_RX_BUFX_STATUS_OVERRUN_ERR 0x00000010 +#define UDMA_RX_BUFX_STATUS_FRAME_ERR 0x00000008 +#define UDMA_RX_BUFX_STATUS_PARITY_ERR 0x00000004 +#define UDMA_RX_BUFX_STATUS_CLOSE_EXPIRED 0x00000002 +#define UDMA_RX_BUFX_STATUS_DATA_RDY 0x00000001 +#define UDMA_RX_BUF0_DATA_LEN 0x34 +#define UDMA_RX_BUF1_PTR_LO 0x38 +#define UDMA_RX_BUF1_PTR_HI 0x3c +#define UDMA_RX_BUF1_STATUS 0x40 +#define UDMA_RX_BUF1_DATA_LEN 0x44 + +#define UDMA_TX_REVISION 0x00 +#define UDMA_TX_REVISION_REQUIRED 0x00000101 +#define UDMA_TX_CTRL 0x04 +#define UDMA_TX_CTRL_ENDIAN_OVERRIDE 0x00000080 +#define UDMA_TX_CTRL_ENDIAN 0x00000040 +#define UDMA_TX_CTRL_NUM_BUF_USED_MASK 0x00000030 +#define UDMA_TX_CTRL_NUM_BUF_USED_1 0x00000010 +#define UDMA_TX_CTRL_ABORT 0x00000002 +#define UDMA_TX_CTRL_ENA 0x00000001 +#define UDMA_TX_DST_ADDR 0x08 +#define UDMA_TX_BLOCKOUT_COUNTER 0x10 +#define UDMA_TX_TRANSFER_LEN 0x14 +#define UDMA_TX_TRANSFER_TOTAL 0x18 +#define UDMA_TX_STATUS 0x20 +#define UDMA_TX_BUF0_PTR_LO 0x24 +#define UDMA_TX_BUF0_PTR_HI 0x28 +#define UDMA_TX_BUF0_STATUS 0x2c +#define UDMA_TX_BUFX_LAST 0x00000002 +#define UDMA_TX_BUFX_EMPTY 0x00000001 +#define UDMA_TX_BUF0_DATA_LEN 0x30 +#define UDMA_TX_BUF0_DATA_SENT 0x34 +#define UDMA_TX_BUF1_PTR_LO 0x38 + +#define UDMA_INTR_STATUS 0x00 +#define UDMA_INTR_ARB_TX_GRANT 0x00040000 +#define UDMA_INTR_ARB_RX_GRANT 0x00020000 +#define UDMA_INTR_TX_ALL_EMPTY 0x00010000 +#define UDMA_INTR_TX_EMPTY_BUF1 0x00008000 +#define UDMA_INTR_TX_EMPTY_BUF0 0x00004000 +#define UDMA_INTR_TX_ABORT 0x00002000 +#define UDMA_INTR_TX_DONE 0x00001000 +#define UDMA_INTR_RX_ERROR 0x00000800 +#define UDMA_INTR_RX_TIMEOUT 0x00000400 +#define UDMA_INTR_RX_READY_BUF7 0x00000200 +#define UDMA_INTR_RX_READY_BUF6 0x00000100 +#define UDMA_INTR_RX_READY_BUF5 0x00000080 +#define UDMA_INTR_RX_READY_BUF4 0x00000040 +#define UDMA_INTR_RX_READY_BUF3 0x00000020 +#define UDMA_INTR_RX_READY_BUF2 0x00000010 +#define UDMA_INTR_RX_READY_BUF1 0x00000008 +#define UDMA_INTR_RX_READY_BUF0 0x00000004 +#define UDMA_INTR_RX_READY_MASK 0x000003fc +#define UDMA_INTR_RX_READY_SHIFT 2 +#define UDMA_INTR_RX_ABORT 0x00000002 +#define UDMA_INTR_RX_DONE 0x00000001 +#define UDMA_INTR_SET 0x04 +#define 
UDMA_INTR_CLEAR 0x08 +#define UDMA_INTR_MASK_STATUS 0x0c +#define UDMA_INTR_MASK_SET 0x10 +#define UDMA_INTR_MASK_CLEAR 0x14 + + +#define UDMA_RX_INTERRUPTS ( \ + UDMA_INTR_RX_ERROR | \ + UDMA_INTR_RX_TIMEOUT | \ + UDMA_INTR_RX_READY_BUF0 | \ + UDMA_INTR_RX_READY_BUF1 | \ + UDMA_INTR_RX_READY_BUF2 | \ + UDMA_INTR_RX_READY_BUF3 | \ + UDMA_INTR_RX_READY_BUF4 | \ + UDMA_INTR_RX_READY_BUF5 | \ + UDMA_INTR_RX_READY_BUF6 | \ + UDMA_INTR_RX_READY_BUF7 | \ + UDMA_INTR_RX_ABORT | \ + UDMA_INTR_RX_DONE) + +#define UDMA_RX_ERR_INTERRUPTS ( \ + UDMA_INTR_RX_ERROR | \ + UDMA_INTR_RX_TIMEOUT | \ + UDMA_INTR_RX_ABORT | \ + UDMA_INTR_RX_DONE) + +#define UDMA_TX_INTERRUPTS ( \ + UDMA_INTR_TX_ABORT | \ + UDMA_INTR_TX_DONE) + +#define UDMA_IS_RX_INTERRUPT(status) ((status) & UDMA_RX_INTERRUPTS) +#define UDMA_IS_TX_INTERRUPT(status) ((status) & UDMA_TX_INTERRUPTS) + + +/* Current devices have 8 sets of RX buffer registers */ +#define UDMA_RX_BUFS_COUNT 8 +#define UDMA_RX_BUFS_REG_OFFSET (UDMA_RX_BUF1_PTR_LO - UDMA_RX_BUF0_PTR_LO) +#define UDMA_RX_BUFx_PTR_LO(x) (UDMA_RX_BUF0_PTR_LO + \ + ((x) * UDMA_RX_BUFS_REG_OFFSET)) +#define UDMA_RX_BUFx_PTR_HI(x) (UDMA_RX_BUF0_PTR_HI + \ + ((x) * UDMA_RX_BUFS_REG_OFFSET)) +#define UDMA_RX_BUFx_STATUS(x) (UDMA_RX_BUF0_STATUS + \ + ((x) * UDMA_RX_BUFS_REG_OFFSET)) +#define UDMA_RX_BUFx_DATA_LEN(x) (UDMA_RX_BUF0_DATA_LEN + \ + ((x) * UDMA_RX_BUFS_REG_OFFSET)) + +/* Current devices have 2 sets of TX buffer registers */ +#define UDMA_TX_BUFS_COUNT 2 +#define UDMA_TX_BUFS_REG_OFFSET (UDMA_TX_BUF1_PTR_LO - UDMA_TX_BUF0_PTR_LO) +#define UDMA_TX_BUFx_PTR_LO(x) (UDMA_TX_BUF0_PTR_LO + \ + ((x) * UDMA_TX_BUFS_REG_OFFSET)) +#define UDMA_TX_BUFx_PTR_HI(x) (UDMA_TX_BUF0_PTR_HI + \ + ((x) * UDMA_TX_BUFS_REG_OFFSET)) +#define UDMA_TX_BUFx_STATUS(x) (UDMA_TX_BUF0_STATUS + \ + ((x) * UDMA_TX_BUFS_REG_OFFSET)) +#define UDMA_TX_BUFx_DATA_LEN(x) (UDMA_TX_BUF0_DATA_LEN + \ + ((x) * UDMA_TX_BUFS_REG_OFFSET)) +#define UDMA_TX_BUFx_DATA_SENT(x) (UDMA_TX_BUF0_DATA_SENT + \ + ((x) * UDMA_TX_BUFS_REG_OFFSET)) +#define REGS_8250 0 +#define REGS_DMA_RX 1 +#define REGS_DMA_TX 2 +#define REGS_DMA_ISR 3 +#define REGS_DMA_ARB 4 +#define REGS_MAX 5 + +#define TX_BUF_SIZE 4096 +#define RX_BUF_SIZE 4096 +#define RX_BUFS_COUNT 2 +#define KHZ 1000 +#define MHZ(x) ((x) * KHZ * KHZ) + +static const u32 brcmstb_rate_table[] = { + MHZ(81), + MHZ(108), + MHZ(64), /* Actually 64285715 for some chips */ + MHZ(48), +}; + +static const u32 brcmstb_rate_table_7278[] = { + MHZ(81), + MHZ(108), + 0, + MHZ(48), +}; + +struct brcmuart_priv { + int line; + struct clk *baud_mux_clk; + unsigned long default_mux_rate; + u32 real_rates[ARRAY_SIZE(brcmstb_rate_table)]; + const u32 *rate_table; + ktime_t char_wait; + struct uart_port *up; + struct hrtimer hrt; + bool shutdown; + bool dma_enabled; + struct uart_8250_dma dma; + void __iomem *regs[REGS_MAX]; + dma_addr_t rx_addr; + void *rx_bufs; + size_t rx_size; + int rx_next_buf; + dma_addr_t tx_addr; + void *tx_buf; + size_t tx_size; + bool tx_running; + bool rx_running; + struct dentry *debugfs_dir; + + /* stats exposed through debugfs */ + u64 dma_rx_partial_buf; + u64 dma_rx_full_buf; + u32 rx_bad_timeout_late_char; + u32 rx_bad_timeout_no_char; + u32 rx_missing_close_timeout; + u32 rx_err; + u32 rx_timeout; + u32 rx_abort; + u32 saved_mctrl; +}; + +static struct dentry *brcmuart_debugfs_root; + +/* + * Register access routines + */ +static u32 udma_readl(struct brcmuart_priv *priv, + int reg_type, int offset) +{ + return readl(priv->regs[reg_type] + offset); +} + +static void 
udma_writel(struct brcmuart_priv *priv, + int reg_type, int offset, u32 value) +{ + writel(value, priv->regs[reg_type] + offset); +} + +static void udma_set(struct brcmuart_priv *priv, + int reg_type, int offset, u32 bits) +{ + void __iomem *reg = priv->regs[reg_type] + offset; + u32 value; + + value = readl(reg); + value |= bits; + writel(value, reg); +} + +static void udma_unset(struct brcmuart_priv *priv, + int reg_type, int offset, u32 bits) +{ + void __iomem *reg = priv->regs[reg_type] + offset; + u32 value; + + value = readl(reg); + value &= ~bits; + writel(value, reg); +} + +/* + * The UART DMA engine hardware can be used by multiple UARTS, but + * only one at a time. Sharing is not currently supported so + * the first UART to request the DMA engine will get it and any + * subsequent requests by other UARTS will fail. + */ +static int brcmuart_arbitration(struct brcmuart_priv *priv, bool acquire) +{ + u32 rx_grant; + u32 tx_grant; + int waits; + int ret = 0; + + if (acquire) { + udma_set(priv, REGS_DMA_ARB, UDMA_ARB_RX, UDMA_ARB_REQ); + udma_set(priv, REGS_DMA_ARB, UDMA_ARB_TX, UDMA_ARB_REQ); + + waits = 1; + while (1) { + rx_grant = udma_readl(priv, REGS_DMA_ARB, UDMA_ARB_RX); + tx_grant = udma_readl(priv, REGS_DMA_ARB, UDMA_ARB_TX); + if (rx_grant & tx_grant & UDMA_ARB_GRANT) + return 0; + if (waits-- == 0) + break; + msleep(1); + } + ret = 1; + } + + udma_unset(priv, REGS_DMA_ARB, UDMA_ARB_RX, UDMA_ARB_REQ); + udma_unset(priv, REGS_DMA_ARB, UDMA_ARB_TX, UDMA_ARB_REQ); + return ret; +} + +static void brcmuart_init_dma_hardware(struct brcmuart_priv *priv) +{ + u32 daddr; + u32 value; + int x; + + /* Start with all interrupts disabled */ + udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_MASK_SET, 0xffffffff); + + udma_writel(priv, REGS_DMA_RX, UDMA_RX_BUFFER_SIZE, RX_BUF_SIZE); + + /* + * Setup buffer close to happen when 32 character times have + * elapsed since the last character was received. 
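 * One way to read the 16*10*32 value programmed just below, assuming the
 * buffer-close counter ticks at 16x the baud rate (the usual 8250
 * oversampling clock) and a 10-bit character frame (start + 8 data +
 * stop): 16 ticks/bit * 10 bits/char * 32 chars = 5120 ticks.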
+ */ + udma_writel(priv, REGS_DMA_RX, UDMA_RX_BUFFER_CLOSE, 16*10*32); + value = (RX_BUFS_COUNT << UDMA_RX_CTRL_NUM_BUF_USED_SHIFT) + | UDMA_RX_CTRL_BUF_CLOSE_MODE + | UDMA_RX_CTRL_BUF_CLOSE_ENA; + udma_writel(priv, REGS_DMA_RX, UDMA_RX_CTRL, value); + + udma_writel(priv, REGS_DMA_RX, UDMA_RX_BLOCKOUT_COUNTER, 0); + daddr = priv->rx_addr; + for (x = 0; x < RX_BUFS_COUNT; x++) { + + /* Set RX transfer length to 0 for unknown */ + udma_writel(priv, REGS_DMA_RX, UDMA_RX_TRANSFER_LEN, 0); + + udma_writel(priv, REGS_DMA_RX, UDMA_RX_BUFx_PTR_LO(x), + lower_32_bits(daddr)); + udma_writel(priv, REGS_DMA_RX, UDMA_RX_BUFx_PTR_HI(x), + upper_32_bits(daddr)); + daddr += RX_BUF_SIZE; + } + + daddr = priv->tx_addr; + udma_writel(priv, REGS_DMA_TX, UDMA_TX_BUFx_PTR_LO(0), + lower_32_bits(daddr)); + udma_writel(priv, REGS_DMA_TX, UDMA_TX_BUFx_PTR_HI(0), + upper_32_bits(daddr)); + udma_writel(priv, REGS_DMA_TX, UDMA_TX_CTRL, + UDMA_TX_CTRL_NUM_BUF_USED_1); + + /* clear all interrupts then enable them */ + udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, 0xffffffff); + udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_MASK_CLEAR, + UDMA_RX_INTERRUPTS | UDMA_TX_INTERRUPTS); + +} + +static void start_rx_dma(struct uart_8250_port *p) +{ + struct brcmuart_priv *priv = p->port.private_data; + int x; + + udma_unset(priv, REGS_DMA_RX, UDMA_RX_CTRL, UDMA_RX_CTRL_ENA); + + /* Clear the RX ready bit for all buffers */ + for (x = 0; x < RX_BUFS_COUNT; x++) + udma_unset(priv, REGS_DMA_RX, UDMA_RX_BUFx_STATUS(x), + UDMA_RX_BUFX_STATUS_DATA_RDY); + + /* always start with buffer 0 */ + udma_unset(priv, REGS_DMA_RX, UDMA_RX_STATUS, + UDMA_RX_STATUS_ACTIVE_BUF_MASK); + priv->rx_next_buf = 0; + + udma_set(priv, REGS_DMA_RX, UDMA_RX_CTRL, UDMA_RX_CTRL_ENA); + priv->rx_running = true; +} + +static void stop_rx_dma(struct uart_8250_port *p) +{ + struct brcmuart_priv *priv = p->port.private_data; + + /* If RX is running, set the RX ABORT */ + if (priv->rx_running) + udma_set(priv, REGS_DMA_RX, UDMA_RX_CTRL, UDMA_RX_CTRL_ABORT); +} + +static int stop_tx_dma(struct uart_8250_port *p) +{ + struct brcmuart_priv *priv = p->port.private_data; + u32 value; + + /* If TX is running, set the TX ABORT */ + value = udma_readl(priv, REGS_DMA_TX, UDMA_TX_CTRL); + if (value & UDMA_TX_CTRL_ENA) + udma_set(priv, REGS_DMA_TX, UDMA_TX_CTRL, UDMA_TX_CTRL_ABORT); + priv->tx_running = false; + return 0; +} + +/* + * NOTE: printk's in this routine will hang the system if this is + * the console tty + */ +static int brcmuart_tx_dma(struct uart_8250_port *p) +{ + struct brcmuart_priv *priv = p->port.private_data; + struct circ_buf *xmit = &p->port.state->xmit; + u32 tx_size; + + if (uart_tx_stopped(&p->port) || priv->tx_running || + uart_circ_empty(xmit)) { + return 0; + } + tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + + priv->dma.tx_err = 0; + memcpy(priv->tx_buf, &xmit->buf[xmit->tail], tx_size); + xmit->tail += tx_size; + xmit->tail &= UART_XMIT_SIZE - 1; + p->port.icount.tx += tx_size; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&p->port); + + udma_writel(priv, REGS_DMA_TX, UDMA_TX_TRANSFER_LEN, tx_size); + udma_writel(priv, REGS_DMA_TX, UDMA_TX_BUF0_DATA_LEN, tx_size); + udma_unset(priv, REGS_DMA_TX, UDMA_TX_BUF0_STATUS, UDMA_TX_BUFX_EMPTY); + udma_set(priv, REGS_DMA_TX, UDMA_TX_CTRL, UDMA_TX_CTRL_ENA); + priv->tx_running = true; + + return 0; +} + +static void brcmuart_rx_buf_done_isr(struct uart_port *up, int index) +{ + struct brcmuart_priv *priv = up->private_data; + struct tty_port *tty_port = 
&up->state->port; + u32 status; + u32 length; + u32 copied; + + /* Make sure we're still in sync with the hardware */ + status = udma_readl(priv, REGS_DMA_RX, UDMA_RX_BUFx_STATUS(index)); + length = udma_readl(priv, REGS_DMA_RX, UDMA_RX_BUFx_DATA_LEN(index)); + + if ((status & UDMA_RX_BUFX_STATUS_DATA_RDY) == 0) { + dev_err(up->dev, "RX done interrupt but DATA_RDY not found\n"); + return; + } + if (status & (UDMA_RX_BUFX_STATUS_OVERRUN_ERR | + UDMA_RX_BUFX_STATUS_FRAME_ERR | + UDMA_RX_BUFX_STATUS_PARITY_ERR)) { + if (status & UDMA_RX_BUFX_STATUS_OVERRUN_ERR) { + up->icount.overrun++; + dev_warn(up->dev, "RX OVERRUN Error\n"); + } + if (status & UDMA_RX_BUFX_STATUS_FRAME_ERR) { + up->icount.frame++; + dev_warn(up->dev, "RX FRAMING Error\n"); + } + if (status & UDMA_RX_BUFX_STATUS_PARITY_ERR) { + up->icount.parity++; + dev_warn(up->dev, "RX PARITY Error\n"); + } + } + copied = (u32)tty_insert_flip_string( + tty_port, + priv->rx_bufs + (index * RX_BUF_SIZE), + length); + if (copied != length) { + dev_warn(up->dev, "Flip buffer overrun of %d bytes\n", + length - copied); + up->icount.overrun += length - copied; + } + up->icount.rx += length; + if (status & UDMA_RX_BUFX_STATUS_CLOSE_EXPIRED) + priv->dma_rx_partial_buf++; + else if (length != RX_BUF_SIZE) + /* + * This is a bug in the controller that doesn't cause + * any problems but will be fixed in the future. + */ + priv->rx_missing_close_timeout++; + else + priv->dma_rx_full_buf++; + + tty_flip_buffer_push(tty_port); +} + +static void brcmuart_rx_isr(struct uart_port *up, u32 rx_isr) +{ + struct brcmuart_priv *priv = up->private_data; + struct device *dev = up->dev; + u32 rx_done_isr; + u32 check_isr; + + rx_done_isr = (rx_isr & UDMA_INTR_RX_READY_MASK); + while (rx_done_isr) { + check_isr = UDMA_INTR_RX_READY_BUF0 << priv->rx_next_buf; + if (check_isr & rx_done_isr) { + brcmuart_rx_buf_done_isr(up, priv->rx_next_buf); + } else { + dev_err(dev, + "RX buffer ready out of sequence, restarting RX DMA\n"); + start_rx_dma(up_to_u8250p(up)); + break; + } + if (rx_isr & UDMA_RX_ERR_INTERRUPTS) { + if (rx_isr & UDMA_INTR_RX_ERROR) + priv->rx_err++; + if (rx_isr & UDMA_INTR_RX_TIMEOUT) { + priv->rx_timeout++; + dev_err(dev, "RX TIMEOUT Error\n"); + } + if (rx_isr & UDMA_INTR_RX_ABORT) + priv->rx_abort++; + priv->rx_running = false; + } + /* If not ABORT, re-enable RX buffer */ + if (!(rx_isr & UDMA_INTR_RX_ABORT)) + udma_unset(priv, REGS_DMA_RX, + UDMA_RX_BUFx_STATUS(priv->rx_next_buf), + UDMA_RX_BUFX_STATUS_DATA_RDY); + rx_done_isr &= ~check_isr; + priv->rx_next_buf++; + if (priv->rx_next_buf == RX_BUFS_COUNT) + priv->rx_next_buf = 0; + } +} + +static void brcmuart_tx_isr(struct uart_port *up, u32 isr) +{ + struct brcmuart_priv *priv = up->private_data; + struct device *dev = up->dev; + struct uart_8250_port *port_8250 = up_to_u8250p(up); + struct circ_buf *xmit = &port_8250->port.state->xmit; + + if (isr & UDMA_INTR_TX_ABORT) { + if (priv->tx_running) + dev_err(dev, "Unexpected TX_ABORT interrupt\n"); + return; + } + priv->tx_running = false; + if (!uart_circ_empty(xmit) && !uart_tx_stopped(up)) + brcmuart_tx_dma(port_8250); +} + +static irqreturn_t brcmuart_isr(int irq, void *dev_id) +{ + struct uart_port *up = dev_id; + struct device *dev = up->dev; + struct brcmuart_priv *priv = up->private_data; + unsigned long flags; + u32 interrupts; + u32 rval; + u32 tval; + + interrupts = udma_readl(priv, REGS_DMA_ISR, UDMA_INTR_STATUS); + if (interrupts == 0) + return IRQ_NONE; + + spin_lock_irqsave(&up->lock, flags); + + /* Clear all interrupts */ + 
udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts); + + rval = UDMA_IS_RX_INTERRUPT(interrupts); + if (rval) + brcmuart_rx_isr(up, rval); + tval = UDMA_IS_TX_INTERRUPT(interrupts); + if (tval) + brcmuart_tx_isr(up, tval); + if ((rval | tval) == 0) + dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts); + + spin_unlock_irqrestore(&up->lock, flags); + return IRQ_HANDLED; +} + +static int brcmuart_startup(struct uart_port *port) +{ + int res; + struct uart_8250_port *up = up_to_u8250p(port); + struct brcmuart_priv *priv = up->port.private_data; + + priv->shutdown = false; + + /* + * prevent serial8250_do_startup() from allocating non-existent + * DMA resources + */ + up->dma = NULL; + + res = serial8250_do_startup(port); + if (!priv->dma_enabled) + return res; + /* + * Disable the Receive Data Interrupt because the DMA engine + * will handle this. + */ + up->ier &= ~UART_IER_RDI; + serial_port_out(port, UART_IER, up->ier); + + priv->tx_running = false; + priv->dma.rx_dma = NULL; + priv->dma.tx_dma = brcmuart_tx_dma; + up->dma = &priv->dma; + + brcmuart_init_dma_hardware(priv); + start_rx_dma(up); + return res; +} + +static void brcmuart_shutdown(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct brcmuart_priv *priv = up->port.private_data; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + priv->shutdown = true; + if (priv->dma_enabled) { + stop_rx_dma(up); + stop_tx_dma(up); + /* disable all interrupts */ + udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_MASK_SET, + UDMA_RX_INTERRUPTS | UDMA_TX_INTERRUPTS); + } + + /* + * prevent serial8250_do_shutdown() from trying to free + * DMA resources that we never alloc'd for this driver. + */ + up->dma = NULL; + + spin_unlock_irqrestore(&port->lock, flags); + serial8250_do_shutdown(port); +} + +/* + * Not all clocks run at the exact specified rate, so set each requested + * rate and then get the actual rate. 
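 * Worked example for the error search in set_clock_mux() below, using
 * illustrative numbers: at 115200 baud on the 81 MHz mux rate,
 * rate / 16 = 5062500 and quot = 44, giving a real baud of about 115057,
 * roughly 0.12% error and well under the 3% limit warned about there.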
+ */ +static void init_real_clk_rates(struct device *dev, struct brcmuart_priv *priv) +{ + int x; + int rc; + + priv->default_mux_rate = clk_get_rate(priv->baud_mux_clk); + for (x = 0; x < ARRAY_SIZE(priv->real_rates); x++) { + if (priv->rate_table[x] == 0) { + priv->real_rates[x] = 0; + continue; + } + rc = clk_set_rate(priv->baud_mux_clk, priv->rate_table[x]); + if (rc) { + dev_err(dev, "Error selecting BAUD MUX clock for %u\n", + priv->rate_table[x]); + priv->real_rates[x] = priv->rate_table[x]; + } else { + priv->real_rates[x] = clk_get_rate(priv->baud_mux_clk); + } + } + clk_set_rate(priv->baud_mux_clk, priv->default_mux_rate); +} + +static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv, + u32 baud) +{ + u32 percent; + u32 best_percent = UINT_MAX; + u32 quot; + u32 best_quot = 1; + u32 rate; + int best_index = -1; + u64 hires_rate; + u64 hires_baud; + u64 hires_err; + int rc; + int i; + int real_baud; + + /* If the Baud Mux Clock was not specified, just return */ + if (priv->baud_mux_clk == NULL) + return; + + /* Find the closest match for specified baud */ + for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) { + if (priv->real_rates[i] == 0) + continue; + rate = priv->real_rates[i] / 16; + quot = DIV_ROUND_CLOSEST(rate, baud); + if (!quot) + continue; + + /* increase resolution to get xx.xx percent */ + hires_rate = (u64)rate * 10000; + hires_baud = (u64)baud * 10000; + + hires_err = div_u64(hires_rate, (u64)quot); + + /* get the delta */ + if (hires_err > hires_baud) + hires_err = (hires_err - hires_baud); + else + hires_err = (hires_baud - hires_err); + + percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud); + dev_dbg(up->dev, + "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n", + baud, priv->real_rates[i], percent / 100, + percent % 100); + if (percent < best_percent) { + best_percent = percent; + best_index = i; + best_quot = quot; + } + } + if (best_index == -1) { + dev_err(up->dev, "Error, %d BAUD rate is too fast.\n", baud); + return; + } + rate = priv->real_rates[best_index]; + rc = clk_set_rate(priv->baud_mux_clk, rate); + if (rc) + dev_err(up->dev, "Error selecting BAUD MUX clock\n"); + + /* Error over 3 percent will cause data errors */ + if (best_percent > 300) + dev_err(up->dev, "Error, baud: %d has %u.%u%% error\n", + baud, percent / 100, percent % 100); + + real_baud = rate / 16 / best_quot; + dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", rate); + dev_dbg(up->dev, "Requested baud: %u, Actual baud: %u\n", + baud, real_baud); + + /* calc nanoseconds for 1.5 characters time at the given baud rate */ + i = NSEC_PER_SEC / real_baud / 10; + i += (i / 2); + priv->char_wait = ns_to_ktime(i); + + up->uartclk = rate; +} + +static void brcmstb_set_termios(struct uart_port *up, + struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_8250_port *p8250 = up_to_u8250p(up); + struct brcmuart_priv *priv = up->private_data; + + if (priv->dma_enabled) + stop_rx_dma(p8250); + set_clock_mux(up, priv, tty_termios_baud_rate(termios)); + serial8250_do_set_termios(up, termios, old); + if (p8250->mcr & UART_MCR_AFE) + p8250->port.status |= UPSTAT_AUTOCTS; + if (priv->dma_enabled) + start_rx_dma(p8250); +} + +static int brcmuart_handle_irq(struct uart_port *p) +{ + unsigned int iir = serial_port_in(p, UART_IIR); + struct brcmuart_priv *priv = p->private_data; + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int status; + unsigned long flags; + unsigned int ier; + unsigned int mcr; + int handled = 0; + + /* + * There's a bug in some 8250 
cores where we get a timeout + * interrupt but there is no data ready. + */ + if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) { + spin_lock_irqsave(&p->lock, flags); + status = serial_port_in(p, UART_LSR); + if ((status & UART_LSR_DR) == 0) { + + ier = serial_port_in(p, UART_IER); + /* + * if Receive Data Interrupt is enabled and + * we're uing hardware flow control, deassert + * RTS and wait for any chars in the pipline to + * arrive and then check for DR again. + */ + if ((ier & UART_IER_RDI) && (up->mcr & UART_MCR_AFE)) { + ier &= ~(UART_IER_RLSI | UART_IER_RDI); + serial_port_out(p, UART_IER, ier); + mcr = serial_port_in(p, UART_MCR); + mcr &= ~UART_MCR_RTS; + serial_port_out(p, UART_MCR, mcr); + hrtimer_start(&priv->hrt, priv->char_wait, + HRTIMER_MODE_REL); + } else { + serial_port_in(p, UART_RX); + } + + handled = 1; + } + spin_unlock_irqrestore(&p->lock, flags); + if (handled) + return 1; + } + return serial8250_handle_irq(p, iir); +} + +static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t) +{ + struct brcmuart_priv *priv = container_of(t, struct brcmuart_priv, hrt); + struct uart_port *p = priv->up; + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int status; + unsigned long flags; + + if (priv->shutdown) + return HRTIMER_NORESTART; + + spin_lock_irqsave(&p->lock, flags); + status = serial_port_in(p, UART_LSR); + + /* + * If a character did not arrive after the timeout, clear the false + * receive timeout. + */ + if ((status & UART_LSR_DR) == 0) { + serial_port_in(p, UART_RX); + priv->rx_bad_timeout_no_char++; + } else { + priv->rx_bad_timeout_late_char++; + } + + /* re-enable receive unless upper layer has disabled it */ + if ((up->ier & (UART_IER_RLSI | UART_IER_RDI)) == + (UART_IER_RLSI | UART_IER_RDI)) { + status = serial_port_in(p, UART_IER); + status |= (UART_IER_RLSI | UART_IER_RDI); + serial_port_out(p, UART_IER, status); + status = serial_port_in(p, UART_MCR); + status |= UART_MCR_RTS; + serial_port_out(p, UART_MCR, status); + } + spin_unlock_irqrestore(&p->lock, flags); + return HRTIMER_NORESTART; +} + +static const struct of_device_id brcmuart_dt_ids[] = { + { + .compatible = "brcm,bcm7278-uart", + .data = brcmstb_rate_table_7278, + }, + { + .compatible = "brcm,bcm7271-uart", + .data = brcmstb_rate_table, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, brcmuart_dt_ids); + +static void brcmuart_free_bufs(struct device *dev, struct brcmuart_priv *priv) +{ + if (priv->rx_bufs) + dma_free_coherent(dev, priv->rx_size, priv->rx_bufs, + priv->rx_addr); + if (priv->tx_buf) + dma_free_coherent(dev, priv->tx_size, priv->tx_buf, + priv->tx_addr); +} + +static void brcmuart_throttle(struct uart_port *port) +{ + struct brcmuart_priv *priv = port->private_data; + + udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_MASK_SET, UDMA_RX_INTERRUPTS); +} + +static void brcmuart_unthrottle(struct uart_port *port) +{ + struct brcmuart_priv *priv = port->private_data; + + udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_MASK_CLEAR, + UDMA_RX_INTERRUPTS); +} + +static int debugfs_stats_show(struct seq_file *s, void *unused) +{ + struct brcmuart_priv *priv = s->private; + + seq_printf(s, "rx_err:\t\t\t\t%u\n", + priv->rx_err); + seq_printf(s, "rx_timeout:\t\t\t%u\n", + priv->rx_timeout); + seq_printf(s, "rx_abort:\t\t\t%u\n", + priv->rx_abort); + seq_printf(s, "rx_bad_timeout_late_char:\t%u\n", + priv->rx_bad_timeout_late_char); + seq_printf(s, "rx_bad_timeout_no_char:\t\t%u\n", + priv->rx_bad_timeout_no_char); + seq_printf(s, "rx_missing_close_timeout:\t%u\n", + 
priv->rx_missing_close_timeout); + if (priv->dma_enabled) { + seq_printf(s, "dma_rx_partial_buf:\t\t%llu\n", + priv->dma_rx_partial_buf); + seq_printf(s, "dma_rx_full_buf:\t\t%llu\n", + priv->dma_rx_full_buf); + } + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_stats); + +static void brcmuart_init_debugfs(struct brcmuart_priv *priv, + const char *device) +{ + priv->debugfs_dir = debugfs_create_dir(device, brcmuart_debugfs_root); + debugfs_create_file("stats", 0444, priv->debugfs_dir, priv, + &debugfs_stats_fops); +} + + +static int brcmuart_probe(struct platform_device *pdev) +{ + struct resource *regs; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + struct uart_8250_port *new_port; + struct device *dev = &pdev->dev; + struct brcmuart_priv *priv; + struct clk *baud_mux_clk; + struct uart_8250_port up; + int irq; + void __iomem *membase = NULL; + resource_size_t mapbase = 0; + u32 clk_rate = 0; + int ret; + int x; + int dma_irq; + static const char * const reg_names[REGS_MAX] = { + "uart", "dma_rx", "dma_tx", "dma_intr2", "dma_arb" + }; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + priv = devm_kzalloc(dev, sizeof(struct brcmuart_priv), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + of_id = of_match_node(brcmuart_dt_ids, np); + if (!of_id || !of_id->data) + priv->rate_table = brcmstb_rate_table; + else + priv->rate_table = of_id->data; + + for (x = 0; x < REGS_MAX; x++) { + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, + reg_names[x]); + if (!regs) + break; + priv->regs[x] = devm_ioremap(dev, regs->start, + resource_size(regs)); + if (!priv->regs[x]) + return -ENOMEM; + if (x == REGS_8250) { + mapbase = regs->start; + membase = priv->regs[x]; + } + } + + /* We should have just the uart base registers or all the registers */ + if (x != 1 && x != REGS_MAX) { + dev_warn(dev, "%s registers not specified\n", reg_names[x]); + return -EINVAL; + } + + /* if the DMA registers were specified, try to enable DMA */ + if (x > REGS_DMA_RX) { + if (brcmuart_arbitration(priv, 1) == 0) { + u32 txrev = 0; + u32 rxrev = 0; + + txrev = udma_readl(priv, REGS_DMA_RX, UDMA_RX_REVISION); + rxrev = udma_readl(priv, REGS_DMA_TX, UDMA_TX_REVISION); + if ((txrev >= UDMA_TX_REVISION_REQUIRED) && + (rxrev >= UDMA_RX_REVISION_REQUIRED)) { + + /* Enable the use of the DMA hardware */ + priv->dma_enabled = true; + } else { + brcmuart_arbitration(priv, 0); + dev_err(dev, + "Unsupported DMA Hardware Revision\n"); + } + } else { + dev_err(dev, + "Timeout arbitrating for UART DMA hardware\n"); + } + } + + of_property_read_u32(np, "clock-frequency", &clk_rate); + + /* See if a Baud clock has been specified */ + baud_mux_clk = devm_clk_get(dev, "sw_baud"); + if (IS_ERR(baud_mux_clk)) { + if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto release_dma; + } + dev_dbg(dev, "BAUD MUX clock not specified\n"); + } else { + dev_dbg(dev, "BAUD MUX clock found\n"); + ret = clk_prepare_enable(baud_mux_clk); + if (ret) + goto release_dma; + priv->baud_mux_clk = baud_mux_clk; + init_real_clk_rates(dev, priv); + clk_rate = priv->default_mux_rate; + } + + if (clk_rate == 0) { + dev_err(dev, "clock-frequency or clk not defined\n"); + ret = -EINVAL; + goto err_clk_disable; + } + + dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? 
"" : "not "); + + memset(&up, 0, sizeof(up)); + up.port.type = PORT_16550A; + up.port.uartclk = clk_rate; + up.port.dev = dev; + up.port.mapbase = mapbase; + up.port.membase = membase; + up.port.irq = irq; + up.port.handle_irq = brcmuart_handle_irq; + up.port.regshift = 2; + up.port.iotype = of_device_is_big_endian(np) ? + UPIO_MEM32BE : UPIO_MEM32; + up.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF + | UPF_FIXED_PORT | UPF_FIXED_TYPE; + up.port.dev = dev; + up.port.private_data = priv; + up.capabilities = UART_CAP_FIFO | UART_CAP_AFE; + up.port.fifosize = 32; + + /* Check for a fixed line number */ + ret = of_alias_get_id(np, "serial"); + if (ret >= 0) + up.port.line = ret; + + /* setup HR timer */ + hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + priv->hrt.function = brcmuart_hrtimer_func; + + up.port.shutdown = brcmuart_shutdown; + up.port.startup = brcmuart_startup; + up.port.throttle = brcmuart_throttle; + up.port.unthrottle = brcmuart_unthrottle; + up.port.set_termios = brcmstb_set_termios; + + if (priv->dma_enabled) { + priv->rx_size = RX_BUF_SIZE * RX_BUFS_COUNT; + priv->rx_bufs = dma_alloc_coherent(dev, + priv->rx_size, + &priv->rx_addr, GFP_KERNEL); + if (!priv->rx_bufs) { + ret = -ENOMEM; + goto err; + } + priv->tx_size = UART_XMIT_SIZE; + priv->tx_buf = dma_alloc_coherent(dev, + priv->tx_size, + &priv->tx_addr, GFP_KERNEL); + if (!priv->tx_buf) { + ret = -ENOMEM; + goto err; + } + } + + ret = serial8250_register_8250_port(&up); + if (ret < 0) { + dev_err(dev, "unable to register 8250 port\n"); + goto err; + } + priv->line = ret; + new_port = serial8250_get_port(ret); + priv->up = &new_port->port; + if (priv->dma_enabled) { + dma_irq = platform_get_irq_byname(pdev, "dma"); + if (dma_irq < 0) { + ret = dma_irq; + dev_err(dev, "no IRQ resource info\n"); + goto err1; + } + ret = devm_request_irq(dev, dma_irq, brcmuart_isr, + IRQF_SHARED, "uart DMA irq", &new_port->port); + if (ret) { + dev_err(dev, "unable to register IRQ handler\n"); + goto err1; + } + } + platform_set_drvdata(pdev, priv); + brcmuart_init_debugfs(priv, dev_name(&pdev->dev)); + return 0; + +err1: + serial8250_unregister_port(priv->line); +err: + brcmuart_free_bufs(dev, priv); +err_clk_disable: + clk_disable_unprepare(baud_mux_clk); +release_dma: + if (priv->dma_enabled) + brcmuart_arbitration(priv, 0); + return ret; +} + +static int brcmuart_remove(struct platform_device *pdev) +{ + struct brcmuart_priv *priv = platform_get_drvdata(pdev); + + debugfs_remove_recursive(priv->debugfs_dir); + hrtimer_cancel(&priv->hrt); + serial8250_unregister_port(priv->line); + brcmuart_free_bufs(&pdev->dev, priv); + clk_disable_unprepare(priv->baud_mux_clk); + if (priv->dma_enabled) + brcmuart_arbitration(priv, 0); + return 0; +} + +static int __maybe_unused brcmuart_suspend(struct device *dev) +{ + struct brcmuart_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + struct uart_port *port = &up->port; + unsigned long flags; + + /* + * This will prevent resume from enabling RTS before the + * baud rate has been restored. 
+ */ + spin_lock_irqsave(&port->lock, flags); + priv->saved_mctrl = port->mctrl; + port->mctrl &= ~TIOCM_RTS; + spin_unlock_irqrestore(&port->lock, flags); + + serial8250_suspend_port(priv->line); + clk_disable_unprepare(priv->baud_mux_clk); + + return 0; +} + +static int __maybe_unused brcmuart_resume(struct device *dev) +{ + struct brcmuart_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + struct uart_port *port = &up->port; + unsigned long flags; + int ret; + + ret = clk_prepare_enable(priv->baud_mux_clk); + if (ret) + dev_err(dev, "Error enabling BAUD MUX clock\n"); + + /* + * The hardware goes back to it's default after suspend + * so get the "clk" back in sync. + */ + ret = clk_set_rate(priv->baud_mux_clk, priv->default_mux_rate); + if (ret) + dev_err(dev, "Error restoring default BAUD MUX clock\n"); + if (priv->dma_enabled) { + if (brcmuart_arbitration(priv, 1)) { + dev_err(dev, "Timeout arbitrating for DMA hardware on resume\n"); + return(-EBUSY); + } + brcmuart_init_dma_hardware(priv); + start_rx_dma(serial8250_get_port(priv->line)); + } + serial8250_resume_port(priv->line); + + if (priv->saved_mctrl & TIOCM_RTS) { + /* Restore RTS */ + spin_lock_irqsave(&port->lock, flags); + port->mctrl |= TIOCM_RTS; + port->ops->set_mctrl(port, port->mctrl); + spin_unlock_irqrestore(&port->lock, flags); + } + + return 0; +} + +static const struct dev_pm_ops brcmuart_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(brcmuart_suspend, brcmuart_resume) +}; + +static struct platform_driver brcmuart_platform_driver = { + .driver = { + .name = "bcm7271-uart", + .pm = &brcmuart_dev_pm_ops, + .of_match_table = brcmuart_dt_ids, + }, + .probe = brcmuart_probe, + .remove = brcmuart_remove, +}; + +static int __init brcmuart_init(void) +{ + int ret; + + brcmuart_debugfs_root = debugfs_create_dir( + brcmuart_platform_driver.driver.name, NULL); + ret = platform_driver_register(&brcmuart_platform_driver); + if (ret) { + debugfs_remove_recursive(brcmuart_debugfs_root); + return ret; + } + + return 0; +} +module_init(brcmuart_init); + +static void __exit brcmuart_deinit(void) +{ + platform_driver_unregister(&brcmuart_platform_driver); + debugfs_remove_recursive(brcmuart_debugfs_root); +} +module_exit(brcmuart_deinit); + +MODULE_AUTHOR("Al Cooper"); +MODULE_DESCRIPTION("Broadcom NS16550A compatible serial port driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/8250/8250_boca.c b/drivers/tty/serial/8250/8250_boca.c new file mode 100644 index 000000000..a9b97c034 --- /dev/null +++ b/drivers/tty/serial/8250/8250_boca.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005 Russell King. 
+ * Data taken from include/asm-i386/serial.h + */ +#include +#include +#include + +#include "8250.h" + +static struct plat_serial8250_port boca_data[] = { + SERIAL8250_PORT(0x100, 12), + SERIAL8250_PORT(0x108, 12), + SERIAL8250_PORT(0x110, 12), + SERIAL8250_PORT(0x118, 12), + SERIAL8250_PORT(0x120, 12), + SERIAL8250_PORT(0x128, 12), + SERIAL8250_PORT(0x130, 12), + SERIAL8250_PORT(0x138, 12), + SERIAL8250_PORT(0x140, 12), + SERIAL8250_PORT(0x148, 12), + SERIAL8250_PORT(0x150, 12), + SERIAL8250_PORT(0x158, 12), + SERIAL8250_PORT(0x160, 12), + SERIAL8250_PORT(0x168, 12), + SERIAL8250_PORT(0x170, 12), + SERIAL8250_PORT(0x178, 12), + { }, +}; + +static struct platform_device boca_device = { + .name = "serial8250", + .id = PLAT8250_DEV_BOCA, + .dev = { + .platform_data = boca_data, + }, +}; + +static int __init boca_init(void) +{ + return platform_device_register(&boca_device); +} + +module_init(boca_init); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("8250 serial probe module for Boca cards"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c new file mode 100644 index 000000000..81a5dab1a --- /dev/null +++ b/drivers/tty/serial/8250/8250_core.c @@ -0,0 +1,1300 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Universal/legacy driver for 8250/16550-type serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright (C) 2001 Russell King. + * + * Supports: ISA-compatible 8250/16550 ports + * PNP 8250/16550 ports + * early_serial_setup() ports + * userspace-configurable "phantom" ports + * "serial8250" platform devices + * serial8250_register_8250_port() ports + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SPARC +#include +#endif + +#include + +#include "8250.h" + +/* + * Configuration: + * share_irqs - whether we pass IRQF_SHARED to request_irq(). This option + * is unsafe when used on edge-triggered interrupts. + */ +static unsigned int share_irqs = SERIAL8250_SHARE_IRQS; + +static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS; + +static struct uart_driver serial8250_reg; + +static unsigned int skip_txen_test; /* force skip of txen test at init time */ + +#define PASS_LIMIT 512 + +#include +/* + * SERIAL_PORT_DFNS tells us about built-in ports that have no + * standard enumeration mechanism. Platforms that can find all + * serial ports via mechanisms like ACPI or PCI need not supply it. + */ +#ifndef SERIAL_PORT_DFNS +#define SERIAL_PORT_DFNS +#endif + +static const struct old_serial_port old_serial_port[] = { + SERIAL_PORT_DFNS /* defined in asm/serial.h */ +}; + +#define UART_NR CONFIG_SERIAL_8250_NR_UARTS + +#ifdef CONFIG_SERIAL_8250_RSA + +#define PORT_RSA_MAX 4 +static unsigned long probe_rsa[PORT_RSA_MAX]; +static unsigned int probe_rsa_count; +#endif /* CONFIG_SERIAL_8250_RSA */ + +struct irq_info { + struct hlist_node node; + int irq; + spinlock_t lock; /* Protects list not the hash */ + struct list_head *head; +}; + +#define NR_IRQ_HASH 32 /* Can be adjusted later */ +static struct hlist_head irq_lists[NR_IRQ_HASH]; +static DEFINE_MUTEX(hash_mutex); /* Used to walk the hash */ + +/* + * This is the serial driver's interrupt routine. + * + * Arjan thinks the old way was overly complex, so it got simplified. 
+ * Alan disagrees, saying that need the complexity to handle the weird + * nature of ISA shared interrupts. (This is a special exception.) + * + * In order to handle ISA shared interrupts properly, we need to check + * that all ports have been serviced, and therefore the ISA interrupt + * line has been de-asserted. + * + * This means we need to loop through all ports. checking that they + * don't have an interrupt pending. + */ +static irqreturn_t serial8250_interrupt(int irq, void *dev_id) +{ + struct irq_info *i = dev_id; + struct list_head *l, *end = NULL; + int pass_counter = 0, handled = 0; + + pr_debug("%s(%d): start\n", __func__, irq); + + spin_lock(&i->lock); + + l = i->head; + do { + struct uart_8250_port *up; + struct uart_port *port; + + up = list_entry(l, struct uart_8250_port, list); + port = &up->port; + + if (port->handle_irq(port)) { + handled = 1; + end = NULL; + } else if (end == NULL) + end = l; + + l = l->next; + + if (l == i->head && pass_counter++ > PASS_LIMIT) + break; + } while (l != end); + + spin_unlock(&i->lock); + + pr_debug("%s(%d): end\n", __func__, irq); + + return IRQ_RETVAL(handled); +} + +/* + * To support ISA shared interrupts, we need to have one interrupt + * handler that ensures that the IRQ line has been deasserted + * before returning. Failing to do this will result in the IRQ + * line being stuck active, and, since ISA irqs are edge triggered, + * no more IRQs will be seen. + */ +static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up) +{ + spin_lock_irq(&i->lock); + + if (!list_empty(i->head)) { + if (i->head == &up->list) + i->head = i->head->next; + list_del(&up->list); + } else { + BUG_ON(i->head != &up->list); + i->head = NULL; + } + spin_unlock_irq(&i->lock); + /* List empty so throw away the hash node */ + if (i->head == NULL) { + hlist_del(&i->node); + kfree(i); + } +} + +static int serial_link_irq_chain(struct uart_8250_port *up) +{ + struct hlist_head *h; + struct irq_info *i; + int ret; + + mutex_lock(&hash_mutex); + + h = &irq_lists[up->port.irq % NR_IRQ_HASH]; + + hlist_for_each_entry(i, h, node) + if (i->irq == up->port.irq) + break; + + if (i == NULL) { + i = kzalloc(sizeof(struct irq_info), GFP_KERNEL); + if (i == NULL) { + mutex_unlock(&hash_mutex); + return -ENOMEM; + } + spin_lock_init(&i->lock); + i->irq = up->port.irq; + hlist_add_head(&i->node, h); + } + mutex_unlock(&hash_mutex); + + spin_lock_irq(&i->lock); + + if (i->head) { + list_add(&up->list, i->head); + spin_unlock_irq(&i->lock); + + ret = 0; + } else { + INIT_LIST_HEAD(&up->list); + i->head = &up->list; + spin_unlock_irq(&i->lock); + ret = request_irq(up->port.irq, serial8250_interrupt, + up->port.irqflags, up->port.name, i); + if (ret < 0) + serial_do_unlink(i, up); + } + + return ret; +} + +static void serial_unlink_irq_chain(struct uart_8250_port *up) +{ + struct irq_info *i; + struct hlist_head *h; + + mutex_lock(&hash_mutex); + + h = &irq_lists[up->port.irq % NR_IRQ_HASH]; + + hlist_for_each_entry(i, h, node) + if (i->irq == up->port.irq) + break; + + BUG_ON(i == NULL); + BUG_ON(i->head == NULL); + + if (list_empty(i->head)) + free_irq(up->port.irq, i); + + serial_do_unlink(i, up); + mutex_unlock(&hash_mutex); +} + +/* + * This function is used to handle ports that do not have an + * interrupt. This doesn't work very well for 16450's, but gives + * barely passable results for a 16550A. (Although at the expense + * of much CPU overhead). 
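 * The polling interval used below comes from uart_poll_timeout(), which
 * is roughly half of port->timeout, i.e. about half the time a full FIFO
 * takes to drain at the current baud rate, so slower ports are polled
 * less often.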
+ */ +static void serial8250_timeout(struct timer_list *t) +{ + struct uart_8250_port *up = from_timer(up, t, timer); + + up->port.handle_irq(&up->port); + mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port)); +} + +static void serial8250_backup_timeout(struct timer_list *t) +{ + struct uart_8250_port *up = from_timer(up, t, timer); + unsigned int iir, ier = 0, lsr; + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + + /* + * Must disable interrupts or else we risk racing with the interrupt + * based handler. + */ + if (up->port.irq) { + ier = serial_in(up, UART_IER); + serial_out(up, UART_IER, 0); + } + + iir = serial_in(up, UART_IIR); + + /* + * This should be a safe test for anyone who doesn't trust the + * IIR bits on their UART, but it's specifically designed for + * the "Diva" UART used on the management processor on many HP + * ia64 and parisc boxes. + */ + lsr = serial_lsr_in(up); + if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && + (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && + (lsr & UART_LSR_THRE)) { + iir &= ~(UART_IIR_ID | UART_IIR_NO_INT); + iir |= UART_IIR_THRI; + } + + if (!(iir & UART_IIR_NO_INT)) + serial8250_tx_chars(up); + + if (up->port.irq) + serial_out(up, UART_IER, ier); + + spin_unlock_irqrestore(&up->port.lock, flags); + + /* Standard timer interval plus 0.2s to keep the port running */ + mod_timer(&up->timer, + jiffies + uart_poll_timeout(&up->port) + HZ / 5); +} + +static void univ8250_setup_timer(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + + /* + * The above check will only give an accurate result the first time + * the port is opened so this value needs to be preserved. + */ + if (up->bugs & UART_BUG_THRE) { + pr_debug("%s - using backup timer\n", port->name); + + up->timer.function = serial8250_backup_timeout; + mod_timer(&up->timer, jiffies + + uart_poll_timeout(port) + HZ / 5); + } + + /* + * If the "interrupt" for this port doesn't correspond with any + * hardware interrupt, we use a timer-based system. The original + * driver used to do this with IRQ0. 
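 * The UART_BUG_THRE backup timer armed above is only a safety net for
 * UARTs with an unreliable THRE interrupt, which is why it fires at the
 * normal poll interval plus an extra HZ/5.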
+ */ + if (!port->irq) + mod_timer(&up->timer, jiffies + uart_poll_timeout(port)); +} + +static int univ8250_setup_irq(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + + if (port->irq) + return serial_link_irq_chain(up); + + return 0; +} + +static void univ8250_release_irq(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + + del_timer_sync(&up->timer); + up->timer.function = serial8250_timeout; + if (port->irq) + serial_unlink_irq_chain(up); +} + +#ifdef CONFIG_SERIAL_8250_RSA +static int serial8250_request_rsa_resource(struct uart_8250_port *up) +{ + unsigned long start = UART_RSA_BASE << up->port.regshift; + unsigned int size = 8 << up->port.regshift; + struct uart_port *port = &up->port; + int ret = -EINVAL; + + switch (port->iotype) { + case UPIO_HUB6: + case UPIO_PORT: + start += port->iobase; + if (request_region(start, size, "serial-rsa")) + ret = 0; + else + ret = -EBUSY; + break; + } + + return ret; +} + +static void serial8250_release_rsa_resource(struct uart_8250_port *up) +{ + unsigned long offset = UART_RSA_BASE << up->port.regshift; + unsigned int size = 8 << up->port.regshift; + struct uart_port *port = &up->port; + + switch (port->iotype) { + case UPIO_HUB6: + case UPIO_PORT: + release_region(port->iobase + offset, size); + break; + } +} +#endif + +static const struct uart_ops *base_ops; +static struct uart_ops univ8250_port_ops; + +static const struct uart_8250_ops univ8250_driver_ops = { + .setup_irq = univ8250_setup_irq, + .release_irq = univ8250_release_irq, + .setup_timer = univ8250_setup_timer, +}; + +static struct uart_8250_port serial8250_ports[UART_NR]; + +/** + * serial8250_get_port - retrieve struct uart_8250_port + * @line: serial line number + * + * This function retrieves struct uart_8250_port for the specific line. + * This struct *must* *not* be used to perform a 8250 or serial core operation + * which is not accessible otherwise. Its only purpose is to make the struct + * accessible to the runtime-pm callbacks for context suspend/restore. + * The lock assumption made here is none because runtime-pm suspend/resume + * callbacks should not be invoked if there is any operation performed on the + * port. 
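 * A typical use, as in the bcm7271 driver earlier in this patch, is a
 * suspend/resume hook that stored the line number at probe time:
 *
 *	struct uart_8250_port *up = serial8250_get_port(priv->line);
 *	struct uart_port *port = &up->port;
 *
 * where priv->line is whatever serial8250_register_8250_port() returned.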
+ */ +struct uart_8250_port *serial8250_get_port(int line) +{ + return &serial8250_ports[line]; +} +EXPORT_SYMBOL_GPL(serial8250_get_port); + +static void (*serial8250_isa_config)(int port, struct uart_port *up, + u32 *capabilities); + +void serial8250_set_isa_configurator( + void (*v)(int port, struct uart_port *up, u32 *capabilities)) +{ + serial8250_isa_config = v; +} +EXPORT_SYMBOL(serial8250_set_isa_configurator); + +#ifdef CONFIG_SERIAL_8250_RSA + +static void univ8250_config_port(struct uart_port *port, int flags) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + up->probe &= ~UART_PROBE_RSA; + if (port->type == PORT_RSA) { + if (serial8250_request_rsa_resource(up) == 0) + up->probe |= UART_PROBE_RSA; + } else if (flags & UART_CONFIG_TYPE) { + int i; + + for (i = 0; i < probe_rsa_count; i++) { + if (probe_rsa[i] == up->port.iobase) { + if (serial8250_request_rsa_resource(up) == 0) + up->probe |= UART_PROBE_RSA; + break; + } + } + } + + base_ops->config_port(port, flags); + + if (port->type != PORT_RSA && up->probe & UART_PROBE_RSA) + serial8250_release_rsa_resource(up); +} + +static int univ8250_request_port(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + int ret; + + ret = base_ops->request_port(port); + if (ret == 0 && port->type == PORT_RSA) { + ret = serial8250_request_rsa_resource(up); + if (ret < 0) + base_ops->release_port(port); + } + + return ret; +} + +static void univ8250_release_port(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + if (port->type == PORT_RSA) + serial8250_release_rsa_resource(up); + base_ops->release_port(port); +} + +static void univ8250_rsa_support(struct uart_ops *ops) +{ + ops->config_port = univ8250_config_port; + ops->request_port = univ8250_request_port; + ops->release_port = univ8250_release_port; +} + +#else +#define univ8250_rsa_support(x) do { } while (0) +#endif /* CONFIG_SERIAL_8250_RSA */ + +static inline void serial8250_apply_quirks(struct uart_8250_port *up) +{ + up->port.quirks |= skip_txen_test ? 
UPQ_NO_TXEN_TEST : 0; +} + +static void __init serial8250_isa_init_ports(void) +{ + struct uart_8250_port *up; + static int first = 1; + int i, irqflag = 0; + + if (!first) + return; + first = 0; + + if (nr_uarts > UART_NR) + nr_uarts = UART_NR; + + for (i = 0; i < nr_uarts; i++) { + struct uart_8250_port *up = &serial8250_ports[i]; + struct uart_port *port = &up->port; + + port->line = i; + serial8250_init_port(up); + if (!base_ops) + base_ops = port->ops; + port->ops = &univ8250_port_ops; + + timer_setup(&up->timer, serial8250_timeout, 0); + + up->ops = &univ8250_driver_ops; + + if (IS_ENABLED(CONFIG_ALPHA_JENSEN) || + (IS_ENABLED(CONFIG_ALPHA_GENERIC) && alpha_jensen())) + port->set_mctrl = alpha_jensen_set_mctrl; + + serial8250_set_defaults(up); + } + + /* chain base port ops to support Remote Supervisor Adapter */ + univ8250_port_ops = *base_ops; + univ8250_rsa_support(&univ8250_port_ops); + + if (share_irqs) + irqflag = IRQF_SHARED; + + for (i = 0, up = serial8250_ports; + i < ARRAY_SIZE(old_serial_port) && i < nr_uarts; + i++, up++) { + struct uart_port *port = &up->port; + + port->iobase = old_serial_port[i].port; + port->irq = irq_canonicalize(old_serial_port[i].irq); + port->irqflags = 0; + port->uartclk = old_serial_port[i].baud_base * 16; + port->flags = old_serial_port[i].flags; + port->hub6 = 0; + port->membase = old_serial_port[i].iomem_base; + port->iotype = old_serial_port[i].io_type; + port->regshift = old_serial_port[i].iomem_reg_shift; + + port->irqflags |= irqflag; + if (serial8250_isa_config != NULL) + serial8250_isa_config(i, &up->port, &up->capabilities); + } +} + +static void __init +serial8250_register_ports(struct uart_driver *drv, struct device *dev) +{ + int i; + + for (i = 0; i < nr_uarts; i++) { + struct uart_8250_port *up = &serial8250_ports[i]; + + if (up->port.type == PORT_8250_CIR) + continue; + + if (up->port.dev) + continue; + + up->port.dev = dev; + + if (uart_console_enabled(&up->port)) + pm_runtime_get_sync(up->port.dev); + + serial8250_apply_quirks(up); + uart_add_one_port(drv, &up->port); + } +} + +#ifdef CONFIG_SERIAL_8250_CONSOLE + +static void univ8250_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_8250_port *up = &serial8250_ports[co->index]; + + serial8250_console_write(up, s, count); +} + +static int univ8250_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int retval; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index >= nr_uarts) + co->index = 0; + port = &serial8250_ports[co->index].port; + /* link port to console */ + port->cons = co; + + retval = serial8250_console_setup(port, options, false); + if (retval != 0) + port->cons = NULL; + return retval; +} + +static int univ8250_console_exit(struct console *co) +{ + struct uart_port *port; + + port = &serial8250_ports[co->index].port; + return serial8250_console_exit(port); +} + +/** + * univ8250_console_match - non-standard console matching + * @co: registering console + * @name: name from console command line + * @idx: index from console command line + * @options: ptr to option string from console command line + * + * Only attempts to match console command lines of the form: + * console=uart[8250],io|mmio|mmio16|mmio32,[,] + * console=uart[8250],0x[,] + * This form is used to register an initial earlycon boot console and + * replace it with the serial8250_console at 8250 driver init. 
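 * A concrete command line of this form (address purely illustrative)
 * would be
 *
 *	console=uart8250,mmio32,0x1e787000,115200n8
 *
 * and the trailing options may be omitted when firmware has already set
 * the port up.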
+ * + * Performs console setup for a match (as required by interface) + * If no are specified, then assume the h/w is already setup. + * + * Returns 0 if console matches; otherwise non-zero to use default matching + */ +static int univ8250_console_match(struct console *co, char *name, int idx, + char *options) +{ + char match[] = "uart"; /* 8250-specific earlycon name */ + unsigned char iotype; + resource_size_t addr; + int i; + + if (strncmp(name, match, 4) != 0) + return -ENODEV; + + if (uart_parse_earlycon(options, &iotype, &addr, &options)) + return -ENODEV; + + /* try to match the port specified on the command line */ + for (i = 0; i < nr_uarts; i++) { + struct uart_port *port = &serial8250_ports[i].port; + + if (port->iotype != iotype) + continue; + if ((iotype == UPIO_MEM || iotype == UPIO_MEM16 || + iotype == UPIO_MEM32 || iotype == UPIO_MEM32BE) + && (port->mapbase != addr)) + continue; + if (iotype == UPIO_PORT && port->iobase != addr) + continue; + + co->index = i; + port->cons = co; + return serial8250_console_setup(port, options, true); + } + + return -ENODEV; +} + +static struct console univ8250_console = { + .name = "ttyS", + .write = univ8250_console_write, + .device = uart_console_device, + .setup = univ8250_console_setup, + .exit = univ8250_console_exit, + .match = univ8250_console_match, + .flags = CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, + .data = &serial8250_reg, +}; + +static int __init univ8250_console_init(void) +{ + if (nr_uarts == 0) + return -ENODEV; + + serial8250_isa_init_ports(); + register_console(&univ8250_console); + return 0; +} +console_initcall(univ8250_console_init); + +#define SERIAL8250_CONSOLE (&univ8250_console) +#else +#define SERIAL8250_CONSOLE NULL +#endif + +static struct uart_driver serial8250_reg = { + .owner = THIS_MODULE, + .driver_name = "serial", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .cons = SERIAL8250_CONSOLE, +}; + +/* + * early_serial_setup - early registration for 8250 ports + * + * Setup an 8250 port structure prior to console initialisation. Use + * after console initialisation will cause undefined behaviour. + */ +int __init early_serial_setup(struct uart_port *port) +{ + struct uart_port *p; + + if (port->line >= ARRAY_SIZE(serial8250_ports) || nr_uarts == 0) + return -ENODEV; + + serial8250_isa_init_ports(); + p = &serial8250_ports[port->line].port; + p->iobase = port->iobase; + p->membase = port->membase; + p->irq = port->irq; + p->irqflags = port->irqflags; + p->uartclk = port->uartclk; + p->fifosize = port->fifosize; + p->regshift = port->regshift; + p->iotype = port->iotype; + p->flags = port->flags; + p->mapbase = port->mapbase; + p->mapsize = port->mapsize; + p->private_data = port->private_data; + p->type = port->type; + p->line = port->line; + + serial8250_set_defaults(up_to_u8250p(p)); + + if (port->serial_in) + p->serial_in = port->serial_in; + if (port->serial_out) + p->serial_out = port->serial_out; + if (port->handle_irq) + p->handle_irq = port->handle_irq; + + return 0; +} + +/** + * serial8250_suspend_port - suspend one serial port + * @line: serial line number + * + * Suspend one serial port. 
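+ *
+ * If the port is an active kernel console and console suspend is
+ * disabled, a canary byte is written to the scratch register before
+ * suspending so that the console code can notice (and reinitialise)
+ * a UART that was powered down across the sleep.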
+ */ +void serial8250_suspend_port(int line) +{ + struct uart_8250_port *up = &serial8250_ports[line]; + struct uart_port *port = &up->port; + + if (!console_suspend_enabled && uart_console(port) && + port->type != PORT_8250) { + unsigned char canary = 0xa5; + + serial_out(up, UART_SCR, canary); + if (serial_in(up, UART_SCR) == canary) + up->canary = canary; + } + + uart_suspend_port(&serial8250_reg, port); +} +EXPORT_SYMBOL(serial8250_suspend_port); + +/** + * serial8250_resume_port - resume one serial port + * @line: serial line number + * + * Resume one serial port. + */ +void serial8250_resume_port(int line) +{ + struct uart_8250_port *up = &serial8250_ports[line]; + struct uart_port *port = &up->port; + + up->canary = 0; + + if (up->capabilities & UART_NATSEMI) { + /* Ensure it's still in high speed mode */ + serial_port_out(port, UART_LCR, 0xE0); + + ns16550a_goto_highspeed(up); + + serial_port_out(port, UART_LCR, 0); + port->uartclk = 921600*16; + } + uart_resume_port(&serial8250_reg, port); +} +EXPORT_SYMBOL(serial8250_resume_port); + +/* + * Register a set of serial devices attached to a platform device. The + * list is terminated with a zero flags entry, which means we expect + * all entries to have at least UPF_BOOT_AUTOCONF set. + */ +static int serial8250_probe(struct platform_device *dev) +{ + struct plat_serial8250_port *p = dev_get_platdata(&dev->dev); + struct uart_8250_port uart; + int ret, i, irqflag = 0; + + memset(&uart, 0, sizeof(uart)); + + if (share_irqs) + irqflag = IRQF_SHARED; + + for (i = 0; p && p->flags != 0; p++, i++) { + uart.port.iobase = p->iobase; + uart.port.membase = p->membase; + uart.port.irq = p->irq; + uart.port.irqflags = p->irqflags; + uart.port.uartclk = p->uartclk; + uart.port.regshift = p->regshift; + uart.port.iotype = p->iotype; + uart.port.flags = p->flags; + uart.port.mapbase = p->mapbase; + uart.port.hub6 = p->hub6; + uart.port.has_sysrq = p->has_sysrq; + uart.port.private_data = p->private_data; + uart.port.type = p->type; + uart.port.serial_in = p->serial_in; + uart.port.serial_out = p->serial_out; + uart.port.handle_irq = p->handle_irq; + uart.port.handle_break = p->handle_break; + uart.port.set_termios = p->set_termios; + uart.port.set_ldisc = p->set_ldisc; + uart.port.get_mctrl = p->get_mctrl; + uart.port.pm = p->pm; + uart.port.dev = &dev->dev; + uart.port.irqflags |= irqflag; + ret = serial8250_register_8250_port(&uart); + if (ret < 0) { + dev_err(&dev->dev, "unable to register port at index %d " + "(IO%lx MEM%llx IRQ%d): %d\n", i, + p->iobase, (unsigned long long)p->mapbase, + p->irq, ret); + } + } + return 0; +} + +/* + * Remove serial ports registered against a platform device. 
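+ * serial8250_unregister_port() hands each removed line back to the
+ * legacy ISA device (when one is registered), so the slot stays
+ * available for later re-registration.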
+ */ +static int serial8250_remove(struct platform_device *dev) +{ + int i; + + for (i = 0; i < nr_uarts; i++) { + struct uart_8250_port *up = &serial8250_ports[i]; + + if (up->port.dev == &dev->dev) + serial8250_unregister_port(i); + } + return 0; +} + +static int serial8250_suspend(struct platform_device *dev, pm_message_t state) +{ + int i; + + for (i = 0; i < UART_NR; i++) { + struct uart_8250_port *up = &serial8250_ports[i]; + + if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) + uart_suspend_port(&serial8250_reg, &up->port); + } + + return 0; +} + +static int serial8250_resume(struct platform_device *dev) +{ + int i; + + for (i = 0; i < UART_NR; i++) { + struct uart_8250_port *up = &serial8250_ports[i]; + + if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) + serial8250_resume_port(i); + } + + return 0; +} + +static struct platform_driver serial8250_isa_driver = { + .probe = serial8250_probe, + .remove = serial8250_remove, + .suspend = serial8250_suspend, + .resume = serial8250_resume, + .driver = { + .name = "serial8250", + }, +}; + +/* + * This "device" covers _all_ ISA 8250-compatible serial devices listed + * in the table in include/asm/serial.h + */ +static struct platform_device *serial8250_isa_devs; + +/* + * serial8250_register_8250_port and serial8250_unregister_port allows for + * 16x50 serial ports to be configured at run-time, to support PCMCIA + * modems and PCI multiport cards. + */ +static DEFINE_MUTEX(serial_mutex); + +static struct uart_8250_port *serial8250_find_match_or_unused(const struct uart_port *port) +{ + int i; + + /* + * First, find a port entry which matches. + */ + for (i = 0; i < nr_uarts; i++) + if (uart_match_port(&serial8250_ports[i].port, port)) + return &serial8250_ports[i]; + + /* try line number first if still available */ + i = port->line; + if (i < nr_uarts && serial8250_ports[i].port.type == PORT_UNKNOWN && + serial8250_ports[i].port.iobase == 0) + return &serial8250_ports[i]; + /* + * We didn't find a matching entry, so look for the first + * free entry. We look for one which hasn't been previously + * used (indicated by zero iobase). + */ + for (i = 0; i < nr_uarts; i++) + if (serial8250_ports[i].port.type == PORT_UNKNOWN && + serial8250_ports[i].port.iobase == 0) + return &serial8250_ports[i]; + + /* + * That also failed. Last resort is to find any entry which + * doesn't have a real port associated with it. + */ + for (i = 0; i < nr_uarts; i++) + if (serial8250_ports[i].port.type == PORT_UNKNOWN) + return &serial8250_ports[i]; + + return NULL; +} + +static void serial_8250_overrun_backoff_work(struct work_struct *work) +{ + struct uart_8250_port *up = + container_of(to_delayed_work(work), struct uart_8250_port, + overrun_backoff); + struct uart_port *port = &up->port; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + up->ier |= UART_IER_RLSI | UART_IER_RDI; + up->port.read_status_mask |= UART_LSR_DR; + serial_out(up, UART_IER, up->ier); + spin_unlock_irqrestore(&port->lock, flags); +} + +/** + * serial8250_register_8250_port - register a serial port + * @up: serial port template + * + * Configure the serial port specified by the request. If the + * port exists and is in use, it is hung up and unregistered + * first. + * + * The port is then probed and if necessary the IRQ is autodetected + * If this fails an error is returned. + * + * On success the port is ready to use and the line number is returned. 
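+ *
+ * A minimal caller could look like the sketch below (illustrative
+ * only; the I/O address, IRQ and clock are hypothetical and a real
+ * driver will usually fill in more fields):
+ *
+ *	struct uart_8250_port probe = { };
+ *	int line;
+ *
+ *	probe.port.iobase  = 0x3f8;
+ *	probe.port.irq     = 4;
+ *	probe.port.uartclk = 1843200;
+ *	probe.port.iotype  = UPIO_PORT;
+ *	probe.port.flags   = UPF_SHARE_IRQ;
+ *
+ *	line = serial8250_register_8250_port(&probe);
+ *	if (line < 0)
+ *		return line;
+ *	...
+ *	serial8250_unregister_port(line);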
+ */ +int serial8250_register_8250_port(const struct uart_8250_port *up) +{ + struct uart_8250_port *uart; + int ret = -ENOSPC; + + if (up->port.uartclk == 0) + return -EINVAL; + + mutex_lock(&serial_mutex); + + uart = serial8250_find_match_or_unused(&up->port); + if (uart && uart->port.type != PORT_8250_CIR) { + struct mctrl_gpios *gpios; + + if (uart->port.dev) + uart_remove_one_port(&serial8250_reg, &uart->port); + + uart->port.iobase = up->port.iobase; + uart->port.membase = up->port.membase; + uart->port.irq = up->port.irq; + uart->port.irqflags = up->port.irqflags; + uart->port.uartclk = up->port.uartclk; + uart->port.fifosize = up->port.fifosize; + uart->port.regshift = up->port.regshift; + uart->port.iotype = up->port.iotype; + uart->port.flags = up->port.flags | UPF_BOOT_AUTOCONF; + uart->bugs = up->bugs; + uart->port.mapbase = up->port.mapbase; + uart->port.mapsize = up->port.mapsize; + uart->port.private_data = up->port.private_data; + uart->tx_loadsz = up->tx_loadsz; + uart->capabilities = up->capabilities; + uart->port.throttle = up->port.throttle; + uart->port.unthrottle = up->port.unthrottle; + uart->port.rs485_config = up->port.rs485_config; + uart->port.rs485_supported = up->port.rs485_supported; + uart->port.rs485 = up->port.rs485; + uart->rs485_start_tx = up->rs485_start_tx; + uart->rs485_stop_tx = up->rs485_stop_tx; + uart->lsr_save_mask = up->lsr_save_mask; + uart->dma = up->dma; + + /* Take tx_loadsz from fifosize if it wasn't set separately */ + if (uart->port.fifosize && !uart->tx_loadsz) + uart->tx_loadsz = uart->port.fifosize; + + if (up->port.dev) { + uart->port.dev = up->port.dev; + ret = uart_get_rs485_mode(&uart->port); + if (ret) + goto err; + } + + if (up->port.flags & UPF_FIXED_TYPE) + uart->port.type = up->port.type; + + /* + * Only call mctrl_gpio_init(), if the device has no ACPI + * companion device + */ + if (!has_acpi_companion(uart->port.dev)) { + gpios = mctrl_gpio_init(&uart->port, 0); + if (IS_ERR(gpios)) { + ret = PTR_ERR(gpios); + goto err; + } else { + uart->gpios = gpios; + } + } + + serial8250_set_defaults(uart); + + /* Possibly override default I/O functions. 
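+	 * Each hook below is copied only when the caller actually
+	 * provided it, so a registering driver may leave any of these
+	 * callbacks NULL to keep the 8250 defaults.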
*/ + if (up->port.serial_in) + uart->port.serial_in = up->port.serial_in; + if (up->port.serial_out) + uart->port.serial_out = up->port.serial_out; + if (up->port.handle_irq) + uart->port.handle_irq = up->port.handle_irq; + /* Possibly override set_termios call */ + if (up->port.set_termios) + uart->port.set_termios = up->port.set_termios; + if (up->port.set_ldisc) + uart->port.set_ldisc = up->port.set_ldisc; + if (up->port.get_mctrl) + uart->port.get_mctrl = up->port.get_mctrl; + if (up->port.set_mctrl) + uart->port.set_mctrl = up->port.set_mctrl; + if (up->port.get_divisor) + uart->port.get_divisor = up->port.get_divisor; + if (up->port.set_divisor) + uart->port.set_divisor = up->port.set_divisor; + if (up->port.startup) + uart->port.startup = up->port.startup; + if (up->port.shutdown) + uart->port.shutdown = up->port.shutdown; + if (up->port.pm) + uart->port.pm = up->port.pm; + if (up->port.handle_break) + uart->port.handle_break = up->port.handle_break; + if (up->dl_read) + uart->dl_read = up->dl_read; + if (up->dl_write) + uart->dl_write = up->dl_write; + + if (uart->port.type != PORT_8250_CIR) { + if (serial8250_isa_config != NULL) + serial8250_isa_config(0, &uart->port, + &uart->capabilities); + + serial8250_apply_quirks(uart); + ret = uart_add_one_port(&serial8250_reg, + &uart->port); + if (ret) + goto err; + + ret = uart->port.line; + } else { + dev_info(uart->port.dev, + "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", + uart->port.iobase, + (unsigned long long)uart->port.mapbase, + uart->port.irq); + + ret = 0; + } + + if (!uart->lsr_save_mask) + uart->lsr_save_mask = LSR_SAVE_FLAGS; /* Use default LSR mask */ + + /* Initialise interrupt backoff work if required */ + if (up->overrun_backoff_time_ms > 0) { + uart->overrun_backoff_time_ms = + up->overrun_backoff_time_ms; + INIT_DELAYED_WORK(&uart->overrun_backoff, + serial_8250_overrun_backoff_work); + } else { + uart->overrun_backoff_time_ms = 0; + } + } + + mutex_unlock(&serial_mutex); + + return ret; + +err: + uart->port.dev = NULL; + mutex_unlock(&serial_mutex); + return ret; +} +EXPORT_SYMBOL(serial8250_register_8250_port); + +/** + * serial8250_unregister_port - remove a 16x50 serial port at runtime + * @line: serial line number + * + * Remove one serial port. This may not be called from interrupt + * context. We hand the port back to the our control. + */ +void serial8250_unregister_port(int line) +{ + struct uart_8250_port *uart = &serial8250_ports[line]; + + mutex_lock(&serial_mutex); + + if (uart->em485) { + unsigned long flags; + + spin_lock_irqsave(&uart->port.lock, flags); + serial8250_em485_destroy(uart); + spin_unlock_irqrestore(&uart->port.lock, flags); + } + + uart_remove_one_port(&serial8250_reg, &uart->port); + if (serial8250_isa_devs) { + uart->port.flags &= ~UPF_BOOT_AUTOCONF; + uart->port.type = PORT_UNKNOWN; + uart->port.dev = &serial8250_isa_devs->dev; + uart->capabilities = 0; + serial8250_init_port(uart); + serial8250_apply_quirks(uart); + uart_add_one_port(&serial8250_reg, &uart->port); + } else { + uart->port.dev = NULL; + } + mutex_unlock(&serial_mutex); +} +EXPORT_SYMBOL(serial8250_unregister_port); + +static int __init serial8250_init(void) +{ + int ret; + + if (nr_uarts == 0) + return -ENODEV; + + serial8250_isa_init_ports(); + + pr_info("Serial: 8250/16550 driver, %d ports, IRQ sharing %sabled\n", + nr_uarts, share_irqs ? 
"en" : "dis"); + +#ifdef CONFIG_SPARC + ret = sunserial_register_minors(&serial8250_reg, UART_NR); +#else + serial8250_reg.nr = UART_NR; + ret = uart_register_driver(&serial8250_reg); +#endif + if (ret) + goto out; + + ret = serial8250_pnp_init(); + if (ret) + goto unreg_uart_drv; + + serial8250_isa_devs = platform_device_alloc("serial8250", + PLAT8250_DEV_LEGACY); + if (!serial8250_isa_devs) { + ret = -ENOMEM; + goto unreg_pnp; + } + + ret = platform_device_add(serial8250_isa_devs); + if (ret) + goto put_dev; + + serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev); + + ret = platform_driver_register(&serial8250_isa_driver); + if (ret == 0) + goto out; + + platform_device_del(serial8250_isa_devs); +put_dev: + platform_device_put(serial8250_isa_devs); +unreg_pnp: + serial8250_pnp_exit(); +unreg_uart_drv: +#ifdef CONFIG_SPARC + sunserial_unregister_minors(&serial8250_reg, UART_NR); +#else + uart_unregister_driver(&serial8250_reg); +#endif +out: + return ret; +} + +static void __exit serial8250_exit(void) +{ + struct platform_device *isa_dev = serial8250_isa_devs; + + /* + * This tells serial8250_unregister_port() not to re-register + * the ports (thereby making serial8250_isa_driver permanently + * in use.) + */ + serial8250_isa_devs = NULL; + + platform_driver_unregister(&serial8250_isa_driver); + platform_device_unregister(isa_dev); + + serial8250_pnp_exit(); + +#ifdef CONFIG_SPARC + sunserial_unregister_minors(&serial8250_reg, UART_NR); +#else + uart_unregister_driver(&serial8250_reg); +#endif +} + +module_init(serial8250_init); +module_exit(serial8250_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Generic 8250/16x50 serial driver"); + +module_param_hw(share_irqs, uint, other, 0644); +MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices (unsafe)"); + +module_param(nr_uarts, uint, 0644); +MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")"); + +module_param(skip_txen_test, uint, 0644); +MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time"); + +#ifdef CONFIG_SERIAL_8250_RSA +module_param_hw_array(probe_rsa, ulong, ioport, &probe_rsa_count, 0444); +MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); +#endif +MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); + +#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS +#ifndef MODULE +/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name + * working as well for the module options so we don't break people. We + * need to keep the names identical and the convenient macros will happily + * refuse to let us do that by failing the build with redefinition errors + * of global variables. So we stick them inside a dummy function to avoid + * those conflicts. The options still get parsed, and the redefined + * MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive. + * + * This is hacky. I'm sorry. + */ +static void __used s8250_options(void) +{ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "8250_core." 
+ + module_param_cb(share_irqs, ¶m_ops_uint, &share_irqs, 0644); + module_param_cb(nr_uarts, ¶m_ops_uint, &nr_uarts, 0644); + module_param_cb(skip_txen_test, ¶m_ops_uint, &skip_txen_test, 0644); +#ifdef CONFIG_SERIAL_8250_RSA + __module_param_call(MODULE_PARAM_PREFIX, probe_rsa, + ¶m_array_ops, .arr = &__param_arr_probe_rsa, + 0444, -1, 0); +#endif +} +#else +MODULE_ALIAS("8250_core"); +#endif +#endif diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c new file mode 100644 index 000000000..a442f0dfd --- /dev/null +++ b/drivers/tty/serial/8250/8250_dma.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * 8250_dma.c - DMA Engine API support for 8250.c + * + * Copyright (C) 2013 Intel Corporation + */ +#include +#include +#include +#include + +#include "8250.h" + +static void __dma_tx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + struct circ_buf *xmit = &p->port.state->xmit; + unsigned long flags; + int ret; + + dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr, + UART_XMIT_SIZE, DMA_TO_DEVICE); + + spin_lock_irqsave(&p->port.lock, flags); + + dma->tx_running = 0; + + uart_xmit_advance(&p->port, dma->tx_size); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&p->port); + + ret = serial8250_tx_dma(p); + if (ret || !dma->tx_running) + serial8250_set_THRI(p); + + spin_unlock_irqrestore(&p->port.lock, flags); +} + +static void __dma_rx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + struct tty_port *tty_port = &p->port.state->port; + struct dma_tx_state state; + enum dma_status dma_status; + int count; + + /* + * New DMA Rx can be started during the completion handler before it + * could acquire port's lock and it might still be ongoing. Don't to + * anything in such case. 
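+	 * dmaengine_tx_status() reports DMA_IN_PROGRESS for exactly that
+	 * situation; the data will then be picked up when that transfer
+	 * completes. Otherwise the received byte count is rx_size minus
+	 * the reported residue, e.g. (illustrative figures) a 4096-byte
+	 * buffer with residue 3904 yields 192 bytes for the tty layer.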
+ */ + dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + if (dma_status == DMA_IN_PROGRESS) + return; + + count = dma->rx_size - state.residue; + + tty_insert_flip_string(tty_port, dma->rx_buf, count); + p->port.icount.rx += count; + dma->rx_running = 0; + + tty_flip_buffer_push(tty_port); +} + +static void dma_rx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + unsigned long flags; + + spin_lock_irqsave(&p->port.lock, flags); + if (dma->rx_running) + __dma_rx_complete(p); + spin_unlock_irqrestore(&p->port.lock, flags); +} + +int serial8250_tx_dma(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + struct circ_buf *xmit = &p->port.state->xmit; + struct dma_async_tx_descriptor *desc; + struct uart_port *up = &p->port; + int ret; + + if (dma->tx_running) { + if (up->x_char) { + dmaengine_pause(dma->txchan); + uart_xchar_out(up, UART_TX); + dmaengine_resume(dma->txchan); + } + return 0; + } else if (up->x_char) { + uart_xchar_out(up, UART_TX); + } + + if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { + /* We have been called from __dma_tx_complete() */ + return 0; + } + + dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + + serial8250_do_prepare_tx_dma(p); + + desc = dmaengine_prep_slave_single(dma->txchan, + dma->tx_addr + xmit->tail, + dma->tx_size, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + ret = -EBUSY; + goto err; + } + + dma->tx_running = 1; + desc->callback = __dma_tx_complete; + desc->callback_param = p; + + dma->tx_cookie = dmaengine_submit(desc); + + dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr, + UART_XMIT_SIZE, DMA_TO_DEVICE); + + dma_async_issue_pending(dma->txchan); + serial8250_clear_THRI(p); + dma->tx_err = 0; + + return 0; +err: + dma->tx_err = 1; + return ret; +} + +int serial8250_rx_dma(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + struct dma_async_tx_descriptor *desc; + + if (dma->rx_running) + return 0; + + serial8250_do_prepare_rx_dma(p); + + desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr, + dma->rx_size, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EBUSY; + + dma->rx_running = 1; + desc->callback = dma_rx_complete; + desc->callback_param = p; + + dma->rx_cookie = dmaengine_submit(desc); + + dma_async_issue_pending(dma->rxchan); + + return 0; +} + +void serial8250_rx_dma_flush(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + if (dma->rx_running) { + dmaengine_pause(dma->rxchan); + __dma_rx_complete(p); + dmaengine_terminate_async(dma->rxchan); + } +} +EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush); + +int serial8250_request_dma(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + phys_addr_t rx_dma_addr = dma->rx_dma_addr ? + dma->rx_dma_addr : p->port.mapbase; + phys_addr_t tx_dma_addr = dma->tx_dma_addr ? 
+ dma->tx_dma_addr : p->port.mapbase; + dma_cap_mask_t mask; + struct dma_slave_caps caps; + int ret; + + /* Default slave configuration parameters */ + dma->rxconf.direction = DMA_DEV_TO_MEM; + dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma->rxconf.src_addr = rx_dma_addr + UART_RX; + + dma->txconf.direction = DMA_MEM_TO_DEV; + dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma->txconf.dst_addr = tx_dma_addr + UART_TX; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Get a channel for RX */ + dma->rxchan = dma_request_slave_channel_compat(mask, + dma->fn, dma->rx_param, + p->port.dev, "rx"); + if (!dma->rxchan) + return -ENODEV; + + /* 8250 rx dma requires dmaengine driver to support pause/terminate */ + ret = dma_get_slave_caps(dma->rxchan, &caps); + if (ret) + goto release_rx; + if (!caps.cmd_pause || !caps.cmd_terminate || + caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) { + ret = -EINVAL; + goto release_rx; + } + + dmaengine_slave_config(dma->rxchan, &dma->rxconf); + + /* Get a channel for TX */ + dma->txchan = dma_request_slave_channel_compat(mask, + dma->fn, dma->tx_param, + p->port.dev, "tx"); + if (!dma->txchan) { + ret = -ENODEV; + goto release_rx; + } + + /* 8250 tx dma requires dmaengine driver to support terminate */ + ret = dma_get_slave_caps(dma->txchan, &caps); + if (ret) + goto err; + if (!caps.cmd_terminate) { + ret = -EINVAL; + goto err; + } + + dmaengine_slave_config(dma->txchan, &dma->txconf); + + /* RX buffer */ + if (!dma->rx_size) + dma->rx_size = PAGE_SIZE; + + dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size, + &dma->rx_addr, GFP_KERNEL); + if (!dma->rx_buf) { + ret = -ENOMEM; + goto err; + } + + /* TX buffer */ + dma->tx_addr = dma_map_single(dma->txchan->device->dev, + p->port.state->xmit.buf, + UART_XMIT_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) { + dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, + dma->rx_buf, dma->rx_addr); + ret = -ENOMEM; + goto err; + } + + dev_dbg_ratelimited(p->port.dev, "got both dma channels\n"); + + return 0; +err: + dma_release_channel(dma->txchan); +release_rx: + dma_release_channel(dma->rxchan); + return ret; +} +EXPORT_SYMBOL_GPL(serial8250_request_dma); + +void serial8250_release_dma(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + if (!dma) + return; + + /* Release RX resources */ + dmaengine_terminate_sync(dma->rxchan); + dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf, + dma->rx_addr); + dma_release_channel(dma->rxchan); + dma->rxchan = NULL; + + /* Release TX resources */ + dmaengine_terminate_sync(dma->txchan); + dma_unmap_single(dma->txchan->device->dev, dma->tx_addr, + UART_XMIT_SIZE, DMA_TO_DEVICE); + dma_release_channel(dma->txchan); + dma->txchan = NULL; + dma->tx_running = 0; + + dev_dbg_ratelimited(p->port.dev, "dma channels released\n"); +} +EXPORT_SYMBOL_GPL(serial8250_release_dma); diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c new file mode 100644 index 000000000..88035100b --- /dev/null +++ b/drivers/tty/serial/8250/8250_dw.c @@ -0,0 +1,819 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Synopsys DesignWare 8250 driver. + * + * Copyright 2011 Picochip, Jamie Iles. + * Copyright 2013 Intel Corporation + * + * The Synopsys DesignWare 8250 has an extra feature whereby it detects if the + * LCR is written whilst busy. 
If it is, then a busy detect interrupt is + * raised, the LCR needs to be rewritten and the uart status register read. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "8250_dwlib.h" + +/* Offsets for the DesignWare specific registers */ +#define DW_UART_USR 0x1f /* UART Status Register */ +#define DW_UART_DMASA 0xa8 /* DMA Software Ack */ + +#define OCTEON_UART_USR 0x27 /* UART Status Register */ + +#define RZN1_UART_TDMACR 0x10c /* DMA Control Register Transmit Mode */ +#define RZN1_UART_RDMACR 0x110 /* DMA Control Register Receive Mode */ + +/* DesignWare specific register fields */ +#define DW_UART_MCR_SIRE BIT(6) + +/* Renesas specific register fields */ +#define RZN1_UART_xDMACR_DMA_EN BIT(0) +#define RZN1_UART_xDMACR_1_WORD_BURST (0 << 1) +#define RZN1_UART_xDMACR_4_WORD_BURST (1 << 1) +#define RZN1_UART_xDMACR_8_WORD_BURST (2 << 1) +#define RZN1_UART_xDMACR_BLK_SZ(x) ((x) << 3) + +/* Quirks */ +#define DW_UART_QUIRK_OCTEON BIT(0) +#define DW_UART_QUIRK_ARMADA_38X BIT(1) +#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2) +#define DW_UART_QUIRK_IS_DMA_FC BIT(3) + +static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb) +{ + return container_of(nb, struct dw8250_data, clk_notifier); +} + +static inline struct dw8250_data *work_to_dw8250_data(struct work_struct *work) +{ + return container_of(work, struct dw8250_data, clk_work); +} + +static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value) +{ + struct dw8250_data *d = to_dw8250_data(p->private_data); + + /* Override any modem control signals if needed */ + if (offset == UART_MSR) { + value |= d->msr_mask_on; + value &= ~d->msr_mask_off; + } + + return value; +} + +static void dw8250_force_idle(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int lsr; + + serial8250_clear_and_reinit_fifos(up); + + /* + * With PSLVERR_RESP_EN parameter set to 1, the device generates an + * error response when an attempt to read an empty RBR with FIFO + * enabled. 
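+	 * Hence the dummy RBR read below is skipped when the FIFO is
+	 * enabled but the LSR does not report data ready.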
+ */ + if (up->fcr & UART_FCR_ENABLE_FIFO) { + lsr = p->serial_in(p, UART_LSR); + if (!(lsr & UART_LSR_DR)) + return; + } + + (void)p->serial_in(p, UART_RX); +} + +static void dw8250_check_lcr(struct uart_port *p, int value) +{ + void __iomem *offset = p->membase + (UART_LCR << p->regshift); + int tries = 1000; + + /* Make sure LCR write wasn't ignored */ + while (tries--) { + unsigned int lcr = p->serial_in(p, UART_LCR); + + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) + return; + + dw8250_force_idle(p); + +#ifdef CONFIG_64BIT + if (p->type == PORT_OCTEON) + __raw_writeq(value & 0xff, offset); + else +#endif + if (p->iotype == UPIO_MEM32) + writel(value, offset); + else if (p->iotype == UPIO_MEM32BE) + iowrite32be(value, offset); + else + writeb(value, offset); + } + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ +} + +/* Returns once the transmitter is empty or we run out of retries */ +static void dw8250_tx_wait_empty(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int tries = 20000; + unsigned int delay_threshold = tries - 1000; + unsigned int lsr; + + while (tries--) { + lsr = readb (p->membase + (UART_LSR << p->regshift)); + up->lsr_saved_flags |= lsr & up->lsr_save_mask; + + if (lsr & UART_LSR_TEMT) + break; + + /* The device is first given a chance to empty without delay, + * to avoid slowdowns at high bitrates. If after 1000 tries + * the buffer has still not emptied, allow more time for low- + * speed links. */ + if (tries < delay_threshold) + udelay (1); + } +} + +static void dw8250_serial_out(struct uart_port *p, int offset, int value) +{ + struct dw8250_data *d = to_dw8250_data(p->private_data); + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + dw8250_check_lcr(p, value); +} + +static void dw8250_serial_out38x(struct uart_port *p, int offset, int value) +{ + /* Allow the TX to drain before we reconfigure */ + if (offset == UART_LCR) + dw8250_tx_wait_empty(p); + + dw8250_serial_out(p, offset, value); +} + +static unsigned int dw8250_serial_in(struct uart_port *p, int offset) +{ + unsigned int value = readb(p->membase + (offset << p->regshift)); + + return dw8250_modify_msr(p, offset, value); +} + +#ifdef CONFIG_64BIT +static unsigned int dw8250_serial_inq(struct uart_port *p, int offset) +{ + unsigned int value; + + value = (u8)__raw_readq(p->membase + (offset << p->regshift)); + + return dw8250_modify_msr(p, offset, value); +} + +static void dw8250_serial_outq(struct uart_port *p, int offset, int value) +{ + struct dw8250_data *d = to_dw8250_data(p->private_data); + + value &= 0xff; + __raw_writeq(value, p->membase + (offset << p->regshift)); + /* Read back to ensure register write ordering. 
*/ + __raw_readq(p->membase + (UART_LCR << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + dw8250_check_lcr(p, value); +} +#endif /* CONFIG_64BIT */ + +static void dw8250_serial_out32(struct uart_port *p, int offset, int value) +{ + struct dw8250_data *d = to_dw8250_data(p->private_data); + + writel(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + dw8250_check_lcr(p, value); +} + +static unsigned int dw8250_serial_in32(struct uart_port *p, int offset) +{ + unsigned int value = readl(p->membase + (offset << p->regshift)); + + return dw8250_modify_msr(p, offset, value); +} + +static void dw8250_serial_out32be(struct uart_port *p, int offset, int value) +{ + struct dw8250_data *d = to_dw8250_data(p->private_data); + + iowrite32be(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + dw8250_check_lcr(p, value); +} + +static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset) +{ + unsigned int value = ioread32be(p->membase + (offset << p->regshift)); + + return dw8250_modify_msr(p, offset, value); +} + + +static int dw8250_handle_irq(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + struct dw8250_data *d = to_dw8250_data(p->private_data); + unsigned int iir = p->serial_in(p, UART_IIR); + bool rx_timeout = (iir & 0x3f) == UART_IIR_RX_TIMEOUT; + unsigned int quirks = d->pdata->quirks; + unsigned int status; + unsigned long flags; + + /* + * There are ways to get Designware-based UARTs into a state where + * they are asserting UART_IIR_RX_TIMEOUT but there is no actual + * data available. If we see such a case then we'll do a bogus + * read. If we don't do this then the "RX TIMEOUT" interrupt will + * fire forever. + * + * This problem has only been observed so far when not in DMA mode + * so we limit the workaround only to non-DMA mode. + */ + if (!up->dma && rx_timeout) { + spin_lock_irqsave(&p->lock, flags); + status = serial_lsr_in(up); + + if (!(status & (UART_LSR_DR | UART_LSR_BI))) + (void) p->serial_in(p, UART_RX); + + spin_unlock_irqrestore(&p->lock, flags); + } + + /* Manually stop the Rx DMA transfer when acting as flow controller */ + if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) { + spin_lock_irqsave(&p->lock, flags); + status = serial_lsr_in(up); + spin_unlock_irqrestore(&p->lock, flags); + + if (status & (UART_LSR_DR | UART_LSR_BI)) { + dw8250_writel_ext(p, RZN1_UART_RDMACR, 0); + dw8250_writel_ext(p, DW_UART_DMASA, 1); + } + } + + if (serial8250_handle_irq(p, iir)) + return 1; + + if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { + /* Clear the USR */ + (void)p->serial_in(p, d->pdata->usr_reg); + + return 1; + } + + return 0; +} + +static void dw8250_clk_work_cb(struct work_struct *work) +{ + struct dw8250_data *d = work_to_dw8250_data(work); + struct uart_8250_port *up; + unsigned long rate; + + rate = clk_get_rate(d->clk); + if (rate <= 0) + return; + + up = serial8250_get_port(d->data.line); + + serial8250_update_uartclk(&up->port, rate); +} + +static int dw8250_clk_notifier_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct dw8250_data *d = clk_to_dw8250_data(nb); + + /* + * We have no choice but to defer the uartclk update due to two + * deadlocks. First one is caused by a recursive mutex lock which + * happens when clk_set_rate() is called from dw8250_set_termios(). 
+ * Second deadlock is more tricky and is caused by an inverted order of + * the clk and tty-port mutexes lock. It happens if clock rate change + * is requested asynchronously while set_termios() is executed between + * tty-port mutex lock and clk_set_rate() function invocation and + * vise-versa. Anyway if we didn't have the reference clock alteration + * in the dw8250_set_termios() method we wouldn't have needed this + * deferred event handling complication. + */ + if (event == POST_RATE_CHANGE) { + queue_work(system_unbound_wq, &d->clk_work); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static void +dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +{ + if (!state) + pm_runtime_get_sync(port->dev); + + serial8250_do_pm(port, state, old); + + if (state) + pm_runtime_put_sync_suspend(port->dev); +} + +static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long newrate = tty_termios_baud_rate(termios) * 16; + struct dw8250_data *d = to_dw8250_data(p->private_data); + long rate; + int ret; + + clk_disable_unprepare(d->clk); + rate = clk_round_rate(d->clk, newrate); + if (rate > 0) { + /* + * Note that any clock-notifer worker will block in + * serial8250_update_uartclk() until we are done. + */ + ret = clk_set_rate(d->clk, newrate); + if (!ret) + p->uartclk = rate; + } + clk_prepare_enable(d->clk); + + dw8250_do_set_termios(p, termios, old); +} + +static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int mcr = p->serial_in(p, UART_MCR); + + if (up->capabilities & UART_CAP_IRDA) { + if (termios->c_line == N_IRDA) + mcr |= DW_UART_MCR_SIRE; + else + mcr &= ~DW_UART_MCR_SIRE; + + p->serial_out(p, UART_MCR, mcr); + } + serial8250_do_set_ldisc(p, termios); +} + +/* + * dw8250_fallback_dma_filter will prevent the UART from getting just any free + * channel on platforms that have DMA engines, but don't have any channels + * assigned to the UART. + * + * REVISIT: This is a work around for limitation in the DMA Engine API. Once the + * core problem is fixed, this function is no longer needed. 
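+ * Returning false for every channel simply means "no DMA channel": the
+ * request in serial8250_request_dma() then fails and the port falls
+ * back to PIO.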
+ */ +static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param) +{ + return false; +} + +static bool dw8250_idma_filter(struct dma_chan *chan, void *param) +{ + return param == chan->device->dev; +} + +static u32 dw8250_rzn1_get_dmacr_burst(int max_burst) +{ + if (max_burst >= 8) + return RZN1_UART_xDMACR_8_WORD_BURST; + else if (max_burst >= 4) + return RZN1_UART_xDMACR_4_WORD_BURST; + else + return RZN1_UART_xDMACR_1_WORD_BURST; +} + +static void dw8250_prepare_tx_dma(struct uart_8250_port *p) +{ + struct uart_port *up = &p->port; + struct uart_8250_dma *dma = p->dma; + u32 val; + + dw8250_writel_ext(up, RZN1_UART_TDMACR, 0); + val = dw8250_rzn1_get_dmacr_burst(dma->txconf.dst_maxburst) | + RZN1_UART_xDMACR_BLK_SZ(dma->tx_size) | + RZN1_UART_xDMACR_DMA_EN; + dw8250_writel_ext(up, RZN1_UART_TDMACR, val); +} + +static void dw8250_prepare_rx_dma(struct uart_8250_port *p) +{ + struct uart_port *up = &p->port; + struct uart_8250_dma *dma = p->dma; + u32 val; + + dw8250_writel_ext(up, RZN1_UART_RDMACR, 0); + val = dw8250_rzn1_get_dmacr_burst(dma->rxconf.src_maxburst) | + RZN1_UART_xDMACR_BLK_SZ(dma->rx_size) | + RZN1_UART_xDMACR_DMA_EN; + dw8250_writel_ext(up, RZN1_UART_RDMACR, val); +} + +static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) +{ + struct device_node *np = p->dev->of_node; + + if (np) { + unsigned int quirks = data->pdata->quirks; + int id; + + /* get index of serial line, if found in DT aliases */ + id = of_alias_get_id(np, "serial"); + if (id >= 0) + p->line = id; +#ifdef CONFIG_64BIT + if (quirks & DW_UART_QUIRK_OCTEON) { + p->serial_in = dw8250_serial_inq; + p->serial_out = dw8250_serial_outq; + p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; + p->type = PORT_OCTEON; + data->skip_autocfg = true; + } +#endif + + if (of_device_is_big_endian(np)) { + p->iotype = UPIO_MEM32BE; + p->serial_in = dw8250_serial_in32be; + p->serial_out = dw8250_serial_out32be; + } + + if (quirks & DW_UART_QUIRK_ARMADA_38X) + p->serial_out = dw8250_serial_out38x; + if (quirks & DW_UART_QUIRK_SKIP_SET_RATE) + p->set_termios = dw8250_do_set_termios; + if (quirks & DW_UART_QUIRK_IS_DMA_FC) { + data->data.dma.txconf.device_fc = 1; + data->data.dma.rxconf.device_fc = 1; + data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma; + data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma; + } + + } else if (acpi_dev_present("APMC0D08", NULL, -1)) { + p->iotype = UPIO_MEM32; + p->regshift = 2; + p->serial_in = dw8250_serial_in32; + data->uart_16550_compatible = true; + } + + /* Platforms with iDMA 64-bit */ + if (platform_get_resource_byname(to_platform_device(p->dev), + IORESOURCE_MEM, "lpss_priv")) { + data->data.dma.rx_param = p->dev->parent; + data->data.dma.tx_param = p->dev->parent; + data->data.dma.fn = dw8250_idma_filter; + } +} + +static void dw8250_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static void dw8250_reset_control_assert(void *data) +{ + reset_control_assert(data); +} + +static int dw8250_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}, *up = &uart; + struct uart_port *p = &up->port; + struct device *dev = &pdev->dev; + struct dw8250_data *data; + struct resource *regs; + int irq; + int err; + u32 val; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return dev_err_probe(dev, -EINVAL, "no registers defined\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + spin_lock_init(&p->lock); + p->mapbase = regs->start; + p->irq = irq; + p->handle_irq = 
dw8250_handle_irq; + p->pm = dw8250_do_pm; + p->type = PORT_8250; + p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; + p->dev = dev; + p->iotype = UPIO_MEM; + p->serial_in = dw8250_serial_in; + p->serial_out = dw8250_serial_out; + p->set_ldisc = dw8250_set_ldisc; + p->set_termios = dw8250_set_termios; + + p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); + if (!p->membase) + return -ENOMEM; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->data.dma.fn = dw8250_fallback_dma_filter; + data->pdata = device_get_match_data(p->dev); + p->private_data = &data->data; + + data->uart_16550_compatible = device_property_read_bool(dev, + "snps,uart-16550-compatible"); + + err = device_property_read_u32(dev, "reg-shift", &val); + if (!err) + p->regshift = val; + + err = device_property_read_u32(dev, "reg-io-width", &val); + if (!err && val == 4) { + p->iotype = UPIO_MEM32; + p->serial_in = dw8250_serial_in32; + p->serial_out = dw8250_serial_out32; + } + + if (device_property_read_bool(dev, "dcd-override")) { + /* Always report DCD as active */ + data->msr_mask_on |= UART_MSR_DCD; + data->msr_mask_off |= UART_MSR_DDCD; + } + + if (device_property_read_bool(dev, "dsr-override")) { + /* Always report DSR as active */ + data->msr_mask_on |= UART_MSR_DSR; + data->msr_mask_off |= UART_MSR_DDSR; + } + + if (device_property_read_bool(dev, "cts-override")) { + /* Always report CTS as active */ + data->msr_mask_on |= UART_MSR_CTS; + data->msr_mask_off |= UART_MSR_DCTS; + } + + if (device_property_read_bool(dev, "ri-override")) { + /* Always report Ring indicator as inactive */ + data->msr_mask_off |= UART_MSR_RI; + data->msr_mask_off |= UART_MSR_TERI; + } + + /* Always ask for fixed clock rate from a property. */ + device_property_read_u32(dev, "clock-frequency", &p->uartclk); + + /* If there is separate baudclk, get the rate from it. */ + data->clk = devm_clk_get_optional(dev, "baudclk"); + if (data->clk == NULL) + data->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(data->clk)) + return PTR_ERR(data->clk); + + INIT_WORK(&data->clk_work, dw8250_clk_work_cb); + data->clk_notifier.notifier_call = dw8250_clk_notifier_cb; + + err = clk_prepare_enable(data->clk); + if (err) + return dev_err_probe(dev, err, "could not enable optional baudclk\n"); + + err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->clk); + if (err) + return err; + + if (data->clk) + p->uartclk = clk_get_rate(data->clk); + + /* If no clock rate is defined, fail. 
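+	 * The rate may have come from the "clock-frequency" property or
+	 * from the optional baud clock looked up above; without either
+	 * there is no way to program a meaningful divisor.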
*/ + if (!p->uartclk) + return dev_err_probe(dev, -EINVAL, "clock rate not defined\n"); + + data->pclk = devm_clk_get_optional(dev, "apb_pclk"); + if (IS_ERR(data->pclk)) + return PTR_ERR(data->pclk); + + err = clk_prepare_enable(data->pclk); + if (err) + return dev_err_probe(dev, err, "could not enable apb_pclk\n"); + + err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->pclk); + if (err) + return err; + + data->rst = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(data->rst)) + return PTR_ERR(data->rst); + + reset_control_deassert(data->rst); + + err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst); + if (err) + return err; + + dw8250_quirks(p, data); + + /* If the Busy Functionality is not implemented, don't handle it */ + if (data->uart_16550_compatible) + p->handle_irq = NULL; + + if (!data->skip_autocfg) + dw8250_setup_port(p); + + /* If we have a valid fifosize, try hooking up DMA */ + if (p->fifosize) { + data->data.dma.rxconf.src_maxburst = p->fifosize / 4; + data->data.dma.txconf.dst_maxburst = p->fifosize / 4; + up->dma = &data->data.dma; + } + + data->data.line = serial8250_register_8250_port(up); + if (data->data.line < 0) + return data->data.line; + + /* + * Some platforms may provide a reference clock shared between several + * devices. In this case any clock state change must be known to the + * UART port at least post factum. + */ + if (data->clk) { + err = clk_notifier_register(data->clk, &data->clk_notifier); + if (err) + return dev_err_probe(dev, err, "Failed to set the clock notifier\n"); + queue_work(system_unbound_wq, &data->clk_work); + } + + platform_set_drvdata(pdev, data); + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; +} + +static int dw8250_remove(struct platform_device *pdev) +{ + struct dw8250_data *data = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + pm_runtime_get_sync(dev); + + if (data->clk) { + clk_notifier_unregister(data->clk, &data->clk_notifier); + + flush_work(&data->clk_work); + } + + serial8250_unregister_port(data->data.line); + + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + + return 0; +} + +static int dw8250_suspend(struct device *dev) +{ + struct dw8250_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->data.line); + + return 0; +} + +static int dw8250_resume(struct device *dev) +{ + struct dw8250_data *data = dev_get_drvdata(dev); + + serial8250_resume_port(data->data.line); + + return 0; +} + +static int dw8250_runtime_suspend(struct device *dev) +{ + struct dw8250_data *data = dev_get_drvdata(dev); + + clk_disable_unprepare(data->clk); + + clk_disable_unprepare(data->pclk); + + return 0; +} + +static int dw8250_runtime_resume(struct device *dev) +{ + struct dw8250_data *data = dev_get_drvdata(dev); + + clk_prepare_enable(data->pclk); + + clk_prepare_enable(data->clk); + + return 0; +} + +static const struct dev_pm_ops dw8250_pm_ops = { + SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume) + RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL) +}; + +static const struct dw8250_platform_data dw8250_dw_apb = { + .usr_reg = DW_UART_USR, +}; + +static const struct dw8250_platform_data dw8250_octeon_3860_data = { + .usr_reg = OCTEON_UART_USR, + .quirks = DW_UART_QUIRK_OCTEON, +}; + +static const struct dw8250_platform_data dw8250_armada_38x_data = { + .usr_reg = DW_UART_USR, + .quirks = DW_UART_QUIRK_ARMADA_38X, +}; + +static const struct dw8250_platform_data dw8250_renesas_rzn1_data = { + 
.usr_reg = DW_UART_USR, + .cpr_val = 0x00012f32, + .quirks = DW_UART_QUIRK_IS_DMA_FC, +}; + +static const struct dw8250_platform_data dw8250_starfive_jh7100_data = { + .usr_reg = DW_UART_USR, + .quirks = DW_UART_QUIRK_SKIP_SET_RATE, +}; + +static const struct of_device_id dw8250_of_match[] = { + { .compatible = "snps,dw-apb-uart", .data = &dw8250_dw_apb }, + { .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data }, + { .compatible = "marvell,armada-38x-uart", .data = &dw8250_armada_38x_data }, + { .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data }, + { .compatible = "starfive,jh7100-uart", .data = &dw8250_starfive_jh7100_data }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, dw8250_of_match); + +static const struct acpi_device_id dw8250_acpi_match[] = { + { "80860F0A", (kernel_ulong_t)&dw8250_dw_apb }, + { "8086228A", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMD0020", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb }, + { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb}, + { "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb }, + { "HISI0031", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT33C4", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT3434", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT3435", (kernel_ulong_t)&dw8250_dw_apb }, + { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); + +static struct platform_driver dw8250_platform_driver = { + .driver = { + .name = "dw-apb-uart", + .pm = pm_ptr(&dw8250_pm_ops), + .of_match_table = dw8250_of_match, + .acpi_match_table = dw8250_acpi_match, + }, + .probe = dw8250_probe, + .remove = dw8250_remove, +}; + +module_platform_driver(dw8250_platform_driver); + +MODULE_AUTHOR("Jamie Iles"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver"); +MODULE_ALIAS("platform:dw-apb-uart"); diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c new file mode 100644 index 000000000..84843e204 --- /dev/null +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Synopsys DesignWare 8250 library. 
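+ *
+ * Shared helpers for the DesignWare-specific extensions, notably the
+ * fractional baud divisor (DLF) and the hardware RS-485 control
+ * registers, used by the 8250_dw and 8250_lpss drivers.
+ *
+ * Illustrative DLF example (hypothetical numbers): with uartclk =
+ * 48 MHz, 115200 baud and a 4-bit DLF the helpers below compute
+ *	quot = 48000000 / (16 * 115200) = 26
+ *	rem  = 48000000 % (16 * 115200) = 76800
+ *	frac = DIV_ROUND_CLOSEST(76800 << 4, 16 * 115200) = 1
+ * giving an effective divisor of 26 + 1/16, about 26.06 against the
+ * ideal 26.04.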
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250_dwlib.h" + +/* Offsets for the DesignWare specific registers */ +#define DW_UART_TCR 0xac /* Transceiver Control Register (RS485) */ +#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */ +#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */ +#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ +#define DW_UART_RAR 0xc4 /* Receive Address Register */ +#define DW_UART_TAR 0xc8 /* Transmit Address Register */ +#define DW_UART_LCR_EXT 0xcc /* Line Extended Control Register */ +#define DW_UART_CPR 0xf4 /* Component Parameter Register */ +#define DW_UART_UCV 0xf8 /* UART Component Version */ + +/* Receive / Transmit Address Register bits */ +#define DW_UART_ADDR_MASK GENMASK(7, 0) + +/* Line Status Register bits */ +#define DW_UART_LSR_ADDR_RCVD BIT(8) + +/* Transceiver Control Register bits */ +#define DW_UART_TCR_RS485_EN BIT(0) +#define DW_UART_TCR_RE_POL BIT(1) +#define DW_UART_TCR_DE_POL BIT(2) +#define DW_UART_TCR_XFER_MODE GENMASK(4, 3) +#define DW_UART_TCR_XFER_MODE_DE_DURING_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 0) +#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 1) +#define DW_UART_TCR_XFER_MODE_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 2) + +/* Line Extended Control Register bits */ +#define DW_UART_LCR_EXT_DLS_E BIT(0) +#define DW_UART_LCR_EXT_ADDR_MATCH BIT(1) +#define DW_UART_LCR_EXT_SEND_ADDR BIT(2) +#define DW_UART_LCR_EXT_TRANSMIT_MODE BIT(3) + +/* Component Parameter Register bits */ +#define DW_UART_CPR_ABP_DATA_WIDTH GENMASK(1, 0) +#define DW_UART_CPR_AFCE_MODE BIT(4) +#define DW_UART_CPR_THRE_MODE BIT(5) +#define DW_UART_CPR_SIR_MODE BIT(6) +#define DW_UART_CPR_SIR_LP_MODE BIT(7) +#define DW_UART_CPR_ADDITIONAL_FEATURES BIT(8) +#define DW_UART_CPR_FIFO_ACCESS BIT(9) +#define DW_UART_CPR_FIFO_STAT BIT(10) +#define DW_UART_CPR_SHADOW BIT(11) +#define DW_UART_CPR_ENCODED_PARMS BIT(12) +#define DW_UART_CPR_DMA_EXTRA BIT(13) +#define DW_UART_CPR_FIFO_MODE GENMASK(23, 16) + +/* Helper for FIFO size calculation */ +#define DW_UART_CPR_FIFO_SIZE(a) (FIELD_GET(DW_UART_CPR_FIFO_MODE, (a)) * 16) + +/* + * divisor = div(I) + div(F) + * "I" means integer, "F" means fractional + * quot = div(I) = clk / (16 * baud) + * frac = div(F) * 2^dlf_size + * + * let rem = clk % (16 * baud) + * we have: div(F) * (16 * baud) = rem + * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud) + */ +static unsigned int dw8250_get_divisor(struct uart_port *p, unsigned int baud, + unsigned int *frac) +{ + unsigned int quot, rem, base_baud = baud * 16; + struct dw8250_port_data *d = p->private_data; + + quot = p->uartclk / base_baud; + rem = p->uartclk % base_baud; + *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud); + + return quot; +} + +static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + dw8250_writel_ext(p, DW_UART_DLF, quot_frac); + serial8250_do_set_divisor(p, baud, quot, quot_frac); +} + +void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + p->status &= ~UPSTAT_AUTOCTS; + if (termios->c_cflag & CRTSCTS) + p->status |= UPSTAT_AUTOCTS; + + serial8250_do_set_termios(p, termios, old); + + /* Filter addresses which have 9th bit set */ + p->ignore_status_mask |= DW_UART_LSR_ADDR_RCVD; + p->read_status_mask |= DW_UART_LSR_ADDR_RCVD; +} +EXPORT_SYMBOL_GPL(dw8250_do_set_termios); + +/* + * 
Wait until re is de-asserted for sure. An ongoing receive will keep + * re asserted until end of frame. Without BUSY indication available, + * only available course of action is to wait for the time it takes to + * receive one frame (there might nothing to receive but w/o BUSY the + * driver cannot know). + */ +static void dw8250_wait_re_deassert(struct uart_port *p) +{ + ndelay(p->frame_time); +} + +static void dw8250_update_rar(struct uart_port *p, u32 addr) +{ + u32 re_en = dw8250_readl_ext(p, DW_UART_RE_EN); + + /* + * RAR shouldn't be changed while receiving. Thus, de-assert RE_EN + * if asserted and wait. + */ + if (re_en) + dw8250_writel_ext(p, DW_UART_RE_EN, 0); + dw8250_wait_re_deassert(p); + dw8250_writel_ext(p, DW_UART_RAR, addr); + if (re_en) + dw8250_writel_ext(p, DW_UART_RE_EN, re_en); +} + +static void dw8250_rs485_set_addr(struct uart_port *p, struct serial_rs485 *rs485, + struct ktermios *termios) +{ + u32 lcr = dw8250_readl_ext(p, DW_UART_LCR_EXT); + + if (rs485->flags & SER_RS485_ADDRB) { + lcr |= DW_UART_LCR_EXT_DLS_E; + if (termios) + termios->c_cflag |= ADDRB; + + if (rs485->flags & SER_RS485_ADDR_RECV) { + u32 delta = p->rs485.flags ^ rs485->flags; + + /* + * rs485 (param) is equal to uart_port's rs485 only during init + * (during init, delta is not yet applicable). + */ + if (unlikely(&p->rs485 == rs485)) + delta = rs485->flags; + + if ((delta & SER_RS485_ADDR_RECV) || + (p->rs485.addr_recv != rs485->addr_recv)) + dw8250_update_rar(p, rs485->addr_recv); + lcr |= DW_UART_LCR_EXT_ADDR_MATCH; + } else { + lcr &= ~DW_UART_LCR_EXT_ADDR_MATCH; + } + if (rs485->flags & SER_RS485_ADDR_DEST) { + /* + * Don't skip writes here as another endpoint could + * have changed communication line's destination + * address in between. + */ + dw8250_writel_ext(p, DW_UART_TAR, rs485->addr_dest); + lcr |= DW_UART_LCR_EXT_SEND_ADDR; + } + } else { + lcr = 0; + } + dw8250_writel_ext(p, DW_UART_LCR_EXT, lcr); +} + +static int dw8250_rs485_config(struct uart_port *p, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + u32 tcr; + + tcr = dw8250_readl_ext(p, DW_UART_TCR); + tcr &= ~DW_UART_TCR_XFER_MODE; + + if (rs485->flags & SER_RS485_ENABLED) { + tcr |= DW_UART_TCR_RS485_EN; + + if (rs485->flags & SER_RS485_RX_DURING_TX) + tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE; + else + tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE; + dw8250_writel_ext(p, DW_UART_DE_EN, 1); + dw8250_writel_ext(p, DW_UART_RE_EN, 1); + } else { + if (termios) + termios->c_cflag &= ~ADDRB; + + tcr &= ~DW_UART_TCR_RS485_EN; + } + + /* Reset to default polarity */ + tcr |= DW_UART_TCR_DE_POL; + tcr &= ~DW_UART_TCR_RE_POL; + + if (!(rs485->flags & SER_RS485_RTS_ON_SEND)) + tcr &= ~DW_UART_TCR_DE_POL; + if (device_property_read_bool(p->dev, "rs485-rx-active-high")) + tcr |= DW_UART_TCR_RE_POL; + + dw8250_writel_ext(p, DW_UART_TCR, tcr); + + /* Addressing mode can only be set up after TCR */ + if (rs485->flags & SER_RS485_ENABLED) + dw8250_rs485_set_addr(p, rs485, termios); + + return 0; +} + +/* + * Tests if RE_EN register can have non-zero value to see if RS-485 HW support + * is present. 
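+ * A zero read-back means the RS-485 extension is not implemented, in
+ * which case dw8250_setup_port() falls back to the generic software
+ * emulation (serial8250_em485) instead of the DW_UART_TCR based
+ * control.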
+ */ +static bool dw8250_detect_rs485_hw(struct uart_port *p) +{ + u32 reg; + + dw8250_writel_ext(p, DW_UART_RE_EN, 1); + reg = dw8250_readl_ext(p, DW_UART_RE_EN); + dw8250_writel_ext(p, DW_UART_RE_EN, 0); + return reg; +} + +static const struct serial_rs485 dw8250_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_RTS_ON_SEND | + SER_RS485_RTS_AFTER_SEND | SER_RS485_ADDRB | SER_RS485_ADDR_RECV | + SER_RS485_ADDR_DEST, +}; + +void dw8250_setup_port(struct uart_port *p) +{ + struct dw8250_port_data *pd = p->private_data; + struct dw8250_data *data = to_dw8250_data(pd); + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg, old_dlf; + + pd->hw_rs485_support = dw8250_detect_rs485_hw(p); + if (pd->hw_rs485_support) { + p->rs485_config = dw8250_rs485_config; + up->lsr_save_mask = LSR_SAVE_FLAGS | DW_UART_LSR_ADDR_RCVD; + p->rs485_supported = dw8250_rs485_supported; + } else { + p->rs485_config = serial8250_em485_config; + p->rs485_supported = serial8250_em485_supported; + up->rs485_start_tx = serial8250_em485_start_tx; + up->rs485_stop_tx = serial8250_em485_stop_tx; + } + up->capabilities |= UART_CAP_NOTEMT; + + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. + */ + reg = dw8250_readl_ext(p, DW_UART_UCV); + if (!reg) + return; + + dev_dbg(p->dev, "Designware UART version %c.%c%c\n", + (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + + /* Preserve value written by firmware or bootloader */ + old_dlf = dw8250_readl_ext(p, DW_UART_DLF); + dw8250_writel_ext(p, DW_UART_DLF, ~0U); + reg = dw8250_readl_ext(p, DW_UART_DLF); + dw8250_writel_ext(p, DW_UART_DLF, old_dlf); + + if (reg) { + pd->dlf_size = fls(reg); + p->get_divisor = dw8250_get_divisor; + p->set_divisor = dw8250_set_divisor; + } + + reg = dw8250_readl_ext(p, DW_UART_CPR); + if (!reg) { + reg = data->pdata->cpr_val; + dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg); + } + if (!reg) + return; + + /* Select the type based on FIFO */ + if (reg & DW_UART_CPR_FIFO_MODE) { + p->type = PORT_16550A; + p->flags |= UPF_FIXED_TYPE; + p->fifosize = DW_UART_CPR_FIFO_SIZE(reg); + up->capabilities = UART_CAP_FIFO | UART_CAP_NOTEMT; + } + + if (reg & DW_UART_CPR_AFCE_MODE) + up->capabilities |= UART_CAP_AFE; + + if (reg & DW_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; +} +EXPORT_SYMBOL_GPL(dw8250_setup_port); diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h new file mode 100644 index 000000000..f13e91f2c --- /dev/null +++ b/drivers/tty/serial/8250/8250_dwlib.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Synopsys DesignWare 8250 library header file. 
*/ + +#include +#include +#include +#include + +#include "8250.h" + +struct clk; +struct reset_control; + +struct dw8250_port_data { + /* Port properties */ + int line; + + /* DMA operations */ + struct uart_8250_dma dma; + + /* Hardware configuration */ + u8 dlf_size; + + /* RS485 variables */ + bool hw_rs485_support; +}; + +struct dw8250_platform_data { + u8 usr_reg; + u32 cpr_val; + unsigned int quirks; +}; + +struct dw8250_data { + struct dw8250_port_data data; + const struct dw8250_platform_data *pdata; + + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct notifier_block clk_notifier; + struct work_struct clk_work; + struct reset_control *rst; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old); +void dw8250_setup_port(struct uart_port *p); + +static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) +{ + return container_of(data, struct dw8250_data, data); +} + +static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) +{ + if (p->iotype == UPIO_MEM32BE) + return ioread32be(p->membase + offset); + return readl(p->membase + offset); +} + +static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg) +{ + if (p->iotype == UPIO_MEM32BE) + iowrite32be(reg, p->membase + offset); + else + writel(reg, p->membase + offset); +} diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c new file mode 100644 index 000000000..02c9b98a6 --- /dev/null +++ b/drivers/tty/serial/8250/8250_early.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Early serial console for 8250/16550 devices + * + * (c) Copyright 2004 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas + * + * Based on the 8250.c serial driver, Copyright (C) 2001 Russell King, + * and on early_printk.c by Andi Kleen. + * + * This is for use before the serial driver has initialized, in + * particular, before the UARTs have been discovered and named. + * Instead of specifying the console device as, e.g., "ttyS0", + * we locate the device directly by its MMIO or I/O port address. 
+ * + * The user can specify the device directly, e.g., + * earlycon=uart8250,io,0x3f8,9600n8 + * earlycon=uart8250,mmio,0xff5e0000,115200n8 + * earlycon=uart8250,mmio32,0xff5e0000,115200n8 + * or + * console=uart8250,io,0x3f8,9600n8 + * console=uart8250,mmio,0xff5e0000,115200n8 + * console=uart8250,mmio32,0xff5e0000,115200n8 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned int serial8250_early_in(struct uart_port *port, int offset) +{ + int reg_offset = offset; + offset <<= port->regshift; + + switch (port->iotype) { + case UPIO_MEM: + return readb(port->membase + offset); + case UPIO_MEM16: + return readw(port->membase + offset); + case UPIO_MEM32: + return readl(port->membase + offset); + case UPIO_MEM32BE: + return ioread32be(port->membase + offset); + case UPIO_PORT: + return inb(port->iobase + offset); + case UPIO_AU: + return port->serial_in(port, reg_offset); + default: + return 0; + } +} + +static void serial8250_early_out(struct uart_port *port, int offset, int value) +{ + int reg_offset = offset; + offset <<= port->regshift; + + switch (port->iotype) { + case UPIO_MEM: + writeb(value, port->membase + offset); + break; + case UPIO_MEM16: + writew(value, port->membase + offset); + break; + case UPIO_MEM32: + writel(value, port->membase + offset); + break; + case UPIO_MEM32BE: + iowrite32be(value, port->membase + offset); + break; + case UPIO_PORT: + outb(value, port->iobase + offset); + break; + case UPIO_AU: + port->serial_out(port, reg_offset, value); + break; + } +} + +static void serial_putc(struct uart_port *port, unsigned char c) +{ + unsigned int status; + + serial8250_early_out(port, UART_TX, c); + + for (;;) { + status = serial8250_early_in(port, UART_LSR); + if (uart_lsr_tx_empty(status)) + break; + cpu_relax(); + } +} + +static void early_serial8250_write(struct console *console, + const char *s, unsigned int count) +{ + struct earlycon_device *device = console->data; + struct uart_port *port = &device->port; + + uart_console_write(port, s, count, serial_putc); +} + +#ifdef CONFIG_CONSOLE_POLL +static int early_serial8250_read(struct console *console, + char *s, unsigned int count) +{ + struct earlycon_device *device = console->data; + struct uart_port *port = &device->port; + unsigned int status; + int num_read = 0; + + while (num_read < count) { + status = serial8250_early_in(port, UART_LSR); + if (!(status & UART_LSR_DR)) + break; + s[num_read++] = serial8250_early_in(port, UART_RX); + } + + return num_read; +} +#else +#define early_serial8250_read NULL +#endif + +static void __init init_port(struct earlycon_device *device) +{ + struct uart_port *port = &device->port; + unsigned int divisor; + unsigned char c; + unsigned int ier; + + serial8250_early_out(port, UART_LCR, 0x3); /* 8n1 */ + ier = serial8250_early_in(port, UART_IER); + serial8250_early_out(port, UART_IER, ier & UART_IER_UUE); /* no interrupt */ + serial8250_early_out(port, UART_FCR, 0); /* no fifo */ + serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */ + + if (port->uartclk) { + divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); + c = serial8250_early_in(port, UART_LCR); + serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); + serial8250_early_out(port, UART_DLL, divisor & 0xff); + serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); + serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); + } +} + +int __init early_serial8250_setup(struct earlycon_device *device, + const char *options) +{ + if 
(!(device->port.membase || device->port.iobase)) + return -ENODEV; + + if (!device->baud) { + struct uart_port *port = &device->port; + unsigned int ier; + + /* assume the device was initialized, only mask interrupts */ + ier = serial8250_early_in(port, UART_IER); + serial8250_early_out(port, UART_IER, ier & UART_IER_UUE); + } else + init_port(device); + + device->con->write = early_serial8250_write; + device->con->read = early_serial8250_read; + return 0; +} +EARLYCON_DECLARE(uart8250, early_serial8250_setup); +EARLYCON_DECLARE(uart, early_serial8250_setup); +OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup); +OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup); +OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup); +OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup); + +#ifdef CONFIG_SERIAL_8250_OMAP + +static int __init early_omap8250_setup(struct earlycon_device *device, + const char *options) +{ + struct uart_port *port = &device->port; + + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + + port->regshift = 2; + device->con->write = early_serial8250_write; + return 0; +} + +OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup); +OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup); +OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup); +OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup); + +#endif + +#ifdef CONFIG_SERIAL_8250_RT288X + +static int __init early_au_setup(struct earlycon_device *dev, const char *opt) +{ + dev->port.serial_in = au_serial_in; + dev->port.serial_out = au_serial_out; + dev->port.iotype = UPIO_AU; + dev->con->write = early_serial8250_write; + return 0; +} +OF_EARLYCON_DECLARE(palmchip, "ralink,rt2880-uart", early_au_setup); + +#endif diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c new file mode 100644 index 000000000..d94c3811a --- /dev/null +++ b/drivers/tty/serial/8250/8250_em.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas Emma Mobile 8250 driver + * + * Copyright (C) 2012 Magnus Damm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +#define UART_DLL_EM 9 +#define UART_DLM_EM 10 + +struct serial8250_em_priv { + struct clk *sclk; + int line; +}; + +static void serial8250_em_serial_out(struct uart_port *p, int offset, int value) +{ + switch (offset) { + case UART_TX: /* TX @ 0x00 */ + writeb(value, p->membase); + break; + case UART_FCR: /* FCR @ 0x0c (+1) */ + case UART_LCR: /* LCR @ 0x10 (+1) */ + case UART_MCR: /* MCR @ 0x14 (+1) */ + case UART_SCR: /* SCR @ 0x20 (+1) */ + writel(value, p->membase + ((offset + 1) << 2)); + break; + case UART_IER: /* IER @ 0x04 */ + value &= 0x0f; /* only 4 valid bits - not Xscale */ + fallthrough; + case UART_DLL_EM: /* DLL @ 0x24 (+9) */ + case UART_DLM_EM: /* DLM @ 0x28 (+9) */ + writel(value, p->membase + (offset << 2)); + } +} + +static unsigned int serial8250_em_serial_in(struct uart_port *p, int offset) +{ + switch (offset) { + case UART_RX: /* RX @ 0x00 */ + return readb(p->membase); + case UART_MCR: /* MCR @ 0x14 (+1) */ + case UART_LSR: /* LSR @ 0x18 (+1) */ + case UART_MSR: /* MSR @ 0x1c (+1) */ + case UART_SCR: /* SCR @ 0x20 (+1) */ + return readl(p->membase + ((offset + 1) << 2)); + case UART_IER: /* IER @ 0x04 */ + case UART_IIR: /* IIR @ 0x08 */ + case UART_DLL_EM: /* DLL @ 0x24 (+9) */ + case UART_DLM_EM: /* DLM @ 0x28 (+9) */ + 
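+		/*
+		 * Illustrative note, not in the upstream source: the Emma
+		 * Mobile block places each byte-wide 8250 register in its own
+		 * 32-bit slot, so the address is (index << 2), with a +1 skew
+		 * for the FCR..SCR group; e.g. UART_DLL_EM (9) -> 9 << 2 =
+		 * 0x24 and UART_LSR (5) -> (5 + 1) << 2 = 0x18, matching the
+		 * per-case comments above.
+		 */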
return readl(p->membase + (offset << 2)); + } + return 0; +} + +static int serial8250_em_serial_dl_read(struct uart_8250_port *up) +{ + return serial_in(up, UART_DLL_EM) | serial_in(up, UART_DLM_EM) << 8; +} + +static void serial8250_em_serial_dl_write(struct uart_8250_port *up, int value) +{ + serial_out(up, UART_DLL_EM, value & 0xff); + serial_out(up, UART_DLM_EM, value >> 8 & 0xff); +} + +static int serial8250_em_probe(struct platform_device *pdev) +{ + struct serial8250_em_priv *priv; + struct uart_8250_port up; + struct resource *regs; + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "missing registers\n"); + return -EINVAL; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->sclk = devm_clk_get(&pdev->dev, "sclk"); + if (IS_ERR(priv->sclk)) { + dev_err(&pdev->dev, "unable to get clock\n"); + return PTR_ERR(priv->sclk); + } + + memset(&up, 0, sizeof(up)); + up.port.mapbase = regs->start; + up.port.irq = irq; + up.port.type = PORT_16750; + up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE; + up.port.dev = &pdev->dev; + up.port.private_data = priv; + + clk_prepare_enable(priv->sclk); + up.port.uartclk = clk_get_rate(priv->sclk); + + up.port.iotype = UPIO_MEM32; + up.port.serial_in = serial8250_em_serial_in; + up.port.serial_out = serial8250_em_serial_out; + up.dl_read = serial8250_em_serial_dl_read; + up.dl_write = serial8250_em_serial_dl_write; + + ret = serial8250_register_8250_port(&up); + if (ret < 0) { + dev_err(&pdev->dev, "unable to register 8250 port\n"); + clk_disable_unprepare(priv->sclk); + return ret; + } + + priv->line = ret; + platform_set_drvdata(pdev, priv); + return 0; +} + +static int serial8250_em_remove(struct platform_device *pdev) +{ + struct serial8250_em_priv *priv = platform_get_drvdata(pdev); + + serial8250_unregister_port(priv->line); + clk_disable_unprepare(priv->sclk); + return 0; +} + +static const struct of_device_id serial8250_em_dt_ids[] = { + { .compatible = "renesas,em-uart", }, + {}, +}; +MODULE_DEVICE_TABLE(of, serial8250_em_dt_ids); + +static struct platform_driver serial8250_em_platform_driver = { + .driver = { + .name = "serial8250-em", + .of_match_table = serial8250_em_dt_ids, + }, + .probe = serial8250_em_probe, + .remove = serial8250_em_remove, +}; + +module_platform_driver(serial8250_em_platform_driver); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("Renesas Emma Mobile 8250 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c new file mode 100644 index 000000000..dca1abe36 --- /dev/null +++ b/drivers/tty/serial/8250/8250_exar.c @@ -0,0 +1,916 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Probe module for 8250/16550-type Exar chips PCI serial ports. + * + * Based on drivers/tty/serial/8250/8250_pci.c, + * + * Copyright (C) 2017 Sudip Mukherjee, All Rights Reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "8250.h" + +#define PCI_DEVICE_ID_ACCESSIO_COM_2S 0x1052 +#define PCI_DEVICE_ID_ACCESSIO_COM_4S 0x105d +#define PCI_DEVICE_ID_ACCESSIO_COM_8S 0x106c +#define PCI_DEVICE_ID_ACCESSIO_COM232_8 0x10a8 +#define PCI_DEVICE_ID_ACCESSIO_COM_2SM 0x10d2 +#define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db +#define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea + +#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 +#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 +#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a +#define PCI_DEVICE_ID_COMMTECH_2328PCI335 0x000b +#define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 +#define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 +#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 + +#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 +#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 + +#define PCI_SUBDEVICE_ID_USR_2980 0x0128 +#define PCI_SUBDEVICE_ID_USR_2981 0x0129 + +#define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001 +#define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002 +#define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004 +#define PCI_DEVICE_ID_SEALEVEL_780xC 0x1008 +#define PCI_DEVICE_ID_SEALEVEL_716xC 0x1010 + +#define UART_EXAR_INT0 0x80 +#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */ +#define UART_EXAR_SLEEP 0x8b /* Sleep mode */ +#define UART_EXAR_DVID 0x8d /* Device identification */ + +#define UART_EXAR_FCTR 0x08 /* Feature Control Register */ +#define UART_FCTR_EXAR_IRDA 0x10 /* IrDa data encode select */ +#define UART_FCTR_EXAR_485 0x20 /* Auto 485 half duplex dir ctl */ +#define UART_FCTR_EXAR_TRGA 0x00 /* FIFO trigger table A */ +#define UART_FCTR_EXAR_TRGB 0x60 /* FIFO trigger table B */ +#define UART_FCTR_EXAR_TRGC 0x80 /* FIFO trigger table C */ +#define UART_FCTR_EXAR_TRGD 0xc0 /* FIFO trigger table D programmable */ + +#define UART_EXAR_TXTRG 0x0a /* Tx FIFO trigger level write-only */ +#define UART_EXAR_RXTRG 0x0b /* Rx FIFO trigger level write-only */ + +#define UART_EXAR_MPIOINT_7_0 0x8f /* MPIOINT[7:0] */ +#define UART_EXAR_MPIOLVL_7_0 0x90 /* MPIOLVL[7:0] */ +#define UART_EXAR_MPIO3T_7_0 0x91 /* MPIO3T[7:0] */ +#define UART_EXAR_MPIOINV_7_0 0x92 /* MPIOINV[7:0] */ +#define UART_EXAR_MPIOSEL_7_0 0x93 /* MPIOSEL[7:0] */ +#define UART_EXAR_MPIOOD_7_0 0x94 /* MPIOOD[7:0] */ +#define UART_EXAR_MPIOINT_15_8 0x95 /* MPIOINT[15:8] */ +#define UART_EXAR_MPIOLVL_15_8 0x96 /* MPIOLVL[15:8] */ +#define UART_EXAR_MPIO3T_15_8 0x97 /* MPIO3T[15:8] */ +#define UART_EXAR_MPIOINV_15_8 0x98 /* MPIOINV[15:8] */ +#define UART_EXAR_MPIOSEL_15_8 0x99 /* MPIOSEL[15:8] */ +#define UART_EXAR_MPIOOD_15_8 0x9a /* MPIOOD[15:8] */ + +#define UART_EXAR_RS485_DLY(x) ((x) << 4) + +/* + * IOT2040 MPIO wiring semantics: + * + * MPIO Port Function + * ---- ---- -------- + * 0 2 Mode bit 0 + * 1 2 Mode bit 1 + * 2 2 Terminate bus + * 3 - + * 4 3 Mode bit 0 + * 5 3 Mode bit 1 + * 6 3 Terminate bus + * 7 - + * 8 2 Enable + * 9 3 Enable + * 10 - Red LED + * 11..15 - + */ + +/* IOT2040 MPIOs 0..7 */ +#define IOT2040_UART_MODE_RS232 0x01 +#define IOT2040_UART_MODE_RS485 0x02 +#define IOT2040_UART_MODE_RS422 0x03 +#define IOT2040_UART_TERMINATE_BUS 0x04 + +#define IOT2040_UART1_MASK 0x0f +#define IOT2040_UART2_SHIFT 4 + +#define IOT2040_UARTS_DEFAULT_MODE 0x11 /* both RS232 */ +#define IOT2040_UARTS_GPIO_LO_MODE 0x88 /* reserved pins as input */ + +/* IOT2040 MPIOs 8..15 */ +#define IOT2040_UARTS_ENABLE 0x03 +#define IOT2040_UARTS_GPIO_HI_MODE 0xF8 /* enable & LED as 
outputs */ + +struct exar8250; + +struct exar8250_platform { + int (*rs485_config)(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485); + const struct serial_rs485 *rs485_supported; + int (*register_gpio)(struct pci_dev *, struct uart_8250_port *); + void (*unregister_gpio)(struct uart_8250_port *); +}; + +/** + * struct exar8250_board - board information + * @num_ports: number of serial ports + * @reg_shift: describes UART register mapping in PCI memory + * @setup: quirk run at ->probe() stage + * @exit: quirk run at ->remove() stage + */ +struct exar8250_board { + unsigned int num_ports; + unsigned int reg_shift; + int (*setup)(struct exar8250 *, struct pci_dev *, + struct uart_8250_port *, int); + void (*exit)(struct pci_dev *pcidev); +}; + +struct exar8250 { + unsigned int nr; + struct exar8250_board *board; + void __iomem *virt; + int line[]; +}; + +static void exar_pm(struct uart_port *port, unsigned int state, unsigned int old) +{ + /* + * Exar UARTs have a SLEEP register that enables or disables each UART + * to enter sleep mode separately. On the XR17V35x the register + * is accessible to each UART at the UART_EXAR_SLEEP offset, but + * the UART channel may only write to the corresponding bit. + */ + serial_port_out(port, UART_EXAR_SLEEP, state ? 0xff : 0); +} + +/* + * XR17V35x UARTs have an extra fractional divisor register (DLD) + * Calculate divisor with extra 4-bit fractional portion + */ +static unsigned int xr17v35x_get_divisor(struct uart_port *p, unsigned int baud, + unsigned int *frac) +{ + unsigned int quot_16; + + quot_16 = DIV_ROUND_CLOSEST(p->uartclk, baud); + *frac = quot_16 & 0x0f; + + return quot_16 >> 4; +} + +static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + serial8250_do_set_divisor(p, baud, quot, quot_frac); + + /* Preserve bits not related to baudrate; DLD[7:4]. */ + quot_frac |= serial_port_in(p, 0x2) & 0xf0; + serial_port_out(p, 0x2, quot_frac); +} + +static int xr17v35x_startup(struct uart_port *port) +{ + /* + * First enable access to IER [7:5], ISR [5:4], FCR [5:4], + * MCR [7:5] and MSR [7:0] + */ + serial_port_out(port, UART_XR_EFR, UART_EFR_ECB); + + /* + * Make sure all interrups are masked until initialization is + * complete and the FIFOs are cleared + */ + serial_port_out(port, UART_IER, 0); + + return serial8250_do_startup(port); +} + +static void exar_shutdown(struct uart_port *port) +{ + bool tx_complete = false; + struct uart_8250_port *up = up_to_u8250p(port); + struct circ_buf *xmit = &port->state->xmit; + int i = 0; + u16 lsr; + + do { + lsr = serial_in(up, UART_LSR); + if (lsr & (UART_LSR_TEMT | UART_LSR_THRE)) + tx_complete = true; + else + tx_complete = false; + usleep_range(1000, 1100); + } while (!uart_circ_empty(xmit) && !tx_complete && i++ < 1000); + + serial8250_do_shutdown(port); +} + +static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev, + int idx, unsigned int offset, + struct uart_8250_port *port) +{ + const struct exar8250_board *board = priv->board; + unsigned int bar = 0; + unsigned char status; + + port->port.iotype = UPIO_MEM; + port->port.mapbase = pci_resource_start(pcidev, bar) + offset; + port->port.membase = priv->virt + offset; + port->port.regshift = board->reg_shift; + + /* + * XR17V35x UARTs have an extra divisor register, DLD that gets enabled + * with when DLAB is set which will cause the device to incorrectly match + * and assign port type to PORT_16650. 
The EFR for this UART is found + * at offset 0x09. Instead check the Deice ID (DVID) register + * for a 2, 4 or 8 port UART. + */ + status = readb(port->port.membase + UART_EXAR_DVID); + if (status == 0x82 || status == 0x84 || status == 0x88) { + port->port.type = PORT_XR17V35X; + + port->port.get_divisor = xr17v35x_get_divisor; + port->port.set_divisor = xr17v35x_set_divisor; + + port->port.startup = xr17v35x_startup; + } else { + port->port.type = PORT_XR17D15X; + } + + port->port.pm = exar_pm; + port->port.shutdown = exar_shutdown; + + return 0; +} + +static int +pci_fastcom335_setup(struct exar8250 *priv, struct pci_dev *pcidev, + struct uart_8250_port *port, int idx) +{ + unsigned int offset = idx * 0x200; + unsigned int baud = 1843200; + u8 __iomem *p; + int err; + + port->port.uartclk = baud * 16; + + err = default_setup(priv, pcidev, idx, offset, port); + if (err) + return err; + + p = port->port.membase; + + writeb(0x00, p + UART_EXAR_8XMODE); + writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR); + writeb(32, p + UART_EXAR_TXTRG); + writeb(32, p + UART_EXAR_RXTRG); + + /* + * Setup Multipurpose Input/Output pins. + */ + if (idx == 0) { + switch (pcidev->device) { + case PCI_DEVICE_ID_COMMTECH_4222PCI335: + case PCI_DEVICE_ID_COMMTECH_4224PCI335: + writeb(0x78, p + UART_EXAR_MPIOLVL_7_0); + writeb(0x00, p + UART_EXAR_MPIOINV_7_0); + writeb(0x00, p + UART_EXAR_MPIOSEL_7_0); + break; + case PCI_DEVICE_ID_COMMTECH_2324PCI335: + case PCI_DEVICE_ID_COMMTECH_2328PCI335: + writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); + writeb(0xc0, p + UART_EXAR_MPIOINV_7_0); + writeb(0xc0, p + UART_EXAR_MPIOSEL_7_0); + break; + } + writeb(0x00, p + UART_EXAR_MPIOINT_7_0); + writeb(0x00, p + UART_EXAR_MPIO3T_7_0); + writeb(0x00, p + UART_EXAR_MPIOOD_7_0); + } + + return 0; +} + +static int +pci_connect_tech_setup(struct exar8250 *priv, struct pci_dev *pcidev, + struct uart_8250_port *port, int idx) +{ + unsigned int offset = idx * 0x200; + unsigned int baud = 1843200; + + port->port.uartclk = baud * 16; + return default_setup(priv, pcidev, idx, offset, port); +} + +static int +pci_xr17c154_setup(struct exar8250 *priv, struct pci_dev *pcidev, + struct uart_8250_port *port, int idx) +{ + unsigned int offset = idx * 0x200; + unsigned int baud = 921600; + + port->port.uartclk = baud * 16; + return default_setup(priv, pcidev, idx, offset, port); +} + +static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p) +{ + /* + * The Commtech adapters required the MPIOs to be driven low. The Exar + * devices will export them as GPIOs, so we pre-configure them safely + * as inputs. 
+ */ + + u8 dir = 0x00; + + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { + // Configure GPIO as inputs for Commtech adapters + dir = 0xff; + } else { + // Configure GPIO as outputs for SeaLevel adapters + dir = 0x00; + } + + writeb(0x00, p + UART_EXAR_MPIOINT_7_0); + writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); + writeb(0x00, p + UART_EXAR_MPIO3T_7_0); + writeb(0x00, p + UART_EXAR_MPIOINV_7_0); + writeb(dir, p + UART_EXAR_MPIOSEL_7_0); + writeb(0x00, p + UART_EXAR_MPIOOD_7_0); + writeb(0x00, p + UART_EXAR_MPIOINT_15_8); + writeb(0x00, p + UART_EXAR_MPIOLVL_15_8); + writeb(0x00, p + UART_EXAR_MPIO3T_15_8); + writeb(0x00, p + UART_EXAR_MPIOINV_15_8); + writeb(dir, p + UART_EXAR_MPIOSEL_15_8); + writeb(0x00, p + UART_EXAR_MPIOOD_15_8); +} + +static struct platform_device *__xr17v35x_register_gpio(struct pci_dev *pcidev, + const struct software_node *node) +{ + struct platform_device *pdev; + + pdev = platform_device_alloc("gpio_exar", PLATFORM_DEVID_AUTO); + if (!pdev) + return NULL; + + pdev->dev.parent = &pcidev->dev; + ACPI_COMPANION_SET(&pdev->dev, ACPI_COMPANION(&pcidev->dev)); + + if (device_add_software_node(&pdev->dev, node) < 0 || + platform_device_add(pdev) < 0) { + platform_device_put(pdev); + return NULL; + } + + return pdev; +} + +static void __xr17v35x_unregister_gpio(struct platform_device *pdev) +{ + device_remove_software_node(&pdev->dev); + platform_device_unregister(pdev); +} + +static const struct property_entry exar_gpio_properties[] = { + PROPERTY_ENTRY_U32("exar,first-pin", 0), + PROPERTY_ENTRY_U32("ngpios", 16), + { } +}; + +static const struct software_node exar_gpio_node = { + .properties = exar_gpio_properties, +}; + +static int xr17v35x_register_gpio(struct pci_dev *pcidev, struct uart_8250_port *port) +{ + if (pcidev->vendor == PCI_VENDOR_ID_EXAR) + port->port.private_data = + __xr17v35x_register_gpio(pcidev, &exar_gpio_node); + + return 0; +} + +static void xr17v35x_unregister_gpio(struct uart_8250_port *port) +{ + if (!port->port.private_data) + return; + + __xr17v35x_unregister_gpio(port->port.private_data); + port->port.private_data = NULL; +} + +static int generic_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED); + u8 __iomem *p = port->membase; + u8 value; + + value = readb(p + UART_EXAR_FCTR); + if (is_rs485) + value |= UART_FCTR_EXAR_485; + else + value &= ~UART_FCTR_EXAR_485; + + writeb(value, p + UART_EXAR_FCTR); + + if (is_rs485) + writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR); + + return 0; +} + +static const struct serial_rs485 generic_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND, +}; + +static const struct exar8250_platform exar8250_default_platform = { + .register_gpio = xr17v35x_register_gpio, + .unregister_gpio = xr17v35x_unregister_gpio, + .rs485_config = generic_rs485_config, + .rs485_supported = &generic_rs485_supported, +}; + +static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED); + u8 __iomem *p = port->membase; + u8 mask = IOT2040_UART1_MASK; + u8 mode, value; + + if (is_rs485) { + if (rs485->flags & SER_RS485_RX_DURING_TX) + mode = IOT2040_UART_MODE_RS422; + else + mode = IOT2040_UART_MODE_RS485; + + if (rs485->flags & SER_RS485_TERMINATE_BUS) + mode |= IOT2040_UART_TERMINATE_BUS; + } else { + mode = IOT2040_UART_MODE_RS232; + } + + if 
(port->line == 3) { + mask <<= IOT2040_UART2_SHIFT; + mode <<= IOT2040_UART2_SHIFT; + } + + value = readb(p + UART_EXAR_MPIOLVL_7_0); + value &= ~mask; + value |= mode; + writeb(value, p + UART_EXAR_MPIOLVL_7_0); + + return generic_rs485_config(port, termios, rs485); +} + +static const struct serial_rs485 iot2040_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | + SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS, +}; + +static const struct property_entry iot2040_gpio_properties[] = { + PROPERTY_ENTRY_U32("exar,first-pin", 10), + PROPERTY_ENTRY_U32("ngpios", 1), + { } +}; + +static const struct software_node iot2040_gpio_node = { + .properties = iot2040_gpio_properties, +}; + +static int iot2040_register_gpio(struct pci_dev *pcidev, + struct uart_8250_port *port) +{ + u8 __iomem *p = port->port.membase; + + writeb(IOT2040_UARTS_DEFAULT_MODE, p + UART_EXAR_MPIOLVL_7_0); + writeb(IOT2040_UARTS_GPIO_LO_MODE, p + UART_EXAR_MPIOSEL_7_0); + writeb(IOT2040_UARTS_ENABLE, p + UART_EXAR_MPIOLVL_15_8); + writeb(IOT2040_UARTS_GPIO_HI_MODE, p + UART_EXAR_MPIOSEL_15_8); + + port->port.private_data = + __xr17v35x_register_gpio(pcidev, &iot2040_gpio_node); + + return 0; +} + +static const struct exar8250_platform iot2040_platform = { + .rs485_config = iot2040_rs485_config, + .rs485_supported = &iot2040_rs485_supported, + .register_gpio = iot2040_register_gpio, + .unregister_gpio = xr17v35x_unregister_gpio, +}; + +/* + * For SIMATIC IOT2000, only IOT2040 and its variants have the Exar device, + * IOT2020 doesn't have. Therefore it is sufficient to match on the common + * board name after the device was found. + */ +static const struct dmi_system_id exar_platforms[] = { + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), + }, + .driver_data = (void *)&iot2040_platform, + }, + {} +}; + +static const struct exar8250_platform *exar_get_platform(void) +{ + const struct dmi_system_id *dmi_match; + + dmi_match = dmi_first_match(exar_platforms); + if (dmi_match) + return dmi_match->driver_data; + + return &exar8250_default_platform; +} + +static int +pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev, + struct uart_8250_port *port, int idx) +{ + const struct exar8250_platform *platform = exar_get_platform(); + unsigned int offset = idx * 0x400; + unsigned int baud = 7812500; + u8 __iomem *p; + int ret; + + port->port.uartclk = baud * 16; + port->port.rs485_config = platform->rs485_config; + port->port.rs485_supported = *(platform->rs485_supported); + + /* + * Setup the UART clock for the devices on expansion slot to + * half the clock speed of the main chip (which is 125MHz) + */ + if (idx >= 8) + port->port.uartclk /= 2; + + ret = default_setup(priv, pcidev, idx, offset, port); + if (ret) + return ret; + + p = port->port.membase; + + writeb(0x00, p + UART_EXAR_8XMODE); + writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR); + writeb(128, p + UART_EXAR_TXTRG); + writeb(128, p + UART_EXAR_RXTRG); + + if (idx == 0) { + /* Setup Multipurpose Input/Output pins. */ + setup_gpio(pcidev, p); + + ret = platform->register_gpio(pcidev, port); + } + + return ret; +} + +static void pci_xr17v35x_exit(struct pci_dev *pcidev) +{ + const struct exar8250_platform *platform = exar_get_platform(); + struct exar8250 *priv = pci_get_drvdata(pcidev); + struct uart_8250_port *port = serial8250_get_port(priv->line[0]); + + platform->unregister_gpio(port); +} + +static inline void exar_misc_clear(struct exar8250 *priv) +{ + /* Clear all PCI interrupts by reading INT0. 
No effect on IIR */ + readb(priv->virt + UART_EXAR_INT0); + + /* Clear INT0 for Expansion Interface slave ports, too */ + if (priv->board->num_ports > 8) + readb(priv->virt + 0x2000 + UART_EXAR_INT0); +} + +/* + * These Exar UARTs have an extra interrupt indicator that could fire for a + * few interrupts that are not presented/cleared through IIR. One of which is + * a wakeup interrupt when coming out of sleep. These interrupts are only + * cleared by reading global INT0 or INT1 registers as interrupts are + * associated with channel 0. The INT[3:0] registers _are_ accessible from each + * channel's address space, but for the sake of bus efficiency we register a + * dedicated handler at the PCI device level to handle them. + */ +static irqreturn_t exar_misc_handler(int irq, void *data) +{ + exar_misc_clear(data); + + return IRQ_HANDLED; +} + +static int +exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) +{ + unsigned int nr_ports, i, bar = 0, maxnr; + struct exar8250_board *board; + struct uart_8250_port uart; + struct exar8250 *priv; + int rc; + + board = (struct exar8250_board *)ent->driver_data; + if (!board) + return -EINVAL; + + rc = pcim_enable_device(pcidev); + if (rc) + return rc; + + maxnr = pci_resource_len(pcidev, bar) >> (board->reg_shift + 3); + + if (pcidev->vendor == PCI_VENDOR_ID_ACCESSIO) + nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1); + else if (board->num_ports) + nr_ports = board->num_ports; + else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL) + nr_ports = pcidev->device & 0xff; + else + nr_ports = pcidev->device & 0x0f; + + priv = devm_kzalloc(&pcidev->dev, struct_size(priv, line, nr_ports), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->board = board; + priv->virt = pcim_iomap(pcidev, bar, 0); + if (!priv->virt) + return -ENOMEM; + + pci_set_master(pcidev); + + rc = pci_alloc_irq_vectors(pcidev, 1, 1, PCI_IRQ_ALL_TYPES); + if (rc < 0) + return rc; + + memset(&uart, 0, sizeof(uart)); + uart.port.flags = UPF_SHARE_IRQ | UPF_EXAR_EFR | UPF_FIXED_TYPE | UPF_FIXED_PORT; + uart.port.irq = pci_irq_vector(pcidev, 0); + uart.port.dev = &pcidev->dev; + + rc = devm_request_irq(&pcidev->dev, uart.port.irq, exar_misc_handler, + IRQF_SHARED, "exar_uart", priv); + if (rc) + return rc; + + /* Clear interrupts */ + exar_misc_clear(priv); + + for (i = 0; i < nr_ports && i < maxnr; i++) { + rc = board->setup(priv, pcidev, &uart, i); + if (rc) { + dev_err(&pcidev->dev, "Failed to setup port %u\n", i); + break; + } + + dev_dbg(&pcidev->dev, "Setup PCI port: port %lx, irq %d, type %d\n", + uart.port.iobase, uart.port.irq, uart.port.iotype); + + priv->line[i] = serial8250_register_8250_port(&uart); + if (priv->line[i] < 0) { + dev_err(&pcidev->dev, + "Couldn't register serial port %lx, irq %d, type %d, error %d\n", + uart.port.iobase, uart.port.irq, + uart.port.iotype, priv->line[i]); + break; + } + } + priv->nr = i; + pci_set_drvdata(pcidev, priv); + return 0; +} + +static void exar_pci_remove(struct pci_dev *pcidev) +{ + struct exar8250 *priv = pci_get_drvdata(pcidev); + unsigned int i; + + for (i = 0; i < priv->nr; i++) + serial8250_unregister_port(priv->line[i]); + + if (priv->board->exit) + priv->board->exit(pcidev); +} + +static int __maybe_unused exar_suspend(struct device *dev) +{ + struct pci_dev *pcidev = to_pci_dev(dev); + struct exar8250 *priv = pci_get_drvdata(pcidev); + unsigned int i; + + for (i = 0; i < priv->nr; i++) + if (priv->line[i] >= 0) + serial8250_suspend_port(priv->line[i]); + + /* Ensure that every init quirk is properly torn 
down */ + if (priv->board->exit) + priv->board->exit(pcidev); + + return 0; +} + +static int __maybe_unused exar_resume(struct device *dev) +{ + struct exar8250 *priv = dev_get_drvdata(dev); + unsigned int i; + + exar_misc_clear(priv); + + for (i = 0; i < priv->nr; i++) + if (priv->line[i] >= 0) + serial8250_resume_port(priv->line[i]); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume); + +static const struct exar8250_board pbn_fastcom335_2 = { + .num_ports = 2, + .setup = pci_fastcom335_setup, +}; + +static const struct exar8250_board pbn_fastcom335_4 = { + .num_ports = 4, + .setup = pci_fastcom335_setup, +}; + +static const struct exar8250_board pbn_fastcom335_8 = { + .num_ports = 8, + .setup = pci_fastcom335_setup, +}; + +static const struct exar8250_board pbn_connect = { + .setup = pci_connect_tech_setup, +}; + +static const struct exar8250_board pbn_exar_ibm_saturn = { + .num_ports = 1, + .setup = pci_xr17c154_setup, +}; + +static const struct exar8250_board pbn_exar_XR17C15x = { + .setup = pci_xr17c154_setup, +}; + +static const struct exar8250_board pbn_exar_XR17V35x = { + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_2 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_4 = { + .num_ports = 4, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_8 = { + .num_ports = 8, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_exar_XR17V4358 = { + .num_ports = 12, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_exar_XR17V8358 = { + .num_ports = 16, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +#define CONNECT_DEVICE(devid, sdevid, bd) { \ + PCI_DEVICE_SUB( \ + PCI_VENDOR_ID_EXAR, \ + PCI_DEVICE_ID_EXAR_##devid, \ + PCI_SUBVENDOR_ID_CONNECT_TECH, \ + PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_##sdevid), 0, 0, \ + (kernel_ulong_t)&bd \ + } + +#define EXAR_DEVICE(vend, devid, bd) { PCI_DEVICE_DATA(vend, devid, &bd) } + +#define IBM_DEVICE(devid, sdevid, bd) { \ + PCI_DEVICE_SUB( \ + PCI_VENDOR_ID_EXAR, \ + PCI_DEVICE_ID_EXAR_##devid, \ + PCI_VENDOR_ID_IBM, \ + PCI_SUBDEVICE_ID_IBM_##sdevid), 0, 0, \ + (kernel_ulong_t)&bd \ + } + +#define USR_DEVICE(devid, sdevid, bd) { \ + PCI_DEVICE_SUB( \ + PCI_VENDOR_ID_USR, \ + PCI_DEVICE_ID_EXAR_##devid, \ + PCI_VENDOR_ID_EXAR, \ + PCI_SUBDEVICE_ID_USR_##sdevid), 0, 0, \ + (kernel_ulong_t)&bd \ + } + +static const struct pci_device_id exar_pci_tbl[] = { + EXAR_DEVICE(ACCESSIO, COM_2S, pbn_exar_XR17C15x), + EXAR_DEVICE(ACCESSIO, COM_4S, pbn_exar_XR17C15x), + EXAR_DEVICE(ACCESSIO, COM_8S, pbn_exar_XR17C15x), + EXAR_DEVICE(ACCESSIO, COM232_8, pbn_exar_XR17C15x), + EXAR_DEVICE(ACCESSIO, COM_2SM, pbn_exar_XR17C15x), + EXAR_DEVICE(ACCESSIO, COM_4SM, pbn_exar_XR17C15x), + EXAR_DEVICE(ACCESSIO, COM_8SM, pbn_exar_XR17C15x), + + CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect), + CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect), + CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect), + CONNECT_DEVICE(XR17C152, UART_1_1, pbn_connect), + CONNECT_DEVICE(XR17C154, UART_2_2, pbn_connect), + CONNECT_DEVICE(XR17C158, UART_4_4, pbn_connect), + CONNECT_DEVICE(XR17C152, UART_2, pbn_connect), + CONNECT_DEVICE(XR17C154, UART_4, pbn_connect), + CONNECT_DEVICE(XR17C158, UART_8, pbn_connect), + 
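+	/*
+	 * Illustrative note, not part of the upstream table: each
+	 * CONNECT_DEVICE()/EXAR_DEVICE() entry expands to a pci_device_id
+	 * whose driver_data points at the board description, roughly
+	 *
+	 *	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_EXAR,
+	 *			 PCI_DEVICE_ID_EXAR_XR17C152,
+	 *			 PCI_SUBVENDOR_ID_CONNECT_TECH,
+	 *			 PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485),
+	 *	  0, 0, (kernel_ulong_t)&pbn_connect },
+	 *
+	 * which exar_pci_probe() recovers via ent->driver_data.
+	 */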
CONNECT_DEVICE(XR17C152, UART_2_485, pbn_connect), + CONNECT_DEVICE(XR17C154, UART_4_485, pbn_connect), + CONNECT_DEVICE(XR17C158, UART_8_485, pbn_connect), + + IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn), + + /* USRobotics USR298x-OEM PCI Modems */ + USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x), + USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x), + + /* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */ + EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x), + EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x), + EXAR_DEVICE(EXAR, XR17C158, pbn_exar_XR17C15x), + + /* Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs */ + EXAR_DEVICE(EXAR, XR17V352, pbn_exar_XR17V35x), + EXAR_DEVICE(EXAR, XR17V354, pbn_exar_XR17V35x), + EXAR_DEVICE(EXAR, XR17V358, pbn_exar_XR17V35x), + EXAR_DEVICE(EXAR, XR17V4358, pbn_exar_XR17V4358), + EXAR_DEVICE(EXAR, XR17V8358, pbn_exar_XR17V8358), + EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_fastcom35x_2), + EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_fastcom35x_4), + EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_fastcom35x_8), + + EXAR_DEVICE(COMMTECH, 4222PCI335, pbn_fastcom335_2), + EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4), + EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4), + EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8), + + EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x), + EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x), + EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x), + EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x), + EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, exar_pci_tbl); + +static struct pci_driver exar_pci_driver = { + .name = "exar_serial", + .probe = exar_pci_probe, + .remove = exar_pci_remove, + .driver = { + .pm = &exar_pci_pm, + }, + .id_table = exar_pci_tbl, +}; +module_pci_driver(exar_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Exar Serial Driver"); +MODULE_AUTHOR("Sudip Mukherjee "); diff --git a/drivers/tty/serial/8250/8250_exar_st16c554.c b/drivers/tty/serial/8250/8250_exar_st16c554.c new file mode 100644 index 000000000..933811ebf --- /dev/null +++ b/drivers/tty/serial/8250/8250_exar_st16c554.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com > + * Based on 8250_boca. + * + * Copyright (C) 2005 Russell King. 
+ * Data taken from include/asm-i386/serial.h + */ +#include +#include +#include + +#include "8250.h" + +static struct plat_serial8250_port exar_data[] = { + SERIAL8250_PORT(0x100, 5), + SERIAL8250_PORT(0x108, 5), + SERIAL8250_PORT(0x110, 5), + SERIAL8250_PORT(0x118, 5), + { }, +}; + +static struct platform_device exar_device = { + .name = "serial8250", + .id = PLAT8250_DEV_EXAR_ST16C554, + .dev = { + .platform_data = exar_data, + }, +}; + +static int __init exar_init(void) +{ + return platform_device_register(&exar_device); +} + +module_init(exar_init); + +MODULE_AUTHOR("Paul B Schroeder"); +MODULE_DESCRIPTION("8250 serial probe module for Exar cards"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c new file mode 100644 index 000000000..e2aa2a1a0 --- /dev/null +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Probe for F81216A LPC to 4 UART + * + * Copyright (C) 2014-2016 Ricardo Ribalda, Qtechnology A/S + */ +#include +#include +#include +#include +#include +#include +#include "8250.h" + +#define ADDR_PORT 0 +#define DATA_PORT 1 +#define EXIT_KEY 0xAA +#define CHIP_ID1 0x20 +#define CHIP_ID2 0x21 +#define CHIP_ID_F81865 0x0407 +#define CHIP_ID_F81866 0x1010 +#define CHIP_ID_F81966 0x0215 +#define CHIP_ID_F81216AD 0x1602 +#define CHIP_ID_F81216H 0x0501 +#define CHIP_ID_F81216 0x0802 +#define VENDOR_ID1 0x23 +#define VENDOR_ID1_VAL 0x19 +#define VENDOR_ID2 0x24 +#define VENDOR_ID2_VAL 0x34 +#define IO_ADDR1 0x61 +#define IO_ADDR2 0x60 +#define LDN 0x7 + +#define FINTEK_IRQ_MODE 0x70 +#define IRQ_SHARE BIT(4) +#define IRQ_MODE_MASK (BIT(6) | BIT(5)) +#define IRQ_LEVEL_LOW 0 +#define IRQ_EDGE_HIGH BIT(5) + +/* + * F81216H clock source register, the value and mask is the same with F81866, + * but it's on F0h. + * + * Clock speeds for UART (register F0h) + * 00: 1.8432MHz. + * 01: 18.432MHz. + * 10: 24MHz. + * 11: 14.769MHz. + */ +#define RS485 0xF0 +#define RTS_INVERT BIT(5) +#define RS485_URA BIT(4) +#define RXW4C_IRA BIT(3) +#define TXW4C_IRA BIT(2) + +#define FIFO_CTRL 0xF6 +#define FIFO_MODE_MASK (BIT(1) | BIT(0)) +#define FIFO_MODE_128 (BIT(1) | BIT(0)) +#define RXFTHR_MODE_MASK (BIT(5) | BIT(4)) +#define RXFTHR_MODE_4X BIT(5) + +#define F81216_LDN_LOW 0x0 +#define F81216_LDN_HIGH 0x4 + +/* + * F81866/966 registers + * + * The IRQ setting mode of F81866/966 is not the same with F81216 series. + * Level/Low: IRQ_MODE0:0, IRQ_MODE1:0 + * Edge/High: IRQ_MODE0:1, IRQ_MODE1:0 + * + * Clock speeds for UART (register F2h) + * 00: 1.8432MHz. + * 01: 18.432MHz. + * 10: 24MHz. + * 11: 14.769MHz. 
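+ *
+ * (Illustrative note, not part of the upstream comment: each option is
+ * intended to be 16x a target line rate, e.g. 1.8432 MHz / 16 = 115200 and
+ * 24 MHz / 16 = 1500000, which is how clock_table[] pairs up with
+ * baudrate_table[] in fintek_8250_set_termios() below.)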
+ */ +#define F81866_IRQ_MODE 0xf0 +#define F81866_IRQ_SHARE BIT(0) +#define F81866_IRQ_MODE0 BIT(1) + +#define F81866_FIFO_CTRL FIFO_CTRL +#define F81866_IRQ_MODE1 BIT(3) + +#define F81866_LDN_LOW 0x10 +#define F81866_LDN_HIGH 0x16 + +#define F81866_UART_CLK 0xF2 +#define F81866_UART_CLK_MASK (BIT(1) | BIT(0)) +#define F81866_UART_CLK_1_8432MHZ 0 +#define F81866_UART_CLK_14_769MHZ (BIT(1) | BIT(0)) +#define F81866_UART_CLK_18_432MHZ BIT(0) +#define F81866_UART_CLK_24MHZ BIT(1) + +struct fintek_8250 { + u16 pid; + u16 base_port; + u8 index; + u8 key; +}; + +static u8 sio_read_reg(struct fintek_8250 *pdata, u8 reg) +{ + outb(reg, pdata->base_port + ADDR_PORT); + return inb(pdata->base_port + DATA_PORT); +} + +static void sio_write_reg(struct fintek_8250 *pdata, u8 reg, u8 data) +{ + outb(reg, pdata->base_port + ADDR_PORT); + outb(data, pdata->base_port + DATA_PORT); +} + +static void sio_write_mask_reg(struct fintek_8250 *pdata, u8 reg, u8 mask, + u8 data) +{ + u8 tmp; + + tmp = (sio_read_reg(pdata, reg) & ~mask) | (mask & data); + sio_write_reg(pdata, reg, tmp); +} + +static int fintek_8250_enter_key(u16 base_port, u8 key) +{ + if (!request_muxed_region(base_port, 2, "8250_fintek")) + return -EBUSY; + + /* Force to deactive all SuperIO in this base_port */ + outb(EXIT_KEY, base_port + ADDR_PORT); + + outb(key, base_port + ADDR_PORT); + outb(key, base_port + ADDR_PORT); + return 0; +} + +static void fintek_8250_exit_key(u16 base_port) +{ + + outb(EXIT_KEY, base_port + ADDR_PORT); + release_region(base_port + ADDR_PORT, 2); +} + +static int fintek_8250_check_id(struct fintek_8250 *pdata) +{ + u16 chip; + + if (sio_read_reg(pdata, VENDOR_ID1) != VENDOR_ID1_VAL) + return -ENODEV; + + if (sio_read_reg(pdata, VENDOR_ID2) != VENDOR_ID2_VAL) + return -ENODEV; + + chip = sio_read_reg(pdata, CHIP_ID1); + chip |= sio_read_reg(pdata, CHIP_ID2) << 8; + + switch (chip) { + case CHIP_ID_F81865: + case CHIP_ID_F81866: + case CHIP_ID_F81966: + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81216: + break; + default: + return -ENODEV; + } + + pdata->pid = chip; + return 0; +} + +static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min, + int *max) +{ + switch (pdata->pid) { + case CHIP_ID_F81966: + case CHIP_ID_F81865: + case CHIP_ID_F81866: + *min = F81866_LDN_LOW; + *max = F81866_LDN_HIGH; + return 0; + + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81216: + *min = F81216_LDN_LOW; + *max = F81216_LDN_HIGH; + return 0; + } + + return -ENODEV; +} + +static int fintek_8250_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + uint8_t config = 0; + struct fintek_8250 *pdata = port->private_data; + + if (!pdata) + return -EINVAL; + + + if (rs485->flags & SER_RS485_ENABLED) { + /* Hardware do not support same RTS level on send and receive */ + if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == + !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) + return -EINVAL; + config |= RS485_URA; + } + + if (rs485->delay_rts_before_send) { + rs485->delay_rts_before_send = 1; + config |= TXW4C_IRA; + } + + if (rs485->delay_rts_after_send) { + rs485->delay_rts_after_send = 1; + config |= RXW4C_IRA; + } + + if (rs485->flags & SER_RS485_RTS_ON_SEND) + config |= RTS_INVERT; + + if (fintek_8250_enter_key(pdata->base_port, pdata->key)) + return -EBUSY; + + sio_write_reg(pdata, LDN, pdata->index); + sio_write_reg(pdata, RS485, config); + fintek_8250_exit_key(pdata->base_port); + + return 0; +} + +static void fintek_8250_set_irq_mode(struct 
fintek_8250 *pdata, bool is_level) +{ + sio_write_reg(pdata, LDN, pdata->index); + + switch (pdata->pid) { + case CHIP_ID_F81966: + case CHIP_ID_F81866: + sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1, + 0); + fallthrough; + case CHIP_ID_F81865: + sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE, + F81866_IRQ_SHARE); + sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_MODE0, + is_level ? 0 : F81866_IRQ_MODE0); + break; + + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81216: + sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE, + IRQ_SHARE); + sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_MODE_MASK, + is_level ? IRQ_LEVEL_LOW : IRQ_EDGE_HIGH); + break; + } +} + +static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata) +{ + switch (pdata->pid) { + case CHIP_ID_F81216H: /* 128Bytes FIFO */ + case CHIP_ID_F81966: + case CHIP_ID_F81866: + sio_write_mask_reg(pdata, FIFO_CTRL, + FIFO_MODE_MASK | RXFTHR_MODE_MASK, + FIFO_MODE_128 | RXFTHR_MODE_4X); + break; + + default: /* Default 16Bytes FIFO */ + break; + } +} + +static void fintek_8250_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct fintek_8250 *pdata = port->private_data; + unsigned int baud = tty_termios_baud_rate(termios); + int i; + u8 reg; + static u32 baudrate_table[] = {115200, 921600, 1152000, 1500000}; + static u8 clock_table[] = { F81866_UART_CLK_1_8432MHZ, + F81866_UART_CLK_14_769MHZ, F81866_UART_CLK_18_432MHZ, + F81866_UART_CLK_24MHZ }; + + /* + * We'll use serial8250_do_set_termios() for baud = 0, otherwise It'll + * crash on baudrate_table[i] % baud with "division by zero". + */ + if (!baud) + goto exit; + + switch (pdata->pid) { + case CHIP_ID_F81216H: + reg = RS485; + break; + case CHIP_ID_F81966: + case CHIP_ID_F81866: + reg = F81866_UART_CLK; + break; + default: + /* Don't change clocksource with unknown PID */ + dev_warn(port->dev, + "%s: pid: %x Not support. 
use default set_termios.\n", + __func__, pdata->pid); + goto exit; + } + + for (i = 0; i < ARRAY_SIZE(baudrate_table); ++i) { + if (baud > baudrate_table[i] || baudrate_table[i] % baud != 0) + continue; + + if (port->uartclk == baudrate_table[i] * 16) + break; + + if (fintek_8250_enter_key(pdata->base_port, pdata->key)) + continue; + + port->uartclk = baudrate_table[i] * 16; + + sio_write_reg(pdata, LDN, pdata->index); + sio_write_mask_reg(pdata, reg, F81866_UART_CLK_MASK, + clock_table[i]); + + fintek_8250_exit_key(pdata->base_port); + break; + } + + if (i == ARRAY_SIZE(baudrate_table)) { + baud = tty_termios_baud_rate(old); + tty_termios_encode_baud_rate(termios, baud, baud); + } + +exit: + serial8250_do_set_termios(port, termios, old); +} + +static void fintek_8250_set_termios_handler(struct uart_8250_port *uart) +{ + struct fintek_8250 *pdata = uart->port.private_data; + + switch (pdata->pid) { + case CHIP_ID_F81216H: + case CHIP_ID_F81966: + case CHIP_ID_F81866: + uart->port.set_termios = fintek_8250_set_termios; + break; + + default: + break; + } +} + +static int probe_setup_port(struct fintek_8250 *pdata, + struct uart_8250_port *uart) +{ + static const u16 addr[] = {0x4e, 0x2e}; + static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67}; + struct irq_data *irq_data; + bool level_mode = false; + int i, j, k, min, max; + + for (i = 0; i < ARRAY_SIZE(addr); i++) { + for (j = 0; j < ARRAY_SIZE(keys); j++) { + pdata->base_port = addr[i]; + pdata->key = keys[j]; + + if (fintek_8250_enter_key(addr[i], keys[j])) + continue; + if (fintek_8250_check_id(pdata) || + fintek_8250_get_ldn_range(pdata, &min, &max)) { + fintek_8250_exit_key(addr[i]); + continue; + } + + for (k = min; k < max; k++) { + u16 aux; + + sio_write_reg(pdata, LDN, k); + aux = sio_read_reg(pdata, IO_ADDR1); + aux |= sio_read_reg(pdata, IO_ADDR2) << 8; + if (aux != uart->port.iobase) + continue; + + pdata->index = k; + + irq_data = irq_get_irq_data(uart->port.irq); + if (irq_data) + level_mode = + irqd_is_level_type(irq_data); + + fintek_8250_set_irq_mode(pdata, level_mode); + fintek_8250_set_max_fifo(pdata); + + fintek_8250_exit_key(addr[i]); + + return 0; + } + + fintek_8250_exit_key(addr[i]); + } + } + + return -ENODEV; +} + +/* Only the first port supports delays */ +static const struct serial_rs485 fintek_8250_rs485_supported_port0 = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + +static const struct serial_rs485 fintek_8250_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND, +}; + +static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart) +{ + struct fintek_8250 *pdata = uart->port.private_data; + + switch (pdata->pid) { + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81966: + case CHIP_ID_F81866: + case CHIP_ID_F81865: + uart->port.rs485_config = fintek_8250_rs485_config; + if (!pdata->index) + uart->port.rs485_supported = fintek_8250_rs485_supported_port0; + else + uart->port.rs485_supported = fintek_8250_rs485_supported; + break; + + default: /* No RS485 Auto direction functional */ + break; + } +} + +int fintek_8250_probe(struct uart_8250_port *uart) +{ + struct fintek_8250 *pdata; + struct fintek_8250 probe_data; + + if (probe_setup_port(&probe_data, uart)) + return -ENODEV; + + pdata = devm_kzalloc(uart->port.dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + memcpy(pdata, &probe_data, sizeof(probe_data)); + 
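+	/*
+	 * Descriptive note: probe_data lives on probe's stack; only the
+	 * devm-allocated copy is kept for the lifetime of the port.
+	 */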
uart->port.private_data = pdata; + fintek_8250_set_rs485_handler(uart); + fintek_8250_set_termios_handler(uart); + + return 0; +} diff --git a/drivers/tty/serial/8250/8250_fourport.c b/drivers/tty/serial/8250/8250_fourport.c new file mode 100644 index 000000000..3215b9b7a --- /dev/null +++ b/drivers/tty/serial/8250/8250_fourport.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005 Russell King. + * Data taken from include/asm-i386/serial.h + */ +#include +#include +#include + +#include "8250.h" + +#define SERIAL8250_FOURPORT(_base, _irq) \ + SERIAL8250_PORT_FLAGS(_base, _irq, UPF_FOURPORT) + +static struct plat_serial8250_port fourport_data[] = { + SERIAL8250_FOURPORT(0x1a0, 9), + SERIAL8250_FOURPORT(0x1a8, 9), + SERIAL8250_FOURPORT(0x1b0, 9), + SERIAL8250_FOURPORT(0x1b8, 9), + SERIAL8250_FOURPORT(0x2a0, 5), + SERIAL8250_FOURPORT(0x2a8, 5), + SERIAL8250_FOURPORT(0x2b0, 5), + SERIAL8250_FOURPORT(0x2b8, 5), + { }, +}; + +static struct platform_device fourport_device = { + .name = "serial8250", + .id = PLAT8250_DEV_FOURPORT, + .dev = { + .platform_data = fourport_data, + }, +}; + +static int __init fourport_init(void) +{ + return platform_device_register(&fourport_device); +} + +module_init(fourport_init); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("8250 serial probe module for AST Fourport cards"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c new file mode 100644 index 000000000..8adfaa183 --- /dev/null +++ b/drivers/tty/serial/8250/8250_fsl.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker. + * Copyright 2020 NXP + * Copyright 2020 Puresoftware Ltd. + * + * This isn't a full driver; it just provides an alternate IRQ + * handler to deal with an errata and provide ACPI wrapper. + * Everything else is just using the bog standard 8250 support. + * + * We follow code flow of serial8250_default_handle_irq() but add + * a check for a break and insert a dummy read on the Rx for the + * immediately following IRQ event. + * + * We re-use the already existing "bug handling" lsr_saved_flags + * field to carry the "what we just did" information from the one + * IRQ event to the next one. 
+ */ + +#include +#include +#include + +#include "8250.h" + +int fsl8250_handle_irq(struct uart_port *port) +{ + unsigned long flags; + u16 lsr, orig_lsr; + unsigned int iir; + struct uart_8250_port *up = up_to_u8250p(port); + + spin_lock_irqsave(&up->port.lock, flags); + + iir = port->serial_in(port, UART_IIR); + if (iir & UART_IIR_NO_INT) { + spin_unlock_irqrestore(&up->port.lock, flags); + return 0; + } + + /* This is the WAR; if last event was BRK, then read and return */ + if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) { + up->lsr_saved_flags &= ~UART_LSR_BI; + port->serial_in(port, UART_RX); + spin_unlock_irqrestore(&up->port.lock, flags); + return 1; + } + + lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR); + + /* Process incoming characters first */ + if ((lsr & (UART_LSR_DR | UART_LSR_BI)) && + (up->ier & (UART_IER_RLSI | UART_IER_RDI))) { + lsr = serial8250_rx_chars(up, lsr); + } + + /* Stop processing interrupts on input overrun */ + if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) { + unsigned long delay; + + up->ier = port->serial_in(port, UART_IER); + if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) { + port->ops->stop_rx(port); + } else { + /* Keep restarting the timer until + * the input overrun subsides. + */ + cancel_delayed_work(&up->overrun_backoff); + } + + delay = msecs_to_jiffies(up->overrun_backoff_time_ms); + schedule_delayed_work(&up->overrun_backoff, delay); + } + + serial8250_modem_status(up); + + if ((lsr & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) + serial8250_tx_chars(up); + + up->lsr_saved_flags |= orig_lsr & UART_LSR_BI; + + uart_unlock_and_check_sysrq_irqrestore(&up->port, flags); + + return 1; +} +EXPORT_SYMBOL_GPL(fsl8250_handle_irq); + +#ifdef CONFIG_ACPI +struct fsl8250_data { + int line; +}; + +static int fsl8250_acpi_probe(struct platform_device *pdev) +{ + struct fsl8250_data *data; + struct uart_8250_port port8250; + struct device *dev = &pdev->dev; + struct resource *regs; + + int ret, irq; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + memset(&port8250, 0, sizeof(port8250)); + + ret = device_property_read_u32(dev, "clock-frequency", + &port8250.port.uartclk); + if (ret) + return ret; + + spin_lock_init(&port8250.port.lock); + + port8250.port.mapbase = regs->start; + port8250.port.irq = irq; + port8250.port.handle_irq = fsl8250_handle_irq; + port8250.port.type = PORT_16550A; + port8250.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF + | UPF_FIXED_PORT | UPF_IOREMAP + | UPF_FIXED_TYPE; + port8250.port.dev = dev; + port8250.port.mapsize = resource_size(regs); + port8250.port.iotype = UPIO_MEM; + port8250.port.irqflags = IRQF_SHARED; + + port8250.port.membase = devm_ioremap(dev, port8250.port.mapbase, + port8250.port.mapsize); + if (!port8250.port.membase) + return -ENOMEM; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->line = serial8250_register_8250_port(&port8250); + if (data->line < 0) + return data->line; + + platform_set_drvdata(pdev, data); + return 0; +} + +static int fsl8250_acpi_remove(struct platform_device *pdev) +{ + struct fsl8250_data *data = platform_get_drvdata(pdev); + + serial8250_unregister_port(data->line); + return 0; +} + +static const struct acpi_device_id fsl_8250_acpi_id[] = { + { "NXP0018", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, fsl_8250_acpi_id); + +static struct platform_driver 
fsl8250_platform_driver = { + .driver = { + .name = "fsl-16550-uart", + .acpi_match_table = ACPI_PTR(fsl_8250_acpi_id), + }, + .probe = fsl8250_acpi_probe, + .remove = fsl8250_acpi_remove, +}; + +module_platform_driver(fsl8250_platform_driver); +#endif diff --git a/drivers/tty/serial/8250/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c new file mode 100644 index 000000000..3012ea03d --- /dev/null +++ b/drivers/tty/serial/8250/8250_hp300.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the 98626/98644/internal serial interface on hp300/hp400 + * (based on the National Semiconductor INS8250/NS16550AF/WD16C552 UARTs) + * + * Ported from 2.2 and modified to use the normal 8250 driver + * by Kars de Jong , May 2004. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +#if !defined(CONFIG_HPDCA) && !defined(CONFIG_HPAPCI) && !defined(CONFIG_COMPILE_TEST) +#warning CONFIG_SERIAL_8250 defined but neither CONFIG_HPDCA nor CONFIG_HPAPCI defined, are you sure? +#endif + +#ifdef CONFIG_HPAPCI +struct hp300_port { + struct hp300_port *next; /* next port */ + int line; /* line (tty) number */ +}; + +static struct hp300_port *hp300_ports; +#endif + +#ifdef CONFIG_HPDCA + +static int hpdca_init_one(struct dio_dev *d, + const struct dio_device_id *ent); +static void hpdca_remove_one(struct dio_dev *d); + +static struct dio_device_id hpdca_dio_tbl[] = { + { DIO_ID_DCA0 }, + { DIO_ID_DCA0REM }, + { DIO_ID_DCA1 }, + { DIO_ID_DCA1REM }, + { 0 } +}; + +static struct dio_driver hpdca_driver = { + .name = "hpdca", + .id_table = hpdca_dio_tbl, + .probe = hpdca_init_one, + .remove = hpdca_remove_one, +}; + +#endif + +static unsigned int num_ports; + +extern int hp300_uart_scode; + +/* Offset to UART registers from base of DCA */ +#define UART_OFFSET 17 + +#define DCA_ID 0x01 /* ID (read), reset (write) */ +#define DCA_IC 0x03 /* Interrupt control */ + +/* Interrupt control */ +#define DCA_IC_IE 0x80 /* Master interrupt enable */ + +#define HPDCA_BAUD_BASE 153600 + +/* Base address of the Frodo part */ +#define FRODO_BASE (0x41c000) + +/* + * Where we find the 8250-like APCI ports, and how far apart they are. + */ +#define FRODO_APCIBASE 0x0 +#define FRODO_APCISPACE 0x20 +#define FRODO_APCI_OFFSET(x) (FRODO_APCIBASE + ((x) * FRODO_APCISPACE)) + +#define HPAPCI_BAUD_BASE 500400 + +#ifdef CONFIG_SERIAL_8250_CONSOLE +/* + * Parse the bootinfo to find descriptions for headless console and + * debug serial ports and register them with the 8250 driver. 
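+ *
+ * (Illustrative note, not part of the upstream comment: uartclk is set to
+ * BAUD_BASE * 16, so a DCA console (HPDCA_BAUD_BASE = 153600) running the
+ * default 9600n8 ends up with divisor 2457600 / (16 * 9600) = 16.)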
+ */ +int __init hp300_setup_serial_console(void) +{ + int scode; + struct uart_port port; + + memset(&port, 0, sizeof(port)); + + if (hp300_uart_scode < 0 || hp300_uart_scode > DIO_SCMAX) + return 0; + + if (DIO_SCINHOLE(hp300_uart_scode)) + return 0; + + scode = hp300_uart_scode; + + /* Memory mapped I/O */ + port.iotype = UPIO_MEM; + port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF; + port.type = PORT_UNKNOWN; + + /* Check for APCI console */ + if (scode == 256) { +#ifdef CONFIG_HPAPCI + pr_info("Serial console is HP APCI 1\n"); + + port.uartclk = HPAPCI_BAUD_BASE * 16; + port.mapbase = (FRODO_BASE + FRODO_APCI_OFFSET(1)); + port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE); + port.regshift = 2; + add_preferred_console("ttyS", port.line, "9600n8"); +#else + pr_warn("Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n"); + return 0; +#endif + } else { +#ifdef CONFIG_HPDCA + unsigned long pa = dio_scodetophysaddr(scode); + if (!pa) + return 0; + + pr_info("Serial console is HP DCA at select code %d\n", scode); + + port.uartclk = HPDCA_BAUD_BASE * 16; + port.mapbase = (pa + UART_OFFSET); + port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE); + port.regshift = 1; + port.irq = DIO_IPL(pa + DIO_VIRADDRBASE); + + /* Enable board-interrupts */ + out_8(pa + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE); + + if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80) + add_preferred_console("ttyS", port.line, "9600n8"); +#else + pr_warn("Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n"); + return 0; +#endif + } + + if (early_serial_setup(&port) < 0) + pr_warn("%s: early_serial_setup() failed.\n", __func__); + return 0; +} +#endif /* CONFIG_SERIAL_8250_CONSOLE */ + +#ifdef CONFIG_HPDCA +static int hpdca_init_one(struct dio_dev *d, + const struct dio_device_id *ent) +{ + struct uart_8250_port uart; + int line; + +#ifdef CONFIG_SERIAL_8250_CONSOLE + if (hp300_uart_scode == d->scode) { + /* Already got it. */ + return 0; + } +#endif + memset(&uart, 0, sizeof(uart)); + + /* Memory mapped I/O */ + uart.port.iotype = UPIO_MEM; + uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF; + uart.port.irq = d->ipl; + uart.port.uartclk = HPDCA_BAUD_BASE * 16; + uart.port.mapbase = (d->resource.start + UART_OFFSET); + uart.port.membase = (char *)(uart.port.mapbase + DIO_VIRADDRBASE); + uart.port.regshift = 1; + uart.port.dev = &d->dev; + line = serial8250_register_8250_port(&uart); + + if (line < 0) { + dev_notice(&d->dev, + "8250_hp300: register_serial() DCA scode %d irq %d failed\n", + d->scode, uart.port.irq); + return -ENOMEM; + } + + /* Enable board-interrupts */ + out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE); + dio_set_drvdata(d, (void *)line); + + /* Reset the DCA */ + out_8(d->resource.start + DIO_VIRADDRBASE + DCA_ID, 0xff); + udelay(100); + + num_ports++; + + return 0; +} +#endif + +static int __init hp300_8250_init(void) +{ + static int called; +#ifdef CONFIG_HPAPCI + int line; + unsigned long base; + struct uart_8250_port uart; + struct hp300_port *port; + int i; +#endif + if (called) + return -ENODEV; + called = 1; + + if (!MACH_IS_HP300) + return -ENODEV; + +#ifdef CONFIG_HPDCA + dio_register_driver(&hpdca_driver); +#endif +#ifdef CONFIG_HPAPCI + if (hp300_model < HP_400) { + if (!num_ports) + return -ENODEV; + return 0; + } + /* These models have the Frodo chip. + * Port 0 is reserved for the Apollo Domain keyboard. + * Port 1 is either the console or the DCA. 
+ */ + for (i = 1; i < 4; i++) { + /* Port 1 is the console on a 425e, on other machines it's + * mapped to DCA. + */ +#ifdef CONFIG_SERIAL_8250_CONSOLE + if (i == 1) + continue; +#endif + + /* Create new serial device */ + port = kmalloc(sizeof(struct hp300_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + memset(&uart, 0, sizeof(uart)); + + base = (FRODO_BASE + FRODO_APCI_OFFSET(i)); + + /* Memory mapped I/O */ + uart.port.iotype = UPIO_MEM; + uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ + | UPF_BOOT_AUTOCONF; + /* XXX - no interrupt support yet */ + uart.port.irq = 0; + uart.port.uartclk = HPAPCI_BAUD_BASE * 16; + uart.port.mapbase = base; + uart.port.membase = (char *)(base + DIO_VIRADDRBASE); + uart.port.regshift = 2; + + line = serial8250_register_8250_port(&uart); + + if (line < 0) { + dev_notice(uart.port.dev, + "8250_hp300: register_serial() APCI %d irq %d failed\n", + i, uart.port.irq); + kfree(port); + continue; + } + + port->line = line; + port->next = hp300_ports; + hp300_ports = port; + + num_ports++; + } +#endif + + /* Any boards found? */ + if (!num_ports) + return -ENODEV; + + return 0; +} + +#ifdef CONFIG_HPDCA +static void hpdca_remove_one(struct dio_dev *d) +{ + int line; + + line = (int) dio_get_drvdata(d); + if (d->resource.start) { + /* Disable board-interrupts */ + out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, 0); + } + serial8250_unregister_port(line); +} +#endif + +static void __exit hp300_8250_exit(void) +{ +#ifdef CONFIG_HPAPCI + struct hp300_port *port, *to_free; + + for (port = hp300_ports; port; ) { + serial8250_unregister_port(port->line); + to_free = port; + port = port->next; + kfree(to_free); + } + + hp300_ports = NULL; +#endif +#ifdef CONFIG_HPDCA + dio_unregister_driver(&hpdca_driver); +#endif +} + +module_init(hp300_8250_init); +module_exit(hp300_8250_exit); +MODULE_DESCRIPTION("HP DCA/APCI serial driver"); +MODULE_AUTHOR("Kars de Jong "); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_hub6.c b/drivers/tty/serial/8250/8250_hub6.c new file mode 100644 index 000000000..273f59b9b --- /dev/null +++ b/drivers/tty/serial/8250/8250_hub6.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005 Russell King. 
+ * Data taken from include/asm-i386/serial.h + */ +#include +#include +#include + +#define HUB6(card, port) \ + { \ + .iobase = 0x302, \ + .irq = 3, \ + .uartclk = 1843200, \ + .iotype = UPIO_HUB6, \ + .flags = UPF_BOOT_AUTOCONF, \ + .hub6 = (card) << 6 | (port) << 3 | 1, \ + } + +static struct plat_serial8250_port hub6_data[] = { + HUB6(0, 0), + HUB6(0, 1), + HUB6(0, 2), + HUB6(0, 3), + HUB6(0, 4), + HUB6(0, 5), + HUB6(1, 0), + HUB6(1, 1), + HUB6(1, 2), + HUB6(1, 3), + HUB6(1, 4), + HUB6(1, 5), + { }, +}; + +static struct platform_device hub6_device = { + .name = "serial8250", + .id = PLAT8250_DEV_HUB6, + .dev = { + .platform_data = hub6_data, + }, +}; + +static int __init hub6_init(void) +{ + return platform_device_register(&hub6_device); +} + +module_init(hub6_init); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("8250 serial probe module for Hub6 cards"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c new file mode 100644 index 000000000..2b2f5d8d2 --- /dev/null +++ b/drivers/tty/serial/8250/8250_ingenic.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2010 Lars-Peter Clausen + * Copyright (C) 2015 Imagination Technologies + * + * Ingenic SoC UART support + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +/** ingenic_uart_config: SOC specific config data. */ +struct ingenic_uart_config { + int tx_loadsz; + int fifosize; +}; + +struct ingenic_uart_data { + struct clk *clk_module; + struct clk *clk_baud; + int line; +}; + +static const struct of_device_id of_match[]; + +#define UART_FCR_UME BIT(4) + +#define UART_MCR_MDCE BIT(7) +#define UART_MCR_FCM BIT(6) + +static struct earlycon_device *early_device; + +static uint8_t early_in(struct uart_port *port, int offset) +{ + return readl(port->membase + (offset << 2)); +} + +static void early_out(struct uart_port *port, int offset, uint8_t value) +{ + writel(value, port->membase + (offset << 2)); +} + +static void ingenic_early_console_putc(struct uart_port *port, unsigned char c) +{ + u16 lsr; + + do { + lsr = early_in(port, UART_LSR); + } while ((lsr & UART_LSR_TEMT) == 0); + + early_out(port, UART_TX, c); +} + +static void ingenic_early_console_write(struct console *console, + const char *s, unsigned int count) +{ + uart_console_write(&early_device->port, s, count, + ingenic_early_console_putc); +} + +static void __init ingenic_early_console_setup_clock(struct earlycon_device *dev) +{ + void *fdt = initial_boot_params; + const __be32 *prop; + int offset; + + offset = fdt_path_offset(fdt, "/ext"); + if (offset < 0) + return; + + prop = fdt_getprop(fdt, offset, "clock-frequency", NULL); + if (!prop) + return; + + dev->port.uartclk = be32_to_cpup(prop); +} + +static int __init ingenic_early_console_setup(struct earlycon_device *dev, + const char *opt) +{ + struct uart_port *port = &dev->port; + unsigned int divisor; + int baud = 115200; + + if (!dev->port.membase) + return -ENODEV; + + if (opt) { + unsigned int parity, bits, flow; /* unused for now */ + + uart_parse_options(opt, &baud, &parity, &bits, &flow); + } + + ingenic_early_console_setup_clock(dev); + + if (dev->baud) + baud = dev->baud; + divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud); + + early_out(port, UART_IER, 0); + early_out(port, UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN8); + early_out(port, UART_DLL, 0); + early_out(port, UART_DLM, 0); + early_out(port, UART_LCR, 
UART_LCR_WLEN8); + early_out(port, UART_FCR, UART_FCR_UME | UART_FCR_CLEAR_XMIT | + UART_FCR_CLEAR_RCVR | UART_FCR_ENABLE_FIFO); + early_out(port, UART_MCR, UART_MCR_RTS | UART_MCR_DTR); + + early_out(port, UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN8); + early_out(port, UART_DLL, divisor & 0xff); + early_out(port, UART_DLM, (divisor >> 8) & 0xff); + early_out(port, UART_LCR, UART_LCR_WLEN8); + + early_device = dev; + dev->con->write = ingenic_early_console_write; + + return 0; +} + +OF_EARLYCON_DECLARE(jz4740_uart, "ingenic,jz4740-uart", + ingenic_early_console_setup); + +OF_EARLYCON_DECLARE(jz4770_uart, "ingenic,jz4770-uart", + ingenic_early_console_setup); + +OF_EARLYCON_DECLARE(jz4775_uart, "ingenic,jz4775-uart", + ingenic_early_console_setup); + +OF_EARLYCON_DECLARE(jz4780_uart, "ingenic,jz4780-uart", + ingenic_early_console_setup); + +OF_EARLYCON_DECLARE(x1000_uart, "ingenic,x1000-uart", + ingenic_early_console_setup); + +static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value) +{ + int ier; + + switch (offset) { + case UART_FCR: + /* UART module enable */ + value |= UART_FCR_UME; + break; + + case UART_IER: + /* + * Enable receive timeout interrupt with the receive line + * status interrupt. + */ + value |= (value & 0x4) << 2; + break; + + case UART_MCR: + /* + * If we have enabled modem status IRQs we should enable + * modem mode. + */ + ier = p->serial_in(p, UART_IER); + + if (ier & UART_IER_MSI) + value |= UART_MCR_MDCE | UART_MCR_FCM; + else + value &= ~(UART_MCR_MDCE | UART_MCR_FCM); + break; + + default: + break; + } + + writeb(value, p->membase + (offset << p->regshift)); +} + +static unsigned int ingenic_uart_serial_in(struct uart_port *p, int offset) +{ + unsigned int value; + + value = readb(p->membase + (offset << p->regshift)); + + /* Hide non-16550 compliant bits from higher levels */ + switch (offset) { + case UART_FCR: + value &= ~UART_FCR_UME; + break; + + case UART_MCR: + value &= ~(UART_MCR_MDCE | UART_MCR_FCM); + break; + + default: + break; + } + return value; +} + +static int ingenic_uart_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct ingenic_uart_data *data; + const struct ingenic_uart_config *cdata; + struct resource *regs; + int irq, err, line; + + cdata = of_device_get_match_data(&pdev->dev); + if (!cdata) { + dev_err(&pdev->dev, "Error: No device match found\n"); + return -ENODEV; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "no registers defined\n"); + return -EINVAL; + } + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + spin_lock_init(&uart.port.lock); + uart.port.type = PORT_16550A; + uart.port.flags = UPF_SKIP_TEST | UPF_IOREMAP | UPF_FIXED_TYPE; + uart.port.iotype = UPIO_MEM; + uart.port.mapbase = regs->start; + uart.port.regshift = 2; + uart.port.serial_out = ingenic_uart_serial_out; + uart.port.serial_in = ingenic_uart_serial_in; + uart.port.irq = irq; + uart.port.dev = &pdev->dev; + uart.port.fifosize = cdata->fifosize; + uart.tx_loadsz = cdata->tx_loadsz; + uart.capabilities = UART_CAP_FIFO | UART_CAP_RTOIE; + + /* Check for a fixed line number */ + line = of_alias_get_id(pdev->dev.of_node, "serial"); + if (line >= 0) + uart.port.line = line; + + uart.port.membase = devm_ioremap(&pdev->dev, regs->start, + resource_size(regs)); + if (!uart.port.membase) + return -ENOMEM; + + data->clk_module = devm_clk_get(&pdev->dev, "module"); 
+ if (IS_ERR(data->clk_module)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->clk_module), + "unable to get module clock\n"); + + data->clk_baud = devm_clk_get(&pdev->dev, "baud"); + if (IS_ERR(data->clk_baud)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->clk_baud), + "unable to get baud clock\n"); + + err = clk_prepare_enable(data->clk_module); + if (err) { + dev_err(&pdev->dev, "could not enable module clock: %d\n", err); + goto out; + } + + err = clk_prepare_enable(data->clk_baud); + if (err) { + dev_err(&pdev->dev, "could not enable baud clock: %d\n", err); + goto out_disable_moduleclk; + } + uart.port.uartclk = clk_get_rate(data->clk_baud); + + data->line = serial8250_register_8250_port(&uart); + if (data->line < 0) { + err = data->line; + goto out_disable_baudclk; + } + + platform_set_drvdata(pdev, data); + return 0; + +out_disable_baudclk: + clk_disable_unprepare(data->clk_baud); +out_disable_moduleclk: + clk_disable_unprepare(data->clk_module); +out: + return err; +} + +static int ingenic_uart_remove(struct platform_device *pdev) +{ + struct ingenic_uart_data *data = platform_get_drvdata(pdev); + + serial8250_unregister_port(data->line); + clk_disable_unprepare(data->clk_module); + clk_disable_unprepare(data->clk_baud); + return 0; +} + +static const struct ingenic_uart_config jz4740_uart_config = { + .tx_loadsz = 8, + .fifosize = 16, +}; + +static const struct ingenic_uart_config jz4760_uart_config = { + .tx_loadsz = 16, + .fifosize = 32, +}; + +static const struct ingenic_uart_config jz4780_uart_config = { + .tx_loadsz = 32, + .fifosize = 64, +}; + +static const struct ingenic_uart_config x1000_uart_config = { + .tx_loadsz = 32, + .fifosize = 64, +}; + +static const struct of_device_id of_match[] = { + { .compatible = "ingenic,jz4740-uart", .data = &jz4740_uart_config }, + { .compatible = "ingenic,jz4760-uart", .data = &jz4760_uart_config }, + { .compatible = "ingenic,jz4770-uart", .data = &jz4760_uart_config }, + { .compatible = "ingenic,jz4775-uart", .data = &jz4760_uart_config }, + { .compatible = "ingenic,jz4780-uart", .data = &jz4780_uart_config }, + { .compatible = "ingenic,x1000-uart", .data = &x1000_uart_config }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_match); + +static struct platform_driver ingenic_uart_platform_driver = { + .driver = { + .name = "ingenic-uart", + .of_match_table = of_match, + }, + .probe = ingenic_uart_probe, + .remove = ingenic_uart_remove, +}; + +module_platform_driver(ingenic_uart_platform_driver); + +MODULE_AUTHOR("Paul Burton"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Ingenic SoC UART driver"); diff --git a/drivers/tty/serial/8250/8250_ioc3.c b/drivers/tty/serial/8250/8250_ioc3.c new file mode 100644 index 000000000..d5a39e105 --- /dev/null +++ b/drivers/tty/serial/8250/8250_ioc3.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SGI IOC3 8250 UART driver + * + * Copyright (C) 2019 Thomas Bogendoerfer + * + * based on code Copyright (C) 2005 Stanislaw Skowronek + * Copyright (C) 2014 Joshua Kinard + */ + +#include +#include +#include +#include + +#include "8250.h" + +#define IOC3_UARTCLK (22000000 / 3) + +struct ioc3_8250_data { + int line; +}; + +static unsigned int ioc3_serial_in(struct uart_port *p, int offset) +{ + return readb(p->membase + (offset ^ 3)); +} + +static void ioc3_serial_out(struct uart_port *p, int offset, int value) +{ + writeb(value, p->membase + (offset ^ 3)); +} + +static int serial8250_ioc3_probe(struct platform_device *pdev) +{ + struct ioc3_8250_data *data; + struct uart_8250_port 
up; + struct resource *r; + void __iomem *membase; + int irq, line; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + membase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!membase) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + irq = 0; /* no interrupt -> use polling */ + + /* Register serial ports with 8250.c */ + memset(&up, 0, sizeof(struct uart_8250_port)); + up.port.iotype = UPIO_MEM; + up.port.uartclk = IOC3_UARTCLK; + up.port.type = PORT_16550A; + up.port.irq = irq; + up.port.flags = (UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ); + up.port.dev = &pdev->dev; + up.port.membase = membase; + up.port.mapbase = r->start; + up.port.serial_in = ioc3_serial_in; + up.port.serial_out = ioc3_serial_out; + line = serial8250_register_8250_port(&up); + if (line < 0) + return line; + + platform_set_drvdata(pdev, data); + return 0; +} + +static int serial8250_ioc3_remove(struct platform_device *pdev) +{ + struct ioc3_8250_data *data = platform_get_drvdata(pdev); + + serial8250_unregister_port(data->line); + return 0; +} + +static struct platform_driver serial8250_ioc3_driver = { + .probe = serial8250_ioc3_probe, + .remove = serial8250_ioc3_remove, + .driver = { + .name = "ioc3-serial8250", + } +}; + +module_platform_driver(serial8250_ioc3_driver); + +MODULE_AUTHOR("Thomas Bogendoerfer "); +MODULE_DESCRIPTION("SGI IOC3 8250 UART driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c new file mode 100644 index 000000000..6dc85aaba --- /dev/null +++ b/drivers/tty/serial/8250/8250_lpc18xx.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Serial port driver for NXP LPC18xx/43xx UART + * + * Copyright (C) 2015 Joachim Eastwood + * + * Based on 8250_mtk.c: + * Copyright (c) 2014 MundoReader S.L. 
+ * Matthias Brugger + */ + +#include +#include +#include +#include +#include + +#include "8250.h" + +/* Additional LPC18xx/43xx 8250 registers and bits */ +#define LPC18XX_UART_RS485CTRL (0x04c / sizeof(u32)) +#define LPC18XX_UART_RS485CTRL_NMMEN BIT(0) +#define LPC18XX_UART_RS485CTRL_DCTRL BIT(4) +#define LPC18XX_UART_RS485CTRL_OINV BIT(5) +#define LPC18XX_UART_RS485DLY (0x054 / sizeof(u32)) +#define LPC18XX_UART_RS485DLY_MAX 255 + +struct lpc18xx_uart_data { + struct uart_8250_dma dma; + struct clk *clk_uart; + struct clk *clk_reg; + int line; +}; + +static int lpc18xx_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct uart_8250_port *up = up_to_u8250p(port); + u32 rs485_ctrl_reg = 0; + u32 rs485_dly_reg = 0; + unsigned baud_clk; + + if (rs485->flags & SER_RS485_ENABLED) { + rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_NMMEN | + LPC18XX_UART_RS485CTRL_DCTRL; + + if (rs485->flags & SER_RS485_RTS_ON_SEND) + rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_OINV; + } + + if (rs485->delay_rts_after_send) { + baud_clk = port->uartclk / up->dl_read(up); + rs485_dly_reg = DIV_ROUND_UP(rs485->delay_rts_after_send + * baud_clk, MSEC_PER_SEC); + + if (rs485_dly_reg > LPC18XX_UART_RS485DLY_MAX) + rs485_dly_reg = LPC18XX_UART_RS485DLY_MAX; + + /* Calculate the resulting delay in ms */ + rs485->delay_rts_after_send = (rs485_dly_reg * MSEC_PER_SEC) + / baud_clk; + } + + serial_out(up, LPC18XX_UART_RS485CTRL, rs485_ctrl_reg); + serial_out(up, LPC18XX_UART_RS485DLY, rs485_dly_reg); + + return 0; +} + +static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value) +{ + /* + * For DMA mode one must ensure that the UART_FCR_DMA_SELECT + * bit is set when FIFO is enabled. Even if DMA is not used + * setting this bit doesn't seem to affect anything. 
+ */ + if (offset == UART_FCR && (value & UART_FCR_ENABLE_FIFO)) + value |= UART_FCR_DMA_SELECT; + + offset = offset << p->regshift; + writel(value, p->membase + offset); +} + +static const struct serial_rs485 lpc18xx_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND, + .delay_rts_after_send = 1, + /* Delay RTS before send is not supported */ +}; + +static int lpc18xx_serial_probe(struct platform_device *pdev) +{ + struct lpc18xx_uart_data *data; + struct uart_8250_port uart; + struct resource *res; + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "memory resource not found"); + return -EINVAL; + } + + memset(&uart, 0, sizeof(uart)); + + uart.port.membase = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!uart.port.membase) + return -ENOMEM; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->clk_uart = devm_clk_get(&pdev->dev, "uartclk"); + if (IS_ERR(data->clk_uart)) { + dev_err(&pdev->dev, "uart clock not found\n"); + return PTR_ERR(data->clk_uart); + } + + data->clk_reg = devm_clk_get(&pdev->dev, "reg"); + if (IS_ERR(data->clk_reg)) { + dev_err(&pdev->dev, "reg clock not found\n"); + return PTR_ERR(data->clk_reg); + } + + ret = clk_prepare_enable(data->clk_reg); + if (ret) { + dev_err(&pdev->dev, "unable to enable reg clock\n"); + return ret; + } + + ret = clk_prepare_enable(data->clk_uart); + if (ret) { + dev_err(&pdev->dev, "unable to enable uart clock\n"); + goto dis_clk_reg; + } + + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + if (ret >= 0) + uart.port.line = ret; + + data->dma.rx_param = data; + data->dma.tx_param = data; + + spin_lock_init(&uart.port.lock); + uart.port.dev = &pdev->dev; + uart.port.irq = irq; + uart.port.iotype = UPIO_MEM32; + uart.port.mapbase = res->start; + uart.port.regshift = 2; + uart.port.type = PORT_16550A; + uart.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SKIP_TEST; + uart.port.uartclk = clk_get_rate(data->clk_uart); + uart.port.private_data = data; + uart.port.rs485_config = lpc18xx_rs485_config; + uart.port.rs485_supported = lpc18xx_rs485_supported; + uart.port.serial_out = lpc18xx_uart_serial_out; + + uart.dma = &data->dma; + uart.dma->rxconf.src_maxburst = 1; + uart.dma->txconf.dst_maxburst = 1; + + ret = serial8250_register_8250_port(&uart); + if (ret < 0) { + dev_err(&pdev->dev, "unable to register 8250 port\n"); + goto dis_uart_clk; + } + + data->line = ret; + platform_set_drvdata(pdev, data); + + return 0; + +dis_uart_clk: + clk_disable_unprepare(data->clk_uart); +dis_clk_reg: + clk_disable_unprepare(data->clk_reg); + return ret; +} + +static int lpc18xx_serial_remove(struct platform_device *pdev) +{ + struct lpc18xx_uart_data *data = platform_get_drvdata(pdev); + + serial8250_unregister_port(data->line); + clk_disable_unprepare(data->clk_uart); + clk_disable_unprepare(data->clk_reg); + + return 0; +} + +static const struct of_device_id lpc18xx_serial_match[] = { + { .compatible = "nxp,lpc1850-uart" }, + { }, +}; +MODULE_DEVICE_TABLE(of, lpc18xx_serial_match); + +static struct platform_driver lpc18xx_serial_driver = { + .probe = lpc18xx_serial_probe, + .remove = lpc18xx_serial_remove, + .driver = { + .name = "lpc18xx-uart", + .of_match_table = lpc18xx_serial_match, + }, +}; +module_platform_driver(lpc18xx_serial_driver); + +MODULE_AUTHOR("Joachim Eastwood "); +MODULE_DESCRIPTION("Serial 
port driver NXP LPC18xx/43xx devices"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c new file mode 100644 index 000000000..0e43bdfb7 --- /dev/null +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 8250_lpss.c - Driver for UART on Intel Braswell and various other Intel SoCs + * + * Copyright (C) 2016 Intel Corporation + * Author: Andy Shevchenko + */ + +#include +#include +#include +#include + +#include +#include + +#include "8250_dwlib.h" + +#define PCI_DEVICE_ID_INTEL_QRK_UARTx 0x0936 + +#define PCI_DEVICE_ID_INTEL_BYT_UART1 0x0f0a +#define PCI_DEVICE_ID_INTEL_BYT_UART2 0x0f0c + +#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a +#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c + +#define PCI_DEVICE_ID_INTEL_EHL_UART0 0x4b96 +#define PCI_DEVICE_ID_INTEL_EHL_UART1 0x4b97 +#define PCI_DEVICE_ID_INTEL_EHL_UART2 0x4b98 +#define PCI_DEVICE_ID_INTEL_EHL_UART3 0x4b99 +#define PCI_DEVICE_ID_INTEL_EHL_UART4 0x4b9a +#define PCI_DEVICE_ID_INTEL_EHL_UART5 0x4b9b + +#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3 +#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4 + +/* Intel LPSS specific registers */ + +#define BYT_PRV_CLK 0x800 +#define BYT_PRV_CLK_EN BIT(0) +#define BYT_PRV_CLK_M_VAL_SHIFT 1 +#define BYT_PRV_CLK_N_VAL_SHIFT 16 +#define BYT_PRV_CLK_UPDATE BIT(31) + +#define BYT_TX_OVF_INT 0x820 +#define BYT_TX_OVF_INT_MASK BIT(1) + +struct lpss8250; + +struct lpss8250_board { + unsigned long freq; + unsigned int base_baud; + int (*setup)(struct lpss8250 *, struct uart_port *p); + void (*exit)(struct lpss8250 *); +}; + +struct lpss8250 { + struct dw8250_port_data data; + struct lpss8250_board *board; + + /* DMA parameters */ + struct dw_dma_chip dma_chip; + struct dw_dma_slave dma_param; + u8 dma_maxburst; +}; + +static inline struct lpss8250 *to_lpss8250(struct dw8250_port_data *data) +{ + return container_of(data, struct lpss8250, data); +} + +static void byt_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud = tty_termios_baud_rate(termios); + struct lpss8250 *lpss = to_lpss8250(p->private_data); + unsigned long fref = lpss->board->freq, fuart = baud * 16; + unsigned long w = BIT(15) - 1; + unsigned long m, n; + u32 reg; + + /* Gracefully handle the B0 case: fall back to B9600 */ + fuart = fuart ? fuart : 9600 * 16; + + /* Get Fuart closer to Fref */ + fuart *= rounddown_pow_of_two(fref / fuart); + + /* + * For baud rates 0.5M, 1M, 1.5M, 2M, 2.5M, 3M, 3.5M and 4M the + * dividers must be adjusted. 
+ * + * uartclk = (m / n) * 100 MHz, where m <= n + */ + rational_best_approximation(fuart, fref, w, w, &m, &n); + p->uartclk = fuart; + + /* Reset the clock */ + reg = (m << BYT_PRV_CLK_M_VAL_SHIFT) | (n << BYT_PRV_CLK_N_VAL_SHIFT); + writel(reg, p->membase + BYT_PRV_CLK); + reg |= BYT_PRV_CLK_EN | BYT_PRV_CLK_UPDATE; + writel(reg, p->membase + BYT_PRV_CLK); + + dw8250_do_set_termios(p, termios, old); +} + +static unsigned int byt_get_mctrl(struct uart_port *port) +{ + unsigned int ret = serial8250_do_get_mctrl(port); + + /* Force DCD and DSR signals to permanently be reported as active */ + ret |= TIOCM_CAR | TIOCM_DSR; + + return ret; +} + +static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) +{ + struct dw_dma_slave *param = &lpss->dma_param; + struct pci_dev *pdev = to_pci_dev(port->dev); + struct pci_dev *dma_dev; + + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_BYT_UART1: + case PCI_DEVICE_ID_INTEL_BSW_UART1: + case PCI_DEVICE_ID_INTEL_BDW_UART1: + param->src_id = 3; + param->dst_id = 2; + break; + case PCI_DEVICE_ID_INTEL_BYT_UART2: + case PCI_DEVICE_ID_INTEL_BSW_UART2: + case PCI_DEVICE_ID_INTEL_BDW_UART2: + param->src_id = 5; + param->dst_id = 4; + break; + default: + return -EINVAL; + } + + dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); + + param->dma_dev = &dma_dev->dev; + param->m_master = 0; + param->p_master = 1; + + lpss->dma_maxburst = 16; + + port->set_termios = byt_set_termios; + port->get_mctrl = byt_get_mctrl; + + /* Disable TX counter interrupts */ + writel(BYT_TX_OVF_INT_MASK, port->membase + BYT_TX_OVF_INT); + + return 0; +} + +static void byt_serial_exit(struct lpss8250 *lpss) +{ + struct dw_dma_slave *param = &lpss->dma_param; + + /* Paired with pci_get_slot() in the byt_serial_setup() above */ + put_device(param->dma_dev); +} + +static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port) +{ + struct uart_8250_dma *dma = &lpss->data.dma; + struct uart_8250_port *up = up_to_u8250p(port); + + /* + * This simply makes the checks in the 8250_port to try the DMA + * channel request which in turn uses the magic of ACPI tables + * parsing (see drivers/dma/acpi-dma.c for the details) and + * matching with the registered General Purpose DMA controllers. 
+ */ + up->dma = dma; + + lpss->dma_maxburst = 16; + + port->set_termios = dw8250_do_set_termios; + + return 0; +} + +static void ehl_serial_exit(struct lpss8250 *lpss) +{ + struct uart_8250_port *up = serial8250_get_port(lpss->data.line); + + up->dma = NULL; +} + +#ifdef CONFIG_SERIAL_8250_DMA +static const struct dw_dma_platform_data qrk_serial_dma_pdata = { + .nr_channels = 2, + .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, + .chan_priority = CHAN_PRIORITY_ASCENDING, + .block_size = 4095, + .nr_masters = 1, + .data_width = {4}, + .multi_block = {0}, +}; + +static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) +{ + struct uart_8250_dma *dma = &lpss->data.dma; + struct dw_dma_chip *chip = &lpss->dma_chip; + struct dw_dma_slave *param = &lpss->dma_param; + struct pci_dev *pdev = to_pci_dev(port->dev); + int ret; + + chip->pdata = &qrk_serial_dma_pdata; + chip->dev = &pdev->dev; + chip->id = pdev->devfn; + chip->irq = pci_irq_vector(pdev, 0); + chip->regs = pci_ioremap_bar(pdev, 1); + if (!chip->regs) + return; + + /* Falling back to PIO mode if DMA probing fails */ + ret = dw_dma_probe(chip); + if (ret) + return; + + pci_try_set_mwi(pdev); + + /* Special DMA address for UART */ + dma->rx_dma_addr = 0xfffff000; + dma->tx_dma_addr = 0xfffff000; + + param->dma_dev = &pdev->dev; + param->src_id = 0; + param->dst_id = 1; + param->hs_polarity = true; + + lpss->dma_maxburst = 8; +} + +static void qrk_serial_exit_dma(struct lpss8250 *lpss) +{ + struct dw_dma_chip *chip = &lpss->dma_chip; + struct dw_dma_slave *param = &lpss->dma_param; + + if (!param->dma_dev) + return; + + dw_dma_remove(chip); + + pci_iounmap(to_pci_dev(chip->dev), chip->regs); +} +#else /* CONFIG_SERIAL_8250_DMA */ +static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) {} +static void qrk_serial_exit_dma(struct lpss8250 *lpss) {} +#endif /* !CONFIG_SERIAL_8250_DMA */ + +static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port) +{ + qrk_serial_setup_dma(lpss, port); + return 0; +} + +static void qrk_serial_exit(struct lpss8250 *lpss) +{ + qrk_serial_exit_dma(lpss); +} + +static bool lpss8250_dma_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_slave *dws = param; + + if (dws->dma_dev != chan->device->dev) + return false; + + chan->private = dws; + return true; +} + +static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port) +{ + struct uart_8250_dma *dma = &lpss->data.dma; + struct dw_dma_slave *rx_param, *tx_param; + struct device *dev = port->port.dev; + + if (!lpss->dma_param.dma_dev) { + dma = port->dma; + if (dma) + goto out_configuration_only; + + return 0; + } + + rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL); + if (!rx_param) + return -ENOMEM; + + tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL); + if (!tx_param) + return -ENOMEM; + + *rx_param = lpss->dma_param; + *tx_param = lpss->dma_param; + + dma->fn = lpss8250_dma_filter; + dma->rx_param = rx_param; + dma->tx_param = tx_param; + + port->dma = dma; + +out_configuration_only: + dma->rxconf.src_maxburst = lpss->dma_maxburst; + dma->txconf.dst_maxburst = lpss->dma_maxburst; + + return 0; +} + +static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct uart_8250_port uart; + struct lpss8250 *lpss; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL); + if (!lpss) + return -ENOMEM; + + ret = 
pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + lpss->board = (struct lpss8250_board *)id->driver_data; + + memset(&uart, 0, sizeof(struct uart_8250_port)); + + uart.port.dev = &pdev->dev; + uart.port.irq = pci_irq_vector(pdev, 0); + uart.port.private_data = &lpss->data; + uart.port.type = PORT_16550A; + uart.port.iotype = UPIO_MEM32; + uart.port.regshift = 2; + uart.port.uartclk = lpss->board->base_baud * 16; + uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE; + uart.capabilities = UART_CAP_FIFO | UART_CAP_AFE; + uart.port.mapbase = pci_resource_start(pdev, 0); + uart.port.membase = pcim_iomap(pdev, 0, 0); + if (!uart.port.membase) + return -ENOMEM; + + ret = lpss->board->setup(lpss, &uart.port); + if (ret) + return ret; + + dw8250_setup_port(&uart.port); + + ret = lpss8250_dma_setup(lpss, &uart); + if (ret) + goto err_exit; + + ret = serial8250_register_8250_port(&uart); + if (ret < 0) + goto err_exit; + + lpss->data.line = ret; + + pci_set_drvdata(pdev, lpss); + return 0; + +err_exit: + lpss->board->exit(lpss); + pci_free_irq_vectors(pdev); + return ret; +} + +static void lpss8250_remove(struct pci_dev *pdev) +{ + struct lpss8250 *lpss = pci_get_drvdata(pdev); + + serial8250_unregister_port(lpss->data.line); + + lpss->board->exit(lpss); + pci_free_irq_vectors(pdev); +} + +static const struct lpss8250_board byt_board = { + .freq = 100000000, + .base_baud = 2764800, + .setup = byt_serial_setup, + .exit = byt_serial_exit, +}; + +static const struct lpss8250_board ehl_board = { + .freq = 200000000, + .base_baud = 12500000, + .setup = ehl_serial_setup, + .exit = ehl_serial_exit, +}; + +static const struct lpss8250_board qrk_board = { + .freq = 44236800, + .base_baud = 2764800, + .setup = qrk_serial_setup, + .exit = qrk_serial_exit, +}; + +static const struct pci_device_id pci_ids[] = { + { PCI_DEVICE_DATA(INTEL, QRK_UARTx, &qrk_board) }, + { PCI_DEVICE_DATA(INTEL, EHL_UART0, &ehl_board) }, + { PCI_DEVICE_DATA(INTEL, EHL_UART1, &ehl_board) }, + { PCI_DEVICE_DATA(INTEL, EHL_UART2, &ehl_board) }, + { PCI_DEVICE_DATA(INTEL, EHL_UART3, &ehl_board) }, + { PCI_DEVICE_DATA(INTEL, EHL_UART4, &ehl_board) }, + { PCI_DEVICE_DATA(INTEL, EHL_UART5, &ehl_board) }, + { PCI_DEVICE_DATA(INTEL, BYT_UART1, &byt_board) }, + { PCI_DEVICE_DATA(INTEL, BYT_UART2, &byt_board) }, + { PCI_DEVICE_DATA(INTEL, BSW_UART1, &byt_board) }, + { PCI_DEVICE_DATA(INTEL, BSW_UART2, &byt_board) }, + { PCI_DEVICE_DATA(INTEL, BDW_UART1, &byt_board) }, + { PCI_DEVICE_DATA(INTEL, BDW_UART2, &byt_board) }, + { } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver lpss8250_pci_driver = { + .name = "8250_lpss", + .id_table = pci_ids, + .probe = lpss8250_probe, + .remove = lpss8250_remove, +}; + +module_pci_driver(lpss8250_pci_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel LPSS UART driver"); diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c new file mode 100644 index 000000000..f46ca13ff --- /dev/null +++ b/drivers/tty/serial/8250/8250_men_mcb.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +#define MEN_UART_ID_Z025 0x19 +#define MEN_UART_ID_Z057 0x39 +#define MEN_UART_ID_Z125 0x7d + +#define MEN_UART_MEM_SIZE 0x10 + +struct serial_8250_men_mcb_data { + struct uart_8250_port uart; + int line; +}; + +/* + * The Z125 16550-compatible UART has no fixed base clock assigned + * So, 
depending on the board we're on, we need to adjust the + * parameter in order to really set the correct baudrate, and + * do so if possible without user interaction + */ +static u32 men_lookup_uartclk(struct mcb_device *mdev) +{ + /* use default value if board is not available below */ + u32 clkval = 1041666; + + dev_info(&mdev->dev, "%s on board %s\n", + dev_name(&mdev->dev), + mdev->bus->name); + if (strncmp(mdev->bus->name, "F075", 4) == 0) + clkval = 1041666; + else if (strncmp(mdev->bus->name, "F216", 4) == 0) + clkval = 1843200; + else if (strncmp(mdev->bus->name, "G215", 4) == 0) + clkval = 1843200; + else if (strncmp(mdev->bus->name, "F210", 4) == 0) + clkval = 115200; + else + dev_info(&mdev->dev, + "board not detected, using default uartclk\n"); + + clkval = clkval << 4; + + return clkval; +} + +static int get_num_ports(struct mcb_device *mdev, + void __iomem *membase) +{ + switch (mdev->id) { + case MEN_UART_ID_Z125: + return 1U; + case MEN_UART_ID_Z025: + return readb(membase) >> 4; + case MEN_UART_ID_Z057: + return 4U; + default: + dev_err(&mdev->dev, "no supported device!\n"); + return -ENODEV; + } +} + +static int serial_8250_men_mcb_probe(struct mcb_device *mdev, + const struct mcb_device_id *id) +{ + struct serial_8250_men_mcb_data *data; + struct resource *mem; + int num_ports; + int i; + void __iomem *membase; + + mem = mcb_get_resource(mdev, IORESOURCE_MEM); + if (mem == NULL) + return -ENXIO; + membase = devm_ioremap_resource(&mdev->dev, mem); + if (IS_ERR(membase)) + return PTR_ERR_OR_ZERO(membase); + + num_ports = get_num_ports(mdev, membase); + + dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n", + mdev->id, num_ports); + + if (num_ports <= 0 || num_ports > 4) { + dev_err(&mdev->dev, "unexpected number of ports: %u\n", + num_ports); + return -ENODEV; + } + + data = devm_kcalloc(&mdev->dev, num_ports, + sizeof(struct serial_8250_men_mcb_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + mcb_set_drvdata(mdev, data); + + for (i = 0; i < num_ports; i++) { + data[i].uart.port.dev = mdev->dma_dev; + spin_lock_init(&data[i].uart.port.lock); + + data[i].uart.port.type = PORT_16550; + data[i].uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ + | UPF_FIXED_TYPE; + data[i].uart.port.iotype = UPIO_MEM; + data[i].uart.port.uartclk = men_lookup_uartclk(mdev); + data[i].uart.port.regshift = 0; + data[i].uart.port.irq = mcb_get_irq(mdev); + data[i].uart.port.membase = membase; + data[i].uart.port.fifosize = 60; + data[i].uart.port.mapbase = (unsigned long) mem->start + + i * MEN_UART_MEM_SIZE; + data[i].uart.port.iobase = data[i].uart.port.mapbase; + + /* ok, register the port */ + data[i].line = serial8250_register_8250_port(&data[i].uart); + if (data[i].line < 0) { + dev_err(&mdev->dev, "unable to register UART port\n"); + return data[i].line; + } + dev_info(&mdev->dev, "found MCB UART: ttyS%d\n", data[i].line); + } + + return 0; +} + +static void serial_8250_men_mcb_remove(struct mcb_device *mdev) +{ + int num_ports, i; + struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev); + + if (!data) + return; + + num_ports = get_num_ports(mdev, data[0].uart.port.membase); + if (num_ports <= 0 || num_ports > 4) { + dev_err(&mdev->dev, "error retrieving number of ports!\n"); + return; + } + + for (i = 0; i < num_ports; i++) + serial8250_unregister_port(data[i].line); +} + +static const struct mcb_device_id serial_8250_men_mcb_ids[] = { + { .device = MEN_UART_ID_Z025 }, + { .device = MEN_UART_ID_Z057 }, + { .device = MEN_UART_ID_Z125 }, + { } +}; +MODULE_DEVICE_TABLE(mcb, 
serial_8250_men_mcb_ids); + +static struct mcb_driver mcb_driver = { + .driver = { + .name = "8250_men_mcb", + .owner = THIS_MODULE, + }, + .probe = serial_8250_men_mcb_probe, + .remove = serial_8250_men_mcb_remove, + .id_table = serial_8250_men_mcb_ids, +}; +module_mcb_driver(mcb_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MEN 8250 UART driver"); +MODULE_AUTHOR("Michael Moese "); diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c new file mode 100644 --- /dev/null +++ b/drivers/tty/serial/8250/8250_mid.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 8250_mid.c - Driver for UART on Intel Penwell and various other Intel SoCs + * + * Copyright (C) 2015 Intel Corporation + * Author: Heikki Krogerus + */ + +#include +#include +#include +#include + +#include +#include + +#include "8250.h" + +#define PCI_DEVICE_ID_INTEL_PNW_UART1 0x081b +#define PCI_DEVICE_ID_INTEL_PNW_UART2 0x081c +#define PCI_DEVICE_ID_INTEL_PNW_UART3 0x081d +#define PCI_DEVICE_ID_INTEL_TNG_UART 0x1191 +#define PCI_DEVICE_ID_INTEL_CDF_UART 0x18d8 +#define PCI_DEVICE_ID_INTEL_DNV_UART 0x19d8 + +/* Intel MID Specific registers */ +#define INTEL_MID_UART_FISR 0x08 +#define INTEL_MID_UART_PS 0x30 +#define INTEL_MID_UART_MUL 0x34 +#define INTEL_MID_UART_DIV 0x38 + +struct mid8250; + +struct mid8250_board { + unsigned int flags; + unsigned long freq; + unsigned int base_baud; + int (*setup)(struct mid8250 *, struct uart_port *p); + void (*exit)(struct mid8250 *); +}; + +struct mid8250 { + int line; + int dma_index; + struct pci_dev *dma_dev; + struct uart_8250_dma dma; + struct mid8250_board *board; + struct hsu_dma_chip dma_chip; +}; + +/*****************************************************************************/ + +static int pnw_setup(struct mid8250 *mid, struct uart_port *p) +{ + struct pci_dev *pdev = to_pci_dev(p->dev); + + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_PNW_UART1: + mid->dma_index = 0; + break; + case PCI_DEVICE_ID_INTEL_PNW_UART2: + mid->dma_index = 1; + break; + case PCI_DEVICE_ID_INTEL_PNW_UART3: + mid->dma_index = 2; + break; + default: + return -EINVAL; + } + + mid->dma_dev = pci_get_slot(pdev->bus, + PCI_DEVFN(PCI_SLOT(pdev->devfn), 3)); + return 0; +} + +static void pnw_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + +static int tng_handle_irq(struct uart_port *p) +{ + struct mid8250 *mid = p->private_data; + struct uart_8250_port *up = up_to_u8250p(p); + struct hsu_dma_chip *chip; + u32 status; + int ret = 0; + int err; + + chip = pci_get_drvdata(mid->dma_dev); + + /* Rx DMA */ + err = hsu_dma_get_status(chip, mid->dma_index * 2 + 1, &status); + if (err > 0) { + serial8250_rx_dma_flush(up); + ret |= 1; + } else if (err == 0) + ret |= hsu_dma_do_irq(chip, mid->dma_index * 2 + 1, status); + + /* Tx DMA */ + err = hsu_dma_get_status(chip, mid->dma_index * 2, &status); + if (err > 0) + ret |= 1; + else if (err == 0) + ret |= hsu_dma_do_irq(chip, mid->dma_index * 2, status); + + /* UART */ + ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR)); + return IRQ_RETVAL(ret); +} + +static int tng_setup(struct mid8250 *mid, struct uart_port *p) +{ + struct pci_dev *pdev = to_pci_dev(p->dev); + int index = PCI_FUNC(pdev->devfn); + + /* + * Device 0000:00:04.0 is not a real HSU port. It provides a global + * register set for all HSU ports, although it has the same PCI ID. + * Skip it here.
+ */ + if (index-- == 0) + return -ENODEV; + + mid->dma_index = index; + mid->dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(5, 0)); + + p->handle_irq = tng_handle_irq; + return 0; +} + +static void tng_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + +static int dnv_handle_irq(struct uart_port *p) +{ + struct mid8250 *mid = p->private_data; + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int fisr = serial_port_in(p, INTEL_MID_UART_FISR); + u32 status; + int ret = 0; + int err; + + if (fisr & BIT(2)) { + err = hsu_dma_get_status(&mid->dma_chip, 1, &status); + if (err > 0) { + serial8250_rx_dma_flush(up); + ret |= 1; + } else if (err == 0) + ret |= hsu_dma_do_irq(&mid->dma_chip, 1, status); + } + if (fisr & BIT(1)) { + err = hsu_dma_get_status(&mid->dma_chip, 0, &status); + if (err > 0) + ret |= 1; + else if (err == 0) + ret |= hsu_dma_do_irq(&mid->dma_chip, 0, status); + } + if (fisr & BIT(0)) + ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR)); + return IRQ_RETVAL(ret); +} + +#define DNV_DMA_CHAN_OFFSET 0x80 + +static int dnv_setup(struct mid8250 *mid, struct uart_port *p) +{ + struct hsu_dma_chip *chip = &mid->dma_chip; + struct pci_dev *pdev = to_pci_dev(p->dev); + unsigned int bar = FL_GET_BASE(mid->board->flags); + int ret; + + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + p->irq = pci_irq_vector(pdev, 0); + + chip->dev = &pdev->dev; + chip->irq = pci_irq_vector(pdev, 0); + chip->regs = p->membase; + chip->length = pci_resource_len(pdev, bar); + chip->offset = DNV_DMA_CHAN_OFFSET; + + /* Falling back to PIO mode if DMA probing fails */ + ret = hsu_dma_probe(chip); + if (ret) + return 0; + + mid->dma_dev = pdev; + + p->handle_irq = dnv_handle_irq; + return 0; +} + +static void dnv_exit(struct mid8250 *mid) +{ + if (!mid->dma_dev) + return; + hsu_dma_remove(&mid->dma_chip); +} + +/*****************************************************************************/ + +static void mid8250_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud = tty_termios_baud_rate(termios); + struct mid8250 *mid = p->private_data; + unsigned short ps = 16; + unsigned long fuart = baud * ps; + unsigned long w = BIT(24) - 1; + unsigned long mul, div; + + /* Gracefully handle the B0 case: fall back to B9600 */ + fuart = fuart ? 
fuart : 9600 * 16; + + if (mid->board->freq < fuart) { + /* Find prescaler value that satisfies Fuart < Fref */ + if (mid->board->freq > baud) + ps = mid->board->freq / baud; /* baud rate too high */ + else + ps = 1; /* PLL case */ + fuart = baud * ps; + } else { + /* Get Fuart closer to Fref */ + fuart *= rounddown_pow_of_two(mid->board->freq / fuart); + } + + rational_best_approximation(fuart, mid->board->freq, w, w, &mul, &div); + p->uartclk = fuart * 16 / ps; /* core uses ps = 16 always */ + + writel(ps, p->membase + INTEL_MID_UART_PS); /* set PS */ + writel(mul, p->membase + INTEL_MID_UART_MUL); /* set MUL */ + writel(div, p->membase + INTEL_MID_UART_DIV); + + serial8250_do_set_termios(p, termios, old); +} + +static bool mid8250_dma_filter(struct dma_chan *chan, void *param) +{ + struct hsu_dma_slave *s = param; + + if (s->dma_dev != chan->device->dev || s->chan_id != chan->chan_id) + return false; + + chan->private = s; + return true; +} + +static int mid8250_dma_setup(struct mid8250 *mid, struct uart_8250_port *port) +{ + struct uart_8250_dma *dma = &mid->dma; + struct device *dev = port->port.dev; + struct hsu_dma_slave *rx_param; + struct hsu_dma_slave *tx_param; + + if (!mid->dma_dev) + return 0; + + rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL); + if (!rx_param) + return -ENOMEM; + + tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL); + if (!tx_param) + return -ENOMEM; + + rx_param->chan_id = mid->dma_index * 2 + 1; + tx_param->chan_id = mid->dma_index * 2; + + dma->rxconf.src_maxburst = 64; + dma->txconf.dst_maxburst = 64; + + rx_param->dma_dev = &mid->dma_dev->dev; + tx_param->dma_dev = &mid->dma_dev->dev; + + dma->fn = mid8250_dma_filter; + dma->rx_param = rx_param; + dma->tx_param = tx_param; + + port->dma = dma; + return 0; +} + +static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct uart_8250_port uart; + struct mid8250 *mid; + unsigned int bar; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + mid = devm_kzalloc(&pdev->dev, sizeof(*mid), GFP_KERNEL); + if (!mid) + return -ENOMEM; + + mid->board = (struct mid8250_board *)id->driver_data; + bar = FL_GET_BASE(mid->board->flags); + + memset(&uart, 0, sizeof(struct uart_8250_port)); + + uart.port.dev = &pdev->dev; + uart.port.irq = pdev->irq; + uart.port.private_data = mid; + uart.port.type = PORT_16750; + uart.port.iotype = UPIO_MEM; + uart.port.uartclk = mid->board->base_baud * 16; + uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE; + uart.port.set_termios = mid8250_set_termios; + + uart.port.mapbase = pci_resource_start(pdev, bar); + uart.port.membase = pcim_iomap(pdev, bar, 0); + if (!uart.port.membase) + return -ENOMEM; + + ret = mid->board->setup(mid, &uart.port); + if (ret) + return ret; + + ret = mid8250_dma_setup(mid, &uart); + if (ret) + goto err; + + ret = serial8250_register_8250_port(&uart); + if (ret < 0) + goto err; + + mid->line = ret; + + pci_set_drvdata(pdev, mid); + return 0; + +err: + mid->board->exit(mid); + return ret; +} + +static void mid8250_remove(struct pci_dev *pdev) +{ + struct mid8250 *mid = pci_get_drvdata(pdev); + + serial8250_unregister_port(mid->line); + + mid->board->exit(mid); +} + +static const struct mid8250_board pnw_board = { + .flags = FL_BASE0, + .freq = 50000000, + .base_baud = 115200, + .setup = pnw_setup, + .exit = pnw_exit, +}; + +static const struct mid8250_board tng_board = { + .flags = FL_BASE0, + .freq = 38400000, + .base_baud = 1843200, + .setup = tng_setup, + .exit = 
tng_exit, +}; + +static const struct mid8250_board dnv_board = { + .flags = FL_BASE1, + .freq = 133333333, + .base_baud = 115200, + .setup = dnv_setup, + .exit = dnv_exit, +}; + +static const struct pci_device_id pci_ids[] = { + { PCI_DEVICE_DATA(INTEL, PNW_UART1, &pnw_board) }, + { PCI_DEVICE_DATA(INTEL, PNW_UART2, &pnw_board) }, + { PCI_DEVICE_DATA(INTEL, PNW_UART3, &pnw_board) }, + { PCI_DEVICE_DATA(INTEL, TNG_UART, &tng_board) }, + { PCI_DEVICE_DATA(INTEL, CDF_UART, &dnv_board) }, + { PCI_DEVICE_DATA(INTEL, DNV_UART, &dnv_board) }, + { } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver mid8250_pci_driver = { + .name = "8250_mid", + .id_table = pci_ids, + .probe = mid8250_probe, + .remove = mid8250_remove, +}; + +module_pci_driver(mid8250_pci_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel MID UART driver"); diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c new file mode 100644 index 000000000..fb1d5ec09 --- /dev/null +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -0,0 +1,698 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Mediatek 8250 driver. + * + * Copyright (c) 2014 MundoReader S.L. + * Author: Matthias Brugger + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +#define MTK_UART_HIGHS 0x09 /* Highspeed register */ +#define MTK_UART_SAMPLE_COUNT 0x0a /* Sample count register */ +#define MTK_UART_SAMPLE_POINT 0x0b /* Sample point register */ +#define MTK_UART_RATE_FIX 0x0d /* UART Rate Fix Register */ +#define MTK_UART_ESCAPE_DAT 0x10 /* Escape Character register */ +#define MTK_UART_ESCAPE_EN 0x11 /* Escape Enable register */ +#define MTK_UART_DMA_EN 0x13 /* DMA Enable register */ +#define MTK_UART_RXTRI_AD 0x14 /* RX Trigger address */ +#define MTK_UART_FRACDIV_L 0x15 /* Fractional divider LSB address */ +#define MTK_UART_FRACDIV_M 0x16 /* Fractional divider MSB address */ +#define MTK_UART_DEBUG0 0x18 +#define MTK_UART_IER_XOFFI 0x20 /* Enable XOFF character interrupt */ +#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */ +#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */ + +#define MTK_UART_EFR 38 /* I/O: Extended Features Register */ +#define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */ +#define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */ +#define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */ +#define MTK_UART_EFR_NO_SW_FC 0x0 /* no sw flow control */ +#define MTK_UART_EFR_XON1_XOFF1 0xa /* XON1/XOFF1 as sw flow control */ +#define MTK_UART_EFR_XON2_XOFF2 0x5 /* XON2/XOFF2 as sw flow control */ +#define MTK_UART_EFR_SW_FC_MASK 0xf /* Enable CTS Modem status interrupt */ +#define MTK_UART_EFR_HW_FC (MTK_UART_EFR_RTS | MTK_UART_EFR_CTS) +#define MTK_UART_DMA_EN_TX 0x2 +#define MTK_UART_DMA_EN_RX 0x5 + +#define MTK_UART_ESCAPE_CHAR 0x77 /* Escape char added under sw fc */ +#define MTK_UART_RX_SIZE 0x8000 +#define MTK_UART_TX_TRIGGER 1 +#define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE + +#define MTK_UART_XON1 40 /* I/O: Xon character 1 */ +#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */ + +#ifdef CONFIG_SERIAL_8250_DMA +enum dma_rx_status { + DMA_RX_START = 0, + DMA_RX_RUNNING = 1, + DMA_RX_SHUTDOWN = 2, +}; +#endif + +struct mtk8250_data { + int line; + unsigned int rx_pos; + unsigned int clk_count; + struct clk *uart_clk; + struct clk *bus_clk; + struct uart_8250_dma *dma; +#ifdef 
CONFIG_SERIAL_8250_DMA + enum dma_rx_status rx_status; +#endif + int rx_wakeup_irq; +}; + +/* flow control mode */ +enum { + MTK_UART_FC_NONE, + MTK_UART_FC_SW, + MTK_UART_FC_HW, +}; + +#ifdef CONFIG_SERIAL_8250_DMA +static void mtk8250_rx_dma(struct uart_8250_port *up); + +static void mtk8250_dma_rx_complete(void *param) +{ + struct uart_8250_port *up = param; + struct uart_8250_dma *dma = up->dma; + struct mtk8250_data *data = up->port.private_data; + struct tty_port *tty_port = &up->port.state->port; + struct dma_tx_state state; + int copied, total, cnt; + unsigned char *ptr; + unsigned long flags; + + if (data->rx_status == DMA_RX_SHUTDOWN) + return; + + spin_lock_irqsave(&up->port.lock, flags); + + dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + total = dma->rx_size - state.residue; + cnt = total; + + if ((data->rx_pos + cnt) > dma->rx_size) + cnt = dma->rx_size - data->rx_pos; + + ptr = (unsigned char *)(data->rx_pos + dma->rx_buf); + copied = tty_insert_flip_string(tty_port, ptr, cnt); + data->rx_pos += cnt; + + if (total > cnt) { + ptr = (unsigned char *)(dma->rx_buf); + cnt = total - cnt; + copied += tty_insert_flip_string(tty_port, ptr, cnt); + data->rx_pos = cnt; + } + + up->port.icount.rx += copied; + + tty_flip_buffer_push(tty_port); + + mtk8250_rx_dma(up); + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static void mtk8250_rx_dma(struct uart_8250_port *up) +{ + struct uart_8250_dma *dma = up->dma; + struct dma_async_tx_descriptor *desc; + + desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr, + dma->rx_size, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + pr_err("failed to prepare rx slave single\n"); + return; + } + + desc->callback = mtk8250_dma_rx_complete; + desc->callback_param = up; + + dma->rx_cookie = dmaengine_submit(desc); + + dma_async_issue_pending(dma->rxchan); +} + +static void mtk8250_dma_enable(struct uart_8250_port *up) +{ + struct uart_8250_dma *dma = up->dma; + struct mtk8250_data *data = up->port.private_data; + int lcr = serial_in(up, UART_LCR); + + if (data->rx_status != DMA_RX_START) + return; + + dma->rxconf.src_port_window_size = dma->rx_size; + dma->rxconf.src_addr = dma->rx_addr; + + dma->txconf.dst_port_window_size = UART_XMIT_SIZE; + dma->txconf.dst_addr = dma->tx_addr; + + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | + UART_FCR_CLEAR_XMIT); + serial_out(up, MTK_UART_DMA_EN, + MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, MTK_UART_EFR, UART_EFR_ECB); + serial_out(up, UART_LCR, lcr); + + if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0) + pr_err("failed to configure rx dma channel\n"); + if (dmaengine_slave_config(dma->txchan, &dma->txconf) != 0) + pr_err("failed to configure tx dma channel\n"); + + data->rx_status = DMA_RX_RUNNING; + data->rx_pos = 0; + mtk8250_rx_dma(up); +} +#endif + +static int mtk8250_startup(struct uart_port *port) +{ +#ifdef CONFIG_SERIAL_8250_DMA + struct uart_8250_port *up = up_to_u8250p(port); + struct mtk8250_data *data = port->private_data; + + /* disable DMA for console */ + if (uart_console(port)) + up->dma = NULL; + + if (up->dma) { + data->rx_status = DMA_RX_START; + uart_circ_clear(&port->state->xmit); + } +#endif + memset(&port->icount, 0, sizeof(port->icount)); + + return serial8250_do_startup(port); +} + +static void mtk8250_shutdown(struct uart_port *port) +{ +#ifdef CONFIG_SERIAL_8250_DMA + struct uart_8250_port *up = up_to_u8250p(port); + struct mtk8250_data 
*data = port->private_data; + + if (up->dma) + data->rx_status = DMA_RX_SHUTDOWN; +#endif + + return serial8250_do_shutdown(port); +} + +static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask) +{ + serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask)); +} + +static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask) +{ + serial_out(up, UART_IER, serial_in(up, UART_IER) | mask); +} + +static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) +{ + struct uart_port *port = &up->port; + int lcr = serial_in(up, UART_LCR); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, MTK_UART_EFR, UART_EFR_ECB); + serial_out(up, UART_LCR, lcr); + lcr = serial_in(up, UART_LCR); + + switch (mode) { + case MTK_UART_FC_NONE: + serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR); + serial_out(up, MTK_UART_ESCAPE_EN, 0x00); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) & + (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))); + serial_out(up, UART_LCR, lcr); + mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI | + MTK_UART_IER_RTSI | MTK_UART_IER_CTSI); + break; + + case MTK_UART_FC_HW: + serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR); + serial_out(up, MTK_UART_ESCAPE_EN, 0x00); + serial_out(up, UART_MCR, UART_MCR_RTS); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + /*enable hw flow control*/ + serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC | + (serial_in(up, MTK_UART_EFR) & + (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)))); + + serial_out(up, UART_LCR, lcr); + mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI); + mtk8250_enable_intrs(up, MTK_UART_IER_CTSI | MTK_UART_IER_RTSI); + break; + + case MTK_UART_FC_SW: /*MTK software flow control */ + serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR); + serial_out(up, MTK_UART_ESCAPE_EN, 0x01); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + /*enable sw flow control */ + serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 | + (serial_in(up, MTK_UART_EFR) & + (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)))); + + serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty)); + serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty)); + serial_out(up, UART_LCR, lcr); + mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI); + mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI); + break; + default: + break; + } +} + +static void +mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + static const unsigned short fraction_L_mapping[] = { + 0, 1, 0x5, 0x15, 0x55, 0x57, 0x57, 0x77, 0x7F, 0xFF, 0xFF + }; + static const unsigned short fraction_M_mapping[] = { + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3 + }; + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int baud, quot, fraction; + unsigned long flags; + int mode; + +#ifdef CONFIG_SERIAL_8250_DMA + if (up->dma) { + if (uart_console(port)) { + devm_kfree(up->port.dev, up->dma); + up->dma = NULL; + } else { + mtk8250_dma_enable(up); + } + } +#endif + + /* + * Store the requested baud rate before calling the generic 8250 + * set_termios method. Standard 8250 port expects bauds to be + * no higher than (uartclk / 16) so the baud will be clamped if it + * gets out of that bound. Mediatek 8250 port supports speed + * higher than that, therefore we'll get original baud rate back + * after calling the generic set_termios method and recalculate + * the speed later in this method. 
+ */ + baud = tty_termios_baud_rate(termios); + + serial8250_do_set_termios(port, termios, NULL); + + tty_termios_encode_baud_rate(termios, baud, baud); + + /* + * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS) + * + * We need to recalcualte the quot register, as the claculation depends + * on the vaule in the highspeed register. + * + * Some baudrates are not supported by the chip, so we use the next + * lower rate supported and update termios c_flag. + * + * If highspeed register is set to 3, we need to specify sample count + * and sample point to increase accuracy. If not, we reset the + * registers to their default values. + */ + baud = uart_get_baud_rate(port, termios, old, + port->uartclk / 16 / UART_DIV_MAX, + port->uartclk); + + if (baud < 115200) { + serial_port_out(port, MTK_UART_HIGHS, 0x0); + quot = uart_get_divisor(port, baud); + } else { + serial_port_out(port, MTK_UART_HIGHS, 0x3); + quot = DIV_ROUND_UP(port->uartclk, 256 * baud); + } + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* set DLAB we have cval saved in up->lcr from the call to the core */ + serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); + serial_dl_write(up, quot); + + /* reset DLAB */ + serial_port_out(port, UART_LCR, up->lcr); + + if (baud >= 115200) { + unsigned int tmp; + + tmp = (port->uartclk / (baud * quot)) - 1; + serial_port_out(port, MTK_UART_SAMPLE_COUNT, tmp); + serial_port_out(port, MTK_UART_SAMPLE_POINT, + (tmp >> 1) - 1); + + /*count fraction to set fractoin register */ + fraction = ((port->uartclk * 100) / baud / quot) % 100; + fraction = DIV_ROUND_CLOSEST(fraction, 10); + serial_port_out(port, MTK_UART_FRACDIV_L, + fraction_L_mapping[fraction]); + serial_port_out(port, MTK_UART_FRACDIV_M, + fraction_M_mapping[fraction]); + } else { + serial_port_out(port, MTK_UART_SAMPLE_COUNT, 0x00); + serial_port_out(port, MTK_UART_SAMPLE_POINT, 0xff); + serial_port_out(port, MTK_UART_FRACDIV_L, 0x00); + serial_port_out(port, MTK_UART_FRACDIV_M, 0x00); + } + + if ((termios->c_cflag & CRTSCTS) && (!(termios->c_iflag & CRTSCTS))) + mode = MTK_UART_FC_HW; + else if (termios->c_iflag & CRTSCTS) + mode = MTK_UART_FC_SW; + else + mode = MTK_UART_FC_NONE; + + mtk8250_set_flow_ctrl(up, mode); + + if (uart_console(port)) + up->port.cons->cflag = termios->c_cflag; + + spin_unlock_irqrestore(&port->lock, flags); + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static int __maybe_unused mtk8250_runtime_suspend(struct device *dev) +{ + struct mtk8250_data *data = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(data->line); + + /* wait until UART in idle status */ + while + (serial_in(up, MTK_UART_DEBUG0)); + + if (data->clk_count == 0U) { + dev_dbg(dev, "%s clock count is 0\n", __func__); + } else { + clk_disable_unprepare(data->bus_clk); + data->clk_count--; + } + + return 0; +} + +static int __maybe_unused mtk8250_runtime_resume(struct device *dev) +{ + struct mtk8250_data *data = dev_get_drvdata(dev); + int err; + + if (data->clk_count > 0U) { + dev_dbg(dev, "%s clock count is %d\n", __func__, + data->clk_count); + } else { + err = clk_prepare_enable(data->bus_clk); + if (err) { + dev_warn(dev, "Can't enable bus clock\n"); + return err; + } + data->clk_count++; + } + + return 0; +} + +static void 
+mtk8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +{ + if (!state) + if (!mtk8250_runtime_resume(port->dev)) + pm_runtime_get_sync(port->dev); + + serial8250_do_pm(port, state, old); + + if (state) + if (!pm_runtime_put_sync_suspend(port->dev)) + mtk8250_runtime_suspend(port->dev); +} + +#ifdef CONFIG_SERIAL_8250_DMA +static bool mtk8250_dma_filter(struct dma_chan *chan, void *param) +{ + return false; +} +#endif + +static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, + struct mtk8250_data *data) +{ +#ifdef CONFIG_SERIAL_8250_DMA + int dmacnt; +#endif + + data->uart_clk = devm_clk_get(&pdev->dev, "baud"); + if (IS_ERR(data->uart_clk)) { + /* + * For compatibility with older device trees try unnamed + * clk when no baud clk can be found. + */ + data->uart_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(data->uart_clk)) { + dev_warn(&pdev->dev, "Can't get uart clock\n"); + return PTR_ERR(data->uart_clk); + } + + return 0; + } + + data->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (IS_ERR(data->bus_clk)) + return PTR_ERR(data->bus_clk); + + data->dma = NULL; +#ifdef CONFIG_SERIAL_8250_DMA + dmacnt = of_property_count_strings(pdev->dev.of_node, "dma-names"); + if (dmacnt == 2) { + data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), + GFP_KERNEL); + if (!data->dma) + return -ENOMEM; + + data->dma->fn = mtk8250_dma_filter; + data->dma->rx_size = MTK_UART_RX_SIZE; + data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; + data->dma->txconf.dst_maxburst = MTK_UART_TX_TRIGGER; + } +#endif + + return 0; +} + +static int mtk8250_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct mtk8250_data *data; + struct resource *regs; + int irq, err; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "no registers defined\n"); + return -EINVAL; + } + + uart.port.membase = devm_ioremap(&pdev->dev, regs->start, + resource_size(regs)); + if (!uart.port.membase) + return -ENOMEM; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->clk_count = 0; + + if (pdev->dev.of_node) { + err = mtk8250_probe_of(pdev, &uart.port, data); + if (err) + return err; + } else + return -ENODEV; + + spin_lock_init(&uart.port.lock); + uart.port.mapbase = regs->start; + uart.port.irq = irq; + uart.port.pm = mtk8250_do_pm; + uart.port.type = PORT_16550; + uart.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; + uart.port.dev = &pdev->dev; + uart.port.iotype = UPIO_MEM32; + uart.port.regshift = 2; + uart.port.private_data = data; + uart.port.shutdown = mtk8250_shutdown; + uart.port.startup = mtk8250_startup; + uart.port.set_termios = mtk8250_set_termios; + uart.port.uartclk = clk_get_rate(data->uart_clk); +#ifdef CONFIG_SERIAL_8250_DMA + if (data->dma) + uart.dma = data->dma; +#endif + + /* Disable Rate Fix function */ + writel(0x0, uart.port.membase + + (MTK_UART_RATE_FIX << uart.port.regshift)); + + platform_set_drvdata(pdev, data); + + pm_runtime_enable(&pdev->dev); + err = mtk8250_runtime_resume(&pdev->dev); + if (err) + goto err_pm_disable; + + data->line = serial8250_register_8250_port(&uart); + if (data->line < 0) { + err = data->line; + goto err_pm_disable; + } + + data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1); + + return 0; + +err_pm_disable: + pm_runtime_disable(&pdev->dev); + + return err; +} + +static int mtk8250_remove(struct platform_device *pdev) +{ + struct 
mtk8250_data *data = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + serial8250_unregister_port(data->line); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + if (!pm_runtime_status_suspended(&pdev->dev)) + mtk8250_runtime_suspend(&pdev->dev); + + return 0; +} + +static int __maybe_unused mtk8250_suspend(struct device *dev) +{ + struct mtk8250_data *data = dev_get_drvdata(dev); + int irq = data->rx_wakeup_irq; + int err; + + serial8250_suspend_port(data->line); + + pinctrl_pm_select_sleep_state(dev); + if (irq >= 0) { + err = enable_irq_wake(irq); + if (err) { + dev_err(dev, + "failed to enable irq wake on IRQ %d: %d\n", + irq, err); + pinctrl_pm_select_default_state(dev); + serial8250_resume_port(data->line); + return err; + } + } + + return 0; +} + +static int __maybe_unused mtk8250_resume(struct device *dev) +{ + struct mtk8250_data *data = dev_get_drvdata(dev); + int irq = data->rx_wakeup_irq; + + if (irq >= 0) + disable_irq_wake(irq); + pinctrl_pm_select_default_state(dev); + + serial8250_resume_port(data->line); + + return 0; +} + +static const struct dev_pm_ops mtk8250_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mtk8250_suspend, mtk8250_resume) + SET_RUNTIME_PM_OPS(mtk8250_runtime_suspend, mtk8250_runtime_resume, + NULL) +}; + +static const struct of_device_id mtk8250_of_match[] = { + { .compatible = "mediatek,mt6577-uart" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk8250_of_match); + +static struct platform_driver mtk8250_platform_driver = { + .driver = { + .name = "mt6577-uart", + .pm = &mtk8250_pm_ops, + .of_match_table = mtk8250_of_match, + }, + .probe = mtk8250_probe, + .remove = mtk8250_remove, +}; +module_platform_driver(mtk8250_platform_driver); + +#ifdef CONFIG_SERIAL_8250_CONSOLE +static int __init early_mtk8250_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + device->port.iotype = UPIO_MEM32; + device->port.regshift = 2; + + return early_serial8250_setup(device, NULL); +} + +OF_EARLYCON_DECLARE(mtk8250, "mediatek,mt6577-uart", early_mtk8250_setup); +#endif + +MODULE_AUTHOR("Matthias Brugger"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Mediatek 8250 serial port driver"); diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c new file mode 100644 index 000000000..1b461fba1 --- /dev/null +++ b/drivers/tty/serial/8250/8250_of.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Serial Port driver for Open Firmware platform devices + * + * Copyright (C) 2006 Arnd Bergmann , IBM Corp. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +struct of_serial_info { + struct clk *clk; + struct reset_control *rst; + int type; + int line; +}; + +/* + * Fill a struct uart_port for a given device node + */ +static int of_platform_serial_setup(struct platform_device *ofdev, + int type, struct uart_8250_port *up, + struct of_serial_info *info) +{ + struct resource resource; + struct device_node *np = ofdev->dev.of_node; + struct uart_port *port = &up->port; + u32 clk, spd, prop; + int ret, irq; + + memset(port, 0, sizeof *port); + + pm_runtime_enable(&ofdev->dev); + pm_runtime_get_sync(&ofdev->dev); + + if (of_property_read_u32(np, "clock-frequency", &clk)) { + + /* Get clk rate through clk driver if present */ + info->clk = devm_clk_get(&ofdev->dev, NULL); + if (IS_ERR(info->clk)) { + ret = PTR_ERR(info->clk); + if (ret != -EPROBE_DEFER) + dev_warn(&ofdev->dev, + "failed to get clock: %d\n", ret); + goto err_pmruntime; + } + + ret = clk_prepare_enable(info->clk); + if (ret < 0) + goto err_pmruntime; + + clk = clk_get_rate(info->clk); + } + /* If current-speed was set, then try not to change it. */ + if (of_property_read_u32(np, "current-speed", &spd) == 0) + port->custom_divisor = clk / (16 * spd); + + ret = of_address_to_resource(np, 0, &resource); + if (ret) { + dev_warn(&ofdev->dev, "invalid address\n"); + goto err_unprepare; + } + + port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | + UPF_FIXED_TYPE; + spin_lock_init(&port->lock); + + if (resource_type(&resource) == IORESOURCE_IO) { + port->iotype = UPIO_PORT; + port->iobase = resource.start; + } else { + port->mapbase = resource.start; + port->mapsize = resource_size(&resource); + + /* Check for shifted address mapping */ + if (of_property_read_u32(np, "reg-offset", &prop) == 0) { + if (prop >= port->mapsize) { + dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n", + prop, &port->mapsize); + ret = -EINVAL; + goto err_unprepare; + } + + port->mapbase += prop; + port->mapsize -= prop; + } + + port->iotype = UPIO_MEM; + if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { + switch (prop) { + case 1: + port->iotype = UPIO_MEM; + break; + case 2: + port->iotype = UPIO_MEM16; + break; + case 4: + port->iotype = of_device_is_big_endian(np) ? + UPIO_MEM32BE : UPIO_MEM32; + break; + default: + dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n", + prop); + ret = -EINVAL; + goto err_unprepare; + } + } + port->flags |= UPF_IOREMAP; + } + + /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. 
*/ + if (of_device_is_compatible(np, "mrvl,mmp-uart")) + port->regshift = 2; + + /* Check for registers offset within the devices address range */ + if (of_property_read_u32(np, "reg-shift", &prop) == 0) + port->regshift = prop; + + /* Check for fifo size */ + if (of_property_read_u32(np, "fifo-size", &prop) == 0) + port->fifosize = prop; + + /* Check for a fixed line number */ + ret = of_alias_get_id(np, "serial"); + if (ret >= 0) + port->line = ret; + + irq = of_irq_get(np, 0); + if (irq < 0) { + if (irq == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_unprepare; + } + /* IRQ support not mandatory */ + irq = 0; + } + + port->irq = irq; + + info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); + if (IS_ERR(info->rst)) { + ret = PTR_ERR(info->rst); + goto err_unprepare; + } + + ret = reset_control_deassert(info->rst); + if (ret) + goto err_unprepare; + + port->type = type; + port->uartclk = clk; + + if (of_property_read_bool(np, "no-loopback-test")) + port->flags |= UPF_SKIP_TEST; + + port->dev = &ofdev->dev; + port->rs485_config = serial8250_em485_config; + port->rs485_supported = serial8250_em485_supported; + up->rs485_start_tx = serial8250_em485_start_tx; + up->rs485_stop_tx = serial8250_em485_stop_tx; + + switch (type) { + case PORT_RT2880: + port->iotype = UPIO_AU; + break; + } + + if (IS_ENABLED(CONFIG_SERIAL_8250_FSL) && + (of_device_is_compatible(np, "fsl,ns16550") || + of_device_is_compatible(np, "fsl,16550-FIFO64"))) { + port->handle_irq = fsl8250_handle_irq; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); + } + + return 0; +err_unprepare: + clk_disable_unprepare(info->clk); +err_pmruntime: + pm_runtime_put_sync(&ofdev->dev); + pm_runtime_disable(&ofdev->dev); + return ret; +} + +/* + * Try to register a serial port + */ +static int of_platform_serial_probe(struct platform_device *ofdev) +{ + struct of_serial_info *info; + struct uart_8250_port port8250; + unsigned int port_type; + u32 tx_threshold; + int ret; + + if (IS_ENABLED(CONFIG_SERIAL_8250_BCM7271) && + of_device_is_compatible(ofdev->dev.of_node, "brcm,bcm7271-uart")) + return -ENODEV; + + port_type = (unsigned long)of_device_get_match_data(&ofdev->dev); + if (port_type == PORT_UNKNOWN) + return -EINVAL; + + if (of_property_read_bool(ofdev->dev.of_node, "used-by-rtas")) + return -EBUSY; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) + return -ENOMEM; + + memset(&port8250, 0, sizeof(port8250)); + ret = of_platform_serial_setup(ofdev, port_type, &port8250, info); + if (ret) + goto err_free; + + if (port8250.port.fifosize) + port8250.capabilities = UART_CAP_FIFO; + + /* Check for TX FIFO threshold & set tx_loadsz */ + if ((of_property_read_u32(ofdev->dev.of_node, "tx-threshold", + &tx_threshold) == 0) && + (tx_threshold < port8250.port.fifosize)) + port8250.tx_loadsz = port8250.port.fifosize - tx_threshold; + + if (of_property_read_bool(ofdev->dev.of_node, "auto-flow-control")) + port8250.capabilities |= UART_CAP_AFE; + + if (of_property_read_u32(ofdev->dev.of_node, + "overrun-throttle-ms", + &port8250.overrun_backoff_time_ms) != 0) + port8250.overrun_backoff_time_ms = 0; + + ret = serial8250_register_8250_port(&port8250); + if (ret < 0) + goto err_dispose; + + info->type = port_type; + info->line = ret; + platform_set_drvdata(ofdev, info); + return 0; +err_dispose: + irq_dispose_mapping(port8250.port.irq); + pm_runtime_put_sync(&ofdev->dev); + pm_runtime_disable(&ofdev->dev); + clk_disable_unprepare(info->clk); +err_free: + kfree(info); + return ret; +} + +/* + * Release 
a line + */ +static int of_platform_serial_remove(struct platform_device *ofdev) +{ + struct of_serial_info *info = platform_get_drvdata(ofdev); + + serial8250_unregister_port(info->line); + + reset_control_assert(info->rst); + pm_runtime_put_sync(&ofdev->dev); + pm_runtime_disable(&ofdev->dev); + clk_disable_unprepare(info->clk); + kfree(info); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int of_serial_suspend(struct device *dev) +{ + struct of_serial_info *info = dev_get_drvdata(dev); + struct uart_8250_port *port8250 = serial8250_get_port(info->line); + struct uart_port *port = &port8250->port; + + serial8250_suspend_port(info->line); + + if (!uart_console(port) || console_suspend_enabled) { + pm_runtime_put_sync(dev); + clk_disable_unprepare(info->clk); + } + return 0; +} + +static int of_serial_resume(struct device *dev) +{ + struct of_serial_info *info = dev_get_drvdata(dev); + struct uart_8250_port *port8250 = serial8250_get_port(info->line); + struct uart_port *port = &port8250->port; + + if (!uart_console(port) || console_suspend_enabled) { + pm_runtime_get_sync(dev); + clk_prepare_enable(info->clk); + } + + serial8250_resume_port(info->line); + + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume); + +/* + * A few common types, add more as needed. + */ +static const struct of_device_id of_platform_serial_table[] = { + { .compatible = "ns8250", .data = (void *)PORT_8250, }, + { .compatible = "ns16450", .data = (void *)PORT_16450, }, + { .compatible = "ns16550a", .data = (void *)PORT_16550A, }, + { .compatible = "ns16550", .data = (void *)PORT_16550, }, + { .compatible = "ns16750", .data = (void *)PORT_16750, }, + { .compatible = "ns16850", .data = (void *)PORT_16850, }, + { .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, }, + { .compatible = "ralink,rt2880-uart", .data = (void *)PORT_RT2880, }, + { .compatible = "intel,xscale-uart", .data = (void *)PORT_XSCALE, }, + { .compatible = "altr,16550-FIFO32", + .data = (void *)PORT_ALTR_16550_F32, }, + { .compatible = "altr,16550-FIFO64", + .data = (void *)PORT_ALTR_16550_F64, }, + { .compatible = "altr,16550-FIFO128", + .data = (void *)PORT_ALTR_16550_F128, }, + { .compatible = "fsl,16550-FIFO64", + .data = (void *)PORT_16550A_FSL64, }, + { .compatible = "mediatek,mtk-btif", + .data = (void *)PORT_MTK_BTIF, }, + { .compatible = "mrvl,mmp-uart", + .data = (void *)PORT_XSCALE, }, + { .compatible = "ti,da830-uart", .data = (void *)PORT_DA830, }, + { .compatible = "nuvoton,wpcm450-uart", .data = (void *)PORT_NPCM, }, + { .compatible = "nuvoton,npcm750-uart", .data = (void *)PORT_NPCM, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, of_platform_serial_table); + +static struct platform_driver of_platform_serial_driver = { + .driver = { + .name = "of_serial", + .of_match_table = of_platform_serial_table, + .pm = &of_serial_pm_ops, + }, + .probe = of_platform_serial_probe, + .remove = of_platform_serial_remove, +}; + +module_platform_driver(of_platform_serial_driver); + +MODULE_AUTHOR("Arnd Bergmann "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Serial Port driver for Open Firmware platform devices"); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c new file mode 100644 index 000000000..037d61300 --- /dev/null +++ b/drivers/tty/serial/8250/8250_omap.c @@ -0,0 +1,1725 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 8250-core based driver for the OMAP internal UART + * + * based on omap-serial.c, Copyright (C) 2010 Texas 
Instruments. + * + * Copyright (C) 2014 Sebastian Andrzej Siewior + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +#define DEFAULT_CLK_SPEED 48000000 +#define OMAP_UART_REGSHIFT 2 + +#define UART_ERRATA_i202_MDR1_ACCESS (1 << 0) +#define OMAP_UART_WER_HAS_TX_WAKEUP (1 << 1) +#define OMAP_DMA_TX_KICK (1 << 2) +/* + * See Advisory 21 in AM437x errata SPRZ408B, updated April 2015. + * The same errata is applicable to AM335x and DRA7x processors too. + */ +#define UART_ERRATA_CLOCK_DISABLE (1 << 3) +#define UART_HAS_EFR2 BIT(4) +#define UART_HAS_RHR_IT_DIS BIT(5) +#define UART_RX_TIMEOUT_QUIRK BIT(6) + +#define OMAP_UART_FCR_RX_TRIG 6 +#define OMAP_UART_FCR_TX_TRIG 4 + +/* SCR register bitmasks */ +#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7) +#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6) +#define OMAP_UART_SCR_TX_EMPTY (1 << 3) +#define OMAP_UART_SCR_DMAMODE_MASK (3 << 1) +#define OMAP_UART_SCR_DMAMODE_1 (1 << 1) +#define OMAP_UART_SCR_DMAMODE_CTL (1 << 0) + +/* MVR register bitmasks */ +#define OMAP_UART_MVR_SCHEME_SHIFT 30 +#define OMAP_UART_LEGACY_MVR_MAJ_MASK 0xf0 +#define OMAP_UART_LEGACY_MVR_MAJ_SHIFT 4 +#define OMAP_UART_LEGACY_MVR_MIN_MASK 0x0f +#define OMAP_UART_MVR_MAJ_MASK 0x700 +#define OMAP_UART_MVR_MAJ_SHIFT 8 +#define OMAP_UART_MVR_MIN_MASK 0x3f + +/* SYSC register bitmasks */ +#define OMAP_UART_SYSC_SOFTRESET (1 << 1) + +/* SYSS register bitmasks */ +#define OMAP_UART_SYSS_RESETDONE (1 << 0) + +#define UART_TI752_TLR_TX 0 +#define UART_TI752_TLR_RX 4 + +#define TRIGGER_TLR_MASK(x) ((x & 0x3c) >> 2) +#define TRIGGER_FCR_MASK(x) (x & 3) + +/* Enable XON/XOFF flow control on output */ +#define OMAP_UART_SW_TX 0x08 +/* Enable XON/XOFF flow control on input */ +#define OMAP_UART_SW_RX 0x02 + +#define OMAP_UART_WER_MOD_WKUP 0x7f +#define OMAP_UART_TX_WAKEUP_EN (1 << 7) + +#define TX_TRIGGER 1 +#define RX_TRIGGER 48 + +#define OMAP_UART_TCR_RESTORE(x) ((x / 4) << 4) +#define OMAP_UART_TCR_HALT(x) ((x / 4) << 0) + +#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y)) + +#define OMAP_UART_REV_46 0x0406 +#define OMAP_UART_REV_52 0x0502 +#define OMAP_UART_REV_63 0x0603 + +/* Interrupt Enable Register 2 */ +#define UART_OMAP_IER2 0x1B +#define UART_OMAP_IER2_RHR_IT_DIS BIT(2) + +/* Enhanced features register 2 */ +#define UART_OMAP_EFR2 0x23 +#define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6) + +/* RX FIFO occupancy indicator */ +#define UART_OMAP_RX_LVL 0x19 + +struct omap8250_priv { + void __iomem *membase; + int line; + u8 habit; + u8 mdr1; + u8 efr; + u8 scr; + u8 wer; + u8 xon; + u8 xoff; + u8 delayed_restore; + u16 quot; + + u8 tx_trigger; + u8 rx_trigger; + bool is_suspending; + int wakeirq; + int wakeups_enabled; + u32 latency; + u32 calc_latency; + struct pm_qos_request pm_qos_request; + struct work_struct qos_work; + struct uart_8250_dma omap8250_dma; + spinlock_t rx_dma_lock; + bool rx_dma_broken; + bool throttled; +}; + +struct omap8250_dma_params { + u32 rx_size; + u8 rx_trigger; + u8 tx_trigger; +}; + +struct omap8250_platdata { + struct omap8250_dma_params *dma_params; + u8 habit; +}; + +#ifdef CONFIG_SERIAL_8250_DMA +static void omap_8250_rx_dma_flush(struct uart_8250_port *p); +#else +static inline void omap_8250_rx_dma_flush(struct uart_8250_port *p) { } +#endif + +static u32 uart_read(struct omap8250_priv *priv, u32 reg) +{ + return readl(priv->membase + (reg << 
OMAP_UART_REGSHIFT)); +} + +/* + * Called on runtime PM resume path from omap8250_restore_regs(), and + * omap8250_set_mctrl(). + */ +static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct omap8250_priv *priv = up->port.private_data; + u8 lcr; + + serial8250_do_set_mctrl(port, mctrl); + + if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) { + /* + * Turn off autoRTS if RTS is lowered and restore autoRTS + * setting if RTS is raised + */ + lcr = serial_in(up, UART_LCR); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) + priv->efr |= UART_EFR_RTS; + else + priv->efr &= ~UART_EFR_RTS; + serial_out(up, UART_EFR, priv->efr); + serial_out(up, UART_LCR, lcr); + } +} + +static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + int err; + + err = pm_runtime_resume_and_get(port->dev); + if (err) + return; + + __omap8250_set_mctrl(port, mctrl); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); +} + +/* + * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460) + * The access to uart register after MDR1 Access + * causes UART to corrupt data. + * + * Need a delay = + * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS) + * give 10 times as much + */ +static void omap_8250_mdr1_errataset(struct uart_8250_port *up, + struct omap8250_priv *priv) +{ + serial_out(up, UART_OMAP_MDR1, priv->mdr1); + udelay(2); + serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT | + UART_FCR_CLEAR_RCVR); +} + +static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud, + struct omap8250_priv *priv) +{ + unsigned int uartclk = port->uartclk; + unsigned int div_13, div_16; + unsigned int abs_d13, abs_d16; + + /* + * Old custom speed handling. + */ + if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) { + priv->quot = port->custom_divisor & UART_DIV_MAX; + /* + * I assume that nobody is using this. But hey, if somebody + * would like to specify the divisor _and_ the mode then the + * driver is ready and waiting for it. + */ + if (port->custom_divisor & (1 << 16)) + priv->mdr1 = UART_OMAP_MDR1_13X_MODE; + else + priv->mdr1 = UART_OMAP_MDR1_16X_MODE; + return; + } + div_13 = DIV_ROUND_CLOSEST(uartclk, 13 * baud); + div_16 = DIV_ROUND_CLOSEST(uartclk, 16 * baud); + + if (!div_13) + div_13 = 1; + if (!div_16) + div_16 = 1; + + abs_d13 = abs(baud - uartclk / 13 / div_13); + abs_d16 = abs(baud - uartclk / 16 / div_16); + + if (abs_d13 >= abs_d16) { + priv->mdr1 = UART_OMAP_MDR1_16X_MODE; + priv->quot = div_16; + } else { + priv->mdr1 = UART_OMAP_MDR1_13X_MODE; + priv->quot = div_13; + } +} + +static void omap8250_update_scr(struct uart_8250_port *up, + struct omap8250_priv *priv) +{ + u8 old_scr; + + old_scr = serial_in(up, UART_OMAP_SCR); + if (old_scr == priv->scr) + return; + + /* + * The manual recommends not to enable the DMA mode selector in the SCR + * (instead of the FCR) register _and_ selecting the DMA mode as one + * register write because this may lead to malfunction. 
+ */ + if (priv->scr & OMAP_UART_SCR_DMAMODE_MASK) + serial_out(up, UART_OMAP_SCR, + priv->scr & ~OMAP_UART_SCR_DMAMODE_MASK); + serial_out(up, UART_OMAP_SCR, priv->scr); +} + +static void omap8250_update_mdr1(struct uart_8250_port *up, + struct omap8250_priv *priv) +{ + if (priv->habit & UART_ERRATA_i202_MDR1_ACCESS) + omap_8250_mdr1_errataset(up, priv); + else + serial_out(up, UART_OMAP_MDR1, priv->mdr1); +} + +static void omap8250_restore_regs(struct uart_8250_port *up) +{ + struct omap8250_priv *priv = up->port.private_data; + struct uart_8250_dma *dma = up->dma; + u8 mcr = serial8250_in_MCR(up); + + if (dma && dma->tx_running) { + /* + * TCSANOW requests the change to occur immediately however if + * we have a TX-DMA operation in progress then it has been + * observed that it might stall and never complete. Therefore we + * delay DMA completes to prevent this hang from happen. + */ + priv->delayed_restore = 1; + return; + } + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, UART_EFR_ECB); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR); + serial_out(up, UART_FCR, up->fcr); + + omap8250_update_scr(up, priv); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_RESTORE(16) | + OMAP_UART_TCR_HALT(52)); + serial_out(up, UART_TI752_TLR, + TRIGGER_TLR_MASK(priv->tx_trigger) << UART_TI752_TLR_TX | + TRIGGER_TLR_MASK(priv->rx_trigger) << UART_TI752_TLR_RX); + + serial_out(up, UART_LCR, 0); + + /* drop TCR + TLR access, we setup XON/XOFF later */ + serial8250_out_MCR(up, mcr); + + serial_out(up, UART_IER, up->ier); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_dl_write(up, priv->quot); + + serial_out(up, UART_EFR, priv->efr); + + /* Configure flow control */ + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_XON1, priv->xon); + serial_out(up, UART_XOFF1, priv->xoff); + + serial_out(up, UART_LCR, up->lcr); + + omap8250_update_mdr1(up, priv); + + __omap8250_set_mctrl(&up->port, up->port.mctrl); + + if (up->port.rs485.flags & SER_RS485_ENABLED) + serial8250_em485_stop_tx(up); +} + +/* + * OMAP can use "CLK / (16 or 13) / div" for baud rate. And then we have have + * some differences in how we want to handle flow control. + */ +static void omap_8250_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct omap8250_priv *priv = up->port.private_data; + unsigned char cval = 0; + unsigned int baud; + + cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag)); + + if (termios->c_cflag & CSTOPB) + cval |= UART_LCR_STOP; + if (termios->c_cflag & PARENB) + cval |= UART_LCR_PARITY; + if (!(termios->c_cflag & PARODD)) + cval |= UART_LCR_EPAR; + if (termios->c_cflag & CMSPAR) + cval |= UART_LCR_SPAR; + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, + port->uartclk / 16 / UART_DIV_MAX, + port->uartclk / 13); + omap_8250_get_divisor(port, baud, priv); + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + pm_runtime_get_sync(port->dev); + spin_lock_irq(&port->lock); + + /* + * Update the per-port timeout. 
+ */ + uart_update_timeout(port, termios->c_cflag, baud); + + up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + if (termios->c_iflag & INPCK) + up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (termios->c_iflag & (IGNBRK | PARMRK)) + up->port.read_status_mask |= UART_LSR_BI; + + /* + * Characters to ignore + */ + up->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; + if (termios->c_iflag & IGNBRK) { + up->port.ignore_status_mask |= UART_LSR_BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_OE; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + up->port.ignore_status_mask |= UART_LSR_DR; + + /* + * Modem status interrupts + */ + up->ier &= ~UART_IER_MSI; + if (UART_ENABLE_MS(&up->port, termios->c_cflag)) + up->ier |= UART_IER_MSI; + + up->lcr = cval; + /* Up to here it was mostly serial8250_do_set_termios() */ + + /* + * We enable TRIG_GRANU for RX and TX and additionally we set + * SCR_TX_EMPTY bit. The result is the following: + * - RX_TRIGGER amount of bytes in the FIFO will cause an interrupt. + * - less than RX_TRIGGER number of bytes will also cause an interrupt + * once the UART decides that there no new bytes arriving. + * - Once THRE is enabled, the interrupt will be fired once the FIFO is + * empty - the trigger level is ignored here. + * + * Once DMA is enabled: + * - UART will assert the TX DMA line once there is room for TX_TRIGGER + * bytes in the TX FIFO. On each assert the DMA engine will move + * TX_TRIGGER bytes into the FIFO. + * - UART will assert the RX DMA line once there are RX_TRIGGER bytes in + * the FIFO and move RX_TRIGGER bytes. + * This is because threshold and trigger values are the same. + */ + up->fcr = UART_FCR_ENABLE_FIFO; + up->fcr |= TRIGGER_FCR_MASK(priv->tx_trigger) << OMAP_UART_FCR_TX_TRIG; + up->fcr |= TRIGGER_FCR_MASK(priv->rx_trigger) << OMAP_UART_FCR_RX_TRIG; + + priv->scr = OMAP_UART_SCR_RX_TRIG_GRANU1_MASK | OMAP_UART_SCR_TX_EMPTY | + OMAP_UART_SCR_TX_TRIG_GRANU1_MASK; + + if (up->dma) + priv->scr |= OMAP_UART_SCR_DMAMODE_1 | + OMAP_UART_SCR_DMAMODE_CTL; + + priv->xon = termios->c_cc[VSTART]; + priv->xoff = termios->c_cc[VSTOP]; + + priv->efr = 0; + up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); + + if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW && + !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) && + !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) { + /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */ + up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + priv->efr |= UART_EFR_CTS; + } else if (up->port.flags & UPF_SOFT_FLOW) { + /* + * OMAP rx s/w flow control is borked; the transmitter remains + * stuck off even if rx flow control is subsequently disabled + */ + + /* + * IXOFF Flag: + * Enable XON/XOFF flow control on output. 
+ * Transmit XON1, XOFF1 + */ + if (termios->c_iflag & IXOFF) { + up->port.status |= UPSTAT_AUTOXOFF; + priv->efr |= OMAP_UART_SW_TX; + } + } + omap8250_restore_regs(up); + + spin_unlock_irq(&up->port.lock); + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + + /* calculate wakeup latency constraint */ + priv->calc_latency = USEC_PER_SEC * 64 * 8 / baud; + priv->latency = priv->calc_latency; + + schedule_work(&priv->qos_work); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +/* same as 8250 except that we may have extra flow bits set in EFR */ +static void omap_8250_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct uart_8250_port *up = up_to_u8250p(port); + u8 efr; + + pm_runtime_get_sync(port->dev); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + efr = serial_in(up, UART_EFR); + serial_out(up, UART_EFR, efr | UART_EFR_ECB); + serial_out(up, UART_LCR, 0); + + serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, efr); + serial_out(up, UART_LCR, 0); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); +} + +static void omap_serial_fill_features_erratas(struct uart_8250_port *up, + struct omap8250_priv *priv) +{ + static const struct soc_device_attribute k3_soc_devices[] = { + { .family = "AM65X", }, + { .family = "J721E", .revision = "SR1.0" }, + { /* sentinel */ } + }; + u32 mvr, scheme; + u16 revision, major, minor; + + mvr = uart_read(priv, UART_OMAP_MVER); + + /* Check revision register scheme */ + scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT; + + switch (scheme) { + case 0: /* Legacy Scheme: OMAP2/3 */ + /* MINOR_REV[0:4], MAJOR_REV[4:7] */ + major = (mvr & OMAP_UART_LEGACY_MVR_MAJ_MASK) >> + OMAP_UART_LEGACY_MVR_MAJ_SHIFT; + minor = (mvr & OMAP_UART_LEGACY_MVR_MIN_MASK); + break; + case 1: + /* New Scheme: OMAP4+ */ + /* MINOR_REV[0:5], MAJOR_REV[8:10] */ + major = (mvr & OMAP_UART_MVR_MAJ_MASK) >> + OMAP_UART_MVR_MAJ_SHIFT; + minor = (mvr & OMAP_UART_MVR_MIN_MASK); + break; + default: + dev_warn(up->port.dev, + "Unknown revision, defaulting to highest\n"); + /* highest possible revision */ + major = 0xff; + minor = 0xff; + } + /* normalize revision for the driver */ + revision = UART_BUILD_REVISION(major, minor); + + switch (revision) { + case OMAP_UART_REV_46: + priv->habit |= UART_ERRATA_i202_MDR1_ACCESS; + break; + case OMAP_UART_REV_52: + priv->habit |= UART_ERRATA_i202_MDR1_ACCESS | + OMAP_UART_WER_HAS_TX_WAKEUP; + break; + case OMAP_UART_REV_63: + priv->habit |= UART_ERRATA_i202_MDR1_ACCESS | + OMAP_UART_WER_HAS_TX_WAKEUP; + break; + default: + break; + } + + /* + * AM65x SR1.0, AM65x SR2.0 and J721e SR1.0 don't + * don't have RHR_IT_DIS bit in IER2 register. So drop to flag + * to enable errata workaround. 
+ */ + if (soc_device_match(k3_soc_devices)) + priv->habit &= ~UART_HAS_RHR_IT_DIS; +} + +static void omap8250_uart_qos_work(struct work_struct *work) +{ + struct omap8250_priv *priv; + + priv = container_of(work, struct omap8250_priv, qos_work); + cpu_latency_qos_update_request(&priv->pm_qos_request, priv->latency); +} + +#ifdef CONFIG_SERIAL_8250_DMA +static int omap_8250_dma_handle_irq(struct uart_port *port); +#endif + +static irqreturn_t omap8250_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct omap8250_priv *priv = port->private_data; + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int iir, lsr; + int ret; + +#ifdef CONFIG_SERIAL_8250_DMA + if (up->dma) { + ret = omap_8250_dma_handle_irq(port); + return IRQ_RETVAL(ret); + } +#endif + + serial8250_rpm_get(up); + lsr = serial_port_in(port, UART_LSR); + iir = serial_port_in(port, UART_IIR); + ret = serial8250_handle_irq(port, iir); + + /* + * On K3 SoCs, it is observed that RX TIMEOUT is signalled after + * FIFO has been drained, in which case a dummy read of RX FIFO + * is required to clear RX TIMEOUT condition. + */ + if (priv->habit & UART_RX_TIMEOUT_QUIRK && + (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT && + serial_port_in(port, UART_OMAP_RX_LVL) == 0) { + serial_port_in(port, UART_RX); + } + + /* Stop processing interrupts on input overrun */ + if ((lsr & UART_LSR_OE) && up->overrun_backoff_time_ms > 0) { + unsigned long delay; + + /* Synchronize UART_IER access against the console. */ + spin_lock(&port->lock); + up->ier = port->serial_in(port, UART_IER); + if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) { + port->ops->stop_rx(port); + } else { + /* Keep restarting the timer until + * the input overrun subsides. + */ + cancel_delayed_work(&up->overrun_backoff); + } + spin_unlock(&port->lock); + + delay = msecs_to_jiffies(up->overrun_backoff_time_ms); + schedule_delayed_work(&up->overrun_backoff, delay); + } + + serial8250_rpm_put(up); + + return IRQ_RETVAL(ret); +} + +static int omap_8250_startup(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct omap8250_priv *priv = port->private_data; + int ret; + + if (priv->wakeirq) { + ret = dev_pm_set_dedicated_wake_irq(port->dev, priv->wakeirq); + if (ret) + return ret; + } + + pm_runtime_get_sync(port->dev); + + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + + serial_out(up, UART_LCR, UART_LCR_WLEN8); + + up->lsr_saved_flags = 0; + up->msr_saved_flags = 0; + + /* Disable DMA for console UART */ + if (uart_console(port)) + up->dma = NULL; + + if (up->dma) { + ret = serial8250_request_dma(up); + if (ret) { + dev_warn_ratelimited(port->dev, + "failed to request DMA\n"); + up->dma = NULL; + } + } + + ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED, + dev_name(port->dev), port); + if (ret < 0) + goto err; + + up->ier = UART_IER_RLSI | UART_IER_RDI; + serial_out(up, UART_IER, up->ier); + +#ifdef CONFIG_PM + up->capabilities |= UART_CAP_RPM; +#endif + + /* Enable module level wake up */ + priv->wer = OMAP_UART_WER_MOD_WKUP; + if (priv->habit & OMAP_UART_WER_HAS_TX_WAKEUP) + priv->wer |= OMAP_UART_TX_WAKEUP_EN; + serial_out(up, UART_OMAP_WER, priv->wer); + + if (up->dma && !(priv->habit & UART_HAS_EFR2)) + up->dma->rx_dma(up); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + return 0; +err: + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + dev_pm_clear_wake_irq(port->dev); + return ret; +} + +static void 
omap_8250_shutdown(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct omap8250_priv *priv = port->private_data; + + flush_work(&priv->qos_work); + if (up->dma) + omap_8250_rx_dma_flush(up); + + pm_runtime_get_sync(port->dev); + + serial_out(up, UART_OMAP_WER, 0); + if (priv->habit & UART_HAS_EFR2) + serial_out(up, UART_OMAP_EFR2, 0x0); + + up->ier = 0; + serial_out(up, UART_IER, 0); + + if (up->dma) + serial8250_release_dma(up); + + /* + * Disable break condition and FIFOs + */ + if (up->lcr & UART_LCR_SBC) + serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC); + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + free_irq(port->irq, port); + dev_pm_clear_wake_irq(port->dev); +} + +static void omap_8250_throttle(struct uart_port *port) +{ + struct omap8250_priv *priv = port->private_data; + unsigned long flags; + + pm_runtime_get_sync(port->dev); + + spin_lock_irqsave(&port->lock, flags); + port->ops->stop_rx(port); + priv->throttled = true; + spin_unlock_irqrestore(&port->lock, flags); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); +} + +static void omap_8250_unthrottle(struct uart_port *port) +{ + struct omap8250_priv *priv = port->private_data; + struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + + pm_runtime_get_sync(port->dev); + + spin_lock_irqsave(&port->lock, flags); + priv->throttled = false; + if (up->dma) + up->dma->rx_dma(up); + up->ier |= UART_IER_RLSI | UART_IER_RDI; + port->read_status_mask |= UART_LSR_DR; + serial_out(up, UART_IER, up->ier); + spin_unlock_irqrestore(&port->lock, flags); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); +} + +#ifdef CONFIG_SERIAL_8250_DMA +static int omap_8250_rx_dma(struct uart_8250_port *p); + +/* Must be called while priv->rx_dma_lock is held */ +static void __dma_rx_do_complete(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + struct tty_port *tty_port = &p->port.state->port; + struct omap8250_priv *priv = p->port.private_data; + struct dma_chan *rxchan = dma->rxchan; + dma_cookie_t cookie; + struct dma_tx_state state; + int count; + int ret; + u32 reg; + + if (!dma->rx_running) + goto out; + + cookie = dma->rx_cookie; + dma->rx_running = 0; + + /* Re-enable RX FIFO interrupt now that transfer is complete */ + if (priv->habit & UART_HAS_RHR_IT_DIS) { + reg = serial_in(p, UART_OMAP_IER2); + reg &= ~UART_OMAP_IER2_RHR_IT_DIS; + serial_out(p, UART_OMAP_IER2, reg); + } + + dmaengine_tx_status(rxchan, cookie, &state); + + count = dma->rx_size - state.residue + state.in_flight_bytes; + if (count < dma->rx_size) { + dmaengine_terminate_async(rxchan); + + /* + * Poll for teardown to complete which guarantees in + * flight data is drained. 
+ */ + if (state.in_flight_bytes) { + int poll_count = 25; + + while (dmaengine_tx_status(rxchan, cookie, NULL) && + poll_count--) + cpu_relax(); + + if (poll_count == -1) + dev_err(p->port.dev, "teardown incomplete\n"); + } + } + if (!count) + goto out; + ret = tty_insert_flip_string(tty_port, dma->rx_buf, count); + + p->port.icount.rx += ret; + p->port.icount.buf_overrun += count - ret; +out: + + tty_flip_buffer_push(tty_port); +} + +static void __dma_rx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct omap8250_priv *priv = p->port.private_data; + struct uart_8250_dma *dma = p->dma; + struct dma_tx_state state; + unsigned long flags; + + spin_lock_irqsave(&p->port.lock, flags); + + /* + * If the tx status is not DMA_COMPLETE, then this is a delayed + * completion callback. A previous RX timeout flush would have + * already pushed the data, so exit. + */ + if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) != + DMA_COMPLETE) { + spin_unlock_irqrestore(&p->port.lock, flags); + return; + } + __dma_rx_do_complete(p); + if (!priv->throttled) { + p->ier |= UART_IER_RLSI | UART_IER_RDI; + serial_out(p, UART_IER, p->ier); + if (!(priv->habit & UART_HAS_EFR2)) + omap_8250_rx_dma(p); + } + + spin_unlock_irqrestore(&p->port.lock, flags); +} + +static void omap_8250_rx_dma_flush(struct uart_8250_port *p) +{ + struct omap8250_priv *priv = p->port.private_data; + struct uart_8250_dma *dma = p->dma; + struct dma_tx_state state; + unsigned long flags; + int ret; + + spin_lock_irqsave(&priv->rx_dma_lock, flags); + + if (!dma->rx_running) { + spin_unlock_irqrestore(&priv->rx_dma_lock, flags); + return; + } + + ret = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + if (ret == DMA_IN_PROGRESS) { + ret = dmaengine_pause(dma->rxchan); + if (WARN_ON_ONCE(ret)) + priv->rx_dma_broken = true; + } + __dma_rx_do_complete(p); + spin_unlock_irqrestore(&priv->rx_dma_lock, flags); +} + +static int omap_8250_rx_dma(struct uart_8250_port *p) +{ + struct omap8250_priv *priv = p->port.private_data; + struct uart_8250_dma *dma = p->dma; + int err = 0; + struct dma_async_tx_descriptor *desc; + unsigned long flags; + u32 reg; + + if (priv->rx_dma_broken) + return -EINVAL; + + spin_lock_irqsave(&priv->rx_dma_lock, flags); + + if (dma->rx_running) { + enum dma_status state; + + state = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, NULL); + if (state == DMA_COMPLETE) { + /* + * Disable RX interrupts to allow RX DMA completion + * callback to run. + */ + p->ier &= ~(UART_IER_RLSI | UART_IER_RDI); + serial_out(p, UART_IER, p->ier); + } + goto out; + } + + desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr, + dma->rx_size, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + err = -EBUSY; + goto out; + } + + dma->rx_running = 1; + desc->callback = __dma_rx_complete; + desc->callback_param = p; + + dma->rx_cookie = dmaengine_submit(desc); + + /* + * Disable RX FIFO interrupt while RX DMA is enabled, else + * spurious interrupt may be raised when data is in the RX FIFO + * but is yet to be drained by DMA. 
+ */ + if (priv->habit & UART_HAS_RHR_IT_DIS) { + reg = serial_in(p, UART_OMAP_IER2); + reg |= UART_OMAP_IER2_RHR_IT_DIS; + serial_out(p, UART_OMAP_IER2, reg); + } + + dma_async_issue_pending(dma->rxchan); +out: + spin_unlock_irqrestore(&priv->rx_dma_lock, flags); + return err; +} + +static int omap_8250_tx_dma(struct uart_8250_port *p); + +static void omap_8250_dma_tx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + struct circ_buf *xmit = &p->port.state->xmit; + unsigned long flags; + bool en_thri = false; + struct omap8250_priv *priv = p->port.private_data; + + dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr, + UART_XMIT_SIZE, DMA_TO_DEVICE); + + spin_lock_irqsave(&p->port.lock, flags); + + dma->tx_running = 0; + + uart_xmit_advance(&p->port, dma->tx_size); + + if (priv->delayed_restore) { + priv->delayed_restore = 0; + omap8250_restore_regs(p); + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&p->port); + + if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) { + int ret; + + ret = omap_8250_tx_dma(p); + if (ret) + en_thri = true; + } else if (p->capabilities & UART_CAP_RPM) { + en_thri = true; + } + + if (en_thri) { + dma->tx_err = 1; + serial8250_set_THRI(p); + } + + spin_unlock_irqrestore(&p->port.lock, flags); +} + +static int omap_8250_tx_dma(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + struct omap8250_priv *priv = p->port.private_data; + struct circ_buf *xmit = &p->port.state->xmit; + struct dma_async_tx_descriptor *desc; + unsigned int skip_byte = 0; + int ret; + + if (dma->tx_running) + return 0; + if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { + + /* + * Even if no data, we need to return an error for the two cases + * below so serial8250_tx_chars() is invoked and properly clears + * THRI and/or runtime suspend. + */ + if (dma->tx_err || p->capabilities & UART_CAP_RPM) { + ret = -EBUSY; + goto err; + } + serial8250_clear_THRI(p); + return 0; + } + + dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + if (priv->habit & OMAP_DMA_TX_KICK) { + u8 tx_lvl; + + /* + * We need to put the first byte into the FIFO in order to start + * the DMA transfer. For transfers smaller than four bytes we + * don't bother doing DMA at all. It seem not matter if there + * are still bytes in the FIFO from the last transfer (in case + * we got here directly from omap_8250_dma_tx_complete()). Bytes + * leaving the FIFO seem not to trigger the DMA transfer. It is + * really the byte that we put into the FIFO. + * If the FIFO is already full then we most likely got here from + * omap_8250_dma_tx_complete(). And this means the DMA engine + * just completed its work. We don't have to wait the complete + * 86us at 115200,8n1 but around 60us (not to mention lower + * baudrates). So in that case we take the interrupt and try + * again with an empty FIFO. 
+ */ + tx_lvl = serial_in(p, UART_OMAP_TX_LVL); + if (tx_lvl == p->tx_loadsz) { + ret = -EBUSY; + goto err; + } + if (dma->tx_size < 4) { + ret = -EINVAL; + goto err; + } + skip_byte = 1; + } + + desc = dmaengine_prep_slave_single(dma->txchan, + dma->tx_addr + xmit->tail + skip_byte, + dma->tx_size - skip_byte, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + ret = -EBUSY; + goto err; + } + + dma->tx_running = 1; + + desc->callback = omap_8250_dma_tx_complete; + desc->callback_param = p; + + dma->tx_cookie = dmaengine_submit(desc); + + dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr, + UART_XMIT_SIZE, DMA_TO_DEVICE); + + dma_async_issue_pending(dma->txchan); + if (dma->tx_err) + dma->tx_err = 0; + + serial8250_clear_THRI(p); + if (skip_byte) + serial_out(p, UART_TX, xmit->buf[xmit->tail]); + return 0; +err: + dma->tx_err = 1; + return ret; +} + +static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) +{ + switch (iir & 0x3f) { + case UART_IIR_RLSI: + case UART_IIR_RX_TIMEOUT: + case UART_IIR_RDI: + omap_8250_rx_dma_flush(up); + return true; + } + return omap_8250_rx_dma(up); +} + +static u16 omap_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, u16 status) +{ + if ((status & (UART_LSR_DR | UART_LSR_BI)) && + (iir & UART_IIR_RDI)) { + if (handle_rx_dma(up, iir)) { + status = serial8250_rx_chars(up, status); + omap_8250_rx_dma(up); + } + } + + return status; +} + +static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, + u16 status) +{ + /* + * Queue a new transfer if FIFO has data. + */ + if ((status & (UART_LSR_DR | UART_LSR_BI)) && + (up->ier & UART_IER_RDI)) { + omap_8250_rx_dma(up); + serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE); + } else if ((iir & 0x3f) == UART_IIR_RX_TIMEOUT) { + /* + * Disable RX timeout, read IIR to clear + * current timeout condition, clear EFR2 to + * periodic timeouts, re-enable interrupts. + */ + up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); + serial_out(up, UART_IER, up->ier); + omap_8250_rx_dma_flush(up); + serial_in(up, UART_IIR); + serial_out(up, UART_OMAP_EFR2, 0x0); + up->ier |= UART_IER_RLSI | UART_IER_RDI; + serial_out(up, UART_IER, up->ier); + } +} + +/* + * This is mostly serial8250_handle_irq(). We have a slightly different DMA + * hoook for RX/TX and need different logic for them in the ISR. Therefore we + * use the default routine in the non-DMA case and this one for with DMA. + */ +static int omap_8250_dma_handle_irq(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct omap8250_priv *priv = up->port.private_data; + u16 status; + u8 iir; + + serial8250_rpm_get(up); + + iir = serial_port_in(port, UART_IIR); + if (iir & UART_IIR_NO_INT) { + serial8250_rpm_put(up); + return IRQ_HANDLED; + } + + spin_lock(&port->lock); + + status = serial_port_in(port, UART_LSR); + + if ((iir & 0x3f) != UART_IIR_THRI) { + if (priv->habit & UART_HAS_EFR2) + am654_8250_handle_rx_dma(up, iir, status); + else + status = omap_8250_handle_rx_dma(up, iir, status); + } + + serial8250_modem_status(up); + if (status & UART_LSR_THRE && up->dma->tx_err) { + if (uart_tx_stopped(&up->port) || + uart_circ_empty(&up->port.state->xmit)) { + up->dma->tx_err = 0; + serial8250_tx_chars(up); + } else { + /* + * try again due to an earlier failer which + * might have been resolved by now. 
+ */ + if (omap_8250_tx_dma(up)) + serial8250_tx_chars(up); + } + } + + uart_unlock_and_check_sysrq(port); + + serial8250_rpm_put(up); + return 1; +} + +static bool the_no_dma_filter_fn(struct dma_chan *chan, void *param) +{ + return false; +} + +#else + +static inline int omap_8250_rx_dma(struct uart_8250_port *p) +{ + return -EINVAL; +} +#endif + +static int omap8250_no_handle_irq(struct uart_port *port) +{ + /* IRQ has not been requested but handling irq? */ + WARN_ONCE(1, "Unexpected irq handling before port startup\n"); + return 0; +} + +static struct omap8250_dma_params am654_dma = { + .rx_size = SZ_2K, + .rx_trigger = 1, + .tx_trigger = TX_TRIGGER, +}; + +static struct omap8250_dma_params am33xx_dma = { + .rx_size = RX_TRIGGER, + .rx_trigger = RX_TRIGGER, + .tx_trigger = TX_TRIGGER, +}; + +static struct omap8250_platdata am654_platdata = { + .dma_params = &am654_dma, + .habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS | + UART_RX_TIMEOUT_QUIRK, +}; + +static struct omap8250_platdata am33xx_platdata = { + .dma_params = &am33xx_dma, + .habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE, +}; + +static struct omap8250_platdata omap4_platdata = { + .dma_params = &am33xx_dma, + .habit = UART_ERRATA_CLOCK_DISABLE, +}; + +static const struct of_device_id omap8250_dt_ids[] = { + { .compatible = "ti,am654-uart", .data = &am654_platdata, }, + { .compatible = "ti,omap2-uart" }, + { .compatible = "ti,omap3-uart" }, + { .compatible = "ti,omap4-uart", .data = &omap4_platdata, }, + { .compatible = "ti,am3352-uart", .data = &am33xx_platdata, }, + { .compatible = "ti,am4372-uart", .data = &am33xx_platdata, }, + { .compatible = "ti,dra742-uart", .data = &omap4_platdata, }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap8250_dt_ids); + +static int omap8250_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct omap8250_priv *priv; + const struct omap8250_platdata *pdata; + struct uart_8250_port up; + struct resource *regs; + void __iomem *membase; + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "missing registers\n"); + return -EINVAL; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + membase = devm_ioremap(&pdev->dev, regs->start, + resource_size(regs)); + if (!membase) + return -ENODEV; + + memset(&up, 0, sizeof(up)); + up.port.dev = &pdev->dev; + up.port.mapbase = regs->start; + up.port.membase = membase; + up.port.irq = irq; + /* + * It claims to be 16C750 compatible however it is a little different. + * It has EFR and has no FCR7_64byte bit. The AFE (which it claims to + * have) is enabled via EFR instead of MCR. The type is set here 8250 + * just to get things going. UNKNOWN does not work for a few reasons and + * we don't need our own type since we don't use 8250's set_termios() + * or pm callback. + */ + up.port.type = PORT_8250; + up.port.iotype = UPIO_MEM; + up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW | + UPF_HARD_FLOW; + up.port.private_data = priv; + + up.port.regshift = OMAP_UART_REGSHIFT; + up.port.fifosize = 64; + up.tx_loadsz = 64; + up.capabilities = UART_CAP_FIFO; +#ifdef CONFIG_PM + /* + * Runtime PM is mostly transparent. However to do it right we need to a + * TX empty interrupt before we can put the device to auto idle. So if + * PM is not enabled we don't add that flag and can spare that one extra + * interrupt in the TX path. 
+ */ + up.capabilities |= UART_CAP_RPM; +#endif + up.port.set_termios = omap_8250_set_termios; + up.port.set_mctrl = omap8250_set_mctrl; + up.port.pm = omap_8250_pm; + up.port.startup = omap_8250_startup; + up.port.shutdown = omap_8250_shutdown; + up.port.throttle = omap_8250_throttle; + up.port.unthrottle = omap_8250_unthrottle; + up.port.rs485_config = serial8250_em485_config; + up.port.rs485_supported = serial8250_em485_supported; + up.rs485_start_tx = serial8250_em485_start_tx; + up.rs485_stop_tx = serial8250_em485_stop_tx; + up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias\n"); + return ret; + } + up.port.line = ret; + + if (of_property_read_u32(np, "clock-frequency", &up.port.uartclk)) { + struct clk *clk; + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + } else { + up.port.uartclk = clk_get_rate(clk); + } + } + + if (of_property_read_u32(np, "overrun-throttle-ms", + &up.overrun_backoff_time_ms) != 0) + up.overrun_backoff_time_ms = 0; + + priv->wakeirq = irq_of_parse_and_map(np, 1); + + pdata = of_device_get_match_data(&pdev->dev); + if (pdata) + priv->habit |= pdata->habit; + + if (!up.port.uartclk) { + up.port.uartclk = DEFAULT_CLK_SPEED; + dev_warn(&pdev->dev, + "No clock speed specified: using default: %d\n", + DEFAULT_CLK_SPEED); + } + + priv->membase = membase; + priv->line = -ENODEV; + priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency); + INIT_WORK(&priv->qos_work, omap8250_uart_qos_work); + + spin_lock_init(&priv->rx_dma_lock); + + platform_set_drvdata(pdev, priv); + + device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); + + /* + * Disable runtime PM until autosuspend delay unless specifically + * enabled by the user via sysfs. This is the historic way to + * prevent an unsafe default policy with lossy characters on wake-up. + * For serdev devices this is not needed, the policy can be managed by + * the serdev driver. + */ + if (!of_get_available_child_count(pdev->dev.of_node)) + pm_runtime_set_autosuspend_delay(&pdev->dev, -1); + + pm_runtime_irq_safe(&pdev->dev); + + pm_runtime_get_sync(&pdev->dev); + + omap_serial_fill_features_erratas(&up, priv); + up.port.handle_irq = omap8250_no_handle_irq; + priv->rx_trigger = RX_TRIGGER; + priv->tx_trigger = TX_TRIGGER; +#ifdef CONFIG_SERIAL_8250_DMA + /* + * Oh DMA support. If there are no DMA properties in the DT then + * we will fall back to a generic DMA channel which does not + * really work here. To ensure that we do not get a generic DMA + * channel assigned, we have the the_no_dma_filter_fn() here. + * To avoid "failed to request DMA" messages we check for DMA + * properties in DT. 
+ */ + ret = of_property_count_strings(np, "dma-names"); + if (ret == 2) { + struct omap8250_dma_params *dma_params = NULL; + + up.dma = &priv->omap8250_dma; + up.dma->fn = the_no_dma_filter_fn; + up.dma->tx_dma = omap_8250_tx_dma; + up.dma->rx_dma = omap_8250_rx_dma; + if (pdata) + dma_params = pdata->dma_params; + + if (dma_params) { + up.dma->rx_size = dma_params->rx_size; + up.dma->rxconf.src_maxburst = dma_params->rx_trigger; + up.dma->txconf.dst_maxburst = dma_params->tx_trigger; + priv->rx_trigger = dma_params->rx_trigger; + priv->tx_trigger = dma_params->tx_trigger; + } else { + up.dma->rx_size = RX_TRIGGER; + up.dma->rxconf.src_maxburst = RX_TRIGGER; + up.dma->txconf.dst_maxburst = TX_TRIGGER; + } + } +#endif + ret = serial8250_register_8250_port(&up); + if (ret < 0) { + dev_err(&pdev->dev, "unable to register 8250 port\n"); + goto err; + } + priv->line = ret; + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; +err: + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + flush_work(&priv->qos_work); + pm_runtime_disable(&pdev->dev); + cpu_latency_qos_remove_request(&priv->pm_qos_request); + return ret; +} + +static int omap8250_remove(struct platform_device *pdev) +{ + struct omap8250_priv *priv = platform_get_drvdata(pdev); + int err; + + err = pm_runtime_resume_and_get(&pdev->dev); + if (err) + dev_err(&pdev->dev, "Failed to resume hardware\n"); + + serial8250_unregister_port(priv->line); + priv->line = -ENODEV; + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + flush_work(&priv->qos_work); + pm_runtime_disable(&pdev->dev); + cpu_latency_qos_remove_request(&priv->pm_qos_request); + device_init_wakeup(&pdev->dev, false); + return 0; +} + +static int omap8250_prepare(struct device *dev) +{ + struct omap8250_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return 0; + priv->is_suspending = true; + return 0; +} + +static void omap8250_complete(struct device *dev) +{ + struct omap8250_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return; + priv->is_suspending = false; +} + +static int omap8250_suspend(struct device *dev) +{ + struct omap8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + int err = 0; + + serial8250_suspend_port(priv->line); + + err = pm_runtime_resume_and_get(dev); + if (err) + return err; + if (!device_may_wakeup(dev)) + priv->wer = 0; + serial_out(up, UART_OMAP_WER, priv->wer); + if (uart_console(&up->port) && console_suspend_enabled) + err = pm_runtime_force_suspend(dev); + flush_work(&priv->qos_work); + + return err; +} + +static int omap8250_resume(struct device *dev) +{ + struct omap8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + int err; + + if (uart_console(&up->port) && console_suspend_enabled) { + err = pm_runtime_force_resume(dev); + if (err) + return err; + } + + serial8250_resume_port(priv->line); + /* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */ + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; +} + +static int omap8250_lost_context(struct uart_8250_port *up) +{ + u32 val; + + val = serial_in(up, UART_OMAP_SCR); + /* + * If we lose context, then SCR is set to its reset value of zero. + * After set_termios() we set bit 3 of SCR (TX_EMPTY_CTL_IT) to 1, + * among other bits, to never set the register back to zero again. 
+ */ + if (!val) + return 1; + return 0; +} + +static void uart_write(struct omap8250_priv *priv, u32 reg, u32 val) +{ + writel(val, priv->membase + (reg << OMAP_UART_REGSHIFT)); +} + +/* TODO: in future, this should happen via API in drivers/reset/ */ +static int omap8250_soft_reset(struct device *dev) +{ + struct omap8250_priv *priv = dev_get_drvdata(dev); + int timeout = 100; + int sysc; + int syss; + + /* + * At least on omap4, unused uarts may not idle after reset without + * a basic scr dma configuration even with no dma in use. The + * module clkctrl status bits will be 1 instead of 3 blocking idle + * for the whole clockdomain. The softreset below will clear scr, + * and we restore it on resume so this is safe to do on all SoCs + * needing omap8250_soft_reset() quirk. Do it in two writes as + * recommended in the comment for omap8250_update_scr(). + */ + uart_write(priv, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1); + uart_write(priv, UART_OMAP_SCR, + OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL); + + sysc = uart_read(priv, UART_OMAP_SYSC); + + /* softreset the UART */ + sysc |= OMAP_UART_SYSC_SOFTRESET; + uart_write(priv, UART_OMAP_SYSC, sysc); + + /* By experiments, 1us enough for reset complete on AM335x */ + do { + udelay(1); + syss = uart_read(priv, UART_OMAP_SYSS); + } while (--timeout && !(syss & OMAP_UART_SYSS_RESETDONE)); + + if (!timeout) { + dev_err(dev, "timed out waiting for reset done\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int omap8250_runtime_suspend(struct device *dev) +{ + struct omap8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = NULL; + + if (priv->line >= 0) + up = serial8250_get_port(priv->line); + + if (priv->habit & UART_ERRATA_CLOCK_DISABLE) { + int ret; + + ret = omap8250_soft_reset(dev); + if (ret) + return ret; + + if (up) { + /* Restore to UART mode after reset (for wakeup) */ + omap8250_update_mdr1(up, priv); + /* Restore wakeup enable register */ + serial_out(up, UART_OMAP_WER, priv->wer); + } + } + + if (up && up->dma && up->dma->rxchan) + omap_8250_rx_dma_flush(up); + + priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + schedule_work(&priv->qos_work); + + return 0; +} + +static int omap8250_runtime_resume(struct device *dev) +{ + struct omap8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = NULL; + + if (priv->line >= 0) + up = serial8250_get_port(priv->line); + + if (up && omap8250_lost_context(up)) + omap8250_restore_regs(up); + + if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) + omap_8250_rx_dma(up); + + priv->latency = priv->calc_latency; + schedule_work(&priv->qos_work); + return 0; +} + +#ifdef CONFIG_SERIAL_8250_OMAP_TTYO_FIXUP +static int __init omap8250_console_fixup(void) +{ + char *omap_str; + char *options; + u8 idx; + + if (strstr(boot_command_line, "console=ttyS")) + /* user set a ttyS based name for the console */ + return 0; + + omap_str = strstr(boot_command_line, "console=ttyO"); + if (!omap_str) + /* user did not set ttyO based console, so we don't care */ + return 0; + + omap_str += 12; + if ('0' <= *omap_str && *omap_str <= '9') + idx = *omap_str - '0'; + else + return 0; + + omap_str++; + if (omap_str[0] == ',') { + omap_str++; + options = omap_str; + } else { + options = NULL; + } + + add_preferred_console("ttyS", idx, options); + pr_err("WARNING: Your 'console=ttyO%d' has been replaced by 'ttyS%d'\n", + idx, idx); + pr_err("This ensures that you still see kernel messages. 
Please\n"); + pr_err("update your kernel commandline.\n"); + return 0; +} +console_initcall(omap8250_console_fixup); +#endif + +static const struct dev_pm_ops omap8250_dev_pm_ops = { + SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume) + RUNTIME_PM_OPS(omap8250_runtime_suspend, + omap8250_runtime_resume, NULL) + .prepare = pm_sleep_ptr(omap8250_prepare), + .complete = pm_sleep_ptr(omap8250_complete), +}; + +static struct platform_driver omap8250_platform_driver = { + .driver = { + .name = "omap8250", + .pm = pm_ptr(&omap8250_dev_pm_ops), + .of_match_table = omap8250_dt_ids, + }, + .probe = omap8250_probe, + .remove = omap8250_remove, +}; +module_platform_driver(omap8250_platform_driver); + +MODULE_AUTHOR("Sebastian Andrzej Siewior"); +MODULE_DESCRIPTION("OMAP 8250 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/8250/8250_parisc.c b/drivers/tty/serial/8250/8250_parisc.c new file mode 100644 index 000000000..948d0a1c6 --- /dev/null +++ b/drivers/tty/serial/8250/8250_parisc.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Serial Device Initialisation for Lasi/Asp/Wax/Dino + * + * (c) Copyright Matthew Wilcox 2001-2002 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "8250.h" + +static int __init serial_init_chip(struct parisc_device *dev) +{ + struct uart_8250_port uart; + unsigned long address; + int err; + +#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) + if (!dev->irq && (dev->id.sversion == 0xad)) + dev->irq = iosapic_serial_irq(dev); +#endif + + if (!dev->irq) { + /* We find some unattached serial ports by walking native + * busses. These should be silently ignored. Otherwise, + * what we have here is a missing parent device, so tell + * the user what they're missing. + */ + if (parisc_parent(dev)->id.hw_type != HPHW_IOA) + dev_info(&dev->dev, + "Serial: device 0x%llx not configured.\n" + "Enable support for Wax, Lasi, Asp or Dino.\n", + (unsigned long long)dev->hpa.start); + return -ENODEV; + } + + address = dev->hpa.start; + if (dev->id.sversion != 0x8d) + address += 0x800; + + memset(&uart, 0, sizeof(uart)); + uart.port.iotype = UPIO_MEM; + /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ + uart.port.uartclk = (dev->id.sversion != 0xad) ? + 7272727 : 1843200; + uart.port.mapbase = address; + uart.port.membase = ioremap(address, 16); + if (!uart.port.membase) { + dev_warn(&dev->dev, "Failed to map memory\n"); + return -ENOMEM; + } + uart.port.irq = dev->irq; + uart.port.flags = UPF_BOOT_AUTOCONF; + uart.port.dev = &dev->dev; + + err = serial8250_register_8250_port(&uart); + if (err < 0) { + dev_warn(&dev->dev, + "serial8250_register_8250_port returned error %d\n", + err); + iounmap(uart.port.membase); + return err; + } + + return 0; +} + +static const struct parisc_device_id serial_tbl[] __initconst = { + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad }, + { 0 } +}; + +/* Hack. Some machines have SERIAL_0 attached to Lasi and SERIAL_1 + * attached to Dino. Unfortunately, Dino appears before Lasi in the device + * tree. To ensure that ttyS0 == SERIAL_0, we register two drivers; one + * which only knows about Lasi and then a second which will find all the + * other serial ports. HPUX ignores this problem. 
+ */ +static const struct parisc_device_id lasi_tbl[] __initconst = { + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03B, 0x0008C }, /* C1xx/C1xxL */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03C, 0x0008C }, /* B132L */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03D, 0x0008C }, /* B160L */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03E, 0x0008C }, /* B132L+ */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03F, 0x0008C }, /* B180L+ */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x046, 0x0008C }, /* Rocky2 120 */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x047, 0x0008C }, /* Rocky2 150 */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x04E, 0x0008C }, /* Kiji L2 132 */ + { HPHW_FIO, HVERSION_REV_ANY_ID, 0x056, 0x0008C }, /* Raven+ */ + { 0 } +}; + + +MODULE_DEVICE_TABLE(parisc, serial_tbl); + +static struct parisc_driver lasi_driver __refdata = { + .name = "serial_1", + .id_table = lasi_tbl, + .probe = serial_init_chip, +}; + +static struct parisc_driver serial_driver __refdata = { + .name = "serial", + .id_table = serial_tbl, + .probe = serial_init_chip, +}; + +static int __init probe_serial_gsc(void) +{ + register_parisc_driver(&lasi_driver); + register_parisc_driver(&serial_driver); + return 0; +} + +module_init(probe_serial_gsc); + +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c new file mode 100644 index 000000000..38fb7126a --- /dev/null +++ b/drivers/tty/serial/8250/8250_pci.c @@ -0,0 +1,6058 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Probe module for 8250/16550-type PCI serial ports. + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright (C) 2001 Russell King, All Rights Reserved. + */ +#undef DEBUG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8250.h" + +/* + * init function returns: + * > 0 - number of ports + * = 0 - use board->num_ports + * < 0 - error + */ +struct pci_serial_quirk { + u32 vendor; + u32 device; + u32 subvendor; + u32 subdevice; + int (*probe)(struct pci_dev *dev); + int (*init)(struct pci_dev *dev); + int (*setup)(struct serial_private *, + const struct pciserial_board *, + struct uart_8250_port *, int); + void (*exit)(struct pci_dev *dev); +}; + +struct f815xxa_data { + spinlock_t lock; + int idx; +}; + +struct serial_private { + struct pci_dev *dev; + unsigned int nr; + struct pci_serial_quirk *quirk; + const struct pciserial_board *board; + int line[]; +}; + +#define PCI_DEVICE_ID_HPE_PCI_SERIAL 0x37e + +static const struct pci_device_id pci_use_msi[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, + 0xA000, 0x1000) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912, + 0xA000, 0x1000) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922, + 0xA000, 0x1000) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL, + PCI_ANY_ID, PCI_ANY_ID) }, + { } +}; + +static int pci_default_setup(struct serial_private*, + const struct pciserial_board*, struct uart_8250_port *, int); + +static void moan_device(const char *str, struct pci_dev *dev) +{ + pci_err(dev, "%s\n" + "Please send the output of lspci -vv, this\n" + "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" + "manufacturer and name of serial board or\n" + "modem board to .\n", + str, dev->vendor, dev->device, + dev->subsystem_vendor, dev->subsystem_device); +} + +static int +setup_port(struct serial_private *priv, struct uart_8250_port *port, + u8 bar, unsigned int offset, int regshift) +{ + 
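+ /*
+ * Shared helper for the per-board setup quirks below: validate the
+ * requested BAR, then fill in the 8250 port's iotype, base address and
+ * register shift.  Memory BARs are mapped through the managed
+ * pcim_iomap() table; I/O BARs are recorded as plain port ranges.
+ */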
struct pci_dev *dev = priv->dev; + + if (bar >= PCI_STD_NUM_BARS) + return -EINVAL; + + if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) { + if (!pcim_iomap(dev, bar, 0) && !pcim_iomap_table(dev)) + return -ENOMEM; + + port->port.iotype = UPIO_MEM; + port->port.iobase = 0; + port->port.mapbase = pci_resource_start(dev, bar) + offset; + port->port.membase = pcim_iomap_table(dev)[bar] + offset; + port->port.regshift = regshift; + } else { + port->port.iotype = UPIO_PORT; + port->port.iobase = pci_resource_start(dev, bar) + offset; + port->port.mapbase = 0; + port->port.membase = NULL; + port->port.regshift = 0; + } + return 0; +} + +/* + * ADDI-DATA GmbH communication cards + */ +static int addidata_apci7800_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar = 0, offset = board->first_offset; + bar = FL_GET_BASE(board->flags); + + if (idx < 2) { + offset += idx * board->uart_offset; + } else if ((idx >= 2) && (idx < 4)) { + bar += 1; + offset += ((idx - 2) * board->uart_offset); + } else if ((idx >= 4) && (idx < 6)) { + bar += 2; + offset += ((idx - 4) * board->uart_offset); + } else if (idx >= 6) { + bar += 3; + offset += ((idx - 6) * board->uart_offset); + } + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + +/* + * AFAVLAB uses a different mixture of BARs and offsets + * Not that ugly ;) -- HW + */ +static int +afavlab_setup(struct serial_private *priv, const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar, offset = board->first_offset; + + bar = FL_GET_BASE(board->flags); + if (idx < 4) + bar += idx; + else { + bar = 4; + offset += (idx - 4) * board->uart_offset; + } + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + +/* + * HP's Remote Management Console. The Diva chip came in several + * different versions. N-class, L2000 and A500 have two Diva chips, each + * with 3 UARTs (the third UART on the second chip is unused). Superdome + * and Keystone have one Diva chip with 3 UARTs. Some later machines have + * one Diva chip, but it has been expanded to 5 UARTs. + */ +static int pci_hp_diva_init(struct pci_dev *dev) +{ + int rc = 0; + + switch (dev->subsystem_device) { + case PCI_DEVICE_ID_HP_DIVA_TOSCA1: + case PCI_DEVICE_ID_HP_DIVA_HALFDOME: + case PCI_DEVICE_ID_HP_DIVA_KEYSTONE: + case PCI_DEVICE_ID_HP_DIVA_EVEREST: + rc = 3; + break; + case PCI_DEVICE_ID_HP_DIVA_TOSCA2: + rc = 2; + break; + case PCI_DEVICE_ID_HP_DIVA_MAESTRO: + rc = 4; + break; + case PCI_DEVICE_ID_HP_DIVA_POWERBAR: + case PCI_DEVICE_ID_HP_DIVA_HURRICANE: + rc = 1; + break; + } + + return rc; +} + +/* + * HP's Diva chip puts the 4th/5th serial port further out, and + * some serial ports are supposed to be hidden on certain models. 
+ */ +static int +pci_hp_diva_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int offset = board->first_offset; + unsigned int bar = FL_GET_BASE(board->flags); + + switch (priv->dev->subsystem_device) { + case PCI_DEVICE_ID_HP_DIVA_MAESTRO: + if (idx == 3) + idx++; + break; + case PCI_DEVICE_ID_HP_DIVA_EVEREST: + if (idx > 0) + idx++; + if (idx > 2) + idx++; + break; + } + if (idx > 2) + offset = 0x18; + + offset += idx * board->uart_offset; + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + +/* + * Added for EKF Intel i960 serial boards + */ +static int pci_inteli960ni_init(struct pci_dev *dev) +{ + u32 oldval; + + if (!(dev->subsystem_device & 0x1000)) + return -ENODEV; + + /* is firmware started? */ + pci_read_config_dword(dev, 0x44, &oldval); + if (oldval == 0x00001000L) { /* RESET value */ + pci_dbg(dev, "Local i960 firmware missing\n"); + return -ENODEV; + } + return 0; +} + +/* + * Some PCI serial cards using the PLX 9050 PCI interface chip require + * that the card interrupt be explicitly enabled or disabled. This + * seems to be mainly needed on card using the PLX which also use I/O + * mapped memory. + */ +static int pci_plx9050_init(struct pci_dev *dev) +{ + u8 irq_config; + void __iomem *p; + + if ((pci_resource_flags(dev, 0) & IORESOURCE_MEM) == 0) { + moan_device("no memory in bar 0", dev); + return 0; + } + + irq_config = 0x41; + if (dev->vendor == PCI_VENDOR_ID_PANACOM || + dev->subsystem_vendor == PCI_SUBVENDOR_ID_EXSYS) + irq_config = 0x43; + + if ((dev->vendor == PCI_VENDOR_ID_PLX) && + (dev->device == PCI_DEVICE_ID_PLX_ROMULUS)) + /* + * As the megawolf cards have the int pins active + * high, and have 2 UART chips, both ints must be + * enabled on the 9050. Also, the UARTS are set in + * 16450 mode by default, so we have to enable the + * 16C950 'enhanced' mode so that we can use the + * deep FIFOs + */ + irq_config = 0x5b; + /* + * enable/disable interrupts + */ + p = ioremap(pci_resource_start(dev, 0), 0x80); + if (p == NULL) + return -ENOMEM; + writel(irq_config, p + 0x4c); + + /* + * Read the register back to ensure that it took effect. + */ + readl(p + 0x4c); + iounmap(p); + + return 0; +} + +static void pci_plx9050_exit(struct pci_dev *dev) +{ + u8 __iomem *p; + + if ((pci_resource_flags(dev, 0) & IORESOURCE_MEM) == 0) + return; + + /* + * disable interrupts + */ + p = ioremap(pci_resource_start(dev, 0), 0x80); + if (p != NULL) { + writel(0, p + 0x4c); + + /* + * Read the register back to ensure that it took effect. 
+ */ + readl(p + 0x4c); + iounmap(p); + } +} + +#define NI8420_INT_ENABLE_REG 0x38 +#define NI8420_INT_ENABLE_BIT 0x2000 + +static void pci_ni8420_exit(struct pci_dev *dev) +{ + void __iomem *p; + unsigned int bar = 0; + + if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { + moan_device("no memory in bar", dev); + return; + } + + p = pci_ioremap_bar(dev, bar); + if (p == NULL) + return; + + /* Disable the CPU Interrupt */ + writel(readl(p + NI8420_INT_ENABLE_REG) & ~(NI8420_INT_ENABLE_BIT), + p + NI8420_INT_ENABLE_REG); + iounmap(p); +} + + +/* MITE registers */ +#define MITE_IOWBSR1 0xc4 +#define MITE_IOWCR1 0xf4 +#define MITE_LCIMR1 0x08 +#define MITE_LCIMR2 0x10 + +#define MITE_LCIMR2_CLR_CPU_IE (1 << 30) + +static void pci_ni8430_exit(struct pci_dev *dev) +{ + void __iomem *p; + unsigned int bar = 0; + + if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { + moan_device("no memory in bar", dev); + return; + } + + p = pci_ioremap_bar(dev, bar); + if (p == NULL) + return; + + /* Disable the CPU Interrupt */ + writel(MITE_LCIMR2_CLR_CPU_IE, p + MITE_LCIMR2); + iounmap(p); +} + +/* SBS Technologies Inc. PMC-OCTPRO and P-OCTAL cards */ +static int +sbs_setup(struct serial_private *priv, const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar, offset = board->first_offset; + + bar = 0; + + if (idx < 4) { + /* first four channels map to 0, 0x100, 0x200, 0x300 */ + offset += idx * board->uart_offset; + } else if (idx < 8) { + /* last four channels map to 0x1000, 0x1100, 0x1200, 0x1300 */ + offset += idx * board->uart_offset + 0xC00; + } else /* we have only 8 ports on PMC-OCTALPRO */ + return 1; + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + +/* +* This does initialization for PMC OCTALPRO cards: +* maps the device memory, resets the UARTs (needed, bc +* if the module is removed and inserted again, the card +* is in the sleep mode) and enables global interrupt. +*/ + +/* global control register offset for SBS PMC-OctalPro */ +#define OCT_REG_CR_OFF 0x500 + +static int sbs_init(struct pci_dev *dev) +{ + u8 __iomem *p; + + p = pci_ioremap_bar(dev, 0); + + if (p == NULL) + return -ENOMEM; + /* Set bit-4 Control Register (UART RESET) in to reset the uarts */ + writeb(0x10, p + OCT_REG_CR_OFF); + udelay(50); + writeb(0x0, p + OCT_REG_CR_OFF); + + /* Set bit-2 (INTENABLE) of Control Register */ + writeb(0x4, p + OCT_REG_CR_OFF); + iounmap(p); + + return 0; +} + +/* + * Disables the global interrupt of PMC-OctalPro + */ + +static void sbs_exit(struct pci_dev *dev) +{ + u8 __iomem *p; + + p = pci_ioremap_bar(dev, 0); + /* FIXME: What if resource_len < OCT_REG_CR_OFF */ + if (p != NULL) + writeb(0, p + OCT_REG_CR_OFF); + iounmap(p); +} + +/* + * SIIG serial cards have an PCI interface chip which also controls + * the UART clocking frequency. Each UART can be clocked independently + * (except cards equipped with 4 UARTs) and initial clocking settings + * are stored in the EEPROM chip. It can cause problems because this + * version of serial driver doesn't support differently clocked UART's + * on single PCI card. To prevent this, initialization functions set + * high frequency clocking for all UART's on given card. It is safe (I + * hope) because it doesn't touch EEPROM settings to prevent conflicts + * with other OSes (like M$ DOS). 
+ * + * SIIG support added by Andrey Panin , 10/1999 + * + * There is two family of SIIG serial cards with different PCI + * interface chip and different configuration methods: + * - 10x cards have control registers in IO and/or memory space; + * - 20x cards have control registers in standard PCI configuration space. + * + * Note: all 10x cards have PCI device ids 0x10.. + * all 20x cards have PCI device ids 0x20.. + * + * There are also Quartet Serial cards which use Oxford Semiconductor + * 16954 quad UART PCI chip clocked by 18.432 MHz quartz. + * + * Note: some SIIG cards are probed by the parport_serial object. + */ + +#define PCI_DEVICE_ID_SIIG_1S_10x (PCI_DEVICE_ID_SIIG_1S_10x_550 & 0xfffc) +#define PCI_DEVICE_ID_SIIG_2S_10x (PCI_DEVICE_ID_SIIG_2S_10x_550 & 0xfff8) + +static int pci_siig10x_init(struct pci_dev *dev) +{ + u16 data; + void __iomem *p; + + switch (dev->device & 0xfff8) { + case PCI_DEVICE_ID_SIIG_1S_10x: /* 1S */ + data = 0xffdf; + break; + case PCI_DEVICE_ID_SIIG_2S_10x: /* 2S, 2S1P */ + data = 0xf7ff; + break; + default: /* 1S1P, 4S */ + data = 0xfffb; + break; + } + + p = ioremap(pci_resource_start(dev, 0), 0x80); + if (p == NULL) + return -ENOMEM; + + writew(readw(p + 0x28) & data, p + 0x28); + readw(p + 0x28); + iounmap(p); + return 0; +} + +#define PCI_DEVICE_ID_SIIG_2S_20x (PCI_DEVICE_ID_SIIG_2S_20x_550 & 0xfffc) +#define PCI_DEVICE_ID_SIIG_2S1P_20x (PCI_DEVICE_ID_SIIG_2S1P_20x_550 & 0xfffc) + +static int pci_siig20x_init(struct pci_dev *dev) +{ + u8 data; + + /* Change clock frequency for the first UART. */ + pci_read_config_byte(dev, 0x6f, &data); + pci_write_config_byte(dev, 0x6f, data & 0xef); + + /* If this card has 2 UART, we have to do the same with second UART. */ + if (((dev->device & 0xfffc) == PCI_DEVICE_ID_SIIG_2S_20x) || + ((dev->device & 0xfffc) == PCI_DEVICE_ID_SIIG_2S1P_20x)) { + pci_read_config_byte(dev, 0x73, &data); + pci_write_config_byte(dev, 0x73, data & 0xef); + } + return 0; +} + +static int pci_siig_init(struct pci_dev *dev) +{ + unsigned int type = dev->device & 0xff00; + + if (type == 0x1000) + return pci_siig10x_init(dev); + if (type == 0x2000) + return pci_siig20x_init(dev); + + moan_device("Unknown SIIG card", dev); + return -ENODEV; +} + +static int pci_siig_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar = FL_GET_BASE(board->flags) + idx, offset = 0; + + if (idx > 3) { + bar = 4; + offset = (idx - 4) * 8; + } + + return setup_port(priv, port, bar, offset, 0); +} + +/* + * Timedia has an explosion of boards, and to avoid the PCI table from + * growing *huge*, we use this function to collapse some 70 entries + * in the PCI table into one, for sanity's and compactness's sake. 
+ */ +static const unsigned short timedia_single_port[] = { + 0x4025, 0x4027, 0x4028, 0x5025, 0x5027, 0 +}; + +static const unsigned short timedia_dual_port[] = { + 0x0002, 0x4036, 0x4037, 0x4038, 0x4078, 0x4079, 0x4085, + 0x4088, 0x4089, 0x5037, 0x5078, 0x5079, 0x5085, 0x6079, + 0x7079, 0x8079, 0x8137, 0x8138, 0x8237, 0x8238, 0x9079, + 0x9137, 0x9138, 0x9237, 0x9238, 0xA079, 0xB079, 0xC079, + 0xD079, 0 +}; + +static const unsigned short timedia_quad_port[] = { + 0x4055, 0x4056, 0x4095, 0x4096, 0x5056, 0x8156, 0x8157, + 0x8256, 0x8257, 0x9056, 0x9156, 0x9157, 0x9158, 0x9159, + 0x9256, 0x9257, 0xA056, 0xA157, 0xA158, 0xA159, 0xB056, + 0xB157, 0 +}; + +static const unsigned short timedia_eight_port[] = { + 0x4065, 0x4066, 0x5065, 0x5066, 0x8166, 0x9066, 0x9166, + 0x9167, 0x9168, 0xA066, 0xA167, 0xA168, 0 +}; + +static const struct timedia_struct { + int num; + const unsigned short *ids; +} timedia_data[] = { + { 1, timedia_single_port }, + { 2, timedia_dual_port }, + { 4, timedia_quad_port }, + { 8, timedia_eight_port } +}; + +/* + * There are nearly 70 different Timedia/SUNIX PCI serial devices. Instead of + * listing them individually, this driver merely grabs them all with + * PCI_ANY_ID. Some of these devices, however, also feature a parallel port, + * and should be left free to be claimed by parport_serial instead. + */ +static int pci_timedia_probe(struct pci_dev *dev) +{ + /* + * Check the third digit of the subdevice ID + * (0,2,3,5,6: serial only -- 7,8,9: serial + parallel) + */ + if ((dev->subsystem_device & 0x00f0) >= 0x70) { + pci_info(dev, "ignoring Timedia subdevice %04x for parport_serial\n", + dev->subsystem_device); + return -ENODEV; + } + + return 0; +} + +static int pci_timedia_init(struct pci_dev *dev) +{ + const unsigned short *ids; + int i, j; + + for (i = 0; i < ARRAY_SIZE(timedia_data); i++) { + ids = timedia_data[i].ids; + for (j = 0; ids[j]; j++) + if (dev->subsystem_device == ids[j]) + return timedia_data[i].num; + } + return 0; +} + +/* + * Timedia/SUNIX uses a mixture of BARs and offsets + * Ugh, this is ugly as all hell --- TYT + */ +static int +pci_timedia_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar = 0, offset = board->first_offset; + + switch (idx) { + case 0: + bar = 0; + break; + case 1: + offset = board->uart_offset; + bar = 0; + break; + case 2: + bar = 1; + break; + case 3: + offset = board->uart_offset; + fallthrough; + case 4: /* BAR 2 */ + case 5: /* BAR 3 */ + case 6: /* BAR 4 */ + case 7: /* BAR 5 */ + bar = idx - 2; + } + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + +/* + * Some Titan cards are also a little weird + */ +static int +titan_400l_800l_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar, offset = board->first_offset; + + switch (idx) { + case 0: + bar = 1; + break; + case 1: + bar = 2; + break; + default: + bar = 4; + offset = (idx - 2) * board->uart_offset; + } + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + +static int pci_xircom_init(struct pci_dev *dev) +{ + msleep(100); + return 0; +} + +static int pci_ni8420_init(struct pci_dev *dev) +{ + void __iomem *p; + unsigned int bar = 0; + + if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { + moan_device("no memory in bar", dev); + return 0; + } + + p = pci_ioremap_bar(dev, bar); + if (p == NULL) + return -ENOMEM; + + /* Enable CPU Interrupt */ + 
writel(readl(p + NI8420_INT_ENABLE_REG) | NI8420_INT_ENABLE_BIT, + p + NI8420_INT_ENABLE_REG); + + iounmap(p); + return 0; +} + +#define MITE_IOWBSR1_WSIZE 0xa +#define MITE_IOWBSR1_WIN_OFFSET 0x800 +#define MITE_IOWBSR1_WENAB (1 << 7) +#define MITE_LCIMR1_IO_IE_0 (1 << 24) +#define MITE_LCIMR2_SET_CPU_IE (1 << 31) +#define MITE_IOWCR1_RAMSEL_MASK 0xfffffffe + +static int pci_ni8430_init(struct pci_dev *dev) +{ + void __iomem *p; + struct pci_bus_region region; + u32 device_window; + unsigned int bar = 0; + + if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { + moan_device("no memory in bar", dev); + return 0; + } + + p = pci_ioremap_bar(dev, bar); + if (p == NULL) + return -ENOMEM; + + /* + * Set device window address and size in BAR0, while acknowledging that + * the resource structure may contain a translated address that differs + * from the address the device responds to. + */ + pcibios_resource_to_bus(dev->bus, ®ion, &dev->resource[bar]); + device_window = ((region.start + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00) + | MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE; + writel(device_window, p + MITE_IOWBSR1); + + /* Set window access to go to RAMSEL IO address space */ + writel((readl(p + MITE_IOWCR1) & MITE_IOWCR1_RAMSEL_MASK), + p + MITE_IOWCR1); + + /* Enable IO Bus Interrupt 0 */ + writel(MITE_LCIMR1_IO_IE_0, p + MITE_LCIMR1); + + /* Enable CPU Interrupt */ + writel(MITE_LCIMR2_SET_CPU_IE, p + MITE_LCIMR2); + + iounmap(p); + return 0; +} + +/* UART Port Control Register */ +#define NI8430_PORTCON 0x0f +#define NI8430_PORTCON_TXVR_ENABLE (1 << 3) + +static int +pci_ni8430_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + struct pci_dev *dev = priv->dev; + void __iomem *p; + unsigned int bar, offset = board->first_offset; + + if (idx >= board->num_ports) + return 1; + + bar = FL_GET_BASE(board->flags); + offset += idx * board->uart_offset; + + p = pci_ioremap_bar(dev, bar); + if (!p) + return -ENOMEM; + + /* enable the transceiver */ + writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE, + p + offset + NI8430_PORTCON); + + iounmap(p); + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + +static int pci_netmos_9900_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar; + + if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) && + (priv->dev->subsystem_device & 0xff00) == 0x3000) { + /* netmos apparently orders BARs by datasheet layout, so serial + * ports get BARs 0 and 3 (or 1 and 4 for memmapped) + */ + bar = 3 * idx; + + return setup_port(priv, port, bar, 0, board->reg_shift); + } + + return pci_default_setup(priv, board, port, idx); +} + +/* the 99xx series comes with a range of device IDs and a variety + * of capabilities: + * + * 9900 has varying capabilities and can cascade to sub-controllers + * (cascading should be purely internal) + * 9904 is hardwired with 4 serial ports + * 9912 and 9922 are hardwired with 2 serial ports + */ +static int pci_netmos_9900_numports(struct pci_dev *dev) +{ + unsigned int c = dev->class; + unsigned int pi; + unsigned short sub_serports; + + pi = c & 0xff; + + if (pi == 2) + return 1; + + if ((pi == 0) && (dev->device == PCI_DEVICE_ID_NETMOS_9900)) { + /* two possibilities: 0x30ps encodes number of parallel and + * serial ports, or 0x1000 indicates *something*. 
This is not + * immediately obvious, since the 2s1p+4s configuration seems + * to offer all functionality on functions 0..2, while still + * advertising the same function 3 as the 4s+2s1p config. + */ + sub_serports = dev->subsystem_device & 0xf; + if (sub_serports > 0) + return sub_serports; + + pci_err(dev, "NetMos/Mostech serial driver ignoring port on ambiguous config.\n"); + return 0; + } + + moan_device("unknown NetMos/Mostech program interface", dev); + return 0; +} + +static int pci_netmos_init(struct pci_dev *dev) +{ + /* subdevice 0x00PS means
parallel, serial */ + unsigned int num_serial = dev->subsystem_device & 0xf; + + if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) || + (dev->device == PCI_DEVICE_ID_NETMOS_9865)) + return 0; + + if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && + dev->subsystem_device == 0x0299) + return 0; + + switch (dev->device) { /* FALLTHROUGH on all */ + case PCI_DEVICE_ID_NETMOS_9904: + case PCI_DEVICE_ID_NETMOS_9912: + case PCI_DEVICE_ID_NETMOS_9922: + case PCI_DEVICE_ID_NETMOS_9900: + num_serial = pci_netmos_9900_numports(dev); + break; + + default: + break; + } + + if (num_serial == 0) { + moan_device("unknown NetMos/Mostech device", dev); + return -ENODEV; + } + + return num_serial; +} + +/* + * These chips are available with optionally one parallel port and up to + * two serial ports. Unfortunately they all have the same product id. + * + * Basic configuration is done over a region of 32 I/O ports. The base + * ioport is called INTA or INTC, depending on docs/other drivers. + * + * The region of the 32 I/O ports is configured in POSIO0R... + */ + +/* registers */ +#define ITE_887x_MISCR 0x9c +#define ITE_887x_INTCBAR 0x78 +#define ITE_887x_UARTBAR 0x7c +#define ITE_887x_PS0BAR 0x10 +#define ITE_887x_POSIO0 0x60 + +/* I/O space size */ +#define ITE_887x_IOSIZE 32 +/* I/O space size (bits 26-24; 8 bytes = 011b) */ +#define ITE_887x_POSIO_IOSIZE_8 (3 << 24) +/* I/O space size (bits 26-24; 32 bytes = 101b) */ +#define ITE_887x_POSIO_IOSIZE_32 (5 << 24) +/* Decoding speed (1 = slow, 2 = medium, 3 = fast) */ +#define ITE_887x_POSIO_SPEED (3 << 29) +/* enable IO_Space bit */ +#define ITE_887x_POSIO_ENABLE (1 << 31) + +/* inta_addr are the configuration addresses of the ITE */ +static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0, 0x200, 0x280 }; +static int pci_ite887x_init(struct pci_dev *dev) +{ + int ret, i, type; + struct resource *iobase = NULL; + u32 miscr, uartbar, ioport; + + /* search for the base-ioport */ + for (i = 0; i < ARRAY_SIZE(inta_addr); i++) { + iobase = request_region(inta_addr[i], ITE_887x_IOSIZE, + "ite887x"); + if (iobase != NULL) { + /* write POSIO0R - speed | size | ioport */ + pci_write_config_dword(dev, ITE_887x_POSIO0, + ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | + ITE_887x_POSIO_IOSIZE_32 | inta_addr[i]); + /* write INTCBAR - ioport */ + pci_write_config_dword(dev, ITE_887x_INTCBAR, + inta_addr[i]); + ret = inb(inta_addr[i]); + if (ret != 0xff) { + /* ioport connected */ + break; + } + release_region(iobase->start, ITE_887x_IOSIZE); + } + } + + if (i == ARRAY_SIZE(inta_addr)) { + pci_err(dev, "could not find iobase\n"); + return -ENODEV; + } + + /* start of undocumented type checking (see parport_pc.c) */ + type = inb(iobase->start + 0x18) & 0x0f; + + switch (type) { + case 0x2: /* ITE8871 (1P) */ + case 0xa: /* ITE8875 (1P) */ + ret = 0; + break; + case 0xe: /* ITE8872 (2S1P) */ + ret = 2; + break; + case 0x6: /* ITE8873 (1S) */ + ret = 1; + break; + case 0x8: /* ITE8874 (2S) */ + ret = 2; + break; + default: + moan_device("Unknown ITE887x", dev); + ret = -ENODEV; + } + + /* configure all serial ports */ + for (i = 0; i < ret; i++) { + /* read the I/O port from the device */ + pci_read_config_dword(dev, ITE_887x_PS0BAR + (0x4 * (i + 1)), + &ioport); + ioport &= 0x0000FF00; /* the actual base address */ + pci_write_config_dword(dev, ITE_887x_POSIO0 + (0x4 * (i + 1)), + ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | + ITE_887x_POSIO_IOSIZE_8 | ioport); + + /* write the ioport to the UARTBAR */ + pci_read_config_dword(dev, ITE_887x_UARTBAR, &uartbar); + 
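+ /*
+ * UARTBAR packs one 16-bit I/O base per UART; clear and rewrite only
+ * the halfword that belongs to port i before writing the register back.
+ */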
uartbar &= ~(0xffff << (16 * i)); /* clear half the reg */ + uartbar |= (ioport << (16 * i)); /* set the ioport */ + pci_write_config_dword(dev, ITE_887x_UARTBAR, uartbar); + + /* get current config */ + pci_read_config_dword(dev, ITE_887x_MISCR, &miscr); + /* disable interrupts (UARTx_Routing[3:0]) */ + miscr &= ~(0xf << (12 - 4 * i)); + /* activate the UART (UARTx_En) */ + miscr |= 1 << (23 - i); + /* write new config with activated UART */ + pci_write_config_dword(dev, ITE_887x_MISCR, miscr); + } + + if (ret <= 0) { + /* the device has no UARTs if we get here */ + release_region(iobase->start, ITE_887x_IOSIZE); + } + + return ret; +} + +static void pci_ite887x_exit(struct pci_dev *dev) +{ + u32 ioport; + /* the ioport is bit 0-15 in POSIO0R */ + pci_read_config_dword(dev, ITE_887x_POSIO0, &ioport); + ioport &= 0xffff; + release_region(ioport, ITE_887x_IOSIZE); +} + +/* + * Oxford Semiconductor Inc. + * Check if an OxSemi device is part of the Tornado range of devices. + */ +#define PCI_VENDOR_ID_ENDRUN 0x7401 +#define PCI_DEVICE_ID_ENDRUN_1588 0xe100 + +static bool pci_oxsemi_tornado_p(struct pci_dev *dev) +{ + /* OxSemi Tornado devices are all 0xCxxx */ + if (dev->vendor == PCI_VENDOR_ID_OXSEMI && + (dev->device & 0xf000) != 0xc000) + return false; + + /* EndRun devices are all 0xExxx */ + if (dev->vendor == PCI_VENDOR_ID_ENDRUN && + (dev->device & 0xf000) != 0xe000) + return false; + + return true; +} + +/* + * Determine the number of ports available on a Tornado device. + */ +static int pci_oxsemi_tornado_init(struct pci_dev *dev) +{ + u8 __iomem *p; + unsigned long deviceID; + unsigned int number_uarts = 0; + + if (!pci_oxsemi_tornado_p(dev)) + return 0; + + p = pci_iomap(dev, 0, 5); + if (p == NULL) + return -ENOMEM; + + deviceID = ioread32(p); + /* Tornado device */ + if (deviceID == 0x07000200) { + number_uarts = ioread8(p + 4); + pci_dbg(dev, "%d ports detected on %s PCI Express device\n", + number_uarts, + dev->vendor == PCI_VENDOR_ID_ENDRUN ? + "EndRun" : "Oxford"); + } + pci_iounmap(dev, p); + return number_uarts; +} + +/* Tornado-specific constants for the TCR and CPR registers; see below. */ +#define OXSEMI_TORNADO_TCR_MASK 0xf +#define OXSEMI_TORNADO_CPR_MASK 0x1ff +#define OXSEMI_TORNADO_CPR_MIN 0x008 +#define OXSEMI_TORNADO_CPR_DEF 0x10f + +/* + * Determine the oversampling rate, the clock prescaler, and the clock + * divisor for the requested baud rate. The clock rate is 62.5 MHz, + * which is four times the baud base, and the prescaler increments in + * steps of 1/8. Therefore to make calculations on integers we need + * to use a scaled clock rate, which is the baud base multiplied by 32 + * (or our assumed UART clock rate multiplied by 2). + * + * The allowed oversampling rates are from 4 up to 16 inclusive (values + * from 0 to 3 inclusive map to 16). Likewise the clock prescaler allows + * values between 1.000 and 63.875 inclusive (operation for values from + * 0.000 to 0.875 has not been specified). The clock divisor is the usual + * unsigned 16-bit integer. + * + * For the most accurate baud rate we use a table of predetermined + * oversampling rates and clock prescalers that records all possible + * products of the two parameters in the range from 4 up to 255 inclusive, + * and additionally 335 for the 1500000bps rate, with the prescaler scaled + * by 8. The table is sorted by the decreasing value of the oversampling + * rate and ties are resolved by sorting by the decreasing value of the + * product. This way preference is given to higher oversampling rates. 
+ * + * We iterate over the table and choose the product of an oversampling + * rate and a clock prescaler that gives the lowest integer division + * result deviation, or if an exact integer divider is found we stop + * looking for it right away. We do some fixup if the resulting clock + * divisor required would be out of its unsigned 16-bit integer range. + * + * Finally we abuse the supposed fractional part returned to encode the + * 4-bit value of the oversampling rate and the 9-bit value of the clock + * prescaler which will end up in the TCR and CPR/CPR2 registers. + */ +static unsigned int pci_oxsemi_tornado_get_divisor(struct uart_port *port, + unsigned int baud, + unsigned int *frac) +{ + static u8 p[][2] = { + { 16, 14, }, { 16, 13, }, { 16, 12, }, { 16, 11, }, + { 16, 10, }, { 16, 9, }, { 16, 8, }, { 15, 17, }, + { 15, 16, }, { 15, 15, }, { 15, 14, }, { 15, 13, }, + { 15, 12, }, { 15, 11, }, { 15, 10, }, { 15, 9, }, + { 15, 8, }, { 14, 18, }, { 14, 17, }, { 14, 14, }, + { 14, 13, }, { 14, 12, }, { 14, 11, }, { 14, 10, }, + { 14, 9, }, { 14, 8, }, { 13, 19, }, { 13, 18, }, + { 13, 17, }, { 13, 13, }, { 13, 12, }, { 13, 11, }, + { 13, 10, }, { 13, 9, }, { 13, 8, }, { 12, 19, }, + { 12, 18, }, { 12, 17, }, { 12, 11, }, { 12, 9, }, + { 12, 8, }, { 11, 23, }, { 11, 22, }, { 11, 21, }, + { 11, 20, }, { 11, 19, }, { 11, 18, }, { 11, 17, }, + { 11, 11, }, { 11, 10, }, { 11, 9, }, { 11, 8, }, + { 10, 25, }, { 10, 23, }, { 10, 20, }, { 10, 19, }, + { 10, 17, }, { 10, 10, }, { 10, 9, }, { 10, 8, }, + { 9, 27, }, { 9, 23, }, { 9, 21, }, { 9, 19, }, + { 9, 18, }, { 9, 17, }, { 9, 9, }, { 9, 8, }, + { 8, 31, }, { 8, 29, }, { 8, 23, }, { 8, 19, }, + { 8, 17, }, { 8, 8, }, { 7, 35, }, { 7, 31, }, + { 7, 29, }, { 7, 25, }, { 7, 23, }, { 7, 21, }, + { 7, 19, }, { 7, 17, }, { 7, 15, }, { 7, 14, }, + { 7, 13, }, { 7, 12, }, { 7, 11, }, { 7, 10, }, + { 7, 9, }, { 7, 8, }, { 6, 41, }, { 6, 37, }, + { 6, 31, }, { 6, 29, }, { 6, 23, }, { 6, 19, }, + { 6, 17, }, { 6, 13, }, { 6, 11, }, { 6, 10, }, + { 6, 9, }, { 6, 8, }, { 5, 67, }, { 5, 47, }, + { 5, 43, }, { 5, 41, }, { 5, 37, }, { 5, 31, }, + { 5, 29, }, { 5, 25, }, { 5, 23, }, { 5, 19, }, + { 5, 17, }, { 5, 15, }, { 5, 13, }, { 5, 11, }, + { 5, 10, }, { 5, 9, }, { 5, 8, }, { 4, 61, }, + { 4, 59, }, { 4, 53, }, { 4, 47, }, { 4, 43, }, + { 4, 41, }, { 4, 37, }, { 4, 31, }, { 4, 29, }, + { 4, 23, }, { 4, 19, }, { 4, 17, }, { 4, 13, }, + { 4, 9, }, { 4, 8, }, + }; + /* Scale the quotient for comparison to get the fractional part. */ + const unsigned int quot_scale = 65536; + unsigned int sclk = port->uartclk * 2; + unsigned int sdiv = DIV_ROUND_CLOSEST(sclk, baud); + unsigned int best_squot; + unsigned int squot; + unsigned int quot; + u16 cpr; + u8 tcr; + int i; + + /* Old custom speed handling. 
*/ + if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) { + unsigned int cust_div = port->custom_divisor; + + quot = cust_div & UART_DIV_MAX; + tcr = (cust_div >> 16) & OXSEMI_TORNADO_TCR_MASK; + cpr = (cust_div >> 20) & OXSEMI_TORNADO_CPR_MASK; + if (cpr < OXSEMI_TORNADO_CPR_MIN) + cpr = OXSEMI_TORNADO_CPR_DEF; + } else { + best_squot = quot_scale; + for (i = 0; i < ARRAY_SIZE(p); i++) { + unsigned int spre; + unsigned int srem; + u8 cp; + u8 tc; + + tc = p[i][0]; + cp = p[i][1]; + spre = tc * cp; + + srem = sdiv % spre; + if (srem > spre / 2) + srem = spre - srem; + squot = DIV_ROUND_CLOSEST(srem * quot_scale, spre); + + if (srem == 0) { + tcr = tc; + cpr = cp; + quot = sdiv / spre; + break; + } else if (squot < best_squot) { + best_squot = squot; + tcr = tc; + cpr = cp; + quot = DIV_ROUND_CLOSEST(sdiv, spre); + } + } + while (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1 && + quot % 2 == 0) { + quot >>= 1; + tcr <<= 1; + } + while (quot > UART_DIV_MAX) { + if (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1) { + quot >>= 1; + tcr <<= 1; + } else if (cpr <= OXSEMI_TORNADO_CPR_MASK >> 1) { + quot >>= 1; + cpr <<= 1; + } else { + quot = quot * cpr / OXSEMI_TORNADO_CPR_MASK; + cpr = OXSEMI_TORNADO_CPR_MASK; + } + } + } + + *frac = (cpr << 8) | (tcr & OXSEMI_TORNADO_TCR_MASK); + return quot; +} + +/* + * Set the oversampling rate in the transmitter clock cycle register (TCR), + * the clock prescaler in the clock prescaler register (CPR and CPR2), and + * the clock divisor in the divisor latch (DLL and DLM). Note that for + * backwards compatibility any write to CPR clears CPR2 and therefore CPR + * has to be written first, followed by CPR2, which occupies the location + * of CKS used with earlier UART designs. + */ +static void pci_oxsemi_tornado_set_divisor(struct uart_port *port, + unsigned int baud, + unsigned int quot, + unsigned int quot_frac) +{ + struct uart_8250_port *up = up_to_u8250p(port); + u8 cpr2 = quot_frac >> 16; + u8 cpr = quot_frac >> 8; + u8 tcr = quot_frac; + + serial_icr_write(up, UART_TCR, tcr); + serial_icr_write(up, UART_CPR, cpr); + serial_icr_write(up, UART_CKS, cpr2); + serial8250_do_set_divisor(port, baud, quot, 0); +} + +/* + * For Tornado devices we force MCR[7] set for the Divide-by-M N/8 baud rate + * generator prescaler (CPR and CPR2). Otherwise no prescaler would be used. + */ +static void pci_oxsemi_tornado_set_mctrl(struct uart_port *port, + unsigned int mctrl) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + up->mcr |= UART_MCR_CLKSEL; + serial8250_do_set_mctrl(port, mctrl); +} + +/* + * We require EFR features for clock programming, so set UPF_FULL_PROBE + * for full probing regardless of CONFIG_SERIAL_8250_16550A_VARIANTS setting. 
+ */ +static int pci_oxsemi_tornado_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *up, int idx) +{ + struct pci_dev *dev = priv->dev; + + if (pci_oxsemi_tornado_p(dev)) { + up->port.flags |= UPF_FULL_PROBE; + up->port.get_divisor = pci_oxsemi_tornado_get_divisor; + up->port.set_divisor = pci_oxsemi_tornado_set_divisor; + up->port.set_mctrl = pci_oxsemi_tornado_set_mctrl; + } + + return pci_default_setup(priv, board, up, idx); +} + +#define QPCR_TEST_FOR1 0x3F +#define QPCR_TEST_GET1 0x00 +#define QPCR_TEST_FOR2 0x40 +#define QPCR_TEST_GET2 0x40 +#define QPCR_TEST_FOR3 0x80 +#define QPCR_TEST_GET3 0x40 +#define QPCR_TEST_FOR4 0xC0 +#define QPCR_TEST_GET4 0x80 + +#define QOPR_CLOCK_X1 0x0000 +#define QOPR_CLOCK_X2 0x0001 +#define QOPR_CLOCK_X4 0x0002 +#define QOPR_CLOCK_X8 0x0003 +#define QOPR_CLOCK_RATE_MASK 0x0003 + +/* Quatech devices have their own extra interface features */ +static struct pci_device_id quatech_cards[] = { + { PCI_DEVICE_DATA(QUATECH, QSC100, 1) }, + { PCI_DEVICE_DATA(QUATECH, DSC100, 1) }, + { PCI_DEVICE_DATA(QUATECH, DSC100E, 0) }, + { PCI_DEVICE_DATA(QUATECH, DSC200, 1) }, + { PCI_DEVICE_DATA(QUATECH, DSC200E, 0) }, + { PCI_DEVICE_DATA(QUATECH, ESC100D, 1) }, + { PCI_DEVICE_DATA(QUATECH, ESC100M, 1) }, + { PCI_DEVICE_DATA(QUATECH, QSCP100, 1) }, + { PCI_DEVICE_DATA(QUATECH, DSCP100, 1) }, + { PCI_DEVICE_DATA(QUATECH, QSCP200, 1) }, + { PCI_DEVICE_DATA(QUATECH, DSCP200, 1) }, + { PCI_DEVICE_DATA(QUATECH, ESCLP100, 0) }, + { PCI_DEVICE_DATA(QUATECH, QSCLP100, 0) }, + { PCI_DEVICE_DATA(QUATECH, DSCLP100, 0) }, + { PCI_DEVICE_DATA(QUATECH, SSCLP100, 0) }, + { PCI_DEVICE_DATA(QUATECH, QSCLP200, 0) }, + { PCI_DEVICE_DATA(QUATECH, DSCLP200, 0) }, + { PCI_DEVICE_DATA(QUATECH, SSCLP200, 0) }, + { PCI_DEVICE_DATA(QUATECH, SPPXP_100, 0) }, + { 0, } +}; + +static int pci_quatech_rqopr(struct uart_8250_port *port) +{ + unsigned long base = port->port.iobase; + u8 LCR, val; + + LCR = inb(base + UART_LCR); + outb(0xBF, base + UART_LCR); + val = inb(base + UART_SCR); + outb(LCR, base + UART_LCR); + return val; +} + +static void pci_quatech_wqopr(struct uart_8250_port *port, u8 qopr) +{ + unsigned long base = port->port.iobase; + u8 LCR; + + LCR = inb(base + UART_LCR); + outb(0xBF, base + UART_LCR); + inb(base + UART_SCR); + outb(qopr, base + UART_SCR); + outb(LCR, base + UART_LCR); +} + +static int pci_quatech_rqmcr(struct uart_8250_port *port) +{ + unsigned long base = port->port.iobase; + u8 LCR, val, qmcr; + + LCR = inb(base + UART_LCR); + outb(0xBF, base + UART_LCR); + val = inb(base + UART_SCR); + outb(val | 0x10, base + UART_SCR); + qmcr = inb(base + UART_MCR); + outb(val, base + UART_SCR); + outb(LCR, base + UART_LCR); + + return qmcr; +} + +static void pci_quatech_wqmcr(struct uart_8250_port *port, u8 qmcr) +{ + unsigned long base = port->port.iobase; + u8 LCR, val; + + LCR = inb(base + UART_LCR); + outb(0xBF, base + UART_LCR); + val = inb(base + UART_SCR); + outb(val | 0x10, base + UART_SCR); + outb(qmcr, base + UART_MCR); + outb(val, base + UART_SCR); + outb(LCR, base + UART_LCR); +} + +static int pci_quatech_has_qmcr(struct uart_8250_port *port) +{ + unsigned long base = port->port.iobase; + u8 LCR, val; + + LCR = inb(base + UART_LCR); + outb(0xBF, base + UART_LCR); + val = inb(base + UART_SCR); + if (val & 0x20) { + outb(0x80, UART_LCR); + if (!(inb(UART_SCR) & 0x20)) { + outb(LCR, base + UART_LCR); + return 1; + } + } + return 0; +} + +static int pci_quatech_test(struct uart_8250_port *port) +{ + u8 reg, qopr; + + qopr 
= pci_quatech_rqopr(port); + pci_quatech_wqopr(port, qopr & QPCR_TEST_FOR1); + reg = pci_quatech_rqopr(port) & 0xC0; + if (reg != QPCR_TEST_GET1) + return -EINVAL; + pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR2); + reg = pci_quatech_rqopr(port) & 0xC0; + if (reg != QPCR_TEST_GET2) + return -EINVAL; + pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR3); + reg = pci_quatech_rqopr(port) & 0xC0; + if (reg != QPCR_TEST_GET3) + return -EINVAL; + pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR4); + reg = pci_quatech_rqopr(port) & 0xC0; + if (reg != QPCR_TEST_GET4) + return -EINVAL; + + pci_quatech_wqopr(port, qopr); + return 0; +} + +static int pci_quatech_clock(struct uart_8250_port *port) +{ + u8 qopr, reg, set; + unsigned long clock; + + if (pci_quatech_test(port) < 0) + return 1843200; + + qopr = pci_quatech_rqopr(port); + + pci_quatech_wqopr(port, qopr & ~QOPR_CLOCK_X8); + reg = pci_quatech_rqopr(port); + if (reg & QOPR_CLOCK_X8) { + clock = 1843200; + goto out; + } + pci_quatech_wqopr(port, qopr | QOPR_CLOCK_X8); + reg = pci_quatech_rqopr(port); + if (!(reg & QOPR_CLOCK_X8)) { + clock = 1843200; + goto out; + } + reg &= QOPR_CLOCK_X8; + if (reg == QOPR_CLOCK_X2) { + clock = 3685400; + set = QOPR_CLOCK_X2; + } else if (reg == QOPR_CLOCK_X4) { + clock = 7372800; + set = QOPR_CLOCK_X4; + } else if (reg == QOPR_CLOCK_X8) { + clock = 14745600; + set = QOPR_CLOCK_X8; + } else { + clock = 1843200; + set = QOPR_CLOCK_X1; + } + qopr &= ~QOPR_CLOCK_RATE_MASK; + qopr |= set; + +out: + pci_quatech_wqopr(port, qopr); + return clock; +} + +static int pci_quatech_rs422(struct uart_8250_port *port) +{ + u8 qmcr; + int rs422 = 0; + + if (!pci_quatech_has_qmcr(port)) + return 0; + qmcr = pci_quatech_rqmcr(port); + pci_quatech_wqmcr(port, 0xFF); + if (pci_quatech_rqmcr(port)) + rs422 = 1; + pci_quatech_wqmcr(port, qmcr); + return rs422; +} + +static int pci_quatech_init(struct pci_dev *dev) +{ + const struct pci_device_id *match; + bool amcc = false; + + match = pci_match_id(quatech_cards, dev); + if (match) + amcc = match->driver_data; + else + pci_err(dev, "unknown port type '0x%04X'.\n", dev->device); + + if (amcc) { + unsigned long base = pci_resource_start(dev, 0); + if (base) { + u32 tmp; + + outl(inl(base + 0x38) | 0x00002000, base + 0x38); + tmp = inl(base + 0x3c); + outl(tmp | 0x01000000, base + 0x3c); + outl(tmp & ~0x01000000, base + 0x3c); + } + } + return 0; +} + +static int pci_quatech_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + /* Needed by pci_quatech calls below */ + port->port.iobase = pci_resource_start(priv->dev, FL_GET_BASE(board->flags)); + /* Set up the clocking */ + port->port.uartclk = pci_quatech_clock(port); + /* For now just warn about RS422 */ + if (pci_quatech_rs422(port)) + pci_warn(priv->dev, "software control of RS422 features not currently supported.\n"); + return pci_default_setup(priv, board, port, idx); +} + +static int pci_default_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar, offset = board->first_offset, maxnr; + + bar = FL_GET_BASE(board->flags); + if (board->flags & FL_BASE_BARS) + bar += idx; + else + offset += idx * board->uart_offset; + + maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >> + (board->reg_shift + 3); + + if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr) + return 1; + + return setup_port(priv, port, bar, offset, 
board->reg_shift); +} + +static int +ce4100_serial_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + int ret; + + ret = setup_port(priv, port, idx, 0, board->reg_shift); + port->port.iotype = UPIO_MEM32; + port->port.type = PORT_XSCALE; + port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); + port->port.regshift = 2; + + return ret; +} + +static int +pci_omegapci_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + return setup_port(priv, port, 2, idx * 8, 0); +} + +static int +pci_brcm_trumanage_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + int ret = pci_default_setup(priv, board, port, idx); + + port->port.type = PORT_BRCM_TRUMANAGE; + port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); + return ret; +} + +/* RTS will control by MCR if this bit is 0 */ +#define FINTEK_RTS_CONTROL_BY_HW BIT(4) +/* only worked with FINTEK_RTS_CONTROL_BY_HW on */ +#define FINTEK_RTS_INVERT BIT(5) + +/* We should do proper H/W transceiver setting before change to RS485 mode */ +static int pci_fintek_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct pci_dev *pci_dev = to_pci_dev(port->dev); + u8 setting; + u8 *index = (u8 *) port->private_data; + + pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting); + + if (rs485->flags & SER_RS485_ENABLED) { + /* Enable RTS H/W control mode */ + setting |= FINTEK_RTS_CONTROL_BY_HW; + + if (rs485->flags & SER_RS485_RTS_ON_SEND) { + /* RTS driving high on TX */ + setting &= ~FINTEK_RTS_INVERT; + } else { + /* RTS driving low on TX */ + setting |= FINTEK_RTS_INVERT; + } + } else { + /* Disable RTS H/W control mode */ + setting &= ~(FINTEK_RTS_CONTROL_BY_HW | FINTEK_RTS_INVERT); + } + + pci_write_config_byte(pci_dev, 0x40 + 8 * *index + 7, setting); + + return 0; +} + +static const struct serial_rs485 pci_fintek_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND, + /* F81504/508/512 does not support RTS delay before or after send */ +}; + +static int pci_fintek_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + struct pci_dev *pdev = priv->dev; + u8 *data; + u8 config_base; + u16 iobase; + + config_base = 0x40 + 0x08 * idx; + + /* Get the io address from configuration space */ + pci_read_config_word(pdev, config_base + 4, &iobase); + + pci_dbg(pdev, "idx=%d iobase=0x%x", idx, iobase); + + port->port.iotype = UPIO_PORT; + port->port.iobase = iobase; + port->port.rs485_config = pci_fintek_rs485_config; + port->port.rs485_supported = pci_fintek_rs485_supported; + + data = devm_kzalloc(&pdev->dev, sizeof(u8), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* preserve index in PCI configuration space */ + *data = idx; + port->port.private_data = data; + + return 0; +} + +static int pci_fintek_init(struct pci_dev *dev) +{ + unsigned long iobase; + u32 max_port, i; + resource_size_t bar_data[3]; + u8 config_base; + struct serial_private *priv = pci_get_drvdata(dev); + + if (!(pci_resource_flags(dev, 5) & IORESOURCE_IO) || + !(pci_resource_flags(dev, 4) & IORESOURCE_IO) || + !(pci_resource_flags(dev, 3) & IORESOURCE_IO)) + return -ENODEV; + + switch (dev->device) { + case 0x1104: /* 4 ports */ + case 0x1108: /* 8 ports */ + max_port = dev->device & 0xff; + break; + case 0x1112: /* 12 
ports */ + max_port = 12; + break; + default: + return -EINVAL; + } + + /* Get the io address dispatch from the BIOS */ + bar_data[0] = pci_resource_start(dev, 5); + bar_data[1] = pci_resource_start(dev, 4); + bar_data[2] = pci_resource_start(dev, 3); + + for (i = 0; i < max_port; ++i) { + /* UART0 configuration offset start from 0x40 */ + config_base = 0x40 + 0x08 * i; + + /* Calculate Real IO Port */ + iobase = (bar_data[i / 4] & 0xffffffe0) + (i % 4) * 8; + + /* Enable UART I/O port */ + pci_write_config_byte(dev, config_base + 0x00, 0x01); + + /* Select 128-byte FIFO and 8x FIFO threshold */ + pci_write_config_byte(dev, config_base + 0x01, 0x33); + + /* LSB UART */ + pci_write_config_byte(dev, config_base + 0x04, + (u8)(iobase & 0xff)); + + /* MSB UART */ + pci_write_config_byte(dev, config_base + 0x05, + (u8)((iobase & 0xff00) >> 8)); + + pci_write_config_byte(dev, config_base + 0x06, dev->irq); + + if (!priv) { + /* First init without port data + * force init to RS232 Mode + */ + pci_write_config_byte(dev, config_base + 0x07, 0x01); + } + } + + return max_port; +} + +static void f815xxa_mem_serial_out(struct uart_port *p, int offset, int value) +{ + struct f815xxa_data *data = p->private_data; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + writeb(value, p->membase + offset); + readb(p->membase + UART_SCR); /* Dummy read for flush pcie tx queue */ + spin_unlock_irqrestore(&data->lock, flags); +} + +static int pci_fintek_f815xxa_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + struct pci_dev *pdev = priv->dev; + struct f815xxa_data *data; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->idx = idx; + spin_lock_init(&data->lock); + + port->port.private_data = data; + port->port.iotype = UPIO_MEM; + port->port.flags |= UPF_IOREMAP; + port->port.mapbase = pci_resource_start(pdev, 0) + 8 * idx; + port->port.serial_out = f815xxa_mem_serial_out; + + return 0; +} + +static int pci_fintek_f815xxa_init(struct pci_dev *dev) +{ + u32 max_port, i; + int config_base; + + if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM)) + return -ENODEV; + + switch (dev->device) { + case 0x1204: /* 4 ports */ + case 0x1208: /* 8 ports */ + max_port = dev->device & 0xff; + break; + case 0x1212: /* 12 ports */ + max_port = 12; + break; + default: + return -EINVAL; + } + + /* Set to mmio decode */ + pci_write_config_byte(dev, 0x209, 0x40); + + for (i = 0; i < max_port; ++i) { + /* UART0 configuration offset start from 0x2A0 */ + config_base = 0x2A0 + 0x08 * i; + + /* Select 128-byte FIFO and 8x FIFO threshold */ + pci_write_config_byte(dev, config_base + 0x01, 0x33); + + /* Enable UART I/O port */ + pci_write_config_byte(dev, config_base + 0, 0x01); + } + + return max_port; +} + +static int skip_tx_en_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + port->port.quirks |= UPQ_NO_TXEN_TEST; + pci_dbg(priv->dev, + "serial8250: skipping TxEn test for device [%04x:%04x] subsystem [%04x:%04x]\n", + priv->dev->vendor, priv->dev->device, + priv->dev->subsystem_vendor, priv->dev->subsystem_device); + + return pci_default_setup(priv, board, port, idx); +} + +static void kt_handle_break(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + /* + * On receipt of a BI, serial device in Intel ME (Intel + * management engine) needs to have its fifos cleared for sane + * SOL (Serial Over Lan) output. 
+ */ + serial8250_clear_and_reinit_fifos(up); +} + +static unsigned int kt_serial_in(struct uart_port *p, int offset) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int val; + + /* + * When the Intel ME (management engine) gets reset its serial + * port registers could return 0 momentarily. Functions like + * serial8250_console_write, read and save the IER, perform + * some operation and then restore it. In order to avoid + * setting IER register inadvertently to 0, if the value read + * is 0, double check with ier value in uart_8250_port and use + * that instead. up->ier should be the same value as what is + * currently configured. + */ + val = inb(p->iobase + offset); + if (offset == UART_IER) { + if (val == 0) + val = up->ier; + } + return val; +} + +static int kt_serial_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + port->port.flags |= UPF_BUG_THRE; + port->port.serial_in = kt_serial_in; + port->port.handle_break = kt_handle_break; + return skip_tx_en_setup(priv, board, port, idx); +} + +static int pci_eg20t_init(struct pci_dev *dev) +{ +#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE) + return -ENODEV; +#else + return 0; +#endif +} + +static int +pci_wch_ch353_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + port->port.flags |= UPF_FIXED_TYPE; + port->port.type = PORT_16550A; + return pci_default_setup(priv, board, port, idx); +} + +static int +pci_wch_ch355_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + port->port.flags |= UPF_FIXED_TYPE; + port->port.type = PORT_16550A; + return pci_default_setup(priv, board, port, idx); +} + +static int +pci_wch_ch38x_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + port->port.flags |= UPF_FIXED_TYPE; + port->port.type = PORT_16850; + return pci_default_setup(priv, board, port, idx); +} + + +#define CH384_XINT_ENABLE_REG 0xEB +#define CH384_XINT_ENABLE_BIT 0x02 + +static int pci_wch_ch38x_init(struct pci_dev *dev) +{ + int max_port; + unsigned long iobase; + + + switch (dev->device) { + case 0x3853: /* 8 ports */ + max_port = 8; + break; + default: + return -EINVAL; + } + + iobase = pci_resource_start(dev, 0); + outb(CH384_XINT_ENABLE_BIT, iobase + CH384_XINT_ENABLE_REG); + + return max_port; +} + +static void pci_wch_ch38x_exit(struct pci_dev *dev) +{ + unsigned long iobase; + + iobase = pci_resource_start(dev, 0); + outb(0x0, iobase + CH384_XINT_ENABLE_REG); +} + + +static int +pci_sunix_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + int bar; + int offset; + + port->port.flags |= UPF_FIXED_TYPE; + port->port.type = PORT_SUNIX; + + if (idx < 4) { + bar = 0; + offset = idx * board->uart_offset; + } else { + bar = 1; + idx -= 4; + idx = div_s64_rem(idx, 4, &offset); + offset = idx * 64 + offset * board->uart_offset; + } + + return setup_port(priv, port, bar, offset, 0); +} + +static int +pci_moxa_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar = FL_GET_BASE(board->flags); + int offset; + + if (board->num_ports == 4 && idx == 3) + offset = 7 * board->uart_offset; + else + offset = idx * board->uart_offset; + + return setup_port(priv, port, bar, offset, 0); +} + +#define 
PCI_VENDOR_ID_SBSMODULARIO 0x124B +#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B +#define PCI_DEVICE_ID_OCTPRO 0x0001 +#define PCI_SUBDEVICE_ID_OCTPRO232 0x0108 +#define PCI_SUBDEVICE_ID_OCTPRO422 0x0208 +#define PCI_SUBDEVICE_ID_POCTAL232 0x0308 +#define PCI_SUBDEVICE_ID_POCTAL422 0x0408 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530 +#define PCI_VENDOR_ID_ADVANTECH 0x13fe +#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66 +#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600 +#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611 +#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620 +#define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618 +#define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618 +#define PCI_DEVICE_ID_TITAN_200I 0x8028 +#define PCI_DEVICE_ID_TITAN_400I 0x8048 +#define PCI_DEVICE_ID_TITAN_800I 0x8088 +#define PCI_DEVICE_ID_TITAN_800EH 0xA007 +#define PCI_DEVICE_ID_TITAN_800EHB 0xA008 +#define PCI_DEVICE_ID_TITAN_400EH 0xA009 +#define PCI_DEVICE_ID_TITAN_100E 0xA010 +#define PCI_DEVICE_ID_TITAN_200E 0xA012 +#define PCI_DEVICE_ID_TITAN_400E 0xA013 +#define PCI_DEVICE_ID_TITAN_800E 0xA014 +#define PCI_DEVICE_ID_TITAN_200EI 0xA016 +#define PCI_DEVICE_ID_TITAN_200EISI 0xA017 +#define PCI_DEVICE_ID_TITAN_200V3 0xA306 +#define PCI_DEVICE_ID_TITAN_400V3 0xA310 +#define PCI_DEVICE_ID_TITAN_410V3 0xA312 +#define PCI_DEVICE_ID_TITAN_800V3 0xA314 +#define PCI_DEVICE_ID_TITAN_800V3B 0xA315 +#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538 +#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6 +#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001 +#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d +#define PCI_VENDOR_ID_WCH 0x4348 +#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253 +#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 +#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 +#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053 +#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 +#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173 +#define PCI_VENDOR_ID_AGESTAR 0x5372 +#define PCI_DEVICE_ID_AGESTAR_9375 0x6872 +#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a +#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e + +#define PCIE_VENDOR_ID_WCH 0x1c00 +#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 +#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 +#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853 +#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253 + +#define PCI_DEVICE_ID_MOXA_CP102E 0x1024 +#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025 +#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045 +#define PCI_DEVICE_ID_MOXA_CP114EL 0x1144 +#define PCI_DEVICE_ID_MOXA_CP116E_A_A 0x1160 +#define PCI_DEVICE_ID_MOXA_CP116E_A_B 0x1161 +#define PCI_DEVICE_ID_MOXA_CP118EL_A 0x1182 +#define PCI_DEVICE_ID_MOXA_CP118E_A_I 0x1183 +#define PCI_DEVICE_ID_MOXA_CP132EL 0x1322 +#define PCI_DEVICE_ID_MOXA_CP134EL_A 0x1342 +#define PCI_DEVICE_ID_MOXA_CP138E_A 0x1381 +#define PCI_DEVICE_ID_MOXA_CP168EL_A 0x1683 + +/* Unknown vendors/cards - this should not be in linux/pci_ids.h */ +#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 +#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 + +/* + * Master list of serial port init/setup/exit quirks. + * This does not describe the general nature of the port. + * (ie, baud base, number and location of ports, etc) + * + * This list is ordered alphabetically by vendor then device. + * Specific entries must come before more generic entries. 
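
The ordering rule above matters because the lookup helper further down (find_quirk(), built on quirk_id_matches()) returns the first table entry whose vendor, device, subvendor and subdevice fields each either equal the probed device's IDs or are PCI_ANY_ID; a wildcard entry placed ahead of a more specific one would therefore shadow it. A minimal standalone sketch of that first-match rule, with a hypothetical three-entry table (illustration only, not part of the file being added):

#include <stdio.h>

#define ANY_ID 0xffffffffu	/* stand-in for PCI_ANY_ID */

struct quirk { unsigned int vendor, device; const char *name; };

static int id_matches(unsigned int q, unsigned int id)
{
	return q == ANY_ID || q == id;	/* same rule as quirk_id_matches() */
}

int main(void)
{
	/* Hypothetical table: specific entry first, wildcard terminator last */
	static const struct quirk quirks[] = {
		{ 0x1c29, 0x1104, "Fintek F81504 quirk" },
		{ 0x1c29, ANY_ID, "generic vendor quirk" },
		{ ANY_ID, ANY_ID, "default terminator" },
	};
	unsigned int vendor = 0x1c29, device = 0x1104;
	const struct quirk *q;

	for (q = quirks; ; q++)
		if (id_matches(q->vendor, vendor) && id_matches(q->device, device))
			break;
	printf("matched: %s\n", q->name);	/* first match wins */
	return 0;
}
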
+ */ +static struct pci_serial_quirk pci_serial_quirks[] = { + /* + * ADDI-DATA GmbH communication cards + */ + { + .vendor = PCI_VENDOR_ID_AMCC, + .device = PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = addidata_apci7800_setup, + }, + /* + * AFAVLAB cards - these may be called via parport_serial + * It is not clear whether this applies to all products. + */ + { + .vendor = PCI_VENDOR_ID_AFAVLAB, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = afavlab_setup, + }, + /* + * HP Diva + */ + { + .vendor = PCI_VENDOR_ID_HP, + .device = PCI_DEVICE_ID_HP_DIVA, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_hp_diva_init, + .setup = pci_hp_diva_setup, + }, + /* + * HPE PCI serial device + */ + { + .vendor = PCI_VENDOR_ID_HP_3PAR, + .device = PCI_DEVICE_ID_HPE_PCI_SERIAL, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_hp_diva_setup, + }, + /* + * Intel + */ + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_80960_RP, + .subvendor = 0xe4bf, + .subdevice = PCI_ANY_ID, + .init = pci_inteli960ni_init, + .setup = pci_default_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_8257X_SOL, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = skip_tx_en_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82573L_SOL, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = skip_tx_en_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82573E_SOL, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = skip_tx_en_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CE4100_UART, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = ce4100_serial_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_PATSBURG_KT, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = kt_serial_setup, + }, + /* + * ITE + */ + { + .vendor = PCI_VENDOR_ID_ITE, + .device = PCI_DEVICE_ID_ITE_8872, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ite887x_init, + .setup = pci_default_setup, + .exit = pci_ite887x_exit, + }, + /* + * National Instruments + */ + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PCI23216, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PCI2328, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PCI2324, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PCI2322, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PCI2324I, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PCI2322I, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = 
pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PXI8420_23216, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PXI8420_2328, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PXI8420_2324, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PXI8420_2322, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PXI8422_2324, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_DEVICE_ID_NI_PXI8422_2322, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8420_init, + .setup = pci_default_setup, + .exit = pci_ni8420_exit, + }, + { + .vendor = PCI_VENDOR_ID_NI, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_ni8430_init, + .setup = pci_ni8430_setup, + .exit = pci_ni8430_exit, + }, + /* Quatech */ + { + .vendor = PCI_VENDOR_ID_QUATECH, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_quatech_init, + .setup = pci_quatech_setup, + }, + /* + * Panacom + */ + { + .vendor = PCI_VENDOR_ID_PANACOM, + .device = PCI_DEVICE_ID_PANACOM_QUADMODEM, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_plx9050_init, + .setup = pci_default_setup, + .exit = pci_plx9050_exit, + }, + { + .vendor = PCI_VENDOR_ID_PANACOM, + .device = PCI_DEVICE_ID_PANACOM_DUALMODEM, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_plx9050_init, + .setup = pci_default_setup, + .exit = pci_plx9050_exit, + }, + /* + * PLX + */ + { + .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_PLX_9050, + .subvendor = PCI_SUBVENDOR_ID_EXSYS, + .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, + .init = pci_plx9050_init, + .setup = pci_default_setup, + .exit = pci_plx9050_exit, + }, + { + .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_PLX_9050, + .subvendor = PCI_SUBVENDOR_ID_KEYSPAN, + .subdevice = PCI_SUBDEVICE_ID_KEYSPAN_SX2, + .init = pci_plx9050_init, + .setup = pci_default_setup, + .exit = pci_plx9050_exit, + }, + { + .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_PLX_ROMULUS, + .subvendor = PCI_VENDOR_ID_PLX, + .subdevice = PCI_DEVICE_ID_PLX_ROMULUS, + .init = pci_plx9050_init, + .setup = pci_default_setup, + .exit = pci_plx9050_exit, + }, + /* + * SBS Technologies, Inc., PMC-OCTALPRO 232 + */ + { + .vendor = PCI_VENDOR_ID_SBSMODULARIO, + .device = PCI_DEVICE_ID_OCTPRO, + .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO, + .subdevice = PCI_SUBDEVICE_ID_OCTPRO232, + .init = sbs_init, + .setup = sbs_setup, + .exit = sbs_exit, + }, + /* + * SBS Technologies, Inc., PMC-OCTALPRO 422 + */ + { + .vendor = PCI_VENDOR_ID_SBSMODULARIO, + .device = PCI_DEVICE_ID_OCTPRO, + .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO, + .subdevice = PCI_SUBDEVICE_ID_OCTPRO422, + .init = sbs_init, + .setup = sbs_setup, + 
.exit = sbs_exit, + }, + /* + * SBS Technologies, Inc., P-Octal 232 + */ + { + .vendor = PCI_VENDOR_ID_SBSMODULARIO, + .device = PCI_DEVICE_ID_OCTPRO, + .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO, + .subdevice = PCI_SUBDEVICE_ID_POCTAL232, + .init = sbs_init, + .setup = sbs_setup, + .exit = sbs_exit, + }, + /* + * SBS Technologies, Inc., P-Octal 422 + */ + { + .vendor = PCI_VENDOR_ID_SBSMODULARIO, + .device = PCI_DEVICE_ID_OCTPRO, + .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO, + .subdevice = PCI_SUBDEVICE_ID_POCTAL422, + .init = sbs_init, + .setup = sbs_setup, + .exit = sbs_exit, + }, + /* + * SIIG cards - these may be called via parport_serial + */ + { + .vendor = PCI_VENDOR_ID_SIIG, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_siig_init, + .setup = pci_siig_setup, + }, + /* + * Titan cards + */ + { + .vendor = PCI_VENDOR_ID_TITAN, + .device = PCI_DEVICE_ID_TITAN_400L, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = titan_400l_800l_setup, + }, + { + .vendor = PCI_VENDOR_ID_TITAN, + .device = PCI_DEVICE_ID_TITAN_800L, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = titan_400l_800l_setup, + }, + /* + * Timedia cards + */ + { + .vendor = PCI_VENDOR_ID_TIMEDIA, + .device = PCI_DEVICE_ID_TIMEDIA_1889, + .subvendor = PCI_VENDOR_ID_TIMEDIA, + .subdevice = PCI_ANY_ID, + .probe = pci_timedia_probe, + .init = pci_timedia_init, + .setup = pci_timedia_setup, + }, + { + .vendor = PCI_VENDOR_ID_TIMEDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_timedia_setup, + }, + /* + * Sunix PCI serial boards + */ + { + .vendor = PCI_VENDOR_ID_SUNIX, + .device = PCI_DEVICE_ID_SUNIX_1999, + .subvendor = PCI_VENDOR_ID_SUNIX, + .subdevice = PCI_ANY_ID, + .setup = pci_sunix_setup, + }, + /* + * Xircom cards + */ + { + .vendor = PCI_VENDOR_ID_XIRCOM, + .device = PCI_DEVICE_ID_XIRCOM_X3201_MDM, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_xircom_init, + .setup = pci_default_setup, + }, + /* + * Netmos cards - these may be called via parport_serial + */ + { + .vendor = PCI_VENDOR_ID_NETMOS, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_netmos_init, + .setup = pci_netmos_9900_setup, + }, + /* + * EndRun Technologies + */ + { + .vendor = PCI_VENDOR_ID_ENDRUN, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_default_setup, + }, + /* + * For Oxford Semiconductor Tornado based devices + */ + { + .vendor = PCI_VENDOR_ID_OXSEMI, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_MAINPINE, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_DIGI, + .device = PCIE_DEVICE_ID_NEO_2_OX_IBM, + .subvendor = PCI_SUBVENDOR_ID_IBM, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + /* + * Brainboxes devices - all Oxsemi based + */ + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4027, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4028, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = 
pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4029, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4019, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4016, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4015, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x400A, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x400E, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x400C, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x400B, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x400F, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4010, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4011, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x401D, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x401E, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4013, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4017, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4018, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = 0x8811, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = 0x8812, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = 
0x8813, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = 0x8814, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = 0x10DB, + .device = 0x8027, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = 0x10DB, + .device = 0x8028, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = 0x10DB, + .device = 0x8029, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = 0x10DB, + .device = 0x800C, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + { + .vendor = 0x10DB, + .device = 0x800D, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_eg20t_init, + .setup = pci_default_setup, + }, + /* + * Cronyx Omega PCI (PLX-chip based) + */ + { + .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_PLX_CRONYX_OMEGA, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_omegapci_setup, + }, + /* WCH CH353 1S1P card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, + .device = PCI_DEVICE_ID_WCH_CH353_1S1P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch353_setup, + }, + /* WCH CH353 2S1P card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, + .device = PCI_DEVICE_ID_WCH_CH353_2S1P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch353_setup, + }, + /* WCH CH353 4S card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, + .device = PCI_DEVICE_ID_WCH_CH353_4S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch353_setup, + }, + /* WCH CH353 2S1PF card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, + .device = PCI_DEVICE_ID_WCH_CH353_2S1PF, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch353_setup, + }, + /* WCH CH352 2S card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, + .device = PCI_DEVICE_ID_WCH_CH352_2S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch353_setup, + }, + /* WCH CH355 4S card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, + .device = PCI_DEVICE_ID_WCH_CH355_4S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch355_setup, + }, + /* WCH CH382 2S card (16850 clone) */ + { + .vendor = PCIE_VENDOR_ID_WCH, + .device = PCIE_DEVICE_ID_WCH_CH382_2S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch38x_setup, + }, + /* WCH CH382 2S1P card (16850 clone) */ + { + .vendor = PCIE_VENDOR_ID_WCH, + .device = PCIE_DEVICE_ID_WCH_CH382_2S1P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch38x_setup, + }, + /* WCH CH384 4S card (16850 clone) */ + { + .vendor = PCIE_VENDOR_ID_WCH, + .device = PCIE_DEVICE_ID_WCH_CH384_4S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch38x_setup, + }, + /* WCH CH384 8S card (16850 clone) */ + { + .vendor = PCIE_VENDOR_ID_WCH, + .device = PCIE_DEVICE_ID_WCH_CH384_8S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_wch_ch38x_init, + .exit = pci_wch_ch38x_exit, + .setup = pci_wch_ch38x_setup, + }, + /* + * Broadcom TruManage (NetXtreme) + */ + { + .vendor = PCI_VENDOR_ID_BROADCOM, + .device = 
PCI_DEVICE_ID_BROADCOM_TRUMANAGE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_brcm_trumanage_setup, + }, + { + .vendor = 0x1c29, + .device = 0x1104, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_fintek_setup, + .init = pci_fintek_init, + }, + { + .vendor = 0x1c29, + .device = 0x1108, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_fintek_setup, + .init = pci_fintek_init, + }, + { + .vendor = 0x1c29, + .device = 0x1112, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_fintek_setup, + .init = pci_fintek_init, + }, + /* + * MOXA + */ + { + .vendor = PCI_VENDOR_ID_MOXA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_moxa_setup, + }, + { + .vendor = 0x1c29, + .device = 0x1204, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_fintek_f815xxa_setup, + .init = pci_fintek_f815xxa_init, + }, + { + .vendor = 0x1c29, + .device = 0x1208, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_fintek_f815xxa_setup, + .init = pci_fintek_f815xxa_init, + }, + { + .vendor = 0x1c29, + .device = 0x1212, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_fintek_f815xxa_setup, + .init = pci_fintek_f815xxa_init, + }, + + /* + * Default "match everything" terminator entry + */ + { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_default_setup, + } +}; + +static inline int quirk_id_matches(u32 quirk_id, u32 dev_id) +{ + return quirk_id == PCI_ANY_ID || quirk_id == dev_id; +} + +static struct pci_serial_quirk *find_quirk(struct pci_dev *dev) +{ + struct pci_serial_quirk *quirk; + + for (quirk = pci_serial_quirks; ; quirk++) + if (quirk_id_matches(quirk->vendor, dev->vendor) && + quirk_id_matches(quirk->device, dev->device) && + quirk_id_matches(quirk->subvendor, dev->subsystem_vendor) && + quirk_id_matches(quirk->subdevice, dev->subsystem_device)) + break; + return quirk; +} + +/* + * This is the configuration table for all of the PCI serial boards + * which we support. It is directly indexed by the pci_board_num_t enum + * value, which is encoded in the pci_device_id PCI probe table's + * driver_data member. + * + * The makeup of these names are: + * pbn_bn{_bt}_n_baud{_offsetinhex} + * + * bn = PCI BAR number + * bt = Index using PCI BARs + * n = number of serial ports + * baud = baud rate + * offsetinhex = offset for each sequential port (in hex) + * + * This table is sorted by (in order): bn, bt, baud, offsetindex, n. + * + * Please note: in theory if n = 1, _bt infix should make no difference. 
+ * ie, pbn_b0_1_115200 is the same as pbn_b0_bt_1_115200 + */ +enum pci_board_num_t { + pbn_default = 0, + + pbn_b0_1_115200, + pbn_b0_2_115200, + pbn_b0_4_115200, + pbn_b0_5_115200, + pbn_b0_8_115200, + + pbn_b0_1_921600, + pbn_b0_2_921600, + pbn_b0_4_921600, + + pbn_b0_2_1130000, + + pbn_b0_4_1152000, + + pbn_b0_4_1250000, + + pbn_b0_2_1843200, + pbn_b0_4_1843200, + + pbn_b0_1_15625000, + + pbn_b0_bt_1_115200, + pbn_b0_bt_2_115200, + pbn_b0_bt_4_115200, + pbn_b0_bt_8_115200, + + pbn_b0_bt_1_460800, + pbn_b0_bt_2_460800, + pbn_b0_bt_4_460800, + + pbn_b0_bt_1_921600, + pbn_b0_bt_2_921600, + pbn_b0_bt_4_921600, + pbn_b0_bt_8_921600, + + pbn_b1_1_115200, + pbn_b1_2_115200, + pbn_b1_4_115200, + pbn_b1_8_115200, + pbn_b1_16_115200, + + pbn_b1_1_921600, + pbn_b1_2_921600, + pbn_b1_4_921600, + pbn_b1_8_921600, + + pbn_b1_2_1250000, + + pbn_b1_bt_1_115200, + pbn_b1_bt_2_115200, + pbn_b1_bt_4_115200, + + pbn_b1_bt_2_921600, + + pbn_b1_1_1382400, + pbn_b1_2_1382400, + pbn_b1_4_1382400, + pbn_b1_8_1382400, + + pbn_b2_1_115200, + pbn_b2_2_115200, + pbn_b2_4_115200, + pbn_b2_8_115200, + + pbn_b2_1_460800, + pbn_b2_4_460800, + pbn_b2_8_460800, + pbn_b2_16_460800, + + pbn_b2_1_921600, + pbn_b2_4_921600, + pbn_b2_8_921600, + + pbn_b2_8_1152000, + + pbn_b2_bt_1_115200, + pbn_b2_bt_2_115200, + pbn_b2_bt_4_115200, + + pbn_b2_bt_2_921600, + pbn_b2_bt_4_921600, + + pbn_b3_2_115200, + pbn_b3_4_115200, + pbn_b3_8_115200, + + pbn_b4_bt_2_921600, + pbn_b4_bt_4_921600, + pbn_b4_bt_8_921600, + + /* + * Board-specific versions. + */ + pbn_panacom, + pbn_panacom2, + pbn_panacom4, + pbn_plx_romulus, + pbn_oxsemi, + pbn_oxsemi_1_15625000, + pbn_oxsemi_2_15625000, + pbn_oxsemi_4_15625000, + pbn_oxsemi_8_15625000, + pbn_intel_i960, + pbn_sgi_ioc3, + pbn_computone_4, + pbn_computone_6, + pbn_computone_8, + pbn_sbsxrsio, + pbn_pasemi_1682M, + pbn_ni8430_2, + pbn_ni8430_4, + pbn_ni8430_8, + pbn_ni8430_16, + pbn_ADDIDATA_PCIe_1_3906250, + pbn_ADDIDATA_PCIe_2_3906250, + pbn_ADDIDATA_PCIe_4_3906250, + pbn_ADDIDATA_PCIe_8_3906250, + pbn_ce4100_1_115200, + pbn_omegapci, + pbn_NETMOS9900_2s_115200, + pbn_brcm_trumanage, + pbn_fintek_4, + pbn_fintek_8, + pbn_fintek_12, + pbn_fintek_F81504A, + pbn_fintek_F81508A, + pbn_fintek_F81512A, + pbn_wch382_2, + pbn_wch384_4, + pbn_wch384_8, + pbn_sunix_pci_1s, + pbn_sunix_pci_2s, + pbn_sunix_pci_4s, + pbn_sunix_pci_8s, + pbn_sunix_pci_16s, + pbn_titan_1_4000000, + pbn_titan_2_4000000, + pbn_titan_4_4000000, + pbn_titan_8_4000000, + pbn_moxa8250_2p, + pbn_moxa8250_4p, + pbn_moxa8250_8p, +}; + +/* + * uart_offset - the space between channels + * reg_shift - describes how the UART registers are mapped + * to PCI memory by the card. + * For example IER register on SBS, Inc. 
PMC-OctPro is located at + * offset 0x10 from the UART base, while UART_IER is defined as 1 + * in include/linux/serial_reg.h, + * see first lines of serial_in() and serial_out() in 8250.c +*/ + +static struct pciserial_board pci_boards[] = { + [pbn_default] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_1_115200] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_2_115200] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_4_115200] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_5_115200] = { + .flags = FL_BASE0, + .num_ports = 5, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_8_115200] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_1_921600] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b0_2_921600] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b0_4_921600] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8, + }, + + [pbn_b0_2_1130000] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 1130000, + .uart_offset = 8, + }, + + [pbn_b0_4_1152000] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 1152000, + .uart_offset = 8, + }, + + [pbn_b0_4_1250000] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 1250000, + .uart_offset = 8, + }, + + [pbn_b0_2_1843200] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 1843200, + .uart_offset = 8, + }, + [pbn_b0_4_1843200] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 1843200, + .uart_offset = 8, + }, + + [pbn_b0_1_15625000] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 15625000, + .uart_offset = 8, + }, + + [pbn_b0_bt_1_115200] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_bt_2_115200] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_bt_4_115200] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b0_bt_8_115200] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 8, + .base_baud = 115200, + .uart_offset = 8, + }, + + [pbn_b0_bt_1_460800] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 1, + .base_baud = 460800, + .uart_offset = 8, + }, + [pbn_b0_bt_2_460800] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 460800, + .uart_offset = 8, + }, + [pbn_b0_bt_4_460800] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 460800, + .uart_offset = 8, + }, + + [pbn_b0_bt_1_921600] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 1, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b0_bt_2_921600] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b0_bt_4_921600] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b0_bt_8_921600] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 8, + }, + + [pbn_b1_1_115200] = { + .flags = FL_BASE1, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b1_2_115200] = { + .flags = FL_BASE1, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, 
+ [pbn_b1_4_115200] = { + .flags = FL_BASE1, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b1_8_115200] = { + .flags = FL_BASE1, + .num_ports = 8, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b1_16_115200] = { + .flags = FL_BASE1, + .num_ports = 16, + .base_baud = 115200, + .uart_offset = 8, + }, + + [pbn_b1_1_921600] = { + .flags = FL_BASE1, + .num_ports = 1, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b1_2_921600] = { + .flags = FL_BASE1, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b1_4_921600] = { + .flags = FL_BASE1, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b1_8_921600] = { + .flags = FL_BASE1, + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b1_2_1250000] = { + .flags = FL_BASE1, + .num_ports = 2, + .base_baud = 1250000, + .uart_offset = 8, + }, + + [pbn_b1_bt_1_115200] = { + .flags = FL_BASE1|FL_BASE_BARS, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b1_bt_2_115200] = { + .flags = FL_BASE1|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b1_bt_4_115200] = { + .flags = FL_BASE1|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + + [pbn_b1_bt_2_921600] = { + .flags = FL_BASE1|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 8, + }, + + [pbn_b1_1_1382400] = { + .flags = FL_BASE1, + .num_ports = 1, + .base_baud = 1382400, + .uart_offset = 8, + }, + [pbn_b1_2_1382400] = { + .flags = FL_BASE1, + .num_ports = 2, + .base_baud = 1382400, + .uart_offset = 8, + }, + [pbn_b1_4_1382400] = { + .flags = FL_BASE1, + .num_ports = 4, + .base_baud = 1382400, + .uart_offset = 8, + }, + [pbn_b1_8_1382400] = { + .flags = FL_BASE1, + .num_ports = 8, + .base_baud = 1382400, + .uart_offset = 8, + }, + + [pbn_b2_1_115200] = { + .flags = FL_BASE2, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b2_2_115200] = { + .flags = FL_BASE2, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b2_4_115200] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b2_8_115200] = { + .flags = FL_BASE2, + .num_ports = 8, + .base_baud = 115200, + .uart_offset = 8, + }, + + [pbn_b2_1_460800] = { + .flags = FL_BASE2, + .num_ports = 1, + .base_baud = 460800, + .uart_offset = 8, + }, + [pbn_b2_4_460800] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 460800, + .uart_offset = 8, + }, + [pbn_b2_8_460800] = { + .flags = FL_BASE2, + .num_ports = 8, + .base_baud = 460800, + .uart_offset = 8, + }, + [pbn_b2_16_460800] = { + .flags = FL_BASE2, + .num_ports = 16, + .base_baud = 460800, + .uart_offset = 8, + }, + + [pbn_b2_1_921600] = { + .flags = FL_BASE2, + .num_ports = 1, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b2_4_921600] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b2_8_921600] = { + .flags = FL_BASE2, + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 8, + }, + + [pbn_b2_8_1152000] = { + .flags = FL_BASE2, + .num_ports = 8, + .base_baud = 1152000, + .uart_offset = 8, + }, + + [pbn_b2_bt_1_115200] = { + .flags = FL_BASE2|FL_BASE_BARS, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b2_bt_2_115200] = { + .flags = FL_BASE2|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b2_bt_4_115200] = { + .flags = FL_BASE2|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 
115200, + .uart_offset = 8, + }, + + [pbn_b2_bt_2_921600] = { + .flags = FL_BASE2|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b2_bt_4_921600] = { + .flags = FL_BASE2|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8, + }, + + [pbn_b3_2_115200] = { + .flags = FL_BASE3, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b3_4_115200] = { + .flags = FL_BASE3, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_b3_8_115200] = { + .flags = FL_BASE3, + .num_ports = 8, + .base_baud = 115200, + .uart_offset = 8, + }, + + [pbn_b4_bt_2_921600] = { + .flags = FL_BASE4, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b4_bt_4_921600] = { + .flags = FL_BASE4, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8, + }, + [pbn_b4_bt_8_921600] = { + .flags = FL_BASE4, + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 8, + }, + + /* + * Entries following this are board-specific. + */ + + /* + * Panacom - IOMEM + */ + [pbn_panacom] = { + .flags = FL_BASE2, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 0x400, + .reg_shift = 7, + }, + [pbn_panacom2] = { + .flags = FL_BASE2|FL_BASE_BARS, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 0x400, + .reg_shift = 7, + }, + [pbn_panacom4] = { + .flags = FL_BASE2|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 0x400, + .reg_shift = 7, + }, + + /* I think this entry is broken - the first_offset looks wrong --rmk */ + [pbn_plx_romulus] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8 << 2, + .reg_shift = 2, + .first_offset = 0x03, + }, + + /* + * This board uses the size of PCI Base region 0 to + * signal now many ports are available + */ + [pbn_oxsemi] = { + .flags = FL_BASE0|FL_REGION_SZ_CAP, + .num_ports = 32, + .base_baud = 115200, + .uart_offset = 8, + }, + [pbn_oxsemi_1_15625000] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 15625000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_oxsemi_2_15625000] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 15625000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_oxsemi_4_15625000] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 15625000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_oxsemi_8_15625000] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 15625000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + + + /* + * EKF addition for i960 Boards form EKF with serial port. + * Max 256 ports. + */ + [pbn_intel_i960] = { + .flags = FL_BASE0, + .num_ports = 32, + .base_baud = 921600, + .uart_offset = 8 << 2, + .reg_shift = 2, + .first_offset = 0x10000, + }, + [pbn_sgi_ioc3] = { + .flags = FL_BASE0|FL_NOIRQ, + .num_ports = 1, + .base_baud = 458333, + .uart_offset = 8, + .reg_shift = 0, + .first_offset = 0x20178, + }, + + /* + * Computone - uses IOMEM. 
+ */ + [pbn_computone_4] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 0x40, + .reg_shift = 2, + .first_offset = 0x200, + }, + [pbn_computone_6] = { + .flags = FL_BASE0, + .num_ports = 6, + .base_baud = 921600, + .uart_offset = 0x40, + .reg_shift = 2, + .first_offset = 0x200, + }, + [pbn_computone_8] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 0x40, + .reg_shift = 2, + .first_offset = 0x200, + }, + [pbn_sbsxrsio] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 460800, + .uart_offset = 256, + .reg_shift = 4, + }, + /* + * PA Semi PWRficient PA6T-1682M on-chip UART + */ + [pbn_pasemi_1682M] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 8333333, + }, + /* + * National Instruments 843x + */ + [pbn_ni8430_16] = { + .flags = FL_BASE0, + .num_ports = 16, + .base_baud = 3686400, + .uart_offset = 0x10, + .first_offset = 0x800, + }, + [pbn_ni8430_8] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 3686400, + .uart_offset = 0x10, + .first_offset = 0x800, + }, + [pbn_ni8430_4] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 3686400, + .uart_offset = 0x10, + .first_offset = 0x800, + }, + [pbn_ni8430_2] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 3686400, + .uart_offset = 0x10, + .first_offset = 0x800, + }, + /* + * ADDI-DATA GmbH PCI-Express communication cards + */ + [pbn_ADDIDATA_PCIe_1_3906250] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_ADDIDATA_PCIe_2_3906250] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_ADDIDATA_PCIe_4_3906250] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_ADDIDATA_PCIe_8_3906250] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_ce4100_1_115200] = { + .flags = FL_BASE_BARS, + .num_ports = 2, + .base_baud = 921600, + .reg_shift = 2, + }, + [pbn_omegapci] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 115200, + .uart_offset = 0x200, + }, + [pbn_NETMOS9900_2s_115200] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 115200, + }, + [pbn_brcm_trumanage] = { + .flags = FL_BASE0, + .num_ports = 1, + .reg_shift = 2, + .base_baud = 115200, + }, + [pbn_fintek_4] = { + .num_ports = 4, + .uart_offset = 8, + .base_baud = 115200, + .first_offset = 0x40, + }, + [pbn_fintek_8] = { + .num_ports = 8, + .uart_offset = 8, + .base_baud = 115200, + .first_offset = 0x40, + }, + [pbn_fintek_12] = { + .num_ports = 12, + .uart_offset = 8, + .base_baud = 115200, + .first_offset = 0x40, + }, + [pbn_fintek_F81504A] = { + .num_ports = 4, + .uart_offset = 8, + .base_baud = 115200, + }, + [pbn_fintek_F81508A] = { + .num_ports = 8, + .uart_offset = 8, + .base_baud = 115200, + }, + [pbn_fintek_F81512A] = { + .num_ports = 12, + .uart_offset = 8, + .base_baud = 115200, + }, + [pbn_wch382_2] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + .first_offset = 0xC0, + }, + [pbn_wch384_4] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + .first_offset = 0xC0, + }, + [pbn_wch384_8] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 115200, + .uart_offset = 8, + .first_offset = 0x00, + }, + [pbn_sunix_pci_1s] = { + .num_ports = 1, + .base_baud = 921600, + .uart_offset 
= 0x8, + }, + [pbn_sunix_pci_2s] = { + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_sunix_pci_4s] = { + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_sunix_pci_8s] = { + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_sunix_pci_16s] = { + .num_ports = 16, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_titan_1_4000000] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_titan_2_4000000] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_titan_4_4000000] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_titan_8_4000000] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_moxa8250_2p] = { + .flags = FL_BASE1, + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 0x200, + }, + [pbn_moxa8250_4p] = { + .flags = FL_BASE1, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 0x200, + }, + [pbn_moxa8250_8p] = { + .flags = FL_BASE1, + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 0x200, + }, +}; + +#define REPORT_CONFIG(option) \ + (IS_ENABLED(CONFIG_##option) ? 0 : (kernel_ulong_t)&#option) +#define REPORT_8250_CONFIG(option) \ + (IS_ENABLED(CONFIG_SERIAL_8250_##option) ? \ + 0 : (kernel_ulong_t)&"SERIAL_8250_"#option) + +static const struct pci_device_id blacklist[] = { + /* softmodems */ + { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ + { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */ + { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */ + + /* multi-io cards handled by parport_serial */ + /* WCH CH353 2S1P */ + { PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), }, + /* WCH CH353 1S1P */ + { PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), }, + /* WCH CH382 2S1P */ + { PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), }, + + /* Intel platforms with MID UART */ + { PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), }, + { PCI_VDEVICE(INTEL, 0x081c), REPORT_8250_CONFIG(MID), }, + { PCI_VDEVICE(INTEL, 0x081d), REPORT_8250_CONFIG(MID), }, + { PCI_VDEVICE(INTEL, 0x1191), REPORT_8250_CONFIG(MID), }, + { PCI_VDEVICE(INTEL, 0x18d8), REPORT_8250_CONFIG(MID), }, + { PCI_VDEVICE(INTEL, 0x19d8), REPORT_8250_CONFIG(MID), }, + + /* Intel platforms with DesignWare UART */ + { PCI_VDEVICE(INTEL, 0x0936), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x0f0a), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x0f0c), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x228a), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x228c), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x4b96), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x4b97), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x4b98), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x4b99), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x4b9a), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x4b9b), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x9ce3), REPORT_8250_CONFIG(LPSS), }, + { PCI_VDEVICE(INTEL, 0x9ce4), REPORT_8250_CONFIG(LPSS), }, + + /* Exar devices */ + { PCI_VDEVICE(EXAR, PCI_ANY_ID), REPORT_8250_CONFIG(EXAR), }, + { PCI_VDEVICE(COMMTECH, PCI_ANY_ID), 
REPORT_8250_CONFIG(EXAR), }, + + /* Pericom devices */ + { PCI_VDEVICE(PERICOM, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), }, + { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), }, + + /* End of the black list */ + { } +}; + +static int serial_pci_is_class_communication(struct pci_dev *dev) +{ + /* + * If it is not a communications device or the programming + * interface is greater than 6, give up. + */ + if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && + ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) && + ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || + (dev->class & 0xff) > 6) + return -ENODEV; + + return 0; +} + +/* + * Given a complete unknown PCI device, try to use some heuristics to + * guess what the configuration might be, based on the pitiful PCI + * serial specs. Returns 0 on success, -ENODEV on failure. + */ +static int +serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) +{ + int num_iomem, num_port, first_port = -1, i; + int rc; + + rc = serial_pci_is_class_communication(dev); + if (rc) + return rc; + + /* + * Should we try to make guesses for multiport serial devices later? + */ + if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL) + return -ENODEV; + + num_iomem = num_port = 0; + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + num_port++; + if (first_port == -1) + first_port = i; + } + if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + num_iomem++; + } + + /* + * If there is 1 or 0 iomem regions, and exactly one port, + * use it. We guess the number of ports based on the IO + * region size. + */ + if (num_iomem <= 1 && num_port == 1) { + board->flags = first_port; + board->num_ports = pci_resource_len(dev, first_port) / 8; + return 0; + } + + /* + * Now guess if we've got a board which indexes by BARs. + * Each IO BAR should be 8 bytes, and they should follow + * consecutively. + */ + first_port = -1; + num_port = 0; + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_flags(dev, i) & IORESOURCE_IO && + pci_resource_len(dev, i) == 8 && + (first_port == -1 || (first_port + num_port) == i)) { + num_port++; + if (first_port == -1) + first_port = i; + } + } + + if (num_port > 1) { + board->flags = first_port | FL_BASE_BARS; + board->num_ports = num_port; + return 0; + } + + return -ENODEV; +} + +static inline int +serial_pci_matches(const struct pciserial_board *board, + const struct pciserial_board *guessed) +{ + return + board->num_ports == guessed->num_ports && + board->base_baud == guessed->base_baud && + board->uart_offset == guessed->uart_offset && + board->reg_shift == guessed->reg_shift && + board->first_offset == guessed->first_offset; +} + +struct serial_private * +pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) +{ + struct uart_8250_port uart; + struct serial_private *priv; + struct pci_serial_quirk *quirk; + int rc, nr_ports, i; + + nr_ports = board->num_ports; + + /* + * Find an init and setup quirks. + */ + quirk = find_quirk(dev); + + /* + * Run the new-style initialization function. 
+ * The initialization function returns: + * <0 - error + * 0 - use board->num_ports + * >0 - number of ports + */ + if (quirk->init) { + rc = quirk->init(dev); + if (rc < 0) { + priv = ERR_PTR(rc); + goto err_out; + } + if (rc) + nr_ports = rc; + } + + priv = kzalloc(struct_size(priv, line, nr_ports), GFP_KERNEL); + if (!priv) { + priv = ERR_PTR(-ENOMEM); + goto err_deinit; + } + + priv->dev = dev; + priv->quirk = quirk; + + memset(&uart, 0, sizeof(uart)); + uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; + uart.port.uartclk = board->base_baud * 16; + + if (board->flags & FL_NOIRQ) { + uart.port.irq = 0; + } else { + if (pci_match_id(pci_use_msi, dev)) { + pci_dbg(dev, "Using MSI(-X) interrupts\n"); + pci_set_master(dev); + uart.port.flags &= ~UPF_SHARE_IRQ; + rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES); + } else { + pci_dbg(dev, "Using legacy interrupts\n"); + rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); + } + if (rc < 0) { + kfree(priv); + priv = ERR_PTR(rc); + goto err_deinit; + } + + uart.port.irq = pci_irq_vector(dev, 0); + } + + uart.port.dev = &dev->dev; + + for (i = 0; i < nr_ports; i++) { + if (quirk->setup(priv, board, &uart, i)) + break; + + pci_dbg(dev, "Setup PCI port: port %lx, irq %d, type %d\n", + uart.port.iobase, uart.port.irq, uart.port.iotype); + + priv->line[i] = serial8250_register_8250_port(&uart); + if (priv->line[i] < 0) { + pci_err(dev, + "Couldn't register serial port %lx, irq %d, type %d, error %d\n", + uart.port.iobase, uart.port.irq, + uart.port.iotype, priv->line[i]); + break; + } + } + priv->nr = i; + priv->board = board; + return priv; + +err_deinit: + if (quirk->exit) + quirk->exit(dev); +err_out: + return priv; +} +EXPORT_SYMBOL_GPL(pciserial_init_ports); + +static void pciserial_detach_ports(struct serial_private *priv) +{ + struct pci_serial_quirk *quirk; + int i; + + for (i = 0; i < priv->nr; i++) + serial8250_unregister_port(priv->line[i]); + + /* + * Find the exit quirks. + */ + quirk = find_quirk(priv->dev); + if (quirk->exit) + quirk->exit(priv->dev); +} + +void pciserial_remove_ports(struct serial_private *priv) +{ + pciserial_detach_ports(priv); + kfree(priv); +} +EXPORT_SYMBOL_GPL(pciserial_remove_ports); + +void pciserial_suspend_ports(struct serial_private *priv) +{ + int i; + + for (i = 0; i < priv->nr; i++) + if (priv->line[i] >= 0) + serial8250_suspend_port(priv->line[i]); + + /* + * Ensure that every init quirk is properly torn down + */ + if (priv->quirk->exit) + priv->quirk->exit(priv->dev); +} +EXPORT_SYMBOL_GPL(pciserial_suspend_ports); + +void pciserial_resume_ports(struct serial_private *priv) +{ + int i; + + /* + * Ensure that the board is correctly configured. + */ + if (priv->quirk->init) + priv->quirk->init(priv->dev); + + for (i = 0; i < priv->nr; i++) + if (priv->line[i] >= 0) + serial8250_resume_port(priv->line[i]); +} +EXPORT_SYMBOL_GPL(pciserial_resume_ports); + +/* + * Probe one serial board. Unfortunately, there is no rhyme nor reason + * to the arrangement of serial ports on a PCI card. 
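
The four exported helpers above (pciserial_init_ports(), pciserial_remove_ports(), pciserial_suspend_ports() and pciserial_resume_ports()) are exported precisely so related drivers can reuse them. A sketch of a minimal caller follows; the driver name, the board description and the trimmed error handling are hypothetical, not part of the file being added. Note that pciserial_init_ports() derives the UART clock as base_baud * 16, i.e. base_baud is the rate a divisor of one produces (115200 corresponds to a 1.8432 MHz uartclk).

/* Editorial sketch: a minimal user of the exported helpers above. */
static const struct pciserial_board example_board = {
	.flags		= FL_BASE0,
	.num_ports	= 2,
	.base_baud	= 115200,
	.uart_offset	= 8,
};

static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct serial_private *priv;
	int rc;

	rc = pcim_enable_device(dev);
	if (rc)
		return rc;

	priv = pciserial_init_ports(dev, &example_board);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	pci_set_drvdata(dev, priv);
	return 0;
}

static void example_remove(struct pci_dev *dev)
{
	pciserial_remove_ports(pci_get_drvdata(dev));
}
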
+ */ +static int +pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) +{ + struct pci_serial_quirk *quirk; + struct serial_private *priv; + const struct pciserial_board *board; + const struct pci_device_id *exclude; + struct pciserial_board tmp; + int rc; + + quirk = find_quirk(dev); + if (quirk->probe) { + rc = quirk->probe(dev); + if (rc) + return rc; + } + + if (ent->driver_data >= ARRAY_SIZE(pci_boards)) { + pci_err(dev, "invalid driver_data: %ld\n", ent->driver_data); + return -EINVAL; + } + + board = &pci_boards[ent->driver_data]; + + exclude = pci_match_id(blacklist, dev); + if (exclude) { + if (exclude->driver_data) + pci_warn(dev, "ignoring port, enable %s to handle\n", + (const char *)exclude->driver_data); + return -ENODEV; + } + + rc = pcim_enable_device(dev); + pci_save_state(dev); + if (rc) + return rc; + + if (ent->driver_data == pbn_default) { + /* + * Use a copy of the pci_board entry for this; + * avoid changing entries in the table. + */ + memcpy(&tmp, board, sizeof(struct pciserial_board)); + board = &tmp; + + /* + * We matched one of our class entries. Try to + * determine the parameters of this board. + */ + rc = serial_pci_guess_board(dev, &tmp); + if (rc) + return rc; + } else { + /* + * We matched an explicit entry. If we are able to + * detect this boards settings with our heuristic, + * then we no longer need this entry. + */ + memcpy(&tmp, &pci_boards[pbn_default], + sizeof(struct pciserial_board)); + rc = serial_pci_guess_board(dev, &tmp); + if (rc == 0 && serial_pci_matches(board, &tmp)) + moan_device("Redundant entry in serial pci_table.", + dev); + } + + priv = pciserial_init_ports(dev, board); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + pci_set_drvdata(dev, priv); + return 0; +} + +static void pciserial_remove_one(struct pci_dev *dev) +{ + struct serial_private *priv = pci_get_drvdata(dev); + + pciserial_remove_ports(priv); +} + +#ifdef CONFIG_PM_SLEEP +static int pciserial_suspend_one(struct device *dev) +{ + struct serial_private *priv = dev_get_drvdata(dev); + + if (priv) + pciserial_suspend_ports(priv); + + return 0; +} + +static int pciserial_resume_one(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct serial_private *priv = pci_get_drvdata(pdev); + int err; + + if (priv) { + /* + * The device may have been disabled. Re-enable it. 
+ */ + err = pci_enable_device(pdev); + /* FIXME: We cannot simply error out here */ + if (err) + pci_err(pdev, "Unable to re-enable ports, trying to continue.\n"); + pciserial_resume_ports(priv); + } + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one, + pciserial_resume_one); + +static const struct pci_device_id serial_pci_tbl[] = { + { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600, + PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */ + { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620, + PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0, + pbn_b2_8_921600 }, + /* Advantech also use 0x3618 and 0xf618 */ + { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3618, + PCI_DEVICE_ID_ADVANTECH_PCI3618, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCIf618, + PCI_DEVICE_ID_ADVANTECH_PCI3618, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0, + pbn_b1_8_1382400 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232, 0, 0, + pbn_b1_4_1382400 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232, 0, 0, + pbn_b1_2_1382400 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0, + pbn_b1_8_1382400 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232, 0, 0, + pbn_b1_4_1382400 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232, 0, 0, + pbn_b1_2_1382400 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485, 0, 0, + pbn_b1_8_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4, 0, 0, + pbn_b1_8_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485, 0, 0, + pbn_b1_4_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2, 0, 0, + pbn_b1_4_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485, 0, 0, + pbn_b1_2_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6, 0, 0, + pbn_b1_8_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1, 0, 0, + pbn_b1_8_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1, 0, 0, + pbn_b1_4_921600 }, + { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ, 0, 0, + pbn_b1_2_1250000 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, + PCI_SUBVENDOR_ID_CONNECT_TECH, + PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2, 0, 0, + pbn_b0_2_1843200 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, + PCI_SUBVENDOR_ID_CONNECT_TECH, + 
PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4, 0, 0, + pbn_b0_4_1843200 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, + PCI_VENDOR_ID_AFAVLAB, + PCI_SUBDEVICE_ID_AFAVLAB_P061, 0, 0, + pbn_b0_4_1152000 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_1_115200 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_115200 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM422, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_4_115200 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM232, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_115200 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_4_115200 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_8_115200 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_7803, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_8_460800 }, + { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_8_115200 }, + + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_GTEK_SERIAL2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_115200 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_921600 }, + /* + * VScom SPCOM800, from sl@s.pl + */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM800, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_8_921600 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_4_921600 }, + /* Unknown card - subdevice 0x1584 */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_VENDOR_ID_PLX, + PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0, + pbn_b2_4_115200 }, + /* Unknown card - subdevice 0x1588 */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_VENDOR_ID_PLX, + PCI_SUBDEVICE_ID_UNKNOWN_0x1588, 0, 0, + pbn_b2_8_115200 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_KEYSPAN, + PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0, + pbn_panacom }, + { PCI_VENDOR_ID_PANACOM, PCI_DEVICE_ID_PANACOM_QUADMODEM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_panacom4 }, + { PCI_VENDOR_ID_PANACOM, PCI_DEVICE_ID_PANACOM_DUALMODEM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_panacom2 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, + PCI_VENDOR_ID_ESDGMBH, + PCI_DEVICE_ID_ESDGMBH_CPCIASIO4, 0, 0, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_CHASE_PCIFAST, + PCI_SUBDEVICE_ID_CHASE_PCIFAST4, 0, 0, + pbn_b2_4_460800 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_CHASE_PCIFAST, + PCI_SUBDEVICE_ID_CHASE_PCIFAST8, 0, 0, + pbn_b2_8_460800 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_CHASE_PCIFAST, + PCI_SUBDEVICE_ID_CHASE_PCIFAST16, 0, 0, + pbn_b2_16_460800 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_CHASE_PCIFAST, + PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC, 0, 0, + pbn_b2_16_460800 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_CHASE_PCIRAS, + PCI_SUBDEVICE_ID_CHASE_PCIRAS4, 0, 0, + pbn_b2_4_460800 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_CHASE_PCIRAS, + PCI_SUBDEVICE_ID_CHASE_PCIRAS8, 0, 0, + pbn_b2_8_460800 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_EXSYS, + PCI_SUBDEVICE_ID_EXSYS_4055, 0, 0, + pbn_b2_4_115200 }, + /* + * Megawolf Romulus PCI Serial Card, from Mike Hudson + * (Exoray@isys.ca) + */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_ROMULUS, + 0x10b5, 
0x106a, 0, 0, + pbn_plx_romulus }, + /* + * Quatech cards. These actually have configurable clocks but for + * now we just use the default. + * + * 100 series are RS232, 200 series RS422, + */ + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_4_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100E, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200E, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_4_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100D, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_8_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100M, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_8_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_4_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_4_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCLP100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCLP200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_115200 }, + { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESCLP100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_8_115200 }, + + { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954, + PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, + 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, + 0, 0, + pbn_b0_4_1152000 }, + { PCI_VENDOR_ID_OXSEMI, 0x9505, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, + + /* + * The below card is a little controversial since it is the + * subject of a PCI vendor/device ID clash. (See + * www.ussg.iu.edu/hypermail/linux/kernel/0303.1/0516.html). + * For now just used the hex ID 0x950a. 
+ */ + { PCI_VENDOR_ID_OXSEMI, 0x950a, + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00, + 0, 0, pbn_b0_2_115200 }, + { PCI_VENDOR_ID_OXSEMI, 0x950a, + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30, + 0, 0, pbn_b0_2_115200 }, + { PCI_VENDOR_ID_OXSEMI, 0x950a, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_2_1130000 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_C950, + PCI_VENDOR_ID_OXSEMI, PCI_SUBDEVICE_ID_OXSEMI_C950, 0, 0, + pbn_b0_1_921600 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_115200 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_8_1152000 }, + + /* + * Oxford Semiconductor Inc. Tornado PCI express device range. + */ + { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_2_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_2_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_4_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_4_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_8_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_8_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { 
PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */ + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_1_15625000 }, + /* + * Mainpine Inc. 
IQ Express "Rev3" utilizing OxSemi Tornado + */ + { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */ + PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0, + pbn_oxsemi_1_15625000 }, + { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */ + PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0, + pbn_oxsemi_2_15625000 }, + { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */ + PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0, + pbn_oxsemi_4_15625000 }, + { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */ + PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0, + pbn_oxsemi_8_15625000 }, + + /* + * Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado + */ + { PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM, + PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0, + pbn_oxsemi_2_15625000 }, + /* + * EndRun Technologies. PCI express device range. + * EndRun PTP/1588 has 2 Native UARTs utilizing OxSemi 952. + */ + { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_2_15625000 }, + + /* + * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards, + * from skokodyn@yahoo.com + */ + { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, + PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO232, 0, 0, + pbn_sbsxrsio }, + { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, + PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO422, 0, 0, + pbn_sbsxrsio }, + { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, + PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL232, 0, 0, + pbn_sbsxrsio }, + { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, + PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL422, 0, 0, + pbn_sbsxrsio }, + + /* + * Digitan DS560-558, from jimd@esoft.com + */ + { PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_ATT_VENUS_MODEM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_1_115200 }, + + /* + * Titan Electronic cards + * The 400L and 800L have a custom setup quirk. 
+ */ + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_2_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100L, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_1_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200L, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_2_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400L, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800L, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200I, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b4_bt_2_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400I, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b4_bt_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800I, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b4_bt_8_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400EH, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800EH, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800EHB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100E, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_titan_1_4000000 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200E, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_titan_2_4000000 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400E, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_titan_4_4000000 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800E, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_titan_8_4000000 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_titan_2_4000000 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_titan_2_4000000 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_410V3, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, + + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_460800 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_460800 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_850, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_460800 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_550, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_850, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_550, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_4_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_4_921600 }, + { 
PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_850, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_4_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_550, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_850, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_550, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_850, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_550, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_850, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_550, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_921600 }, + { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_850, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_921600 }, + + /* + * Computone devices submitted by Doug McNash dmcnash@computone.com + */ + { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG, + PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG4, + 0, 0, pbn_computone_4 }, + { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG, + PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG8, + 0, 0, pbn_computone_8 }, + { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG, + PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG6, + 0, 0, pbn_computone_6 }, + + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI95N, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi }, + { PCI_VENDOR_ID_TIMEDIA, PCI_DEVICE_ID_TIMEDIA_1889, + PCI_VENDOR_ID_TIMEDIA, PCI_ANY_ID, 0, 0, + pbn_b0_bt_1_921600 }, + + /* + * Sunix PCI serial boards + */ + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0001, 0, 0, + pbn_sunix_pci_1s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0002, 0, 0, + pbn_sunix_pci_2s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0004, 0, 0, + pbn_sunix_pci_4s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0084, 0, 0, + pbn_sunix_pci_4s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0008, 0, 0, + pbn_sunix_pci_8s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0088, 0, 0, + pbn_sunix_pci_8s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0010, 0, 0, + pbn_sunix_pci_16s }, + + /* + * AFAVLAB serial card, from Harald Welte + */ + { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_115200 }, + { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P030, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_115200 }, + + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DSERIAL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B, + PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PORT_PLUS, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_SSERIAL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_1_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PORT_650, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_1_460800 }, + + /* + * Korenix Jetcard F0/F1 cards (JC1204, JC1208, JC1404, JC1408). + * Cards are identified by their subsystem vendor IDs, which + * (in hex) match the model number. + * + * Note that JC140x are RS422/485 cards which require ox950 + * ACR = 0x10, and as such are not currently fully supported. + */ + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, + 0x1204, 0x0004, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, + 0x1208, 0x0004, 0, 0, + pbn_b0_4_921600 }, +/* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, + 0x1402, 0x0002, 0, 0, + pbn_b0_2_921600 }, */ +/* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, + 0x1404, 0x0004, 0, 0, + pbn_b0_4_921600 }, */ + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF1, + 0x1208, 0x0004, 0, 0, + pbn_b0_4_921600 }, + + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2, + 0x1204, 0x0004, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2, + 0x1208, 0x0004, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3, + 0x1208, 0x0004, 0, 0, + pbn_b0_4_921600 }, + /* + * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com + */ + { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RAC4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_1_1382400 }, + + /* + * Dell Remote Access Card III - Tim_T_Murphy@Dell.com + */ + { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RACIII, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_1_1382400 }, + + /* + * RAStel 2 port modem, gerg@moreton.com.au + */ + { PCI_VENDOR_ID_MORETON, PCI_DEVICE_ID_RASTEL_2PORT, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_bt_2_115200 }, + + /* + * EKF addition for i960 Boards form EKF with serial port + */ + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80960_RP, + 0xE4BF, PCI_ANY_ID, 0, 0, + pbn_intel_i960 }, + + /* + * Xircom Cardbus/Ethernet combos + */ + { PCI_VENDOR_ID_XIRCOM, PCI_DEVICE_ID_XIRCOM_X3201_MDM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_115200 }, + /* + * Xircom RBM56G cardbus modem - Dirk Arnold (temp entry) + */ + { PCI_VENDOR_ID_XIRCOM, PCI_DEVICE_ID_XIRCOM_RBM56G, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_115200 }, + + /* + * Untested PCI modems, sent in from various folks... 
+ */ + + /* + * Elsa Model 56K PCI Modem, from Andreas Rath + */ + { PCI_VENDOR_ID_ROCKWELL, 0x1004, + 0x1048, 0x1500, 0, 0, + pbn_b1_1_115200 }, + + { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, + 0xFF00, 0, 0, 0, + pbn_sgi_ioc3 }, + + /* + * HP Diva card + */ + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA, + PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_RMP3, 0, 0, + pbn_b1_1_115200 }, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_5_115200 }, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_115200 }, + /* HPE PCI serial device */ + { PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_1_115200 }, + + { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b3_2_115200 }, + { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b3_4_115200 }, + { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b3_8_115200 }, + /* + * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) + */ + { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_1_115200 }, + /* + * ITE + */ + { PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8872, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b1_bt_1_115200 }, + + /* + * IntaShield IS-100 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D60, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b2_1_115200 }, + /* + * IntaShield IS-200 + */ + { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */ + pbn_b2_2_115200 }, + /* + * IntaShield IS-400 + */ + { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ + pbn_b2_4_115200 }, + /* + * IntaShield IX-100 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4027, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, + /* + * IntaShield IX-200 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4028, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_2_15625000 }, + /* + * IntaShield IX-400 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4029, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_4_15625000 }, + /* Brainboxes Devices */ + /* + * Brainboxes UC-101 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0BA1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-235/246 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0AA1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0AA2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, + /* + * Brainboxes UC-253/UC-734 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0CA1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-260/271/701/756 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D21, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0E34, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-268 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0841, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-275/279 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0881, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_8_115200 }, + /* + * Brainboxes UC-302 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08E1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x08E2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x08E3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 
pbn_b2_2_115200 }, + /* + * Brainboxes UC-310 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08C1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-313 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08A1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x08A2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x08A3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-320/324 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0A61, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, + /* + * Brainboxes UC-346 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0B01, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0B02, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-357 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0A81, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0A82, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0A83, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-368 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0C41, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-420 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0921, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-607 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x09A1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x09A2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x09A3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-836 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D41, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UP-189 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0AC1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0AC2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0AC3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UP-200 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0B21, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0B22, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0B23, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UP-869 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0C01, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C02, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C03, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UP-880 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0C21, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C22, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C23, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes PX-101 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4005, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b0_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x4019, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_2_15625000 }, + /* + * Brainboxes PX-235/246 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4004, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b0_1_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x4016, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, + /* + * Brainboxes PX-203/PX-257 + */ + { 
PCI_VENDOR_ID_INTASHIELD, 0x4006, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b0_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x4015, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_2_15625000 }, + /* + * Brainboxes PX-260/PX-701 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x400A, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_4_15625000 }, + /* + * Brainboxes PX-275/279 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0E41, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_8_115200 }, + /* + * Brainboxes PX-310 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x400E, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_2_15625000 }, + /* + * Brainboxes PX-313 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x400C, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_2_15625000 }, + /* + * Brainboxes PX-320/324/PX-376/PX-387 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x400B, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, + /* + * Brainboxes PX-335/346 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x400F, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_4_15625000 }, + /* + * Brainboxes PX-368 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4010, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_4_15625000 }, + /* + * Brainboxes PX-420 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4000, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b0_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x4011, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_4_15625000 }, + /* + * Brainboxes PX-475 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x401D, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, + /* + * Brainboxes PX-803/PX-857 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4009, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b0_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x4018, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_2_15625000 }, + { PCI_VENDOR_ID_INTASHIELD, 0x401E, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_2_15625000 }, + /* + * Brainboxes PX-820 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4002, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b0_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x4013, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_4_15625000 }, + /* + * Brainboxes PX-846 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4008, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b0_1_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x4017, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, + + /* + * Perle PCI-RAS cards + */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, + PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS4, + 0, 0, pbn_b2_4_921600 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, + PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS8, + 0, 0, pbn_b2_8_921600 }, + + /* + * Mainpine series cards: Fairly standard layout but fools + * parts of the autodetect in some cases and uses otherwise + * unmatched communications subclasses in the PCI Express case + */ + + { /* RockForceDUO */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0200, + 0, 0, pbn_b0_2_115200 }, + { /* RockForceQUATRO */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0300, + 0, 0, pbn_b0_4_115200 }, + { /* RockForceDUO+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0400, + 0, 0, pbn_b0_2_115200 }, + { /* RockForceQUATRO+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0500, + 0, 0, pbn_b0_4_115200 }, + { /* RockForce+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0600, + 0, 0, pbn_b0_2_115200 }, + { /* RockForce+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + 
PCI_VENDOR_ID_MAINPINE, 0x0700, + 0, 0, pbn_b0_4_115200 }, + { /* RockForceOCTO+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0800, + 0, 0, pbn_b0_8_115200 }, + { /* RockForceDUO+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0C00, + 0, 0, pbn_b0_2_115200 }, + { /* RockForceQUARTRO+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x0D00, + 0, 0, pbn_b0_4_115200 }, + { /* RockForceOCTO+ */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x1D00, + 0, 0, pbn_b0_8_115200 }, + { /* RockForceD1 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2000, + 0, 0, pbn_b0_1_115200 }, + { /* RockForceF1 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2100, + 0, 0, pbn_b0_1_115200 }, + { /* RockForceD2 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2200, + 0, 0, pbn_b0_2_115200 }, + { /* RockForceF2 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2300, + 0, 0, pbn_b0_2_115200 }, + { /* RockForceD4 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2400, + 0, 0, pbn_b0_4_115200 }, + { /* RockForceF4 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2500, + 0, 0, pbn_b0_4_115200 }, + { /* RockForceD8 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2600, + 0, 0, pbn_b0_8_115200 }, + { /* RockForceF8 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x2700, + 0, 0, pbn_b0_8_115200 }, + { /* IQ Express D1 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3000, + 0, 0, pbn_b0_1_115200 }, + { /* IQ Express F1 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3100, + 0, 0, pbn_b0_1_115200 }, + { /* IQ Express D2 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3200, + 0, 0, pbn_b0_2_115200 }, + { /* IQ Express F2 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3300, + 0, 0, pbn_b0_2_115200 }, + { /* IQ Express D4 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3400, + 0, 0, pbn_b0_4_115200 }, + { /* IQ Express F4 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3500, + 0, 0, pbn_b0_4_115200 }, + { /* IQ Express D8 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3C00, + 0, 0, pbn_b0_8_115200 }, + { /* IQ Express F8 */ + PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, + PCI_VENDOR_ID_MAINPINE, 0x3D00, + 0, 0, pbn_b0_8_115200 }, + + + /* + * PA Semi PA6T-1682M on-chip UART + */ + { PCI_VENDOR_ID_PASEMI, 0xa004, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pasemi_1682M }, + + /* + * National Instruments + */ + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI23216, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_16_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2328, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_8_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2324, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_4_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2322, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_2_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2324I, + 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_4_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2322I, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_2_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_23216, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_16_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2328, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_8_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2324, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_4_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2322, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_2_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8422_2324, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_4_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8422_2322, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_bt_2_115200 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2322, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_2 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2322, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_2 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2324, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_4 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2324, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_4 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2328, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_8 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2328, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_8 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_23216, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_16 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_23216, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_16 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8432_2322, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_2 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2322, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_2 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8432_2324, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_4 }, + { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ni8430_4 }, + + /* + * MOXA + */ + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102E, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_2p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102EL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_2p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP104EL_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_4p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP114EL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_4p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_8p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_8p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118EL_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_8p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118E_A_I, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_8p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP132EL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_2p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP134EL_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_4p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP138E_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_8p }, + { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP168EL_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_moxa8250_8p }, + + /* + * ADDI-DATA GmbH communication cards + */ + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7500, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_4_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + 
PCI_DEVICE_ID_ADDIDATA_APCI7420, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_2_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7300, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_AMCC, + PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b1_8_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7500_2, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_4_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7420_2, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_2_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7300_2, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7500_3, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_4_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7420_3, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_2_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7300_3, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCI7800_3, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_b0_8_115200 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7500, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_4_3906250 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7420, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_2_3906250 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7300, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_1_3906250 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7800, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_8_3906250 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, + PCI_VENDOR_ID_IBM, 0x0299, + 0, 0, pbn_b0_bt_2_115200 }, + + /* + * other NetMos 9835 devices are most likely handled by the + * parport_serial driver, check drivers/parport/parport_serial.c + * before adding them here. 
+ */ + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + + /* the 9901 is a rebranded 9912 */ + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9904, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, + 0xA000, 0x3002, + 0, 0, pbn_NETMOS9900_2s_115200 }, + + /* + * Best Connectivity and Rosewill PCI Multi I/O cards + */ + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, + 0xA000, 0x3002, + 0, 0, pbn_b0_bt_2_115200 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, + 0xA000, 0x3004, + 0, 0, pbn_b0_bt_4_115200 }, + /* Intel CE4100 */ + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_ce4100_1_115200 }, + + /* + * Cronyx Omega PCI + */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_omegapci }, + + /* + * Broadcom TruManage + */ + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_brcm_trumanage }, + + /* + * AgeStar as-prs2-009 + */ + { PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_bt_2_115200 }, + + /* + * WCH CH353 series devices: The 2S1P is handled by parport_serial + * so not listed here. + */ + { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_4S, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_bt_4_115200 }, + + { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_2S1PF, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_bt_2_115200 }, + + { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_bt_4_115200 }, + + { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_wch382_2 }, + + { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_wch384_4 }, + + { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_8S, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_wch384_8 }, + /* + * Realtek RealManage + */ + { PCI_VENDOR_ID_REALTEK, 0x816a, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_REALTEK, 0x816b, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + + /* Fintek PCI serial cards */ + { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 }, + { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 }, + { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 }, + { PCI_DEVICE(0x1c29, 0x1204), .driver_data = pbn_fintek_F81504A }, + { PCI_DEVICE(0x1c29, 0x1208), .driver_data = pbn_fintek_F81508A }, + { PCI_DEVICE(0x1c29, 0x1212), .driver_data = pbn_fintek_F81512A }, + + /* MKS Tenta SCOM-080x serial cards */ + { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, + { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, + + /* Amazon PCI serial device */ + { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 }, + + /* + * These entries match devices with class COMMUNICATION_SERIAL, + * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL + */ + { PCI_ANY_ID, PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_SERIAL << 8, + 0xffff00, pbn_default }, + { PCI_ANY_ID, PCI_ANY_ID, + 
PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MODEM << 8, + 0xffff00, pbn_default }, + { PCI_ANY_ID, PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, + 0xffff00, pbn_default }, + { 0, } +}; + +static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, + pci_channel_state_t state) +{ + struct serial_private *priv = pci_get_drvdata(dev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (priv) + pciserial_detach_ports(priv); + + pci_disable_device(dev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) +{ + int rc; + + rc = pci_enable_device(dev); + + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + + pci_restore_state(dev); + pci_save_state(dev); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void serial8250_io_resume(struct pci_dev *dev) +{ + struct serial_private *priv = pci_get_drvdata(dev); + struct serial_private *new; + + if (!priv) + return; + + new = pciserial_init_ports(dev, priv->board); + if (!IS_ERR(new)) { + pci_set_drvdata(dev, new); + kfree(priv); + } +} + +static const struct pci_error_handlers serial8250_err_handler = { + .error_detected = serial8250_io_error_detected, + .slot_reset = serial8250_io_slot_reset, + .resume = serial8250_io_resume, +}; + +static struct pci_driver serial_pci_driver = { + .name = "serial", + .probe = pciserial_init_one, + .remove = pciserial_remove_one, + .driver = { + .pm = &pciserial_pm_ops, + }, + .id_table = serial_pci_tbl, + .err_handler = &serial8250_err_handler, +}; + +module_pci_driver(serial_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Generic 8250/16x50 PCI serial probe module"); +MODULE_DEVICE_TABLE(pci, serial_pci_tbl); diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c new file mode 100644 index 000000000..b8d5b7714 --- /dev/null +++ b/drivers/tty/serial/8250/8250_pericom.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for Pericom UART */ + +#include +#include +#include +#include + +#include "8250.h" + +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SDB 0x1051 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2S 0x1053 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_4 0x105a +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_4 0x105b +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SDB 0x105c +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4S 0x105e +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_8 0x106a +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_8 0x106b +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_2DB 0x1091 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_2 0x1093 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4 0x1098 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4DB 0x1099 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_4 0x109b +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_8 0x10a9 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SMDB 0x10d1 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2SM 0x10d3 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SM 0x10d9 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SMDB 0x10da +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4SM 0x10dc +#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_8SM 0x10e9 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_1 0x1108 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_2 0x1110 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_2 0x1111 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_4 0x1118 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_4 0x1119 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2S 0x1152 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4S 
0x115a +#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_2 0x1190 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_2 0x1191 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_4 0x1198 +#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_4 0x1199 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2SM 0x11d0 +#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4SM 0x11d8 + +struct pericom8250 { + void __iomem *virt; + unsigned int nr; + int line[]; +}; + +static void pericom_do_set_divisor(struct uart_port *port, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + int scr; + + for (scr = 16; scr > 4; scr--) { + unsigned int maxrate = port->uartclk / scr; + unsigned int divisor = max(maxrate / baud, 1U); + int delta = maxrate / divisor - baud; + + if (baud > maxrate + baud / 50) + continue; + + if (delta > baud / 50) + divisor++; + + if (divisor > 0xffff) + continue; + + /* Update delta due to possible divisor change */ + delta = maxrate / divisor - baud; + if (abs(delta) < baud / 50) { + struct uart_8250_port *up = up_to_u8250p(port); + int lcr = serial_port_in(port, UART_LCR); + + serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB); + serial_dl_write(up, divisor); + serial_port_out(port, 2, 16 - scr); + serial_port_out(port, UART_LCR, lcr); + return; + } + } +} + +static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + unsigned int nr, i, bar = 0, maxnr; + struct pericom8250 *pericom; + struct uart_8250_port uart; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + maxnr = pci_resource_len(pdev, bar) >> 3; + + if (pdev->vendor == PCI_VENDOR_ID_PERICOM) + nr = pdev->device & 0x0f; + else if (pdev->vendor == PCI_VENDOR_ID_ACCESSIO) + nr = BIT(((pdev->device & 0x38) >> 3) - 1); + else + nr = 1; + + pericom = devm_kzalloc(&pdev->dev, struct_size(pericom, line, nr), GFP_KERNEL); + if (!pericom) + return -ENOMEM; + + pericom->virt = pcim_iomap(pdev, bar, 0); + if (!pericom->virt) + return -ENOMEM; + + memset(&uart, 0, sizeof(uart)); + + uart.port.dev = &pdev->dev; + uart.port.irq = pdev->irq; + uart.port.private_data = pericom; + uart.port.iotype = UPIO_PORT; + uart.port.uartclk = 921600 * 16; + uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; + uart.port.set_divisor = pericom_do_set_divisor; + for (i = 0; i < nr && i < maxnr; i++) { + unsigned int offset = (i == 3 && nr == 4) ? 
0x38 : i * 0x8; + + uart.port.iobase = pci_resource_start(pdev, bar) + offset; + + dev_dbg(&pdev->dev, "Setup PCI port: port %lx, irq %d, type %d\n", + uart.port.iobase, uart.port.irq, uart.port.iotype); + + pericom->line[i] = serial8250_register_8250_port(&uart); + if (pericom->line[i] < 0) { + dev_err(&pdev->dev, + "Couldn't register serial port %lx, irq %d, type %d, error %d\n", + uart.port.iobase, uart.port.irq, + uart.port.iotype, pericom->line[i]); + break; + } + } + pericom->nr = i; + + pci_set_drvdata(pdev, pericom); + return 0; +} + +static void pericom8250_remove(struct pci_dev *pdev) +{ + struct pericom8250 *pericom = pci_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < pericom->nr; i++) + serial8250_unregister_port(pericom->line[i]); +} + +static const struct pci_device_id pericom8250_pci_ids[] = { + /* + * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART + * (Only 7954 has an offset jump for port 4) + */ + { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951) }, + { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952) }, + { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954) }, + { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958) }, + + /* + * ACCES I/O Products quad + * (Only 7954 has an offset jump for port 4) + */ + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SDB) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2S) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SDB) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4S) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_8) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_8) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_2DB) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_2) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4DB) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_8) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SMDB) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2SM) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SM) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SMDB) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4SM) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_8SM) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_1) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_2) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_2) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2S) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4S) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_2) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_2) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_4) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2SM) }, + { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4SM) }, + { } +}; +MODULE_DEVICE_TABLE(pci, pericom8250_pci_ids); + 
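/*
 * Editor's illustrative sketch (not part of the driver or of this patch):
 * the divisor selection that pericom_do_set_divisor() above performs,
 * reduced to plain arithmetic so it can be compiled and run standalone in
 * userspace. It scans sample-clock ratios (SCR) from 16 down to 5 and
 * accepts the first (scr, divisor) pair whose achieved rate is within
 * baud/50 (~2%) of the request, exactly the tolerance used above; the
 * register writes (DLAB, divisor latch, SCR at offset 2 as 16 - scr) are
 * omitted here. The helper name pick_divisor() is hypothetical; the clock
 * value 921600 * 16 is the uartclk this driver programs, while the
 * 250000 baud request is just an example of a non-standard rate.
 */
#include <stdio.h>
#include <stdlib.h>

static void pick_divisor(unsigned int uartclk, unsigned int baud)
{
	unsigned int scr;

	for (scr = 16; scr > 4; scr--) {
		unsigned int maxrate = uartclk / scr;
		unsigned int divisor = maxrate / baud ? maxrate / baud : 1;
		int delta = (int)(maxrate / divisor) - (int)baud;

		if (baud > maxrate + baud / 50)
			continue;	/* this ratio cannot reach the requested rate */

		if (delta > (int)(baud / 50))
			divisor++;	/* overshooting by more than ~2%: step the divisor up */

		if (divisor > 0xffff)
			continue;	/* must fit the 16-bit divisor latch */

		/* re-check the error after the possible divisor bump */
		delta = (int)(maxrate / divisor) - (int)baud;
		if (abs(delta) < (int)(baud / 50)) {
			printf("baud %u: scr=%u divisor=%u actual=%u\n",
			       baud, scr, divisor, maxrate / divisor);
			return;
		}
	}
	printf("baud %u: no divisor within ~2%%\n", baud);
}

int main(void)
{
	pick_divisor(921600 * 16, 115200);	/* standard rate: scr=16, divisor=8, exact */
	pick_divisor(921600 * 16, 250000);	/* non-standard rate: a smaller scr gets within 2% */
	return 0;
}
/*
 * End of editor's sketch. The variable sample clock is what lets these
 * Pericom parts approximate rates a fixed divide-by-16 UART cannot hit.
 */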
+static struct pci_driver pericom8250_pci_driver = { + .name = "8250_pericom", + .id_table = pericom8250_pci_ids, + .probe = pericom8250_probe, + .remove = pericom8250_remove, +}; +module_pci_driver(pericom8250_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Pericom UART driver"); diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c new file mode 100644 index 000000000..1974bbadc --- /dev/null +++ b/drivers/tty/serial/8250/8250_pnp.c @@ -0,0 +1,540 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Probe for 8250/16550-type ISAPNP serial ports. + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright (C) 2001 Russell King, All Rights Reserved. + * + * Ported to the Linux PnP Layer - (C) Adam Belay. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "8250.h" + +#define UNKNOWN_DEV 0x3000 +#define CIR_PORT 0x0800 + +static const struct pnp_device_id pnp_dev_table[] = { + /* Archtek America Corp. */ + /* Archtek SmartLink Modem 3334BT Plug & Play */ + { "AAC000F", 0 }, + /* Anchor Datacomm BV */ + /* SXPro 144 External Data Fax Modem Plug & Play */ + { "ADC0001", 0 }, + /* SXPro 288 External Data Fax Modem Plug & Play */ + { "ADC0002", 0 }, + /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */ + { "AEI0250", 0 }, + /* Actiontec ISA PNP 56K X2 Fax Modem */ + { "AEI1240", 0 }, + /* Rockwell 56K ACF II Fax+Data+Voice Modem */ + { "AKY1021", 0 /*SPCI_FL_NO_SHIRQ*/ }, + /* + * ALi Fast Infrared Controller + * Native driver (ali-ircc) is broken so at least + * it can be used with irtty-sir. + */ + { "ALI5123", 0 }, + /* AZT3005 PnP SOUND DEVICE */ + { "AZT4001", 0 }, + /* Best Data Products Inc. Smart One 336F PnP Modem */ + { "BDP3336", 0 }, + /* Boca Research */ + /* Boca Complete Ofc Communicator 14.4 Data-FAX */ + { "BRI0A49", 0 }, + /* Boca Research 33,600 ACF Modem */ + { "BRI1400", 0 }, + /* Boca 33.6 Kbps Internal FD34FSVD */ + { "BRI3400", 0 }, + /* Computer Peripherals Inc */ + /* EuroViVa CommCenter-33.6 SP PnP */ + { "CPI4050", 0 }, + /* Creative Labs */ + /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */ + { "CTL3001", 0 }, + /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */ + { "CTL3011", 0 }, + /* Davicom ISA 33.6K Modem */ + { "DAV0336", 0 }, + /* Creative */ + /* Creative Modem Blaster Flash56 DI5601-1 */ + { "DMB1032", 0 }, + /* Creative Modem Blaster V.90 DI5660 */ + { "DMB2001", 0 }, + /* E-Tech */ + /* E-Tech CyberBULLET PC56RVP */ + { "ETT0002", 0 }, + /* FUJITSU */ + /* Fujitsu 33600 PnP-I2 R Plug & Play */ + { "FUJ0202", 0 }, + /* Fujitsu FMV-FX431 Plug & Play */ + { "FUJ0205", 0 }, + /* Fujitsu 33600 PnP-I4 R Plug & Play */ + { "FUJ0206", 0 }, + /* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */ + { "FUJ0209", 0 }, + /* Archtek America Corp. 
*/ + /* Archtek SmartLink Modem 3334BT Plug & Play */ + { "GVC000F", 0 }, + /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ + { "GVC0303", 0 }, + /* Hayes */ + /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ + { "HAY0001", 0 }, + /* Hayes Optima 336 V.34 + FAX + Voice PnP */ + { "HAY000C", 0 }, + /* Hayes Optima 336B V.34 + FAX + Voice PnP */ + { "HAY000D", 0 }, + /* Hayes Accura 56K Ext Fax Modem PnP */ + { "HAY5670", 0 }, + /* Hayes Accura 56K Ext Fax Modem PnP */ + { "HAY5674", 0 }, + /* Hayes Accura 56K Fax Modem PnP */ + { "HAY5675", 0 }, + /* Hayes 288, V.34 + FAX */ + { "HAYF000", 0 }, + /* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */ + { "HAYF001", 0 }, + /* IBM */ + /* IBM Thinkpad 701 Internal Modem Voice */ + { "IBM0033", 0 }, + /* Intermec */ + /* Intermec CV60 touchscreen port */ + { "PNP4972", 0 }, + /* Intertex */ + /* Intertex 28k8 33k6 Voice EXT PnP */ + { "IXDC801", 0 }, + /* Intertex 33k6 56k Voice EXT PnP */ + { "IXDC901", 0 }, + /* Intertex 28k8 33k6 Voice SP EXT PnP */ + { "IXDD801", 0 }, + /* Intertex 33k6 56k Voice SP EXT PnP */ + { "IXDD901", 0 }, + /* Intertex 28k8 33k6 Voice SP INT PnP */ + { "IXDF401", 0 }, + /* Intertex 28k8 33k6 Voice SP EXT PnP */ + { "IXDF801", 0 }, + /* Intertex 33k6 56k Voice SP EXT PnP */ + { "IXDF901", 0 }, + /* Kortex International */ + /* KORTEX 28800 Externe PnP */ + { "KOR4522", 0 }, + /* KXPro 33.6 Vocal ASVD PnP */ + { "KORF661", 0 }, + /* Lasat */ + /* LASAT Internet 33600 PnP */ + { "LAS4040", 0 }, + /* Lasat Safire 560 PnP */ + { "LAS4540", 0 }, + /* Lasat Safire 336 PnP */ + { "LAS5440", 0 }, + /* Microcom, Inc. */ + /* Microcom TravelPorte FAST V.34 Plug & Play */ + { "MNP0281", 0 }, + /* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */ + { "MNP0336", 0 }, + /* Microcom DeskPorte FAST EP 28.8 Plug & Play */ + { "MNP0339", 0 }, + /* Microcom DeskPorte 28.8P Plug & Play */ + { "MNP0342", 0 }, + /* Microcom DeskPorte FAST ES 28.8 Plug & Play */ + { "MNP0500", 0 }, + /* Microcom DeskPorte FAST ES 28.8 Plug & Play */ + { "MNP0501", 0 }, + /* Microcom DeskPorte 28.8S Internal Plug & Play */ + { "MNP0502", 0 }, + /* Motorola */ + /* Motorola BitSURFR Plug & Play */ + { "MOT1105", 0 }, + /* Motorola TA210 Plug & Play */ + { "MOT1111", 0 }, + /* Motorola HMTA 200 (ISDN) Plug & Play */ + { "MOT1114", 0 }, + /* Motorola BitSURFR Plug & Play */ + { "MOT1115", 0 }, + /* Motorola Lifestyle 28.8 Internal */ + { "MOT1190", 0 }, + /* Motorola V.3400 Plug & Play */ + { "MOT1501", 0 }, + /* Motorola Lifestyle 28.8 V.34 Plug & Play */ + { "MOT1502", 0 }, + /* Motorola Power 28.8 V.34 Plug & Play */ + { "MOT1505", 0 }, + /* Motorola ModemSURFR External 28.8 Plug & Play */ + { "MOT1509", 0 }, + /* Motorola Premier 33.6 Desktop Plug & Play */ + { "MOT150A", 0 }, + /* Motorola VoiceSURFR 56K External PnP */ + { "MOT150F", 0 }, + /* Motorola ModemSURFR 56K External PnP */ + { "MOT1510", 0 }, + /* Motorola ModemSURFR 56K Internal PnP */ + { "MOT1550", 0 }, + /* Motorola ModemSURFR Internal 28.8 Plug & Play */ + { "MOT1560", 0 }, + /* Motorola Premier 33.6 Internal Plug & Play */ + { "MOT1580", 0 }, + /* Motorola OnlineSURFR 28.8 Internal Plug & Play */ + { "MOT15B0", 0 }, + /* Motorola VoiceSURFR 56K Internal PnP */ + { "MOT15F0", 0 }, + /* Com 1 */ + /* Deskline K56 Phone System PnP */ + { "MVX00A1", 0 }, + /* PC Rider K56 Phone System PnP */ + { "MVX00F2", 0 }, + /* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */ + { "nEC8241", 0 }, + /* Pace 56 Voice Internal Plug & Play Modem */ + { "PMC2430", 0 }, + /* Generic */ + 
/* Generic standard PC COM port */ + { "PNP0500", 0 }, + /* Generic 16550A-compatible COM port */ + { "PNP0501", 0 }, + /* Compaq 14400 Modem */ + { "PNPC000", 0 }, + /* Compaq 2400/9600 Modem */ + { "PNPC001", 0 }, + /* Dial-Up Networking Serial Cable between 2 PCs */ + { "PNPC031", 0 }, + /* Dial-Up Networking Parallel Cable between 2 PCs */ + { "PNPC032", 0 }, + /* Standard 9600 bps Modem */ + { "PNPC100", 0 }, + /* Standard 14400 bps Modem */ + { "PNPC101", 0 }, + /* Standard 28800 bps Modem*/ + { "PNPC102", 0 }, + /* Standard Modem*/ + { "PNPC103", 0 }, + /* Standard 9600 bps Modem*/ + { "PNPC104", 0 }, + /* Standard 14400 bps Modem*/ + { "PNPC105", 0 }, + /* Standard 28800 bps Modem*/ + { "PNPC106", 0 }, + /* Standard Modem */ + { "PNPC107", 0 }, + /* Standard 9600 bps Modem */ + { "PNPC108", 0 }, + /* Standard 14400 bps Modem */ + { "PNPC109", 0 }, + /* Standard 28800 bps Modem */ + { "PNPC10A", 0 }, + /* Standard Modem */ + { "PNPC10B", 0 }, + /* Standard 9600 bps Modem */ + { "PNPC10C", 0 }, + /* Standard 14400 bps Modem */ + { "PNPC10D", 0 }, + /* Standard 28800 bps Modem */ + { "PNPC10E", 0 }, + /* Standard Modem */ + { "PNPC10F", 0 }, + /* Standard PCMCIA Card Modem */ + { "PNP2000", 0 }, + /* Rockwell */ + /* Modular Technology */ + /* Rockwell 33.6 DPF Internal PnP */ + /* Modular Technology 33.6 Internal PnP */ + { "ROK0030", 0 }, + /* Kortex International */ + /* KORTEX 14400 Externe PnP */ + { "ROK0100", 0 }, + /* Rockwell 28.8 */ + { "ROK4120", 0 }, + /* Viking Components, Inc */ + /* Viking 28.8 INTERNAL Fax+Data+Voice PnP */ + { "ROK4920", 0 }, + /* Rockwell */ + /* British Telecom */ + /* Modular Technology */ + /* Rockwell 33.6 DPF External PnP */ + /* BT Prologue 33.6 External PnP */ + /* Modular Technology 33.6 External PnP */ + { "RSS00A0", 0 }, + /* Viking 56K FAX INT */ + { "RSS0262", 0 }, + /* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */ + { "RSS0250", 0 }, + /* SupraExpress 28.8 Data/Fax PnP modem */ + { "SUP1310", 0 }, + /* SupraExpress 336i PnP Voice Modem */ + { "SUP1381", 0 }, + /* SupraExpress 33.6 Data/Fax PnP modem */ + { "SUP1421", 0 }, + /* SupraExpress 33.6 Data/Fax PnP modem */ + { "SUP1590", 0 }, + /* SupraExpress 336i Sp ASVD */ + { "SUP1620", 0 }, + /* SupraExpress 33.6 Data/Fax PnP modem */ + { "SUP1760", 0 }, + /* SupraExpress 56i Sp Intl */ + { "SUP2171", 0 }, + /* Phoebe Micro */ + /* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */ + { "TEX0011", 0 }, + /* Archtek America Corp. */ + /* Archtek SmartLink Modem 3334BT Plug & Play */ + { "UAC000F", 0 }, + /* 3Com Corp. */ + /* Gateway Telepath IIvi 33.6 */ + { "USR0000", 0 }, + /* U.S. Robotics Sporster 33.6K Fax INT PnP */ + { "USR0002", 0 }, + /* Sportster Vi 14.4 PnP FAX Voicemail */ + { "USR0004", 0 }, + /* U.S. Robotics 33.6K Voice INT PnP */ + { "USR0006", 0 }, + /* U.S. Robotics 33.6K Voice EXT PnP */ + { "USR0007", 0 }, + /* U.S. Robotics Courier V.Everything INT PnP */ + { "USR0009", 0 }, + /* U.S. Robotics 33.6K Voice INT PnP */ + { "USR2002", 0 }, + /* U.S. Robotics 56K Voice INT PnP */ + { "USR2070", 0 }, + /* U.S. Robotics 56K Voice EXT PnP */ + { "USR2080", 0 }, + /* U.S. Robotics 56K FAX INT */ + { "USR3031", 0 }, + /* U.S. Robotics 56K FAX INT */ + { "USR3050", 0 }, + /* U.S. Robotics 56K Voice INT PnP */ + { "USR3070", 0 }, + /* U.S. Robotics 56K Voice EXT PnP */ + { "USR3080", 0 }, + /* U.S. Robotics 56K Voice INT PnP */ + { "USR3090", 0 }, + /* U.S. Robotics 56K Message */ + { "USR9100", 0 }, + /* U.S. Robotics 56K FAX EXT PnP*/ + { "USR9160", 0 }, + /* U.S. 
Robotics 56K FAX INT PnP*/ + { "USR9170", 0 }, + /* U.S. Robotics 56K Voice EXT PnP*/ + { "USR9180", 0 }, + /* U.S. Robotics 56K Voice INT PnP*/ + { "USR9190", 0 }, + /* Wacom tablets */ + { "WACFXXX", 0 }, + /* Compaq touchscreen */ + { "FPI2002", 0 }, + /* Fujitsu Stylistic touchscreens */ + { "FUJ02B2", 0 }, + { "FUJ02B3", 0 }, + /* Fujitsu Stylistic LT touchscreens */ + { "FUJ02B4", 0 }, + /* Passive Fujitsu Stylistic touchscreens */ + { "FUJ02B6", 0 }, + { "FUJ02B7", 0 }, + { "FUJ02B8", 0 }, + { "FUJ02B9", 0 }, + { "FUJ02BC", 0 }, + /* Fujitsu Wacom Tablet PC device */ + { "FUJ02E5", 0 }, + /* Fujitsu P-series tablet PC device */ + { "FUJ02E6", 0 }, + /* Fujitsu Wacom 2FGT Tablet PC device */ + { "FUJ02E7", 0 }, + /* Fujitsu Wacom 1FGT Tablet PC device */ + { "FUJ02E9", 0 }, + /* + * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 + * in disguise). + */ + { "LTS0001", 0 }, + /* Rockwell's (PORALiNK) 33600 INT PNP */ + { "WCI0003", 0 }, + /* Unknown PnP modems */ + { "PNPCXXX", UNKNOWN_DEV }, + /* More unknown PnP modems */ + { "PNPDXXX", UNKNOWN_DEV }, + /* + * Winbond CIR port, should not be probed. We should keep track of + * it to prevent the legacy serial driver from probing it. + */ + { "WEC1022", CIR_PORT }, + /* + * SMSC IrCC SIR/FIR port, should not be probed by serial driver as + * well so its own driver can bind to it. + */ + { "SMCF010", CIR_PORT }, + { "", 0 } +}; + +MODULE_DEVICE_TABLE(pnp, pnp_dev_table); + +static const char *modem_names[] = { + "MODEM", "Modem", "modem", "FAX", "Fax", "fax", + "56K", "56k", "K56", "33.6", "28.8", "14.4", + "33,600", "28,800", "14,400", "33.600", "28.800", "14.400", + "33600", "28800", "14400", "V.90", "V.34", "V.32", NULL +}; + +static bool check_name(const char *name) +{ + const char **tmp; + + for (tmp = modem_names; *tmp; tmp++) + if (strstr(name, *tmp)) + return true; + + return false; +} + +static bool check_resources(struct pnp_dev *dev) +{ + static const resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8}; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(base); i++) { + if (pnp_possible_config(dev, IORESOURCE_IO, base[i], 8)) + return true; + } + + return false; +} + +/* + * Given a complete unknown PnP device, try to use some heuristics to + * detect modems. Currently use such heuristic set: + * - dev->name or dev->bus->name must contain "modem" substring; + * - device must have only one IO region (8 byte long) with base address + * 0x2e8, 0x3e8, 0x2f8 or 0x3f8. + * + * Such detection looks very ugly, but can detect at least some of numerous + * PnP modems, alternatively we must hardcode all modems in pnp_devices[] + * table. 
+ */ +static int serial_pnp_guess_board(struct pnp_dev *dev) +{ + if (!(check_name(pnp_dev_name(dev)) || + (dev->card && check_name(dev->card->name)))) + return -ENODEV; + + if (check_resources(dev)) + return 0; + + return -ENODEV; +} + +static int +serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) +{ + struct uart_8250_port uart, *port; + int ret, line, flags = dev_id->driver_data; + + if (flags & UNKNOWN_DEV) { + ret = serial_pnp_guess_board(dev); + if (ret < 0) + return ret; + } + + memset(&uart, 0, sizeof(uart)); + if (pnp_irq_valid(dev, 0)) + uart.port.irq = pnp_irq(dev, 0); + if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) { + uart.port.iobase = pnp_port_start(dev, 2); + uart.port.iotype = UPIO_PORT; + } else if (pnp_port_valid(dev, 0)) { + uart.port.iobase = pnp_port_start(dev, 0); + uart.port.iotype = UPIO_PORT; + } else if (pnp_mem_valid(dev, 0)) { + uart.port.mapbase = pnp_mem_start(dev, 0); + uart.port.iotype = UPIO_MEM; + uart.port.flags = UPF_IOREMAP; + } else + return -ENODEV; + + dev_dbg(&dev->dev, + "Setup PNP port: port %#lx, mem %#llx, irq %u, type %u\n", + uart.port.iobase, (unsigned long long)uart.port.mapbase, + uart.port.irq, uart.port.iotype); + + if (flags & CIR_PORT) { + uart.port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE; + uart.port.type = PORT_8250_CIR; + } + + uart.port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; + if (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE) + uart.port.flags |= UPF_SHARE_IRQ; + uart.port.uartclk = 1843200; + device_property_read_u32(&dev->dev, "clock-frequency", &uart.port.uartclk); + uart.port.dev = &dev->dev; + + line = serial8250_register_8250_port(&uart); + if (line < 0 || (flags & CIR_PORT)) + return -ENODEV; + + port = serial8250_get_port(line); + if (uart_console(&port->port)) + dev->capabilities |= PNP_CONSOLE; + + pnp_set_drvdata(dev, (void *)((long)line + 1)); + return 0; +} + +static void serial_pnp_remove(struct pnp_dev *dev) +{ + long line = (long)pnp_get_drvdata(dev); + + dev->capabilities &= ~PNP_CONSOLE; + if (line) + serial8250_unregister_port(line - 1); +} + +static int __maybe_unused serial_pnp_suspend(struct device *dev) +{ + long line = (long)dev_get_drvdata(dev); + + if (!line) + return -ENODEV; + serial8250_suspend_port(line - 1); + return 0; +} + +static int __maybe_unused serial_pnp_resume(struct device *dev) +{ + long line = (long)dev_get_drvdata(dev); + + if (!line) + return -ENODEV; + serial8250_resume_port(line - 1); + return 0; +} + +static SIMPLE_DEV_PM_OPS(serial_pnp_pm_ops, serial_pnp_suspend, serial_pnp_resume); + +static struct pnp_driver serial_pnp_driver = { + .name = "serial", + .probe = serial_pnp_probe, + .remove = serial_pnp_remove, + .driver = { + .pm = &serial_pnp_pm_ops, + }, + .id_table = pnp_dev_table, +}; + +int serial8250_pnp_init(void) +{ + return pnp_register_driver(&serial_pnp_driver); +} + +void serial8250_pnp_exit(void) +{ + pnp_unregister_driver(&serial_pnp_driver); +} + diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c new file mode 100644 index 000000000..8efe31448 --- /dev/null +++ b/drivers/tty/serial/8250/8250_port.c @@ -0,0 +1,3534 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Base port operations for 8250/16550-type serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * Split from 8250_core.c, Copyright (C) 2001 Russell King. + * + * A note about mapbase / membase + * + * mapbase is the physical address of the IO port. + * membase is an 'ioremapped' cookie. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8250.h" + +/* Nuvoton NPCM timeout register */ +#define UART_NPCM_TOR 7 +#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */ + +/* + * Debugging. + */ +#if 0 +#define DEBUG_AUTOCONF(fmt...) printk(fmt) +#else +#define DEBUG_AUTOCONF(fmt...) do { } while (0) +#endif + +/* + * Here we define the default xmit fifo size used for each type of UART. + */ +static const struct serial8250_config uart_config[] = { + [PORT_UNKNOWN] = { + .name = "unknown", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_8250] = { + .name = "8250", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16450] = { + .name = "16450", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16550] = { + .name = "16550", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16550A] = { + .name = "16550A", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, + [PORT_CIRRUS] = { + .name = "Cirrus", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16650] = { + .name = "ST16650", + .fifo_size = 1, + .tx_loadsz = 1, + .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, + }, + [PORT_16650V2] = { + .name = "ST16650V2", + .fifo_size = 32, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 | + UART_FCR_T_TRIG_00, + .rxtrig_bytes = {8, 16, 24, 28}, + .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, + }, + [PORT_16750] = { + .name = "TI16750", + .fifo_size = 64, + .tx_loadsz = 64, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | + UART_FCR7_64BYTE, + .rxtrig_bytes = {1, 16, 32, 56}, + .flags = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE, + }, + [PORT_STARTECH] = { + .name = "Startech", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16C950] = { + .name = "16C950/954", + .fifo_size = 128, + .tx_loadsz = 128, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01, + .rxtrig_bytes = {16, 32, 112, 120}, + /* UART_CAP_EFR breaks billionon CF bluetooth card. 
*/ + .flags = UART_CAP_FIFO | UART_CAP_SLEEP, + }, + [PORT_16654] = { + .name = "ST16654", + .fifo_size = 64, + .tx_loadsz = 32, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 | + UART_FCR_T_TRIG_10, + .rxtrig_bytes = {8, 16, 56, 60}, + .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, + }, + [PORT_16850] = { + .name = "XR16850", + .fifo_size = 128, + .tx_loadsz = 128, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, + }, + [PORT_RSA] = { + .name = "RSA", + .fifo_size = 2048, + .tx_loadsz = 2048, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11, + .flags = UART_CAP_FIFO, + }, + [PORT_NS16550A] = { + .name = "NS16550A", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_NATSEMI, + }, + [PORT_XSCALE] = { + .name = "XScale", + .fifo_size = 32, + .tx_loadsz = 32, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE, + }, + [PORT_OCTEON] = { + .name = "OCTEON", + .fifo_size = 64, + .tx_loadsz = 64, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO, + }, + [PORT_AR7] = { + .name = "AR7", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, + .flags = UART_CAP_FIFO /* | UART_CAP_AFE */, + }, + [PORT_U6_16550A] = { + .name = "U6_16550A", + .fifo_size = 64, + .tx_loadsz = 64, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, + [PORT_TEGRA] = { + .name = "Tegra", + .fifo_size = 32, + .tx_loadsz = 8, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 | + UART_FCR_T_TRIG_01, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO | UART_CAP_RTOIE, + }, + [PORT_XR17D15X] = { + .name = "XR17D15X", + .fifo_size = 64, + .tx_loadsz = 64, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR | + UART_CAP_SLEEP, + }, + [PORT_XR17V35X] = { + .name = "XR17V35X", + .fifo_size = 256, + .tx_loadsz = 256, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 | + UART_FCR_T_TRIG_11, + .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR | + UART_CAP_SLEEP, + }, + [PORT_LPC3220] = { + .name = "LPC3220", + .fifo_size = 64, + .tx_loadsz = 32, + .fcr = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO | + UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00, + .flags = UART_CAP_FIFO, + }, + [PORT_BRCM_TRUMANAGE] = { + .name = "TruManage", + .fifo_size = 1, + .tx_loadsz = 1024, + .flags = UART_CAP_HFIFO, + }, + [PORT_8250_CIR] = { + .name = "CIR port" + }, + [PORT_ALTR_16550_F32] = { + .name = "Altera 16550 FIFO32", + .fifo_size = 32, + .tx_loadsz = 32, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 8, 16, 30}, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, + [PORT_ALTR_16550_F64] = { + .name = "Altera 16550 FIFO64", + .fifo_size = 64, + .tx_loadsz = 64, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 16, 32, 62}, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, + [PORT_ALTR_16550_F128] = { + .name = "Altera 16550 FIFO128", + .fifo_size = 128, + .tx_loadsz = 128, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 32, 64, 126}, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, + /* + * tx_loadsz is set to 63-bytes instead of 64-bytes to implement + * workaround of errata A-008006 which states that tx_loadsz should + * be configured less than Maximum supported fifo bytes. 
+ */ + [PORT_16550A_FSL64] = { + .name = "16550A_FSL64", + .fifo_size = 64, + .tx_loadsz = 63, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | + UART_FCR7_64BYTE, + .flags = UART_CAP_FIFO | UART_CAP_NOTEMT, + }, + [PORT_RT2880] = { + .name = "Palmchip BK-3103", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, + [PORT_DA830] = { + .name = "TI DA8xx/66AK2x", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO | + UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, + [PORT_MTK_BTIF] = { + .name = "MediaTek BTIF", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, + .flags = UART_CAP_FIFO, + }, + [PORT_NPCM] = { + .name = "Nuvoton 16550", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, + [PORT_SUNIX] = { + .name = "Sunix", + .fifo_size = 128, + .tx_loadsz = 128, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 32, 64, 112}, + .flags = UART_CAP_FIFO | UART_CAP_SLEEP, + }, + [PORT_ASPEED_VUART] = { + .name = "ASPEED VUART", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, +}; + +/* Uart divisor latch read */ +static int default_serial_dl_read(struct uart_8250_port *up) +{ + /* Assign these in pieces to truncate any bits above 7. */ + unsigned char dll = serial_in(up, UART_DLL); + unsigned char dlm = serial_in(up, UART_DLM); + + return dll | dlm << 8; +} + +/* Uart divisor latch write */ +static void default_serial_dl_write(struct uart_8250_port *up, int value) +{ + serial_out(up, UART_DLL, value & 0xff); + serial_out(up, UART_DLM, value >> 8 & 0xff); +} + +#ifdef CONFIG_SERIAL_8250_RT288X + +#define UART_REG_UNMAPPED -1 + +/* Au1x00/RT288x UART hardware has a weird register layout */ +static const s8 au_io_in_map[8] = { + [UART_RX] = 0, + [UART_IER] = 2, + [UART_IIR] = 3, + [UART_LCR] = 5, + [UART_MCR] = 6, + [UART_LSR] = 7, + [UART_MSR] = 8, + [UART_SCR] = UART_REG_UNMAPPED, +}; + +static const s8 au_io_out_map[8] = { + [UART_TX] = 1, + [UART_IER] = 2, + [UART_FCR] = 4, + [UART_LCR] = 5, + [UART_MCR] = 6, + [UART_LSR] = UART_REG_UNMAPPED, + [UART_MSR] = UART_REG_UNMAPPED, + [UART_SCR] = UART_REG_UNMAPPED, +}; + +unsigned int au_serial_in(struct uart_port *p, int offset) +{ + if (offset >= ARRAY_SIZE(au_io_in_map)) + return UINT_MAX; + offset = au_io_in_map[offset]; + if (offset == UART_REG_UNMAPPED) + return UINT_MAX; + return __raw_readl(p->membase + (offset << p->regshift)); +} + +void au_serial_out(struct uart_port *p, int offset, int value) +{ + if (offset >= ARRAY_SIZE(au_io_out_map)) + return; + offset = au_io_out_map[offset]; + if (offset == UART_REG_UNMAPPED) + return; + __raw_writel(value, p->membase + (offset << p->regshift)); +} + +/* Au1x00 haven't got a standard divisor latch */ +static int au_serial_dl_read(struct uart_8250_port *up) +{ + return __raw_readl(up->port.membase + 0x28); +} + +static void au_serial_dl_write(struct uart_8250_port *up, int value) +{ + __raw_writel(value, up->port.membase + 0x28); +} + +#endif + +static unsigned int hub6_serial_in(struct uart_port *p, int offset) +{ + offset = offset << p->regshift; + outb(p->hub6 - 1 + 
offset, p->iobase); + return inb(p->iobase + 1); +} + +static void hub6_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + outb(p->hub6 - 1 + offset, p->iobase); + outb(value, p->iobase + 1); +} + +static unsigned int mem_serial_in(struct uart_port *p, int offset) +{ + offset = offset << p->regshift; + return readb(p->membase + offset); +} + +static void mem_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + writeb(value, p->membase + offset); +} + +static void mem16_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + writew(value, p->membase + offset); +} + +static unsigned int mem16_serial_in(struct uart_port *p, int offset) +{ + offset = offset << p->regshift; + return readw(p->membase + offset); +} + +static void mem32_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + writel(value, p->membase + offset); +} + +static unsigned int mem32_serial_in(struct uart_port *p, int offset) +{ + offset = offset << p->regshift; + return readl(p->membase + offset); +} + +static void mem32be_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + iowrite32be(value, p->membase + offset); +} + +static unsigned int mem32be_serial_in(struct uart_port *p, int offset) +{ + offset = offset << p->regshift; + return ioread32be(p->membase + offset); +} + +static unsigned int io_serial_in(struct uart_port *p, int offset) +{ + offset = offset << p->regshift; + return inb(p->iobase + offset); +} + +static void io_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + outb(value, p->iobase + offset); +} + +static int serial8250_default_handle_irq(struct uart_port *port); + +static void set_io_from_upio(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + + up->dl_read = default_serial_dl_read; + up->dl_write = default_serial_dl_write; + + switch (p->iotype) { + case UPIO_HUB6: + p->serial_in = hub6_serial_in; + p->serial_out = hub6_serial_out; + break; + + case UPIO_MEM: + p->serial_in = mem_serial_in; + p->serial_out = mem_serial_out; + break; + + case UPIO_MEM16: + p->serial_in = mem16_serial_in; + p->serial_out = mem16_serial_out; + break; + + case UPIO_MEM32: + p->serial_in = mem32_serial_in; + p->serial_out = mem32_serial_out; + break; + + case UPIO_MEM32BE: + p->serial_in = mem32be_serial_in; + p->serial_out = mem32be_serial_out; + break; + +#ifdef CONFIG_SERIAL_8250_RT288X + case UPIO_AU: + p->serial_in = au_serial_in; + p->serial_out = au_serial_out; + up->dl_read = au_serial_dl_read; + up->dl_write = au_serial_dl_write; + break; +#endif + + default: + p->serial_in = io_serial_in; + p->serial_out = io_serial_out; + break; + } + /* Remember loaded iotype */ + up->cur_iotype = p->iotype; + p->handle_irq = serial8250_default_handle_irq; +} + +static void +serial_port_out_sync(struct uart_port *p, int offset, int value) +{ + switch (p->iotype) { + case UPIO_MEM: + case UPIO_MEM16: + case UPIO_MEM32: + case UPIO_MEM32BE: + case UPIO_AU: + p->serial_out(p, offset, value); + p->serial_in(p, UART_LCR); /* safe, no side-effects */ + break; + default: + p->serial_out(p, offset, value); + } +} + +/* + * FIFO support. 
+ */ +static void serial8250_clear_fifos(struct uart_8250_port *p) +{ + if (p->capabilities & UART_CAP_FIFO) { + serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + serial_out(p, UART_FCR, 0); + } +} + +static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t); +static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t); + +void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p) +{ + serial8250_clear_fifos(p); + serial_out(p, UART_FCR, p->fcr); +} +EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos); + +void serial8250_rpm_get(struct uart_8250_port *p) +{ + if (!(p->capabilities & UART_CAP_RPM)) + return; + pm_runtime_get_sync(p->port.dev); +} +EXPORT_SYMBOL_GPL(serial8250_rpm_get); + +void serial8250_rpm_put(struct uart_8250_port *p) +{ + if (!(p->capabilities & UART_CAP_RPM)) + return; + pm_runtime_mark_last_busy(p->port.dev); + pm_runtime_put_autosuspend(p->port.dev); +} +EXPORT_SYMBOL_GPL(serial8250_rpm_put); + +/** + * serial8250_em485_init() - put uart_8250_port into rs485 emulating + * @p: uart_8250_port port instance + * + * The function is used to start rs485 software emulating on the + * &struct uart_8250_port* @p. Namely, RTS is switched before/after + * transmission. The function is idempotent, so it is safe to call it + * multiple times. + * + * The caller MUST enable interrupt on empty shift register before + * calling serial8250_em485_init(). This interrupt is not a part of + * 8250 standard, but implementation defined. + * + * The function is supposed to be called from .rs485_config callback + * or from any other callback protected with p->port.lock spinlock. + * + * See also serial8250_em485_destroy() + * + * Return 0 - success, -errno - otherwise + */ +static int serial8250_em485_init(struct uart_8250_port *p) +{ + if (p->em485) + goto deassert_rts; + + p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC); + if (!p->em485) + return -ENOMEM; + + hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + p->em485->stop_tx_timer.function = &serial8250_em485_handle_stop_tx; + p->em485->start_tx_timer.function = &serial8250_em485_handle_start_tx; + p->em485->port = p; + p->em485->active_timer = NULL; + p->em485->tx_stopped = true; + +deassert_rts: + if (p->em485->tx_stopped) + p->rs485_stop_tx(p); + + return 0; +} + +/** + * serial8250_em485_destroy() - put uart_8250_port into normal state + * @p: uart_8250_port port instance + * + * The function is used to stop rs485 software emulating on the + * &struct uart_8250_port* @p. The function is idempotent, so it is safe to + * call it multiple times. + * + * The function is supposed to be called from .rs485_config callback + * or from any other callback protected with p->port.lock spinlock. 
+ * + * See also serial8250_em485_init() + */ +void serial8250_em485_destroy(struct uart_8250_port *p) +{ + if (!p->em485) + return; + + hrtimer_cancel(&p->em485->start_tx_timer); + hrtimer_cancel(&p->em485->stop_tx_timer); + + kfree(p->em485); + p->em485 = NULL; +} +EXPORT_SYMBOL_GPL(serial8250_em485_destroy); + +struct serial_rs485 serial8250_em485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | + SER_RS485_TERMINATE_BUS | SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; +EXPORT_SYMBOL_GPL(serial8250_em485_supported); + +/** + * serial8250_em485_config() - generic ->rs485_config() callback + * @port: uart port + * @rs485: rs485 settings + * + * Generic callback usable by 8250 uart drivers to activate rs485 settings + * if the uart is incapable of driving RTS as a Transmit Enable signal in + * hardware, relying on software emulation instead. + */ +int serial8250_em485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + /* pick sane settings if the user hasn't */ + if (!!(rs485->flags & SER_RS485_RTS_ON_SEND) == + !!(rs485->flags & SER_RS485_RTS_AFTER_SEND)) { + rs485->flags |= SER_RS485_RTS_ON_SEND; + rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; + } + + /* + * Both serial8250_em485_init() and serial8250_em485_destroy() + * are idempotent. + */ + if (rs485->flags & SER_RS485_ENABLED) + return serial8250_em485_init(up); + + serial8250_em485_destroy(up); + return 0; +} +EXPORT_SYMBOL_GPL(serial8250_em485_config); + +/* + * These two wrappers ensure that enable_runtime_pm_tx() can be called more than + * once and disable_runtime_pm_tx() will still disable RPM because the fifo is + * empty and the HW can idle again. + */ +void serial8250_rpm_get_tx(struct uart_8250_port *p) +{ + unsigned char rpm_active; + + if (!(p->capabilities & UART_CAP_RPM)) + return; + + rpm_active = xchg(&p->rpm_tx_active, 1); + if (rpm_active) + return; + pm_runtime_get_sync(p->port.dev); +} +EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx); + +void serial8250_rpm_put_tx(struct uart_8250_port *p) +{ + unsigned char rpm_active; + + if (!(p->capabilities & UART_CAP_RPM)) + return; + + rpm_active = xchg(&p->rpm_tx_active, 0); + if (!rpm_active) + return; + pm_runtime_mark_last_busy(p->port.dev); + pm_runtime_put_autosuspend(p->port.dev); +} +EXPORT_SYMBOL_GPL(serial8250_rpm_put_tx); + +/* + * IER sleep support. UARTs which have EFRs need the "extended + * capability" bit enabled. Note that on XR16C850s, we need to + * reset LCR to write to IER. + */ +static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) +{ + unsigned char lcr = 0, efr = 0; + + serial8250_rpm_get(p); + + if (p->capabilities & UART_CAP_SLEEP) { + if (p->capabilities & UART_CAP_EFR) { + lcr = serial_in(p, UART_LCR); + efr = serial_in(p, UART_EFR); + serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(p, UART_EFR, UART_EFR_ECB); + serial_out(p, UART_LCR, 0); + } + serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0); + if (p->capabilities & UART_CAP_EFR) { + serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(p, UART_EFR, efr); + serial_out(p, UART_LCR, lcr); + } + } + + serial8250_rpm_put(p); +} + +static void serial8250_clear_IER(struct uart_8250_port *up) +{ + if (up->capabilities & UART_CAP_UUE) + serial_out(up, UART_IER, UART_IER_UUE); + else + serial_out(up, UART_IER, 0); +} + +#ifdef CONFIG_SERIAL_8250_RSA +/* + * Attempts to turn on the RSA FIFO. 
Returns zero on failure. + * We set the port uart clock rate if we succeed. + */ +static int __enable_rsa(struct uart_8250_port *up) +{ + unsigned char mode; + int result; + + mode = serial_in(up, UART_RSA_MSR); + result = mode & UART_RSA_MSR_FIFO; + + if (!result) { + serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO); + mode = serial_in(up, UART_RSA_MSR); + result = mode & UART_RSA_MSR_FIFO; + } + + if (result) + up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16; + + return result; +} + +static void enable_rsa(struct uart_8250_port *up) +{ + if (up->port.type == PORT_RSA) { + if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) { + spin_lock_irq(&up->port.lock); + __enable_rsa(up); + spin_unlock_irq(&up->port.lock); + } + if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) + serial_out(up, UART_RSA_FRR, 0); + } +} + +/* + * Attempts to turn off the RSA FIFO. Returns zero on failure. + * It is unknown why interrupts were disabled in here. However, + * the caller is expected to preserve this behaviour by grabbing + * the spinlock before calling this function. + */ +static void disable_rsa(struct uart_8250_port *up) +{ + unsigned char mode; + int result; + + if (up->port.type == PORT_RSA && + up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) { + spin_lock_irq(&up->port.lock); + + mode = serial_in(up, UART_RSA_MSR); + result = !(mode & UART_RSA_MSR_FIFO); + + if (!result) { + serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO); + mode = serial_in(up, UART_RSA_MSR); + result = !(mode & UART_RSA_MSR_FIFO); + } + + if (result) + up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16; + spin_unlock_irq(&up->port.lock); + } +} +#endif /* CONFIG_SERIAL_8250_RSA */ + +/* + * This is a quickie test to see how big the FIFO is. + * It doesn't work at all the time, more's the pity. + */ +static int size_fifo(struct uart_8250_port *up) +{ + unsigned char old_fcr, old_mcr, old_lcr; + unsigned short old_dl; + int count; + + old_lcr = serial_in(up, UART_LCR); + serial_out(up, UART_LCR, 0); + old_fcr = serial_in(up, UART_FCR); + old_mcr = serial8250_in_MCR(up); + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + serial8250_out_MCR(up, UART_MCR_LOOP); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + old_dl = serial_dl_read(up); + serial_dl_write(up, 0x0001); + serial_out(up, UART_LCR, UART_LCR_WLEN8); + for (count = 0; count < 256; count++) + serial_out(up, UART_TX, count); + mdelay(20);/* FIXME - schedule_timeout */ + for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) && + (count < 256); count++) + serial_in(up, UART_RX); + serial_out(up, UART_FCR, old_fcr); + serial8250_out_MCR(up, old_mcr); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + serial_dl_write(up, old_dl); + serial_out(up, UART_LCR, old_lcr); + + return count; +} + +/* + * Read UART ID using the divisor method - set DLL and DLM to zero + * and the revision will be in DLL and device type in DLM. We + * preserve the device state across this. + */ +static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p) +{ + unsigned char old_lcr; + unsigned int id, old_dl; + + old_lcr = serial_in(p, UART_LCR); + serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A); + old_dl = serial_dl_read(p); + serial_dl_write(p, 0); + id = serial_dl_read(p); + serial_dl_write(p, old_dl); + + serial_out(p, UART_LCR, old_lcr); + + return id; +} + +/* + * This is a helper routine to autodetect StarTech/Exar/Oxsemi UART's. 
+ * When this function is called we know it is at least a StarTech + * 16650 V2, but it might be one of several StarTech UARTs, or one of + * its clones. (We treat the broken original StarTech 16650 V1 as a + * 16550, and why not? Startech doesn't seem to even acknowledge its + * existence.) + * + * What evil have men's minds wrought... + */ +static void autoconfig_has_efr(struct uart_8250_port *up) +{ + unsigned int id1, id2, id3, rev; + + /* + * Everything with an EFR has SLEEP + */ + up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP; + + /* + * First we check to see if it's an Oxford Semiconductor UART. + * + * If we have to do this here because some non-National + * Semiconductor clone chips lock up if you try writing to the + * LSR register (which serial_icr_read does) + */ + + /* + * Check for Oxford Semiconductor 16C950. + * + * EFR [4] must be set else this test fails. + * + * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca) + * claims that it's needed for 952 dual UART's (which are not + * recommended for new designs). + */ + up->acr = 0; + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, UART_EFR_ECB); + serial_out(up, UART_LCR, 0x00); + id1 = serial_icr_read(up, UART_ID1); + id2 = serial_icr_read(up, UART_ID2); + id3 = serial_icr_read(up, UART_ID3); + rev = serial_icr_read(up, UART_REV); + + DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev); + + if (id1 == 0x16 && id2 == 0xC9 && + (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) { + up->port.type = PORT_16C950; + + /* + * Enable work around for the Oxford Semiconductor 952 rev B + * chip which causes it to seriously miscalculate baud rates + * when DLL is 0. + */ + if (id3 == 0x52 && rev == 0x01) + up->bugs |= UART_BUG_QUOT; + return; + } + + /* + * We check for a XR16C850 by setting DLL and DLM to 0, and then + * reading back DLL and DLM. The chip type depends on the DLM + * value read back: + * 0x10 - XR16C850 and the DLL contains the chip revision. + * 0x12 - XR16C2850. + * 0x14 - XR16C854. + */ + id1 = autoconfig_read_divisor_id(up); + DEBUG_AUTOCONF("850id=%04x ", id1); + + id2 = id1 >> 8; + if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) { + up->port.type = PORT_16850; + return; + } + + /* + * It wasn't an XR16C850. + * + * We distinguish between the '654 and the '650 by counting + * how many bytes are in the FIFO. I'm using this for now, + * since that's the technique that was sent to me in the + * serial driver update, but I'm not convinced this works. + * I've had problems doing this in the past. -TYT + */ + if (size_fifo(up) == 64) + up->port.type = PORT_16654; + else + up->port.type = PORT_16650V2; +} + +/* + * We detected a chip without a FIFO. Only two fall into + * this category - the original 8250 and the 16450. The + * 16450 has a scratch register (accessible with LCR=0) + */ +static void autoconfig_8250(struct uart_8250_port *up) +{ + unsigned char scratch, status1, status2; + + up->port.type = PORT_8250; + + scratch = serial_in(up, UART_SCR); + serial_out(up, UART_SCR, 0xa5); + status1 = serial_in(up, UART_SCR); + serial_out(up, UART_SCR, 0x5a); + status2 = serial_in(up, UART_SCR); + serial_out(up, UART_SCR, scratch); + + if (status1 == 0xa5 && status2 == 0x5a) + up->port.type = PORT_16450; +} + +static int broken_efr(struct uart_8250_port *up) +{ + /* + * Exar ST16C2550 "A2" devices incorrectly detect as + * having an EFR, and report an ID of 0x0201. 
See + * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html + */ + if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16) + return 1; + + return 0; +} + +/* + * We know that the chip has FIFOs. Does it have an EFR? The + * EFR is located in the same register position as the IIR and + * we know the top two bits of the IIR are currently set. The + * EFR should contain zero. Try to read the EFR. + */ +static void autoconfig_16550a(struct uart_8250_port *up) +{ + unsigned char status1, status2; + unsigned int iersave; + + up->port.type = PORT_16550A; + up->capabilities |= UART_CAP_FIFO; + + if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS) && + !(up->port.flags & UPF_FULL_PROBE)) + return; + + /* + * Check for presence of the EFR when DLAB is set. + * Only ST16C650V1 UARTs pass this test. + */ + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + if (serial_in(up, UART_EFR) == 0) { + serial_out(up, UART_EFR, 0xA8); + if (serial_in(up, UART_EFR) != 0) { + DEBUG_AUTOCONF("EFRv1 "); + up->port.type = PORT_16650; + up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP; + } else { + serial_out(up, UART_LCR, 0); + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR7_64BYTE); + status1 = serial_in(up, UART_IIR) >> 5; + serial_out(up, UART_FCR, 0); + serial_out(up, UART_LCR, 0); + + if (status1 == 7) + up->port.type = PORT_16550A_FSL64; + else + DEBUG_AUTOCONF("Motorola 8xxx DUART "); + } + serial_out(up, UART_EFR, 0); + return; + } + + /* + * Maybe it requires 0xbf to be written to the LCR. + * (other ST16C650V2 UARTs, TI16C752A, etc) + */ + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) { + DEBUG_AUTOCONF("EFRv2 "); + autoconfig_has_efr(up); + return; + } + + /* + * Check for a National Semiconductor SuperIO chip. + * Attempt to switch to bank 2, read the value of the LOOP bit + * from EXCR1. Switch back to bank 0, change it in MCR. Then + * switch back to bank 2, read it from EXCR1 again and check + * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2 + */ + serial_out(up, UART_LCR, 0); + status1 = serial8250_in_MCR(up); + serial_out(up, UART_LCR, 0xE0); + status2 = serial_in(up, 0x02); /* EXCR1 */ + + if (!((status2 ^ status1) & UART_MCR_LOOP)) { + serial_out(up, UART_LCR, 0); + serial8250_out_MCR(up, status1 ^ UART_MCR_LOOP); + serial_out(up, UART_LCR, 0xE0); + status2 = serial_in(up, 0x02); /* EXCR1 */ + serial_out(up, UART_LCR, 0); + serial8250_out_MCR(up, status1); + + if ((status2 ^ status1) & UART_MCR_LOOP) { + unsigned short quot; + + serial_out(up, UART_LCR, 0xE0); + + quot = serial_dl_read(up); + quot <<= 3; + + if (ns16550a_goto_highspeed(up)) + serial_dl_write(up, quot); + + serial_out(up, UART_LCR, 0); + + up->port.uartclk = 921600*16; + up->port.type = PORT_NS16550A; + up->capabilities |= UART_NATSEMI; + return; + } + } + + /* + * No EFR. Try to detect a TI16750, which only sets bit 5 of + * the IIR when 64 byte FIFO mode is enabled when DLAB is set. + * Try setting it with and without DLAB set. Cheap clones + * set bit 5 without DLAB set. 
+ */ + serial_out(up, UART_LCR, 0); + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE); + status1 = serial_in(up, UART_IIR) >> 5; + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE); + status2 = serial_in(up, UART_IIR) >> 5; + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_out(up, UART_LCR, 0); + + DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2); + + if (status1 == 6 && status2 == 7) { + up->port.type = PORT_16750; + up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP; + return; + } + + /* + * Try writing and reading the UART_IER_UUE bit (b6). + * If it works, this is probably one of the Xscale platform's + * internal UARTs. + * We're going to explicitly set the UUE bit to 0 before + * trying to write and read a 1 just to make sure it's not + * already a 1 and maybe locked there before we even start. + */ + iersave = serial_in(up, UART_IER); + serial_out(up, UART_IER, iersave & ~UART_IER_UUE); + if (!(serial_in(up, UART_IER) & UART_IER_UUE)) { + /* + * OK it's in a known zero state, try writing and reading + * without disturbing the current state of the other bits. + */ + serial_out(up, UART_IER, iersave | UART_IER_UUE); + if (serial_in(up, UART_IER) & UART_IER_UUE) { + /* + * It's an Xscale. + * We'll leave the UART_IER_UUE bit set to 1 (enabled). + */ + DEBUG_AUTOCONF("Xscale "); + up->port.type = PORT_XSCALE; + up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE; + return; + } + } else { + /* + * If we got here we couldn't force the IER_UUE bit to 0. + * Log it and continue. + */ + DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 "); + } + serial_out(up, UART_IER, iersave); + + /* + * We distinguish between 16550A and U6 16550A by counting + * how many bytes are in the FIFO. + */ + if (up->port.type == PORT_16550A && size_fifo(up) == 64) { + up->port.type = PORT_U6_16550A; + up->capabilities |= UART_CAP_AFE; + } +} + +/* + * This routine is called by rs_init() to initialize a specific serial + * port. It determines what type of UART chip this serial port is + * using: 8250, 16450, 16550, 16550A. The important question is + * whether or not this UART is a 16550A or not, since this will + * determine whether or not we can use its FIFO features or not. + */ +static void autoconfig(struct uart_8250_port *up) +{ + unsigned char status1, scratch, scratch2, scratch3; + unsigned char save_lcr, save_mcr; + struct uart_port *port = &up->port; + unsigned long flags; + unsigned int old_capabilities; + + if (!port->iobase && !port->mapbase && !port->membase) + return; + + DEBUG_AUTOCONF("%s: autoconf (0x%04lx, 0x%p): ", + port->name, port->iobase, port->membase); + + /* + * We really do need global IRQs disabled here - we're going to + * be frobbing the chips IRQ enable register to see if it exists. + */ + spin_lock_irqsave(&port->lock, flags); + + up->capabilities = 0; + up->bugs = 0; + + if (!(port->flags & UPF_BUGGY_UART)) { + /* + * Do a simple existence test first; if we fail this, + * there's no point trying anything else. + * + * 0x80 is used as a nonsense port to prevent against + * false positives due to ISA bus float. The + * assumption is that 0x80 is a non-existent port; + * which should be safe since include/asm/io.h also + * makes this assumption. + * + * Note: this is safe as long as MCR bit 4 is clear + * and the device is in "PC" mode. 
+ */ + scratch = serial_in(up, UART_IER); + serial_out(up, UART_IER, 0); +#ifdef __i386__ + outb(0xff, 0x080); +#endif + /* + * Mask out IER[7:4] bits for test as some UARTs (e.g. TL + * 16C754B) allow only to modify them if an EFR bit is set. + */ + scratch2 = serial_in(up, UART_IER) & 0x0f; + serial_out(up, UART_IER, 0x0F); +#ifdef __i386__ + outb(0, 0x080); +#endif + scratch3 = serial_in(up, UART_IER) & 0x0f; + serial_out(up, UART_IER, scratch); + if (scratch2 != 0 || scratch3 != 0x0F) { + /* + * We failed; there's nothing here + */ + spin_unlock_irqrestore(&port->lock, flags); + DEBUG_AUTOCONF("IER test failed (%02x, %02x) ", + scratch2, scratch3); + goto out; + } + } + + save_mcr = serial8250_in_MCR(up); + save_lcr = serial_in(up, UART_LCR); + + /* + * Check to see if a UART is really there. Certain broken + * internal modems based on the Rockwell chipset fail this + * test, because they apparently don't implement the loopback + * test mode. So this test is skipped on the COM 1 through + * COM 4 ports. This *should* be safe, since no board + * manufacturer would be stupid enough to design a board + * that conflicts with COM 1-4 --- we hope! + */ + if (!(port->flags & UPF_SKIP_TEST)) { + serial8250_out_MCR(up, UART_MCR_LOOP | 0x0A); + status1 = serial_in(up, UART_MSR) & 0xF0; + serial8250_out_MCR(up, save_mcr); + if (status1 != 0x90) { + spin_unlock_irqrestore(&port->lock, flags); + DEBUG_AUTOCONF("LOOP test failed (%02x) ", + status1); + goto out; + } + } + + /* + * We're pretty sure there's a port here. Lets find out what + * type of port it is. The IIR top two bits allows us to find + * out if it's 8250 or 16450, 16550, 16550A or later. This + * determines what we test for next. + * + * We also initialise the EFR (if any) to zero for later. The + * EFR occupies the same register location as the FCR and IIR. + */ + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, 0); + serial_out(up, UART_LCR, 0); + + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); + + /* Assign this as it is to truncate any bits above 7. */ + scratch = serial_in(up, UART_IIR); + + switch (scratch >> 6) { + case 0: + autoconfig_8250(up); + break; + case 1: + port->type = PORT_UNKNOWN; + break; + case 2: + port->type = PORT_16550; + break; + case 3: + autoconfig_16550a(up); + break; + } + +#ifdef CONFIG_SERIAL_8250_RSA + /* + * Only probe for RSA ports if we got the region. + */ + if (port->type == PORT_16550A && up->probe & UART_PROBE_RSA && + __enable_rsa(up)) + port->type = PORT_RSA; +#endif + + serial_out(up, UART_LCR, save_lcr); + + port->fifosize = uart_config[up->port.type].fifo_size; + old_capabilities = up->capabilities; + up->capabilities = uart_config[port->type].flags; + up->tx_loadsz = uart_config[port->type].tx_loadsz; + + if (port->type == PORT_UNKNOWN) + goto out_unlock; + + /* + * Reset the UART. 
+ */ +#ifdef CONFIG_SERIAL_8250_RSA + if (port->type == PORT_RSA) + serial_out(up, UART_RSA_FRR, 0); +#endif + serial8250_out_MCR(up, save_mcr); + serial8250_clear_fifos(up); + serial_in(up, UART_RX); + serial8250_clear_IER(up); + +out_unlock: + spin_unlock_irqrestore(&port->lock, flags); + + /* + * Check if the device is a Fintek F81216A + */ + if (port->type == PORT_16550A && port->iotype == UPIO_PORT) + fintek_8250_probe(up); + + if (up->capabilities != old_capabilities) { + dev_warn(port->dev, "detected caps %08x should be %08x\n", + old_capabilities, up->capabilities); + } +out: + DEBUG_AUTOCONF("iir=%d ", scratch); + DEBUG_AUTOCONF("type=%s\n", uart_config[port->type].name); +} + +static void autoconfig_irq(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + unsigned char save_mcr, save_ier; + unsigned char save_ICP = 0; + unsigned int ICP = 0; + unsigned long irqs; + int irq; + + if (port->flags & UPF_FOURPORT) { + ICP = (port->iobase & 0xfe0) | 0x1f; + save_ICP = inb_p(ICP); + outb_p(0x80, ICP); + inb_p(ICP); + } + + if (uart_console(port)) + console_lock(); + + /* forget possible initially masked and pending IRQ */ + probe_irq_off(probe_irq_on()); + save_mcr = serial8250_in_MCR(up); + save_ier = serial_in(up, UART_IER); + serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2); + + irqs = probe_irq_on(); + serial8250_out_MCR(up, 0); + udelay(10); + if (port->flags & UPF_FOURPORT) { + serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS); + } else { + serial8250_out_MCR(up, + UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2); + } + serial_out(up, UART_IER, 0x0f); /* enable all intrs */ + serial_in(up, UART_LSR); + serial_in(up, UART_RX); + serial_in(up, UART_IIR); + serial_in(up, UART_MSR); + serial_out(up, UART_TX, 0xFF); + udelay(20); + irq = probe_irq_off(irqs); + + serial8250_out_MCR(up, save_mcr); + serial_out(up, UART_IER, save_ier); + + if (port->flags & UPF_FOURPORT) + outb_p(save_ICP, ICP); + + if (uart_console(port)) + console_unlock(); + + port->irq = (irq > 0) ? irq : 0; +} + +static void serial8250_stop_rx(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + serial8250_rpm_get(up); + + up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); + up->port.read_status_mask &= ~UART_LSR_DR; + serial_port_out(port, UART_IER, up->ier); + + serial8250_rpm_put(up); +} + +/** + * serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback + * @p: uart 8250 port + * + * Generic callback usable by 8250 uart drivers to stop rs485 transmission. + */ +void serial8250_em485_stop_tx(struct uart_8250_port *p) +{ + unsigned char mcr = serial8250_in_MCR(p); + + if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND) + mcr |= UART_MCR_RTS; + else + mcr &= ~UART_MCR_RTS; + serial8250_out_MCR(p, mcr); + + /* + * Empty the RX FIFO, we are not interested in anything + * received during the half-duplex transmission. + * Enable previously disabled RX interrupts. 
+ */ + if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { + serial8250_clear_and_reinit_fifos(p); + + p->ier |= UART_IER_RLSI | UART_IER_RDI; + serial_port_out(&p->port, UART_IER, p->ier); + } +} +EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); + +static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t) +{ + struct uart_8250_em485 *em485 = container_of(t, struct uart_8250_em485, + stop_tx_timer); + struct uart_8250_port *p = em485->port; + unsigned long flags; + + serial8250_rpm_get(p); + spin_lock_irqsave(&p->port.lock, flags); + if (em485->active_timer == &em485->stop_tx_timer) { + p->rs485_stop_tx(p); + em485->active_timer = NULL; + em485->tx_stopped = true; + } + spin_unlock_irqrestore(&p->port.lock, flags); + serial8250_rpm_put(p); + + return HRTIMER_NORESTART; +} + +static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec) +{ + hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL); +} + +static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay) +{ + struct uart_8250_em485 *em485 = p->em485; + + stop_delay += (u64)p->port.rs485.delay_rts_after_send * NSEC_PER_MSEC; + + /* + * rs485_stop_tx() is going to set RTS according to config + * AND flush RX FIFO if required. + */ + if (stop_delay > 0) { + em485->active_timer = &em485->stop_tx_timer; + hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL); + } else { + p->rs485_stop_tx(p); + em485->active_timer = NULL; + em485->tx_stopped = true; + } +} + +static inline void __stop_tx(struct uart_8250_port *p) +{ + struct uart_8250_em485 *em485 = p->em485; + + if (em485) { + u16 lsr = serial_lsr_in(p); + u64 stop_delay = 0; + + p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + + if (!(lsr & UART_LSR_THRE)) + return; + /* + * To provide required timing and allow FIFO transfer, + * __stop_tx_rs485() must be called only when both FIFO and + * shift register are empty. The device driver should either + * enable interrupt on TEMT or set UART_CAP_NOTEMT that will + * enlarge stop_tx_timer by the tx time of one frame to cover + * for emptying of the shift register. + */ + if (!(lsr & UART_LSR_TEMT)) { + if (!(p->capabilities & UART_CAP_NOTEMT)) + return; + /* + * RTS might get deasserted too early with the normal + * frame timing formula. It seems to suggest THRE might + * get asserted already during tx of the stop bit + * rather than after it is fully sent. + * Roughly estimate 1 extra bit here with / 7. + */ + stop_delay = p->port.frame_time + DIV_ROUND_UP(p->port.frame_time, 7); + } + + __stop_tx_rs485(p, stop_delay); + } + + if (serial8250_clear_THRI(p)) + serial8250_rpm_put_tx(p); +} + +static void serial8250_stop_tx(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + serial8250_rpm_get(up); + __stop_tx(up); + + /* + * We really want to stop the transmitter from sending. + */ + if (port->type == PORT_16C950) { + up->acr |= UART_ACR_TXDIS; + serial_icr_write(up, UART_ACR, up->acr); + } + serial8250_rpm_put(up); +} + +static inline void __start_tx(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + if (up->dma && !up->dma->tx_dma(up)) + return; + + if (serial8250_set_THRI(up)) { + if (up->bugs & UART_BUG_TXEN) { + u16 lsr = serial_lsr_in(up); + + if (lsr & UART_LSR_THRE) + serial8250_tx_chars(up); + } + } + + /* + * Re-enable the transmitter if we disabled it. 
+ */ + if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) { + up->acr &= ~UART_ACR_TXDIS; + serial_icr_write(up, UART_ACR, up->acr); + } +} + +/** + * serial8250_em485_start_tx() - generic ->rs485_start_tx() callback + * @up: uart 8250 port + * + * Generic callback usable by 8250 uart drivers to start rs485 transmission. + * Assumes that setting the RTS bit in the MCR register means RTS is high. + * (Some chips use inverse semantics.) Further assumes that reception is + * stoppable by disabling the UART_IER_RDI interrupt. (Some chips set the + * UART_LSR_DR bit even when UART_IER_RDI is disabled, foiling this approach.) + */ +void serial8250_em485_start_tx(struct uart_8250_port *up) +{ + unsigned char mcr = serial8250_in_MCR(up); + + if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) + serial8250_stop_rx(&up->port); + + if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND) + mcr |= UART_MCR_RTS; + else + mcr &= ~UART_MCR_RTS; + serial8250_out_MCR(up, mcr); +} +EXPORT_SYMBOL_GPL(serial8250_em485_start_tx); + +/* Returns false, if start_tx_timer was setup to defer TX start */ +static bool start_tx_rs485(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct uart_8250_em485 *em485 = up->em485; + + /* + * While serial8250_em485_handle_stop_tx() is a noop if + * em485->active_timer != &em485->stop_tx_timer, it might happen that + * the timer is still armed and triggers only after the current bunch of + * chars is send and em485->active_timer == &em485->stop_tx_timer again. + * So cancel the timer. There is still a theoretical race condition if + * the timer is already running and only comes around to check for + * em485->active_timer when &em485->stop_tx_timer is armed again. + */ + if (em485->active_timer == &em485->stop_tx_timer) + hrtimer_try_to_cancel(&em485->stop_tx_timer); + + em485->active_timer = NULL; + + if (em485->tx_stopped) { + em485->tx_stopped = false; + + up->rs485_start_tx(up); + + if (up->port.rs485.delay_rts_before_send > 0) { + em485->active_timer = &em485->start_tx_timer; + start_hrtimer_ms(&em485->start_tx_timer, + up->port.rs485.delay_rts_before_send); + return false; + } + } + + return true; +} + +static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t) +{ + struct uart_8250_em485 *em485 = container_of(t, struct uart_8250_em485, + start_tx_timer); + struct uart_8250_port *p = em485->port; + unsigned long flags; + + spin_lock_irqsave(&p->port.lock, flags); + if (em485->active_timer == &em485->start_tx_timer) { + __start_tx(&p->port); + em485->active_timer = NULL; + } + spin_unlock_irqrestore(&p->port.lock, flags); + + return HRTIMER_NORESTART; +} + +static void serial8250_start_tx(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct uart_8250_em485 *em485 = up->em485; + + if (!port->x_char && uart_circ_empty(&port->state->xmit)) + return; + + serial8250_rpm_get_tx(up); + + if (em485) { + if ((em485->active_timer == &em485->start_tx_timer) || + !start_tx_rs485(port)) + return; + } + __start_tx(port); +} + +static void serial8250_throttle(struct uart_port *port) +{ + port->throttle(port); +} + +static void serial8250_unthrottle(struct uart_port *port) +{ + port->unthrottle(port); +} + +static void serial8250_disable_ms(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + /* no MSR capabilities */ + if (up->bugs & UART_BUG_NOMSR) + return; + + mctrl_gpio_disable_ms(up->gpios); + + up->ier &= ~UART_IER_MSI; + serial_port_out(port, UART_IER, up->ier); +} + 
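+/*
+ * Usage sketch for the serial8250_em485_start_tx()/serial8250_em485_stop_tx()
+ * helpers above (illustrative only, not part of the upstream file): a board
+ * driver that also has to switch an external RS-485 transceiver can wrap the
+ * generic helpers in its own callbacks. The my_board_* names are hypothetical.
+ *
+ *	static void my_board_rs485_start_tx(struct uart_8250_port *up)
+ *	{
+ *		my_board_transceiver_tx_enable(up, true);
+ *		serial8250_em485_start_tx(up);
+ *	}
+ *
+ *	static void my_board_rs485_stop_tx(struct uart_8250_port *up)
+ *	{
+ *		serial8250_em485_stop_tx(up);
+ *		my_board_transceiver_tx_enable(up, false);
+ *	}
+ *
+ * The callbacks are then installed on the struct uart_8250_port, e.g.
+ *
+ *	up->rs485_start_tx = my_board_rs485_start_tx;
+ *	up->rs485_stop_tx = my_board_rs485_stop_tx;
+ *
+ * before the port is passed to serial8250_register_8250_port().
+ */
+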
+static void serial8250_enable_ms(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + /* no MSR capabilities */ + if (up->bugs & UART_BUG_NOMSR) + return; + + mctrl_gpio_enable_ms(up->gpios); + + up->ier |= UART_IER_MSI; + + serial8250_rpm_get(up); + serial_port_out(port, UART_IER, up->ier); + serial8250_rpm_put(up); +} + +void serial8250_read_char(struct uart_8250_port *up, u16 lsr) +{ + struct uart_port *port = &up->port; + unsigned char ch; + char flag = TTY_NORMAL; + + if (likely(lsr & UART_LSR_DR)) + ch = serial_in(up, UART_RX); + else + /* + * Intel 82571 has a Serial Over Lan device that will + * set UART_LSR_BI without setting UART_LSR_DR when + * it receives a break. To avoid reading from the + * receive buffer without UART_LSR_DR bit set, we + * just force the read character to be 0 + */ + ch = 0; + + port->icount.rx++; + + lsr |= up->lsr_saved_flags; + up->lsr_saved_flags = 0; + + if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) { + if (lsr & UART_LSR_BI) { + lsr &= ~(UART_LSR_FE | UART_LSR_PE); + port->icount.brk++; + /* + * We do the SysRQ and SAK checking + * here because otherwise the break + * may get masked by ignore_status_mask + * or read_status_mask. + */ + if (uart_handle_break(port)) + return; + } else if (lsr & UART_LSR_PE) + port->icount.parity++; + else if (lsr & UART_LSR_FE) + port->icount.frame++; + if (lsr & UART_LSR_OE) + port->icount.overrun++; + + /* + * Mask off conditions which should be ignored. + */ + lsr &= port->read_status_mask; + + if (lsr & UART_LSR_BI) { + dev_dbg(port->dev, "handling break\n"); + flag = TTY_BREAK; + } else if (lsr & UART_LSR_PE) + flag = TTY_PARITY; + else if (lsr & UART_LSR_FE) + flag = TTY_FRAME; + } + if (uart_prepare_sysrq_char(port, ch)) + return; + + uart_insert_char(port, lsr, UART_LSR_OE, ch, flag); +} +EXPORT_SYMBOL_GPL(serial8250_read_char); + +/* + * serial8250_rx_chars - Read characters. The first LSR value must be passed in. + * + * Returns LSR bits. The caller should rely only on non-Rx related LSR bits + * (such as THRE) because the LSR value might come from an already consumed + * character. + */ +u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr) +{ + struct uart_port *port = &up->port; + int max_count = 256; + + do { + serial8250_read_char(up, lsr); + if (--max_count == 0) + break; + lsr = serial_in(up, UART_LSR); + } while (lsr & (UART_LSR_DR | UART_LSR_BI)); + + tty_flip_buffer_push(&port->state->port); + return lsr; +} +EXPORT_SYMBOL_GPL(serial8250_rx_chars); + +void serial8250_tx_chars(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + struct circ_buf *xmit = &port->state->xmit; + int count; + + if (port->x_char) { + uart_xchar_out(port, UART_TX); + return; + } + if (uart_tx_stopped(port)) { + serial8250_stop_tx(port); + return; + } + if (uart_circ_empty(xmit)) { + __stop_tx(up); + return; + } + + count = up->tx_loadsz; + do { + serial_out(up, UART_TX, xmit->buf[xmit->tail]); + if (up->bugs & UART_BUG_TXRACE) { + /* + * The Aspeed BMC virtual UARTs have a bug where data + * may get stuck in the BMC's Tx FIFO from bursts of + * writes on the APB interface. + * + * Delay back-to-back writes by a read cycle to avoid + * stalling the VUART. Read a register that won't have + * side-effects and discard the result. 
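+			 * (The scratch register read below is used for this
+			 * because reading UART_SCR has no side effects on
+			 * 16550-class UARTs.)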
+ */ + serial_in(up, UART_SCR); + } + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + if ((up->capabilities & UART_CAP_HFIFO) && + !uart_lsr_tx_empty(serial_in(up, UART_LSR))) + break; + /* The BCM2835 MINI UART THRE bit is really a not-full bit. */ + if ((up->capabilities & UART_CAP_MINI) && + !(serial_in(up, UART_LSR) & UART_LSR_THRE)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + /* + * With RPM enabled, we have to wait until the FIFO is empty before the + * HW can go idle. So we get here once again with empty FIFO and disable + * the interrupt and RPM in __stop_tx() + */ + if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM)) + __stop_tx(up); +} +EXPORT_SYMBOL_GPL(serial8250_tx_chars); + +/* Caller holds uart port lock */ +unsigned int serial8250_modem_status(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + unsigned int status = serial_in(up, UART_MSR); + + status |= up->msr_saved_flags; + up->msr_saved_flags = 0; + if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI && + port->state != NULL) { + if (status & UART_MSR_TERI) + port->icount.rng++; + if (status & UART_MSR_DDSR) + port->icount.dsr++; + if (status & UART_MSR_DDCD) + uart_handle_dcd_change(port, status & UART_MSR_DCD); + if (status & UART_MSR_DCTS) + uart_handle_cts_change(port, status & UART_MSR_CTS); + + wake_up_interruptible(&port->state->port.delta_msr_wait); + } + + return status; +} +EXPORT_SYMBOL_GPL(serial8250_modem_status); + +static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) +{ + switch (iir & 0x3f) { + case UART_IIR_THRI: + /* + * Postpone DMA or not decision to IIR_RDI or IIR_RX_TIMEOUT + * because it's impossible to do an informed decision about + * that with IIR_THRI. + * + * This also fixes one known DMA Rx corruption issue where + * DR is asserted but DMA Rx only gets a corrupted zero byte + * (too early DR?). + */ + return false; + case UART_IIR_RDI: + if (!up->dma->rx_running) + break; + fallthrough; + case UART_IIR_RLSI: + case UART_IIR_RX_TIMEOUT: + serial8250_rx_dma_flush(up); + return true; + } + return up->dma->rx_dma(up); +} + +/* + * This handles the interrupt from one port. + */ +int serial8250_handle_irq(struct uart_port *port, unsigned int iir) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct tty_port *tport = &port->state->port; + bool skip_rx = false; + unsigned long flags; + u16 status; + + if (iir & UART_IIR_NO_INT) + return 0; + + spin_lock_irqsave(&port->lock, flags); + + status = serial_lsr_in(up); + + /* + * If port is stopped and there are no error conditions in the + * FIFO, then don't drain the FIFO, as this may lead to TTY buffer + * overflow. Not servicing, RX FIFO would trigger auto HW flow + * control when FIFO occupancy reaches preset threshold, thus + * halting RX. This only works when auto HW flow control is + * available. 
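+	 * (The UPSTAT_AUTOCTS/UPSTAT_AUTORTS status bits tested below indicate
+	 * that the hardware handles that flow control autonomously.)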
+ */ + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && + (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && + !(port->read_status_mask & UART_LSR_DR)) + skip_rx = true; + + if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { + struct irq_data *d; + + d = irq_get_irq_data(port->irq); + if (d && irqd_is_wakeup_set(d)) + pm_wakeup_event(tport->tty->dev, 0); + if (!up->dma || handle_rx_dma(up, iir)) + status = serial8250_rx_chars(up, status); + } + serial8250_modem_status(up); + if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) { + if (!up->dma || up->dma->tx_err) + serial8250_tx_chars(up); + else if (!up->dma->tx_running) + __stop_tx(up); + } + + uart_unlock_and_check_sysrq_irqrestore(port, flags); + + return 1; +} +EXPORT_SYMBOL_GPL(serial8250_handle_irq); + +static int serial8250_default_handle_irq(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int iir; + int ret; + + serial8250_rpm_get(up); + + iir = serial_port_in(port, UART_IIR); + ret = serial8250_handle_irq(port, iir); + + serial8250_rpm_put(up); + return ret; +} + +/* + * Newer 16550 compatible parts such as the SC16C650 & Altera 16550 Soft IP + * have a programmable TX threshold that triggers the THRE interrupt in + * the IIR register. In this case, the THRE interrupt indicates the FIFO + * has space available. Load it up with tx_loadsz bytes. + */ +static int serial8250_tx_threshold_handle_irq(struct uart_port *port) +{ + unsigned long flags; + unsigned int iir = serial_port_in(port, UART_IIR); + + /* TX Threshold IRQ triggered so load up FIFO */ + if ((iir & UART_IIR_ID) == UART_IIR_THRI) { + struct uart_8250_port *up = up_to_u8250p(port); + + spin_lock_irqsave(&port->lock, flags); + serial8250_tx_chars(up); + spin_unlock_irqrestore(&port->lock, flags); + } + + iir = serial_port_in(port, UART_IIR); + return serial8250_handle_irq(port, iir); +} + +static unsigned int serial8250_tx_empty(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int result = 0; + unsigned long flags; + + serial8250_rpm_get(up); + + spin_lock_irqsave(&port->lock, flags); + if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up))) + result = TIOCSER_TEMT; + spin_unlock_irqrestore(&port->lock, flags); + + serial8250_rpm_put(up); + + return result; +} + +unsigned int serial8250_do_get_mctrl(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int status; + unsigned int val; + + serial8250_rpm_get(up); + status = serial8250_modem_status(up); + serial8250_rpm_put(up); + + val = serial8250_MSR_to_TIOCM(status); + if (up->gpios) + return mctrl_gpio_get(up->gpios, &val); + + return val; +} +EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl); + +static unsigned int serial8250_get_mctrl(struct uart_port *port) +{ + if (port->get_mctrl) + return port->get_mctrl(port); + return serial8250_do_get_mctrl(port); +} + +void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned char mcr; + + mcr = serial8250_TIOCM_to_MCR(mctrl); + + mcr |= up->mcr; + + serial8250_out_MCR(up, mcr); +} +EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl); + +static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + if (port->rs485.flags & SER_RS485_ENABLED) + return; + + if (port->set_mctrl) + port->set_mctrl(port, mctrl); + else + serial8250_do_set_mctrl(port, mctrl); +} + +static void serial8250_break_ctl(struct uart_port *port, int break_state) +{ + 
struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + + serial8250_rpm_get(up); + spin_lock_irqsave(&port->lock, flags); + if (break_state == -1) + up->lcr |= UART_LCR_SBC; + else + up->lcr &= ~UART_LCR_SBC; + serial_port_out(port, UART_LCR, up->lcr); + spin_unlock_irqrestore(&port->lock, flags); + serial8250_rpm_put(up); +} + +static void wait_for_lsr(struct uart_8250_port *up, int bits) +{ + unsigned int status, tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. */ + for (;;) { + status = serial_lsr_in(up); + + if ((status & bits) == bits) + break; + if (--tmout == 0) + break; + udelay(1); + touch_nmi_watchdog(); + } +} + +/* + * Wait for transmitter & holding register to empty + */ +static void wait_for_xmitr(struct uart_8250_port *up, int bits) +{ + unsigned int tmout; + + wait_for_lsr(up, bits); + + /* Wait up to 1s for flow control if necessary */ + if (up->port.flags & UPF_CONS_FLOW) { + for (tmout = 1000000; tmout; tmout--) { + unsigned int msr = serial_in(up, UART_MSR); + up->msr_saved_flags |= msr & MSR_SAVE_FLAGS; + if (msr & UART_MSR_CTS) + break; + udelay(1); + touch_nmi_watchdog(); + } + } +} + +#ifdef CONFIG_CONSOLE_POLL +/* + * Console polling routines for writing and reading from the uart while + * in an interrupt or debug context. + */ + +static int serial8250_get_poll_char(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + int status; + u16 lsr; + + serial8250_rpm_get(up); + + lsr = serial_port_in(port, UART_LSR); + + if (!(lsr & UART_LSR_DR)) { + status = NO_POLL_CHAR; + goto out; + } + + status = serial_port_in(port, UART_RX); +out: + serial8250_rpm_put(up); + return status; +} + + +static void serial8250_put_poll_char(struct uart_port *port, + unsigned char c) +{ + unsigned int ier; + struct uart_8250_port *up = up_to_u8250p(port); + + serial8250_rpm_get(up); + /* + * First save the IER then disable the interrupts + */ + ier = serial_port_in(port, UART_IER); + serial8250_clear_IER(up); + + wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); + /* + * Send the character out. 
+ */ + serial_port_out(port, UART_TX, c); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); + serial_port_out(port, UART_IER, ier); + serial8250_rpm_put(up); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +int serial8250_do_startup(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + unsigned char iir; + int retval; + u16 lsr; + + if (!port->fifosize) + port->fifosize = uart_config[port->type].fifo_size; + if (!up->tx_loadsz) + up->tx_loadsz = uart_config[port->type].tx_loadsz; + if (!up->capabilities) + up->capabilities = uart_config[port->type].flags; + up->mcr = 0; + + if (port->iotype != up->cur_iotype) + set_io_from_upio(port); + + serial8250_rpm_get(up); + if (port->type == PORT_16C950) { + /* Wake up and initialize UART */ + up->acr = 0; + serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); + serial_port_out(port, UART_EFR, UART_EFR_ECB); + serial_port_out(port, UART_IER, 0); + serial_port_out(port, UART_LCR, 0); + serial_icr_write(up, UART_CSR, 0); /* Reset the UART */ + serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); + serial_port_out(port, UART_EFR, UART_EFR_ECB); + serial_port_out(port, UART_LCR, 0); + } + + if (port->type == PORT_DA830) { + /* Reset the port */ + serial_port_out(port, UART_IER, 0); + serial_port_out(port, UART_DA830_PWREMU_MGMT, 0); + mdelay(10); + + /* Enable Tx, Rx and free run mode */ + serial_port_out(port, UART_DA830_PWREMU_MGMT, + UART_DA830_PWREMU_MGMT_UTRST | + UART_DA830_PWREMU_MGMT_URRST | + UART_DA830_PWREMU_MGMT_FREE); + } + + if (port->type == PORT_NPCM) { + /* + * Nuvoton calls the scratch register 'UART_TOR' (timeout + * register). Enable it, and set TIOC (timeout interrupt + * comparator) to be 0x20 for correct operation. + */ + serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20); + } + +#ifdef CONFIG_SERIAL_8250_RSA + /* + * If this is an RSA port, see if we can kick it up to the + * higher speed clock. + */ + enable_rsa(up); +#endif + + /* + * Clear the FIFO buffers and disable them. + * (they will be reenabled in set_termios()) + */ + serial8250_clear_fifos(up); + + /* + * Clear the interrupt registers. + */ + serial_port_in(port, UART_LSR); + serial_port_in(port, UART_RX); + serial_port_in(port, UART_IIR); + serial_port_in(port, UART_MSR); + + /* + * At this point, there's no way the LSR could still be 0xff; + * if it is, then bail out, because there's likely no UART + * here. + */ + if (!(port->flags & UPF_BUGGY_UART) && + (serial_port_in(port, UART_LSR) == 0xff)) { + dev_info_ratelimited(port->dev, "LSR safety check engaged!\n"); + retval = -ENODEV; + goto out; + } + + /* + * For a XR16C850, we need to set the trigger levels + */ + if (port->type == PORT_16850) { + unsigned char fctr; + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX); + serial_port_out(port, UART_FCTR, + fctr | UART_FCTR_TRGD | UART_FCTR_RX); + serial_port_out(port, UART_TRG, UART_TRG_96); + serial_port_out(port, UART_FCTR, + fctr | UART_FCTR_TRGD | UART_FCTR_TX); + serial_port_out(port, UART_TRG, UART_TRG_96); + + serial_port_out(port, UART_LCR, 0); + } + + /* + * For the Altera 16550 variants, set TX threshold trigger level. 
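+	 * (The low watermark written to UART_ALTR_TX_LOW below is fifosize -
+	 * tx_loadsz, so serial8250_tx_threshold_handle_irq() can refill a full
+	 * tx_loadsz bytes each time the threshold interrupt fires.)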
+ */ + if (((port->type == PORT_ALTR_16550_F32) || + (port->type == PORT_ALTR_16550_F64) || + (port->type == PORT_ALTR_16550_F128)) && (port->fifosize > 1)) { + /* Bounds checking of TX threshold (valid 0 to fifosize-2) */ + if ((up->tx_loadsz < 2) || (up->tx_loadsz > port->fifosize)) { + dev_err(port->dev, "TX FIFO Threshold errors, skipping\n"); + } else { + serial_port_out(port, UART_ALTR_AFR, + UART_ALTR_EN_TXFIFO_LW); + serial_port_out(port, UART_ALTR_TX_LOW, + port->fifosize - up->tx_loadsz); + port->handle_irq = serial8250_tx_threshold_handle_irq; + } + } + + /* Check if we need to have shared IRQs */ + if (port->irq && (up->port.flags & UPF_SHARE_IRQ)) + up->port.irqflags |= IRQF_SHARED; + + retval = up->ops->setup_irq(up); + if (retval) + goto out; + + if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { + unsigned char iir1; + + if (port->irqflags & IRQF_SHARED) + disable_irq_nosync(port->irq); + + /* + * Test for UARTs that do not reassert THRE when the + * transmitter is idle and the interrupt has already + * been cleared. Real 16550s should always reassert + * this interrupt whenever the transmitter is idle and + * the interrupt is enabled. Delays are necessary to + * allow register changes to become visible. + */ + spin_lock_irqsave(&port->lock, flags); + + wait_for_xmitr(up, UART_LSR_THRE); + serial_port_out_sync(port, UART_IER, UART_IER_THRI); + udelay(1); /* allow THRE to set */ + iir1 = serial_port_in(port, UART_IIR); + serial_port_out(port, UART_IER, 0); + serial_port_out_sync(port, UART_IER, UART_IER_THRI); + udelay(1); /* allow a working UART time to re-assert THRE */ + iir = serial_port_in(port, UART_IIR); + serial_port_out(port, UART_IER, 0); + + spin_unlock_irqrestore(&port->lock, flags); + + if (port->irqflags & IRQF_SHARED) + enable_irq(port->irq); + + /* + * If the interrupt is not reasserted, or we otherwise + * don't trust the iir, setup a timer to kick the UART + * on a regular basis. + */ + if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) || + up->port.flags & UPF_BUG_THRE) { + up->bugs |= UART_BUG_THRE; + } + } + + up->ops->setup_timer(up); + + /* + * Now, initialize the UART + */ + serial_port_out(port, UART_LCR, UART_LCR_WLEN8); + + spin_lock_irqsave(&port->lock, flags); + if (up->port.flags & UPF_FOURPORT) { + if (!up->port.irq) + up->port.mctrl |= TIOCM_OUT1; + } else + /* + * Most PC uarts need OUT2 raised to enable interrupts. + */ + if (port->irq) + up->port.mctrl |= TIOCM_OUT2; + + serial8250_set_mctrl(port, port->mctrl); + + /* + * Serial over Lan (SoL) hack: + * Intel 8257x Gigabit ethernet chips have a 16550 emulation, to be + * used for Serial Over Lan. Those chips take a longer time than a + * normal serial device to signalize that a transmission data was + * queued. Due to that, the above test generally fails. One solution + * would be to delay the reading of iir. However, this is not + * reliable, since the timeout is variable. So, let's just don't + * test if we receive TX irq. This way, we'll never enable + * UART_BUG_TXEN. + */ + if (up->port.quirks & UPQ_NO_TXEN_TEST) + goto dont_test_tx_en; + + /* + * Do a quick test to see if we receive an interrupt when we enable + * the TX irq. 
+ */ + serial_port_out(port, UART_IER, UART_IER_THRI); + lsr = serial_port_in(port, UART_LSR); + iir = serial_port_in(port, UART_IIR); + serial_port_out(port, UART_IER, 0); + + if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) { + if (!(up->bugs & UART_BUG_TXEN)) { + up->bugs |= UART_BUG_TXEN; + dev_dbg(port->dev, "enabling bad tx status workarounds\n"); + } + } else { + up->bugs &= ~UART_BUG_TXEN; + } + +dont_test_tx_en: + spin_unlock_irqrestore(&port->lock, flags); + + /* + * Clear the interrupt registers again for luck, and clear the + * saved flags to avoid getting false values from polling + * routines or the previous session. + */ + serial_port_in(port, UART_LSR); + serial_port_in(port, UART_RX); + serial_port_in(port, UART_IIR); + serial_port_in(port, UART_MSR); + up->lsr_saved_flags = 0; + up->msr_saved_flags = 0; + + /* + * Request DMA channels for both RX and TX. + */ + if (up->dma) { + const char *msg = NULL; + + if (uart_console(port)) + msg = "forbid DMA for kernel console"; + else if (serial8250_request_dma(up)) + msg = "failed to request DMA"; + if (msg) { + dev_warn_ratelimited(port->dev, "%s\n", msg); + up->dma = NULL; + } + } + + /* + * Set the IER shadow for rx interrupts but defer actual interrupt + * enable until after the FIFOs are enabled; otherwise, an already- + * active sender can swamp the interrupt handler with "too much work". + */ + up->ier = UART_IER_RLSI | UART_IER_RDI; + + if (port->flags & UPF_FOURPORT) { + unsigned int icp; + /* + * Enable interrupts on the AST Fourport board + */ + icp = (port->iobase & 0xfe0) | 0x01f; + outb_p(0x80, icp); + inb_p(icp); + } + retval = 0; +out: + serial8250_rpm_put(up); + return retval; +} +EXPORT_SYMBOL_GPL(serial8250_do_startup); + +static int serial8250_startup(struct uart_port *port) +{ + if (port->startup) + return port->startup(port); + return serial8250_do_startup(port); +} + +void serial8250_do_shutdown(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + + serial8250_rpm_get(up); + /* + * Disable interrupts from this port + */ + spin_lock_irqsave(&port->lock, flags); + up->ier = 0; + serial_port_out(port, UART_IER, 0); + spin_unlock_irqrestore(&port->lock, flags); + + synchronize_irq(port->irq); + + if (up->dma) + serial8250_release_dma(up); + + spin_lock_irqsave(&port->lock, flags); + if (port->flags & UPF_FOURPORT) { + /* reset interrupts on the AST Fourport board */ + inb((port->iobase & 0xfe0) | 0x1f); + port->mctrl |= TIOCM_OUT1; + } else + port->mctrl &= ~TIOCM_OUT2; + + serial8250_set_mctrl(port, port->mctrl); + spin_unlock_irqrestore(&port->lock, flags); + + /* + * Disable break condition and FIFOs + */ + serial_port_out(port, UART_LCR, + serial_port_in(port, UART_LCR) & ~UART_LCR_SBC); + serial8250_clear_fifos(up); + +#ifdef CONFIG_SERIAL_8250_RSA + /* + * Reset the RSA board back to 115kbps compat mode. + */ + disable_rsa(up); +#endif + + /* + * Read data port to reset things, and then unlink from + * the IRQ chain. 
+ */ + serial_port_in(port, UART_RX); + serial8250_rpm_put(up); + + up->ops->release_irq(up); +} +EXPORT_SYMBOL_GPL(serial8250_do_shutdown); + +static void serial8250_shutdown(struct uart_port *port) +{ + if (port->shutdown) + port->shutdown(port); + else + serial8250_do_shutdown(port); +} + +/* Nuvoton NPCM UARTs have a custom divisor calculation */ +static unsigned int npcm_get_divisor(struct uart_8250_port *up, + unsigned int baud) +{ + struct uart_port *port = &up->port; + + return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2; +} + +static unsigned int serial8250_do_get_divisor(struct uart_port *port, + unsigned int baud, + unsigned int *frac) +{ + upf_t magic_multiplier = port->flags & UPF_MAGIC_MULTIPLIER; + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int quot; + + /* + * Handle magic divisors for baud rates above baud_base on SMSC + * Super I/O chips. We clamp custom rates from clk/6 and clk/12 + * up to clk/4 (0x8001) and clk/8 (0x8002) respectively. These + * magic divisors actually reprogram the baud rate generator's + * reference clock derived from chips's 14.318MHz clock input. + * + * Documentation claims that with these magic divisors the base + * frequencies of 7.3728MHz and 3.6864MHz are used respectively + * for the extra baud rates of 460800bps and 230400bps rather + * than the usual base frequency of 1.8462MHz. However empirical + * evidence contradicts that. + * + * Instead bit 7 of the DLM register (bit 15 of the divisor) is + * effectively used as a clock prescaler selection bit for the + * base frequency of 7.3728MHz, always used. If set to 0, then + * the base frequency is divided by 4 for use by the Baud Rate + * Generator, for the usual arrangement where the value of 1 of + * the divisor produces the baud rate of 115200bps. Conversely, + * if set to 1 and high-speed operation has been enabled with the + * Serial Port Mode Register in the Device Configuration Space, + * then the base frequency is supplied directly to the Baud Rate + * Generator, so for the divisor values of 0x8001, 0x8002, 0x8003, + * 0x8004, etc. the respective baud rates produced are 460800bps, + * 230400bps, 153600bps, 115200bps, etc. + * + * In all cases only low 15 bits of the divisor are used to divide + * the baud base and therefore 32767 is the maximum divisor value + * possible, even though documentation says that the programmable + * Baud Rate Generator is capable of dividing the internal PLL + * clock by any divisor from 1 to 65535. 
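+	 *
+	 * As a worked example of the high-speed case: the Baud Rate Generator
+	 * then sees the 7.3728MHz base directly, so baud = 7372800 / 16 /
+	 * (divisor & 0x7fff), i.e. 0x8001 -> 460800bps, 0x8002 -> 230400bps,
+	 * 0x8003 -> 153600bps and 0x8004 -> 115200bps, matching the figures
+	 * quoted above.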
+ */ + if (magic_multiplier && baud >= port->uartclk / 6) + quot = 0x8001; + else if (magic_multiplier && baud >= port->uartclk / 12) + quot = 0x8002; + else if (up->port.type == PORT_NPCM) + quot = npcm_get_divisor(up, baud); + else + quot = uart_get_divisor(port, baud); + + /* + * Oxford Semi 952 rev B workaround + */ + if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0) + quot++; + + return quot; +} + +static unsigned int serial8250_get_divisor(struct uart_port *port, + unsigned int baud, + unsigned int *frac) +{ + if (port->get_divisor) + return port->get_divisor(port, baud, frac); + + return serial8250_do_get_divisor(port, baud, frac); +} + +static unsigned char serial8250_compute_lcr(struct uart_8250_port *up, + tcflag_t c_cflag) +{ + unsigned char cval; + + cval = UART_LCR_WLEN(tty_get_char_size(c_cflag)); + + if (c_cflag & CSTOPB) + cval |= UART_LCR_STOP; + if (c_cflag & PARENB) + cval |= UART_LCR_PARITY; + if (!(c_cflag & PARODD)) + cval |= UART_LCR_EPAR; + if (c_cflag & CMSPAR) + cval |= UART_LCR_SPAR; + + return cval; +} + +void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + /* Workaround to enable 115200 baud on OMAP1510 internal ports */ + if (is_omap1510_8250(up)) { + if (baud == 115200) { + quot = 1; + serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1); + } else + serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0); + } + + /* + * For NatSemi, switch to bank 2 not bank 1, to avoid resetting EXCR2, + * otherwise just set DLAB + */ + if (up->capabilities & UART_NATSEMI) + serial_port_out(port, UART_LCR, 0xe0); + else + serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); + + serial_dl_write(up, quot); +} +EXPORT_SYMBOL_GPL(serial8250_do_set_divisor); + +static void serial8250_set_divisor(struct uart_port *port, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + if (port->set_divisor) + port->set_divisor(port, baud, quot, quot_frac); + else + serial8250_do_set_divisor(port, baud, quot, quot_frac); +} + +static unsigned int serial8250_get_baud_rate(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int tolerance = port->uartclk / 100; + unsigned int min; + unsigned int max; + + /* + * Handle magic divisors for baud rates above baud_base on SMSC + * Super I/O chips. Enable custom rates of clk/4 and clk/8, but + * disable divisor values beyond 32767, which are unavailable. + */ + if (port->flags & UPF_MAGIC_MULTIPLIER) { + min = port->uartclk / 16 / UART_DIV_MAX >> 1; + max = (port->uartclk + tolerance) / 4; + } else { + min = port->uartclk / 16 / UART_DIV_MAX; + max = (port->uartclk + tolerance) / 16; + } + + /* + * Ask the core to calculate the divisor for us. + * Allow 1% tolerance at the upper limit so uart clks marginally + * slower than nominal still match standard baud rates without + * causing transmission errors. + */ + return uart_get_baud_rate(port, termios, old, min, max); +} + +/* + * Note in order to avoid the tty port mutex deadlock don't use the next method + * within the uart port callbacks. Primarily it's supposed to be utilized to + * handle a sudden reference clock rate change. 
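+ *
+ * A typical caller is a common clock framework rate-change notifier in the
+ * owning platform driver (sketch only; the my_uart names are hypothetical):
+ *
+ *	static int my_uart_clk_notifier_cb(struct notifier_block *nb,
+ *					   unsigned long event, void *data)
+ *	{
+ *		struct clk_notifier_data *cnd = data;
+ *		struct my_uart *priv = container_of(nb, struct my_uart, nb);
+ *
+ *		if (event == POST_RATE_CHANGE) {
+ *			serial8250_update_uartclk(priv->port, cnd->new_rate);
+ *			return NOTIFY_OK;
+ *		}
+ *
+ *		return NOTIFY_DONE;
+ *	}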
+ */ +void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk) +{ + struct uart_8250_port *up = up_to_u8250p(port); + struct tty_port *tport = &port->state->port; + unsigned int baud, quot, frac = 0; + struct ktermios *termios; + struct tty_struct *tty; + unsigned long flags; + + tty = tty_port_tty_get(tport); + if (!tty) { + mutex_lock(&tport->mutex); + port->uartclk = uartclk; + mutex_unlock(&tport->mutex); + return; + } + + down_write(&tty->termios_rwsem); + mutex_lock(&tport->mutex); + + if (port->uartclk == uartclk) + goto out_unlock; + + port->uartclk = uartclk; + + if (!tty_port_initialized(tport)) + goto out_unlock; + + termios = &tty->termios; + + baud = serial8250_get_baud_rate(port, termios, NULL); + quot = serial8250_get_divisor(port, baud, &frac); + + serial8250_rpm_get(up); + spin_lock_irqsave(&port->lock, flags); + + uart_update_timeout(port, termios->c_cflag, baud); + + serial8250_set_divisor(port, baud, quot, frac); + serial_port_out(port, UART_LCR, up->lcr); + + spin_unlock_irqrestore(&port->lock, flags); + serial8250_rpm_put(up); + +out_unlock: + mutex_unlock(&tport->mutex); + up_write(&tty->termios_rwsem); + tty_kref_put(tty); +} +EXPORT_SYMBOL_GPL(serial8250_update_uartclk); + +void +serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned char cval; + unsigned long flags; + unsigned int baud, quot, frac = 0; + + if (up->capabilities & UART_CAP_MINI) { + termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CMSPAR); + if ((termios->c_cflag & CSIZE) == CS5 || + (termios->c_cflag & CSIZE) == CS6) + termios->c_cflag = (termios->c_cflag & ~CSIZE) | CS7; + } + cval = serial8250_compute_lcr(up, termios->c_cflag); + + baud = serial8250_get_baud_rate(port, termios, old); + quot = serial8250_get_divisor(port, baud, &frac); + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + serial8250_rpm_get(up); + spin_lock_irqsave(&port->lock, flags); + + up->lcr = cval; /* Save computed LCR */ + + if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) { + if (baud < 2400 && !up->dma) { + up->fcr &= ~UART_FCR_TRIGGER_MASK; + up->fcr |= UART_FCR_TRIGGER_1; + } + } + + /* + * MCR-based auto flow control. When AFE is enabled, RTS will be + * deasserted when the receive FIFO contains more characters than + * the trigger, or the MCR RTS bit is cleared. + */ + if (up->capabilities & UART_CAP_AFE) { + up->mcr &= ~UART_MCR_AFE; + if (termios->c_cflag & CRTSCTS) + up->mcr |= UART_MCR_AFE; + } + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= UART_LSR_BI; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= UART_LSR_BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
+ */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART_LSR_OE; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_LSR_DR; + + /* + * CTS flow control flag and modem status interrupts + */ + up->ier &= ~UART_IER_MSI; + if (!(up->bugs & UART_BUG_NOMSR) && + UART_ENABLE_MS(&up->port, termios->c_cflag)) + up->ier |= UART_IER_MSI; + if (up->capabilities & UART_CAP_UUE) + up->ier |= UART_IER_UUE; + if (up->capabilities & UART_CAP_RTOIE) + up->ier |= UART_IER_RTOIE; + + serial_port_out(port, UART_IER, up->ier); + + if (up->capabilities & UART_CAP_EFR) { + unsigned char efr = 0; + /* + * TI16C752/Startech hardware flow control. FIXME: + * - TI16C752 requires control thresholds to be set. + * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled. + */ + if (termios->c_cflag & CRTSCTS) + efr |= UART_EFR_CTS; + + serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); + if (port->flags & UPF_EXAR_EFR) + serial_port_out(port, UART_XR_EFR, efr); + else + serial_port_out(port, UART_EFR, efr); + } + + serial8250_set_divisor(port, baud, quot, frac); + + /* + * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR + * is written without DLAB set, this mode will be disabled. + */ + if (port->type == PORT_16750) + serial_port_out(port, UART_FCR, up->fcr); + + serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */ + if (port->type != PORT_16750) { + /* emulated UARTs (Lucent Venus 167x) need two steps */ + if (up->fcr & UART_FCR_ENABLE_FIFO) + serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_port_out(port, UART_FCR, up->fcr); /* set fcr */ + } + serial8250_set_mctrl(port, port->mctrl); + spin_unlock_irqrestore(&port->lock, flags); + serial8250_rpm_put(up); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} +EXPORT_SYMBOL(serial8250_do_set_termios); + +static void +serial8250_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + if (port->set_termios) + port->set_termios(port, termios, old); + else + serial8250_do_set_termios(port, termios, old); +} + +void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios) +{ + if (termios->c_line == N_PPS) { + port->flags |= UPF_HARDPPS_CD; + spin_lock_irq(&port->lock); + serial8250_enable_ms(port); + spin_unlock_irq(&port->lock); + } else { + port->flags &= ~UPF_HARDPPS_CD; + if (!UART_ENABLE_MS(port, termios->c_cflag)) { + spin_lock_irq(&port->lock); + serial8250_disable_ms(port); + spin_unlock_irq(&port->lock); + } + } +} +EXPORT_SYMBOL_GPL(serial8250_do_set_ldisc); + +static void +serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios) +{ + if (port->set_ldisc) + port->set_ldisc(port, termios); + else + serial8250_do_set_ldisc(port, termios); +} + +void serial8250_do_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct uart_8250_port *p = up_to_u8250p(port); + + serial8250_set_sleep(p, state != 0); +} +EXPORT_SYMBOL(serial8250_do_pm); + +static void +serial8250_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + if (port->pm) + port->pm(port, state, oldstate); + else + serial8250_do_pm(port, state, oldstate); +} + +static unsigned int serial8250_port_size(struct uart_8250_port *pt) +{ + if (pt->port.mapsize) + return pt->port.mapsize; + if (pt->port.iotype == UPIO_AU) { + if (pt->port.type == PORT_RT2880) + return 0x100; + return 0x1000; 
+ } + if (is_omap1_8250(pt)) + return 0x16 << pt->port.regshift; + + return 8 << pt->port.regshift; +} + +/* + * Resource handling. + */ +static int serial8250_request_std_resource(struct uart_8250_port *up) +{ + unsigned int size = serial8250_port_size(up); + struct uart_port *port = &up->port; + int ret = 0; + + switch (port->iotype) { + case UPIO_AU: + case UPIO_TSI: + case UPIO_MEM32: + case UPIO_MEM32BE: + case UPIO_MEM16: + case UPIO_MEM: + if (!port->mapbase) { + ret = -EINVAL; + break; + } + + if (!request_mem_region(port->mapbase, size, "serial")) { + ret = -EBUSY; + break; + } + + if (port->flags & UPF_IOREMAP) { + port->membase = ioremap(port->mapbase, size); + if (!port->membase) { + release_mem_region(port->mapbase, size); + ret = -ENOMEM; + } + } + break; + + case UPIO_HUB6: + case UPIO_PORT: + if (!request_region(port->iobase, size, "serial")) + ret = -EBUSY; + break; + } + return ret; +} + +static void serial8250_release_std_resource(struct uart_8250_port *up) +{ + unsigned int size = serial8250_port_size(up); + struct uart_port *port = &up->port; + + switch (port->iotype) { + case UPIO_AU: + case UPIO_TSI: + case UPIO_MEM32: + case UPIO_MEM32BE: + case UPIO_MEM16: + case UPIO_MEM: + if (!port->mapbase) + break; + + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } + + release_mem_region(port->mapbase, size); + break; + + case UPIO_HUB6: + case UPIO_PORT: + release_region(port->iobase, size); + break; + } +} + +static void serial8250_release_port(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + serial8250_release_std_resource(up); +} + +static int serial8250_request_port(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + return serial8250_request_std_resource(up); +} + +static int fcr_get_rxtrig_bytes(struct uart_8250_port *up) +{ + const struct serial8250_config *conf_type = &uart_config[up->port.type]; + unsigned char bytes; + + bytes = conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(up->fcr)]; + + return bytes ? 
bytes : -EOPNOTSUPP; +} + +static int bytes_to_fcr_rxtrig(struct uart_8250_port *up, unsigned char bytes) +{ + const struct serial8250_config *conf_type = &uart_config[up->port.type]; + int i; + + if (!conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(UART_FCR_R_TRIG_00)]) + return -EOPNOTSUPP; + + for (i = 1; i < UART_FCR_R_TRIG_MAX_STATE; i++) { + if (bytes < conf_type->rxtrig_bytes[i]) + /* Use the nearest lower value */ + return (--i) << UART_FCR_R_TRIG_SHIFT; + } + + return UART_FCR_R_TRIG_11; +} + +static int do_get_rxtrig(struct tty_port *port) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport = state->uart_port; + struct uart_8250_port *up = up_to_u8250p(uport); + + if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1) + return -EINVAL; + + return fcr_get_rxtrig_bytes(up); +} + +static int do_serial8250_get_rxtrig(struct tty_port *port) +{ + int rxtrig_bytes; + + mutex_lock(&port->mutex); + rxtrig_bytes = do_get_rxtrig(port); + mutex_unlock(&port->mutex); + + return rxtrig_bytes; +} + +static ssize_t rx_trig_bytes_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tty_port *port = dev_get_drvdata(dev); + int rxtrig_bytes; + + rxtrig_bytes = do_serial8250_get_rxtrig(port); + if (rxtrig_bytes < 0) + return rxtrig_bytes; + + return sysfs_emit(buf, "%d\n", rxtrig_bytes); +} + +static int do_set_rxtrig(struct tty_port *port, unsigned char bytes) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport = state->uart_port; + struct uart_8250_port *up = up_to_u8250p(uport); + int rxtrig; + + if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1) + return -EINVAL; + + rxtrig = bytes_to_fcr_rxtrig(up, bytes); + if (rxtrig < 0) + return rxtrig; + + serial8250_clear_fifos(up); + up->fcr &= ~UART_FCR_TRIGGER_MASK; + up->fcr |= (unsigned char)rxtrig; + serial_out(up, UART_FCR, up->fcr); + return 0; +} + +static int do_serial8250_set_rxtrig(struct tty_port *port, unsigned char bytes) +{ + int ret; + + mutex_lock(&port->mutex); + ret = do_set_rxtrig(port, bytes); + mutex_unlock(&port->mutex); + + return ret; +} + +static ssize_t rx_trig_bytes_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct tty_port *port = dev_get_drvdata(dev); + unsigned char bytes; + int ret; + + if (!count) + return -EINVAL; + + ret = kstrtou8(buf, 10, &bytes); + if (ret < 0) + return ret; + + ret = do_serial8250_set_rxtrig(port, bytes); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR_RW(rx_trig_bytes); + +static struct attribute *serial8250_dev_attrs[] = { + &dev_attr_rx_trig_bytes.attr, + NULL +}; + +static struct attribute_group serial8250_dev_attr_group = { + .attrs = serial8250_dev_attrs, +}; + +static void register_dev_spec_attr_grp(struct uart_8250_port *up) +{ + const struct serial8250_config *conf_type = &uart_config[up->port.type]; + + if (conf_type->rxtrig_bytes[0]) + up->port.attr_group = &serial8250_dev_attr_group; +} + +static void serial8250_config_port(struct uart_port *port, int flags) +{ + struct uart_8250_port *up = up_to_u8250p(port); + int ret; + + /* + * Find the region that we can probe for. This in turn + * tells us whether we can probe for the type of port. 
+ */ + ret = serial8250_request_std_resource(up); + if (ret < 0) + return; + + if (port->iotype != up->cur_iotype) + set_io_from_upio(port); + + if (flags & UART_CONFIG_TYPE) + autoconfig(up); + + /* if access method is AU, it is a 16550 with a quirk */ + if (port->type == PORT_16550A && port->iotype == UPIO_AU) + up->bugs |= UART_BUG_NOMSR; + + /* HW bugs may trigger IRQ while IIR == NO_INT */ + if (port->type == PORT_TEGRA) + up->bugs |= UART_BUG_NOMSR; + + if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) + autoconfig_irq(up); + + if (port->type == PORT_UNKNOWN) + serial8250_release_std_resource(up); + + register_dev_spec_attr_grp(up); + up->fcr = uart_config[up->port.type].fcr; +} + +static int +serial8250_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (ser->irq >= nr_irqs || ser->irq < 0 || + ser->baud_base < 9600 || ser->type < PORT_UNKNOWN || + ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS || + ser->type == PORT_STARTECH) + return -EINVAL; + return 0; +} + +static const char *serial8250_type(struct uart_port *port) +{ + int type = port->type; + + if (type >= ARRAY_SIZE(uart_config)) + type = 0; + return uart_config[type].name; +} + +static const struct uart_ops serial8250_pops = { + .tx_empty = serial8250_tx_empty, + .set_mctrl = serial8250_set_mctrl, + .get_mctrl = serial8250_get_mctrl, + .stop_tx = serial8250_stop_tx, + .start_tx = serial8250_start_tx, + .throttle = serial8250_throttle, + .unthrottle = serial8250_unthrottle, + .stop_rx = serial8250_stop_rx, + .enable_ms = serial8250_enable_ms, + .break_ctl = serial8250_break_ctl, + .startup = serial8250_startup, + .shutdown = serial8250_shutdown, + .set_termios = serial8250_set_termios, + .set_ldisc = serial8250_set_ldisc, + .pm = serial8250_pm, + .type = serial8250_type, + .release_port = serial8250_release_port, + .request_port = serial8250_request_port, + .config_port = serial8250_config_port, + .verify_port = serial8250_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = serial8250_get_poll_char, + .poll_put_char = serial8250_put_poll_char, +#endif +}; + +void serial8250_init_port(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + + spin_lock_init(&port->lock); + port->pm = NULL; + port->ops = &serial8250_pops; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); + + up->cur_iotype = 0xFF; +} +EXPORT_SYMBOL_GPL(serial8250_init_port); + +void serial8250_set_defaults(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + + if (up->port.flags & UPF_FIXED_TYPE) { + unsigned int type = up->port.type; + + if (!up->port.fifosize) + up->port.fifosize = uart_config[type].fifo_size; + if (!up->tx_loadsz) + up->tx_loadsz = uart_config[type].tx_loadsz; + if (!up->capabilities) + up->capabilities = uart_config[type].flags; + } + + set_io_from_upio(port); + + /* default dma handlers */ + if (up->dma) { + if (!up->dma->tx_dma) + up->dma->tx_dma = serial8250_tx_dma; + if (!up->dma->rx_dma) + up->dma->rx_dma = serial8250_rx_dma; + } +} +EXPORT_SYMBOL_GPL(serial8250_set_defaults); + +#ifdef CONFIG_SERIAL_8250_CONSOLE + +static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + wait_for_xmitr(up, UART_LSR_THRE); + serial_port_out(port, UART_TX, ch); +} + +/* + * Restore serial console when h/w power-off detected + */ +static void serial8250_console_restore(struct uart_8250_port *up) +{ + struct uart_port *port = &up->port; + struct ktermios termios; + unsigned int 
baud, quot, frac = 0; + + termios.c_cflag = port->cons->cflag; + termios.c_ispeed = port->cons->ispeed; + termios.c_ospeed = port->cons->ospeed; + if (port->state->port.tty && termios.c_cflag == 0) { + termios.c_cflag = port->state->port.tty->termios.c_cflag; + termios.c_ispeed = port->state->port.tty->termios.c_ispeed; + termios.c_ospeed = port->state->port.tty->termios.c_ospeed; + } + + baud = serial8250_get_baud_rate(port, &termios, NULL); + quot = serial8250_get_divisor(port, baud, &frac); + + serial8250_set_divisor(port, baud, quot, frac); + serial_port_out(port, UART_LCR, up->lcr); + serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); +} + +/* + * Print a string to the serial port using the device FIFO + * + * It sends fifosize bytes and then waits for the fifo + * to get empty. + */ +static void serial8250_console_fifo_write(struct uart_8250_port *up, + const char *s, unsigned int count) +{ + int i; + const char *end = s + count; + unsigned int fifosize = up->tx_loadsz; + bool cr_sent = false; + + while (s != end) { + wait_for_lsr(up, UART_LSR_THRE); + + for (i = 0; i < fifosize && s != end; ++i) { + if (*s == '\n' && !cr_sent) { + serial_out(up, UART_TX, '\r'); + cr_sent = true; + } else { + serial_out(up, UART_TX, *s++); + cr_sent = false; + } + } + } +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * The console_lock must be held when we get here. + * + * Doing runtime PM is really a bad idea for the kernel console. + * Thus, we assume the function is called when device is powered up. + */ +void serial8250_console_write(struct uart_8250_port *up, const char *s, + unsigned int count) +{ + struct uart_8250_em485 *em485 = up->em485; + struct uart_port *port = &up->port; + unsigned long flags; + unsigned int ier, use_fifo; + int locked = 1; + + touch_nmi_watchdog(); + + if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + /* + * First save the IER then disable the interrupts + */ + ier = serial_port_in(port, UART_IER); + serial8250_clear_IER(up); + + /* check scratch reg to see if port powered off during system sleep */ + if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) { + serial8250_console_restore(up); + up->canary = 0; + } + + if (em485) { + if (em485->tx_stopped) + up->rs485_start_tx(up); + mdelay(port->rs485.delay_rts_before_send); + } + + use_fifo = (up->capabilities & UART_CAP_FIFO) && + /* + * BCM283x requires to check the fifo + * after each byte. + */ + !(up->capabilities & UART_CAP_MINI) && + /* + * tx_loadsz contains the transmit fifo size + */ + up->tx_loadsz > 1 && + (up->fcr & UART_FCR_ENABLE_FIFO) && + port->state && + test_bit(TTY_PORT_INITIALIZED, &port->state->port.iflags) && + /* + * After we put a data in the fifo, the controller will send + * it regardless of the CTS state. Therefore, only use fifo + * if we don't use control flow. 
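+		 * (UPF_CONS_FLOW, tested below, is set by uart_set_options()
+		 * when the console was registered with the 'r' flow-control
+		 * option.)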
+ */ + !(up->port.flags & UPF_CONS_FLOW); + + if (likely(use_fifo)) + serial8250_console_fifo_write(up, s, count); + else + uart_console_write(port, s, count, serial8250_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); + + if (em485) { + mdelay(port->rs485.delay_rts_after_send); + if (em485->tx_stopped) + up->rs485_stop_tx(up); + } + + serial_port_out(port, UART_IER, ier); + + /* + * The receive handling will happen properly because the + * receive ready bit will still be set; it is not cleared + * on read. However, modem control will not, we must + * call it if we have saved something in the saved flags + * while processing with interrupts off. + */ + if (up->msr_saved_flags) + serial8250_modem_status(up); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static unsigned int probe_baud(struct uart_port *port) +{ + unsigned char lcr, dll, dlm; + unsigned int quot; + + lcr = serial_port_in(port, UART_LCR); + serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB); + dll = serial_port_in(port, UART_DLL); + dlm = serial_port_in(port, UART_DLM); + serial_port_out(port, UART_LCR, lcr); + + quot = (dlm << 8) | dll; + return (port->uartclk / 16) / quot; +} + +int serial8250_console_setup(struct uart_port *port, char *options, bool probe) +{ + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + if (!port->iobase && !port->membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else if (probe) + baud = probe_baud(port); + + ret = uart_set_options(port, port->cons, baud, parity, bits, flow); + if (ret) + return ret; + + if (port->dev) + pm_runtime_get_sync(port->dev); + + return 0; +} + +int serial8250_console_exit(struct uart_port *port) +{ + if (port->dev) + pm_runtime_put_sync(port->dev); + + return 0; +} + +#endif /* CONFIG_SERIAL_8250_CONSOLE */ + +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c new file mode 100644 index 000000000..795e55142 --- /dev/null +++ b/drivers/tty/serial/8250/8250_pxa.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * drivers/tty/serial/8250/8250_pxa.c -- driver for PXA on-board UARTS + * Copyright: (C) 2013 Sergei Ianovich + * + * replaces drivers/serial/pxa.c by Nicolas Pitre + * Created: Feb 20, 2003 + * Copyright: (C) 2003 Monta Vista Software, Inc. + * + * Based on drivers/serial/8250.c by Russell King. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +struct pxa8250_data { + int line; + struct clk *clk; +}; + +static int __maybe_unused serial_pxa_suspend(struct device *dev) +{ + struct pxa8250_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->line); + + return 0; +} + +static int __maybe_unused serial_pxa_resume(struct device *dev) +{ + struct pxa8250_data *data = dev_get_drvdata(dev); + + serial8250_resume_port(data->line); + + return 0; +} + +static const struct dev_pm_ops serial_pxa_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(serial_pxa_suspend, serial_pxa_resume) +}; + +static const struct of_device_id serial_pxa_dt_ids[] = { + { .compatible = "mrvl,pxa-uart", }, + { .compatible = "mrvl,mmp-uart", }, + {} +}; +MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids); + +/* Uart divisor latch write */ +static void serial_pxa_dl_write(struct uart_8250_port *up, int value) +{ + unsigned int dll; + + serial_out(up, UART_DLL, value & 0xff); + /* + * work around Erratum #74 according to Marvel(R) PXA270M Processor + * Specification Update (April 19, 2010) + */ + dll = serial_in(up, UART_DLL); + WARN_ON(dll != (value & 0xff)); + + serial_out(up, UART_DLM, value >> 8 & 0xff); +} + + +static void serial_pxa_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct pxa8250_data *data = port->private_data; + + if (!state) + clk_prepare_enable(data->clk); + else + clk_disable_unprepare(data->clk); +} + +static int serial_pxa_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct pxa8250_data *data; + struct resource *mmres; + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mmres) + return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(data->clk)) + return PTR_ERR(data->clk); + + ret = clk_prepare(data->clk); + if (ret) + return ret; + + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + if (ret >= 0) + uart.port.line = ret; + + uart.port.type = PORT_XSCALE; + uart.port.iotype = UPIO_MEM32; + uart.port.mapbase = mmres->start; + uart.port.regshift = 2; + uart.port.irq = irq; + uart.port.fifosize = 64; + uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST | UPF_FIXED_TYPE; + uart.port.dev = &pdev->dev; + uart.port.uartclk = clk_get_rate(data->clk); + uart.port.pm = serial_pxa_pm; + uart.port.private_data = data; + uart.dl_write = serial_pxa_dl_write; + + ret = serial8250_register_8250_port(&uart); + if (ret < 0) + goto err_clk; + + data->line = ret; + + platform_set_drvdata(pdev, data); + + return 0; + + err_clk: + clk_unprepare(data->clk); + return ret; +} + +static int serial_pxa_remove(struct platform_device *pdev) +{ + struct pxa8250_data *data = platform_get_drvdata(pdev); + + serial8250_unregister_port(data->line); + + clk_unprepare(data->clk); + + return 0; +} + +static struct platform_driver serial_pxa_driver = { + .probe = serial_pxa_probe, + .remove = serial_pxa_remove, + + .driver = { + .name = "pxa2xx-uart", + .pm = &serial_pxa_pm_ops, + .of_match_table = serial_pxa_dt_ids, + }, +}; + +module_platform_driver(serial_pxa_driver); + +#ifdef CONFIG_SERIAL_8250_CONSOLE +static int __init early_serial_pxa_setup(struct earlycon_device *device, + const char *options) +{ + struct uart_port *port = &device->port; + + if 
(!(device->port.membase || device->port.iobase)) + return -ENODEV; + + port->regshift = 2; + return early_serial8250_setup(device, NULL); +} +OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup); +#endif + +MODULE_AUTHOR("Sergei Ianovich"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pxa2xx-uart"); diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c new file mode 100644 index 000000000..c424e2ae0 --- /dev/null +++ b/drivers/tty/serial/8250/8250_tegra.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Serial Port driver for Tegra devices + * + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +struct tegra_uart { + struct clk *clk; + struct reset_control *rst; + int line; +}; + +static void tegra_uart_handle_break(struct uart_port *p) +{ + unsigned int status, tmout = 10000; + + while (1) { + status = p->serial_in(p, UART_LSR); + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS))) + break; + + p->serial_in(p, UART_RX); + + if (--tmout == 0) + break; + udelay(1); + } +} + +static int tegra_uart_probe(struct platform_device *pdev) +{ + struct uart_8250_port port8250; + struct tegra_uart *uart; + struct uart_port *port; + struct resource *res; + int ret; + + uart = devm_kzalloc(&pdev->dev, sizeof(*uart), GFP_KERNEL); + if (!uart) + return -ENOMEM; + + memset(&port8250, 0, sizeof(port8250)); + + port = &port8250.port; + spin_lock_init(&port->lock); + + port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | + UPF_FIXED_TYPE; + port->iotype = UPIO_MEM32; + port->regshift = 2; + port->type = PORT_TEGRA; + port->irqflags |= IRQF_SHARED; + port->dev = &pdev->dev; + port->handle_break = tegra_uart_handle_break; + + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + if (ret >= 0) + port->line = ret; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + port->irq = ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + port->membase = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!port->membase) + return -ENOMEM; + + port->mapbase = res->start; + port->mapsize = resource_size(res); + + uart->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); + if (IS_ERR(uart->rst)) + return PTR_ERR(uart->rst); + + if (device_property_read_u32(&pdev->dev, "clock-frequency", + &port->uartclk)) { + uart->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(uart->clk)) { + dev_err(&pdev->dev, "failed to get clock!\n"); + return -ENODEV; + } + + ret = clk_prepare_enable(uart->clk); + if (ret < 0) + return ret; + + port->uartclk = clk_get_rate(uart->clk); + } + + ret = reset_control_deassert(uart->rst); + if (ret) + goto err_clkdisable; + + ret = serial8250_register_8250_port(&port8250); + if (ret < 0) + goto err_ctrl_assert; + + platform_set_drvdata(pdev, uart); + uart->line = ret; + + return 0; + +err_ctrl_assert: + reset_control_assert(uart->rst); +err_clkdisable: + clk_disable_unprepare(uart->clk); + + return ret; +} + +static int tegra_uart_remove(struct platform_device *pdev) +{ + struct tegra_uart *uart = platform_get_drvdata(pdev); + + serial8250_unregister_port(uart->line); + reset_control_assert(uart->rst); + clk_disable_unprepare(uart->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int tegra_uart_suspend(struct device *dev) +{ + struct tegra_uart *uart = dev_get_drvdata(dev); + struct uart_8250_port 
*port8250 = serial8250_get_port(uart->line); + struct uart_port *port = &port8250->port; + + serial8250_suspend_port(uart->line); + + if (!uart_console(port) || console_suspend_enabled) + clk_disable_unprepare(uart->clk); + + return 0; +} + +static int tegra_uart_resume(struct device *dev) +{ + struct tegra_uart *uart = dev_get_drvdata(dev); + struct uart_8250_port *port8250 = serial8250_get_port(uart->line); + struct uart_port *port = &port8250->port; + + if (!uart_console(port) || console_suspend_enabled) + clk_prepare_enable(uart->clk); + + serial8250_resume_port(uart->line); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(tegra_uart_pm_ops, tegra_uart_suspend, + tegra_uart_resume); + +static const struct of_device_id tegra_uart_of_match[] = { + { .compatible = "nvidia,tegra20-uart", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_uart_of_match); + +static const struct acpi_device_id tegra_uart_acpi_match[] __maybe_unused = { + { "NVDA0100", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, tegra_uart_acpi_match); + +static struct platform_driver tegra_uart_driver = { + .driver = { + .name = "tegra-uart", + .pm = &tegra_uart_pm_ops, + .of_match_table = tegra_uart_of_match, + .acpi_match_table = ACPI_PTR(tegra_uart_acpi_match), + }, + .probe = tegra_uart_probe, + .remove = tegra_uart_remove, +}; + +module_platform_driver(tegra_uart_driver); + +MODULE_AUTHOR("Jeff Brasen "); +MODULE_DESCRIPTION("NVIDIA Tegra 8250 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c new file mode 100644 index 000000000..a2978abab --- /dev/null +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2015 Masahiro Yamada + */ + +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +/* + * This hardware is similar to 8250, but its register map is a bit different: + * - MMIO32 (regshift = 2) + * - FCR is not at 2, but 3 + * - LCR and MCR are not at 3 and 4, they share 4 + * - No SCR (Instead, CHAR can be used as a scratch register) + * - Divisor latch at 9, no divisor latch access bit + */ + +#define UNIPHIER_UART_REGSHIFT 2 + +/* bit[15:8] = CHAR, bit[7:0] = FCR */ +#define UNIPHIER_UART_CHAR_FCR (3 << (UNIPHIER_UART_REGSHIFT)) +/* bit[15:8] = LCR, bit[7:0] = MCR */ +#define UNIPHIER_UART_LCR_MCR (4 << (UNIPHIER_UART_REGSHIFT)) +/* Divisor Latch Register */ +#define UNIPHIER_UART_DLR (9 << (UNIPHIER_UART_REGSHIFT)) + +struct uniphier8250_priv { + int line; + struct clk *clk; + spinlock_t atomic_write_lock; +}; + +#ifdef CONFIG_SERIAL_8250_CONSOLE +static int __init uniphier_early_console_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + /* This hardware always expects MMIO32 register interface. */ + device->port.iotype = UPIO_MEM32; + device->port.regshift = UNIPHIER_UART_REGSHIFT; + + /* + * Do not touch the divisor register in early_serial8250_setup(); + * we assume it has been initialized by a boot loader. + */ + device->baud = 0; + + return early_serial8250_setup(device, options); +} +OF_EARLYCON_DECLARE(uniphier, "socionext,uniphier-uart", + uniphier_early_console_setup); +#endif + +/* + * The register map is slightly different from that of 8250. + * IO callbacks must be overridden for correct access to FCR, LCR, MCR and SCR. 
+ */ +static unsigned int uniphier_serial_in(struct uart_port *p, int offset) +{ + unsigned int valshift = 0; + + switch (offset) { + case UART_SCR: + /* No SCR for this hardware. Use CHAR as a scratch register */ + valshift = 8; + offset = UNIPHIER_UART_CHAR_FCR; + break; + case UART_LCR: + valshift = 8; + fallthrough; + case UART_MCR: + offset = UNIPHIER_UART_LCR_MCR; + break; + default: + offset <<= UNIPHIER_UART_REGSHIFT; + break; + } + + /* + * The return value must be masked with 0xff because some registers + * share the same offset that must be accessed by 32-bit write/read. + * 8 or 16 bit access to this hardware result in unexpected behavior. + */ + return (readl(p->membase + offset) >> valshift) & 0xff; +} + +static void uniphier_serial_out(struct uart_port *p, int offset, int value) +{ + unsigned int valshift = 0; + bool normal = false; + + switch (offset) { + case UART_SCR: + /* No SCR for this hardware. Use CHAR as a scratch register */ + valshift = 8; + fallthrough; + case UART_FCR: + offset = UNIPHIER_UART_CHAR_FCR; + break; + case UART_LCR: + valshift = 8; + /* Divisor latch access bit does not exist. */ + value &= ~UART_LCR_DLAB; + fallthrough; + case UART_MCR: + offset = UNIPHIER_UART_LCR_MCR; + break; + default: + offset <<= UNIPHIER_UART_REGSHIFT; + normal = true; + break; + } + + if (normal) { + writel(value, p->membase + offset); + } else { + /* + * Special case: two registers share the same address that + * must be 32-bit accessed. As this is not longer atomic safe, + * take a lock just in case. + */ + struct uniphier8250_priv *priv = p->private_data; + unsigned long flags; + u32 tmp; + + spin_lock_irqsave(&priv->atomic_write_lock, flags); + tmp = readl(p->membase + offset); + tmp &= ~(0xff << valshift); + tmp |= value << valshift; + writel(tmp, p->membase + offset); + spin_unlock_irqrestore(&priv->atomic_write_lock, flags); + } +} + +/* + * This hardware does not have the divisor latch access bit. + * The divisor latch register exists at different address. + * Override dl_read/write callbacks. 
+ */ +static int uniphier_serial_dl_read(struct uart_8250_port *up) +{ + return readl(up->port.membase + UNIPHIER_UART_DLR); +} + +static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) +{ + writel(value, up->port.membase + UNIPHIER_UART_DLR); +} + +static int uniphier_uart_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uart_8250_port up; + struct uniphier8250_priv *priv; + struct resource *regs; + void __iomem *membase; + int irq; + int ret; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(dev, "failed to get memory resource\n"); + return -EINVAL; + } + + membase = devm_ioremap(dev, regs->start, resource_size(regs)); + if (!membase) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + memset(&up, 0, sizeof(up)); + + ret = of_alias_get_id(dev->of_node, "serial"); + if (ret < 0) { + dev_err(dev, "failed to get alias id\n"); + return ret; + } + up.port.line = ret; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(priv->clk); + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + up.port.uartclk = clk_get_rate(priv->clk); + + spin_lock_init(&priv->atomic_write_lock); + + up.port.dev = dev; + up.port.private_data = priv; + up.port.mapbase = regs->start; + up.port.mapsize = resource_size(regs); + up.port.membase = membase; + up.port.irq = irq; + + up.port.type = PORT_16550A; + up.port.iotype = UPIO_MEM32; + up.port.fifosize = 64; + up.port.regshift = UNIPHIER_UART_REGSHIFT; + up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE; + up.capabilities = UART_CAP_FIFO; + + if (of_property_read_bool(dev->of_node, "auto-flow-control")) + up.capabilities |= UART_CAP_AFE; + + up.port.serial_in = uniphier_serial_in; + up.port.serial_out = uniphier_serial_out; + up.dl_read = uniphier_serial_dl_read; + up.dl_write = uniphier_serial_dl_write; + + ret = serial8250_register_8250_port(&up); + if (ret < 0) { + dev_err(dev, "failed to register 8250 port\n"); + clk_disable_unprepare(priv->clk); + return ret; + } + priv->line = ret; + + platform_set_drvdata(pdev, priv); + + return 0; +} + +static int uniphier_uart_remove(struct platform_device *pdev) +{ + struct uniphier8250_priv *priv = platform_get_drvdata(pdev); + + serial8250_unregister_port(priv->line); + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused uniphier_uart_suspend(struct device *dev) +{ + struct uniphier8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + + serial8250_suspend_port(priv->line); + + if (!uart_console(&up->port) || console_suspend_enabled) + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused uniphier_uart_resume(struct device *dev) +{ + struct uniphier8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + int ret; + + if (!uart_console(&up->port) || console_suspend_enabled) { + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + } + + serial8250_resume_port(priv->line); + + return 0; +} + +static const struct dev_pm_ops uniphier_uart_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(uniphier_uart_suspend, uniphier_uart_resume) +}; + +static const struct of_device_id uniphier_uart_match[] = { + { .compatible = "socionext,uniphier-uart" }, + { /* sentinel */ } +}; 
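For reference, the locked read-modify-write that uniphier_serial_out() performs on the shared LCR/MCR word (bit[15:8] = LCR, bit[7:0] = MCR, 32-bit access only) can be shown with a small standalone sketch. It is not part of the patch and models the register as a plain variable rather than MMIO:

	/* Standalone model of the byte-in-word update used for UNIPHIER_UART_LCR_MCR. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t lcr_mcr_reg;	/* stands in for the MMIO word behind readl()/writel() */

	static void write_byte_field(uint32_t *reg, unsigned int valshift, uint8_t value)
	{
		uint32_t tmp = *reg;			/* readl() in the driver  */

		tmp &= ~(0xffu << valshift);		/* clear the target byte  */
		tmp |= (uint32_t)value << valshift;	/* insert the new value   */
		*reg = tmp;				/* writel() in the driver */
	}

	int main(void)
	{
		write_byte_field(&lcr_mcr_reg, 0, 0x0b);	/* MCR field, bit[7:0]  */
		write_byte_field(&lcr_mcr_reg, 8, 0x03);	/* LCR field, bit[15:8] */
		printf("LCR_MCR = 0x%08" PRIx32 "\n", lcr_mcr_reg);	/* prints 0x0000030b */
		return 0;
	}

In the driver the same sequence runs under priv->atomic_write_lock, because two logical registers live in one word and a concurrent writer could otherwise lose an update.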
+MODULE_DEVICE_TABLE(of, uniphier_uart_match); + +static struct platform_driver uniphier_uart_platform_driver = { + .probe = uniphier_uart_probe, + .remove = uniphier_uart_remove, + .driver = { + .name = "uniphier-uart", + .of_match_table = uniphier_uart_match, + .pm = &uniphier_uart_pm_ops, + }, +}; +module_platform_driver(uniphier_uart_platform_driver); + +MODULE_AUTHOR("Masahiro Yamada "); +MODULE_DESCRIPTION("UniPhier UART driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig new file mode 100644 index 000000000..583a340f9 --- /dev/null +++ b/drivers/tty/serial/8250/Kconfig @@ -0,0 +1,549 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# The 8250/16550 serial drivers. You shouldn't be in this list unless +# you somehow have an implicit or explicit dependency on SERIAL_8250. +# + +config SERIAL_8250 + tristate "8250/16550 and compatible serial support" + depends on !S390 + select SERIAL_CORE + select SERIAL_MCTRL_GPIO if GPIOLIB + help + This selects whether you want to include the driver for the standard + serial ports. The standard answer is Y. People who might say N + here are those that are setting up dedicated Ethernet WWW/FTP + servers, or users that have one of the various bus mice instead of a + serial mouse and don't intend to use their machine's standard serial + port for anything. + + To compile this driver as a module, choose M here: the + module will be called 8250. + [WARNING: Do not compile this driver as a module if you are using + non-standard serial ports, since the configuration information will + be lost when the driver is unloaded. This limitation may be lifted + in the future.] + + BTW1: If you have a mouseman serial mouse which is not recognized by + the X window system, try running gpm first. + + BTW2: If you intend to use a software modem (also called Winmodem) + under Linux, forget it. These modems are crippled and require + proprietary drivers which are only available under Windows. + + Most people will say Y or M here, so that they can use serial mice, + modems and similar devices connecting to the standard serial ports. + +config SERIAL_8250_DEPRECATED_OPTIONS + bool "Support 8250_core.* kernel options (DEPRECATED)" + depends on SERIAL_8250 + default y + help + In 3.7 we renamed 8250 to 8250_core by mistake, so now we have to + accept kernel parameters in both forms like 8250_core.nr_uarts=4 and + 8250.nr_uarts=4. We now renamed the module back to 8250, but if + anybody noticed in 3.7 and changed their userspace we still have to + keep the 8250_core.* options around until they revert the changes + they already did. + + If 8250 is built as a module, this adds 8250_core alias instead. + + If you did not notice yet and/or you have userspace from pre-3.7, it + is safe (and recommended) to say N here. + +config SERIAL_8250_PNP + bool "8250/16550 PNP device support" if EXPERT + depends on SERIAL_8250 && PNP + default y + help + This builds standard PNP serial support. You may be able to + disable this feature if you only need legacy serial support. + +config SERIAL_8250_16550A_VARIANTS + bool "Support for variants of the 16550A serial port" + depends on SERIAL_8250 + default !X86 + help + The 8250 driver can probe for many variants of the venerable 16550A + serial port. Doing so takes additional time at boot. + + On modern systems, especially those using serial only for a simple + console, you can say N here. 
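For reference, the platform back ends that hang off SERIAL_8250 in this file (8250_tegra.c and serial_cs.c elsewhere in this patch, for example) all reduce to the same core calls: fill in a struct uart_8250_port and hand it to serial8250_register_8250_port(), then release the returned line with serial8250_unregister_port(). A minimal sketch of that pattern, with purely illustrative resource values, looks like this:

	#include <linux/serial_8250.h>
	#include <linux/serial_core.h>
	#include <linux/string.h>

	static int example_line = -1;

	static int example_register_port(void)
	{
		struct uart_8250_port up;

		memset(&up, 0, sizeof(up));
		up.port.iobase  = 0x3f8;	/* illustrative legacy I/O base */
		up.port.irq     = 4;		/* illustrative IRQ */
		up.port.uartclk = 1843200;	/* standard 1.8432 MHz input clock */
		up.port.iotype  = UPIO_PORT;
		up.port.flags   = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;

		example_line = serial8250_register_8250_port(&up);
		return example_line < 0 ? example_line : 0;
	}

	static void example_unregister_port(void)
	{
		if (example_line >= 0)
			serial8250_unregister_port(example_line);
	}

The quirk hooks and per-SoC probe code in the files above only adjust what goes into the uart_8250_port before this registration step.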
+ +config SERIAL_8250_FINTEK + bool "Support for Fintek F81216A LPC to 4 UART RS485 API" + depends on SERIAL_8250 + help + Selecting this option will add support for the RS485 capabilities + of the Fintek F81216A LPC to 4 UART. + + If this option is not selected the device will be configured as a + standard 16550A serial port. + + If unsure, say N. + +config SERIAL_8250_CONSOLE + bool "Console on 8250/16550 and compatible serial port" + depends on SERIAL_8250=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + If you say Y here, it will be possible to use a serial port as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). This could be useful if some terminal or printer is connected + to that serial port. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyS1". (Try "man bootparam" or see the documentation of + your boot loader (grub or lilo or loadlin) about how to pass options + to the kernel at boot time.) + + If you don't have a VGA card installed and you say Y here, the + kernel will automatically use the first serial line, /dev/ttyS0, as + system console. + + You can set that using a kernel command line option such as + "console=uart8250,io,0x3f8,9600n8" + "console=uart8250,mmio,0xff5e0000,115200n8". + and it will switch to normal serial console when the corresponding + port is ready. + "earlycon=uart8250,io,0x3f8,9600n8" + "earlycon=uart8250,mmio,0xff5e0000,115200n8". + it will not only setup early console. + + If unsure, say N. + +config SERIAL_8250_PARISC + tristate + depends on SERIAL_8250 && PARISC + default SERIAL_8250 + +config SERIAL_8250_DMA + bool "DMA support for 16550 compatible UART controllers" if EXPERT + depends on SERIAL_8250 && DMADEVICES=y + default SERIAL_8250 + help + This builds DMA support that can be used with 8250/16650 + compatible UART controllers that support DMA signaling. + +config SERIAL_8250_PCI + tristate "8250/16550 PCI device support" + depends on SERIAL_8250 && PCI + default SERIAL_8250 + help + This builds standard PCI serial support. You may be able to + disable this feature if you only need legacy serial support. + Saves about 9K. + Note that serial ports on NetMos 9835 Multi-I/O cards are handled + by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL. + +config SERIAL_8250_EXAR + tristate "8250/16550 Exar/Commtech PCI/PCIe device support" + depends on SERIAL_8250_PCI + default SERIAL_8250 + help + This builds support for XR17C1xx, XR17V3xx and some Commtech + 422x PCIe serial cards that are not covered by the more generic + SERIAL_8250_PCI option. + +config SERIAL_8250_HP300 + tristate + depends on SERIAL_8250 && HP300 + default SERIAL_8250 + +config SERIAL_8250_CS + tristate "8250/16550 PCMCIA device support" + depends on PCMCIA && SERIAL_8250 + help + Say Y here to enable support for 16-bit PCMCIA serial devices, + including serial port cards, modems, and the modem functions of + multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are + credit-card size devices often used with laptops.) + + To compile this driver as a module, choose M here: the + module will be called serial_cs. + + If unsure, say N. 
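The console and earlycon behaviour described under SERIAL_8250_CONSOLE above is what the OF_EARLYCON_DECLARE() hooks elsewhere in this patch (early_pxa, uniphier) plug into. A board-specific hook follows the shape below; this is only a sketch, and the "vendor,example-uart" compatible string is made up for illustration:

	#include <linux/serial_core.h>
	#include <linux/serial_8250.h>

	static int __init example_early_console_setup(struct earlycon_device *device,
						      const char *options)
	{
		if (!device->port.membase)
			return -ENODEV;

		/* Assume 32-bit registers spaced four bytes apart. */
		device->port.iotype = UPIO_MEM32;
		device->port.regshift = 2;

		return early_serial8250_setup(device, options);
	}
	OF_EARLYCON_DECLARE(example_uart, "vendor,example-uart",
			    example_early_console_setup);

With a matching stdout-path in the devicetree, a plain "earlycon" on the command line selects such a hook; the generic uart8250 forms quoted in the help text work without it.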
+ +config SERIAL_8250_MEN_MCB + tristate "MEN MCB UART device support" + depends on MCB && SERIAL_8250 + help + This enables support for FPGA based UARTs found on many MEN + boards. This driver enables support for the 16z025, 16z057 + and 16z125 UARTs. + + To compile this driver as a module, chose M here: the + module will be called 8250_men_mcb. + + +config SERIAL_8250_NR_UARTS + int "Maximum number of 8250/16550 serial ports" + depends on SERIAL_8250 + default "4" + help + Set this to the number of serial ports you want the driver + to support. This includes any ports discovered via ACPI or + PCI enumeration and any ports that may be added at run-time + via hot-plug, or any ISA multi-port serial cards. + +config SERIAL_8250_RUNTIME_UARTS + int "Number of 8250/16550 serial ports to register at runtime" + depends on SERIAL_8250 + range 0 SERIAL_8250_NR_UARTS + default "4" + help + Set this to the maximum number of serial ports you want + the kernel to register at boot time. This can be overridden + with the module parameter "nr_uarts", or boot-time parameter + 8250.nr_uarts + +config SERIAL_8250_EXTENDED + bool "Extended 8250/16550 serial driver options" + depends on SERIAL_8250 + help + If you wish to use any non-standard features of the standard "dumb" + driver, say Y here. This includes HUB6 support, shared serial + interrupts, special multiport support, support for more than the + four COM 1/2/3/4 boards, etc. + + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about serial driver options. If unsure, say N. + +config SERIAL_8250_MANY_PORTS + bool "Support more than 4 legacy serial ports" + depends on SERIAL_8250_EXTENDED && !IA64 + help + Say Y here if you have dumb serial boards other than the four + standard COM 1/2/3/4 ports. This may happen if you have an AST + FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available + from ), or other custom + serial port hardware which acts similar to standard serial port + hardware. If you only use the standard COM 1/2/3/4 ports, you can + say N here to save some memory. You can also say Y if you have an + "intelligent" multiport card such as Digiboards, etc. + +# +# Multi-port serial cards +# + +config SERIAL_8250_FOURPORT + tristate "Support Fourport cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have an AST FourPort serial board. + + To compile this driver as a module, choose M here: the module + will be called 8250_fourport. + +config SERIAL_8250_ACCENT + tristate "Support Accent cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have an Accent Async serial board. + + To compile this driver as a module, choose M here: the module + will be called 8250_accent. + +config SERIAL_8250_ASPEED_VUART + tristate "Aspeed Virtual UART" + depends on SERIAL_8250 + depends on OF + depends on MFD_SYSCON + depends on ARCH_ASPEED || COMPILE_TEST + select REGMAP + help + If you want to use the virtual UART (VUART) device on Aspeed + BMC platforms, enable this option. This enables the 16550A- + compatible device on the local LPC bus, giving a UART device + with no physical RS232 connections. + +config SERIAL_8250_BOCA + tristate "Support Boca cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have a Boca serial board. 
Please read the Boca + mini-HOWTO, available from + + To compile this driver as a module, choose M here: the module + will be called 8250_boca. + +config SERIAL_8250_EXAR_ST16C554 + tristate "Support Exar ST16C554/554D Quad UART" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + The Uplogix Envoy TU301 uses this Exar Quad UART. If you are + tinkering with your Envoy TU301, or have a machine with this UART, + say Y here. + + To compile this driver as a module, choose M here: the module + will be called 8250_exar_st16c554. + +config SERIAL_8250_HUB6 + tristate "Support Hub6 cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have a HUB6 serial board. + + To compile this driver as a module, choose M here: the module + will be called 8250_hub6. + +# +# Misc. options/drivers. +# + +config SERIAL_8250_SHARE_IRQ + bool "Support for sharing serial interrupts" + depends on SERIAL_8250_EXTENDED + help + Some serial boards have hardware support which allows multiple dumb + serial ports on the same board to share a single IRQ. To enable + support for this in the serial driver, say Y here. + +config SERIAL_8250_DETECT_IRQ + bool "Autodetect IRQ on standard ports (unsafe)" + depends on SERIAL_8250_EXTENDED + help + Say Y here if you want the kernel to try to guess which IRQ + to use for your serial port. + + This is considered unsafe; it is far better to configure the IRQ in + a boot script using the setserial command. + + If unsure, say N. + +config SERIAL_8250_RSA + bool "Support RSA serial ports" + depends on SERIAL_8250_EXTENDED + help + Say Y here if you have a IODATA RSA-DV II/S ISA card and + would like to use its >115kbps speeds. + You will need to provide module parameter "probe_rsa", or boot-time + parameter 8250.probe_rsa with I/O addresses of this card then. + + If you don't have such card, or if unsure, say N. + +config SERIAL_8250_DWLIB + bool + +config SERIAL_8250_ACORN + tristate "Acorn expansion card serial port support" + depends on ARCH_ACORN && SERIAL_8250 + help + If you have an Atomwide Serial card or Serial Port card for an Acorn + system, say Y to this option. The driver can handle 1, 2, or 3 port + cards. If unsure, say N. + +config SERIAL_8250_BCM2835AUX + tristate "BCM2835 auxiliar mini UART support" + depends on ARCH_BCM2835 || COMPILE_TEST + depends on SERIAL_8250 && SERIAL_8250_SHARE_IRQ + help + Support for the BCM2835 auxiliar mini UART. + + Features and limitations of the UART are + Registers are similar to 16650 registers, + set bits in the control registers that are unsupported + are ignored and read back as 0 + 7/8 bit operation with 1 start and 1 stop bit + 8 symbols deep fifo for rx and tx + SW controlled RTS and SW readable CTS + Clock rate derived from system clock + Uses 8 times oversampling (compared to 16 times for 16650) + Missing break detection (but break generation) + Missing framing error detection + Missing parity bit + Missing receive time-out interrupt + Missing DCD, DSR, DTR and RI signals + + If unsure, say N. + +config SERIAL_8250_FSL + bool "Freescale 16550 UART support" if COMPILE_TEST && !(PPC || ARM || ARM64) + depends on SERIAL_8250_CONSOLE + default PPC || ARM || ARM64 + help + Selecting this option enables a workaround for a break-detection + erratum for Freescale 16550 UARTs in the 8250 driver. It also + enables support for ACPI enumeration. 
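SERIAL_8250_SHARE_IRQ above boils down to registering every port on the line with IRQF_SHARED and a distinct dev_id, so each handler can decline interrupts raised by another sharer. A rough sketch, with illustrative names and IRQ number:

	#include <linux/interrupt.h>

	#define EXAMPLE_SHARED_IRQ	4	/* illustrative line shared by two ports */

	static irqreturn_t example_uart_isr(int irq, void *dev_id)
	{
		/*
		 * A real handler reads this port's IIR and returns IRQ_NONE when
		 * the UART identified by dev_id did not raise the interrupt, so
		 * the other sharer's handler gets to run.
		 */
		return IRQ_NONE;
	}

	static int example_request_shared(void *port_a, void *port_b)
	{
		int ret;

		/* dev_id must be unique and non-NULL for IRQF_SHARED handlers. */
		ret = request_irq(EXAMPLE_SHARED_IRQ, example_uart_isr, IRQF_SHARED,
				  "example-uart-a", port_a);
		if (ret)
			return ret;

		ret = request_irq(EXAMPLE_SHARED_IRQ, example_uart_isr, IRQF_SHARED,
				  "example-uart-b", port_b);
		if (ret)
			free_irq(EXAMPLE_SHARED_IRQ, port_a);

		return ret;
	}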
+ +config SERIAL_8250_DW + tristate "Support for Synopsys DesignWare 8250 quirks" + depends on SERIAL_8250 + select SERIAL_8250_DWLIB + help + Selecting this option will enable handling of the extra features + present in the Synopsys DesignWare APB UART. + +config SERIAL_8250_EM + tristate "Support for Emma Mobile integrated serial port" + depends on SERIAL_8250 && HAVE_CLK + depends on ARCH_RENESAS || COMPILE_TEST + help + Selecting this option will add support for the integrated serial + port hardware found on the Emma Mobile line of processors. + If unsure, say N. + +config SERIAL_8250_IOC3 + tristate "SGI IOC3 8250 UART support" + depends on SERIAL_8250 + depends on SGI_MFD_IOC3 || COMPILE_TEST + select SERIAL_8250_EXTENDED + select SERIAL_8250_SHARE_IRQ + help + Enable this if you have a SGI Origin or Octane machine. This module + provides basic serial support by directly driving the UART chip + behind the IOC3 device on those systems. Maximum baud speed is + 38400bps using this driver. + +config SERIAL_8250_RT288X + bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support" + depends on SERIAL_8250 + default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620 + help + Selecting this option will add support for the alternate register + layout used by Ralink RT288x/RT305x, Alchemy Au1xxx, and some others. + If unsure, say N. + +config SERIAL_8250_OMAP + tristate "Support for OMAP internal UART (8250 based driver)" + depends on SERIAL_8250 + depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST + help + If you have a machine based on an Texas Instruments OMAP CPU you + can enable its onboard serial ports by enabling this option. + + This driver uses ttyS instead of ttyO. + +config SERIAL_8250_OMAP_TTYO_FIXUP + bool "Replace ttyO with ttyS" + depends on SERIAL_8250_OMAP=y && SERIAL_8250_CONSOLE + default y + help + This option replaces the "console=ttyO" argument with the matching + ttyS argument if the user did not specified it on the command line. + This ensures that the user can see the kernel output during boot + which he wouldn't see otherwise. The getty has still to be configured + for ttyS instead of ttyO regardless of this option. + This option is intended for people who "automatically" enable this + driver without knowing that this driver requires a different console= + argument. If you read this, please keep this option disabled and + instead update your kernel command line. If you prepare a kernel for a + distribution or other kind of larger user base then you probably want + to keep this option enabled. Otherwise people might complain about a + not booting kernel because the serial console remains silent in case + they forgot to update the command line. + +config SERIAL_8250_LPC18XX + tristate "NXP LPC18xx/43xx serial port support" + depends on SERIAL_8250 && OF && (ARCH_LPC18XX || COMPILE_TEST) + default ARCH_LPC18XX + help + If you have a LPC18xx/43xx based board and want to use the + serial port, say Y to this option. If unsure, say Y. + +config SERIAL_8250_MT6577 + tristate "Mediatek serial port support" + depends on SERIAL_8250 + depends on ARCH_MEDIATEK || COMPILE_TEST + help + If you have a Mediatek based board and want to use the + serial port, say Y to this option. If unsure, say N. + +config SERIAL_8250_UNIPHIER + tristate "Support for UniPhier on-chip UART" + depends on SERIAL_8250 + depends on ARCH_UNIPHIER || COMPILE_TEST + help + If you have a UniPhier based board and want to use the on-chip + serial ports, say Y to this option. 
If unsure, say N. + +config SERIAL_8250_INGENIC + tristate "Support for Ingenic SoC serial ports" + depends on SERIAL_8250 + depends on OF_FLATTREE + depends on MIPS || COMPILE_TEST + help + If you have a system using an Ingenic SoC and wish to make use of + its UARTs, say Y to this option. If unsure, say N. + +config SERIAL_8250_LPSS + tristate "Support for serial ports on Intel LPSS platforms" + default SERIAL_8250 + depends on SERIAL_8250 && PCI + depends on X86 || COMPILE_TEST + select SERIAL_8250_DWLIB + select DW_DMAC_CORE if SERIAL_8250_DMA + select DW_DMAC_PCI if (SERIAL_8250_DMA && X86_INTEL_LPSS) + select RATIONAL + help + Selecting this option will enable handling of the UART found on + various Intel platforms such as: + - Intel Baytrail SoC + - Intel Braswell SoC + - Intel Quark X1000 SoC + that are not covered by the more generic SERIAL_8250_PCI option. + +config SERIAL_8250_MID + tristate "Support for serial ports on Intel MID platforms" + default SERIAL_8250 + depends on SERIAL_8250 && PCI + depends on X86 || COMPILE_TEST + select HSU_DMA if SERIAL_8250_DMA + select HSU_DMA_PCI if (HSU_DMA && X86_INTEL_MID) + select RATIONAL + help + Selecting this option will enable handling of the UART found on + Intel Medfield SOC and various other Intel platforms that is not + covered by the more generic SERIAL_8250_PCI option. + +config SERIAL_8250_PERICOM + tristate "Support for Pericom and Acces I/O serial ports" + default SERIAL_8250 + depends on SERIAL_8250 && PCI + help + Selecting this option will enable handling of the Pericom and Acces + I/O UARTs that are not covered by the more generic SERIAL_8250_PCI + option. + +config SERIAL_8250_PXA + tristate "PXA serial port support" + depends on SERIAL_8250 + depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST + help + If you have a machine based on an Intel XScale PXA2xx CPU you can + enable its onboard serial ports by enabling this option. The option is + applicable to both devicetree and legacy boards, and early console is + part of its support. + +config SERIAL_8250_TEGRA + tristate "8250 support for Tegra serial ports" + default SERIAL_8250 + depends on SERIAL_8250 + depends on ARCH_TEGRA || COMPILE_TEST + help + Select this option if you have machine with an NVIDIA Tegra SoC and + wish to enable 8250 serial driver for the Tegra serial interfaces. + +config SERIAL_8250_BCM7271 + tristate "Broadcom 8250 based serial port" + depends on SERIAL_8250 && (ARCH_BRCMSTB || COMPILE_TEST) + default ARCH_BRCMSTB + help + If you have a Broadcom STB based board and want to use the + enhanced features of the Broadcom 8250 based serial port, + including DMA support and high accuracy BAUD rates, say + Y to this option. If unsure, say N. + +config SERIAL_OF_PLATFORM + tristate "Devicetree based probing for 8250 ports" + depends on SERIAL_8250 && OF + help + This option is used for all 8250 compatible serial ports that + are probed through devicetree, including Open Firmware based + PowerPC systems and embedded systems on architectures using the + flattened device tree format. diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile new file mode 100644 index 000000000..1615bfdde --- /dev/null +++ b/drivers/tty/serial/8250/Makefile @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the 8250 serial device drivers. 
+# + +obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o +8250-y := 8250_core.o +8250-$(CONFIG_ALPHA_GENERIC) += 8250_alpha.o +8250-$(CONFIG_ALPHA_JENSEN) += 8250_alpha.o +8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o +8250_base-y := 8250_port.o +8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o +8250_base-$(CONFIG_SERIAL_8250_DWLIB) += 8250_dwlib.o +8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o +obj-$(CONFIG_SERIAL_8250_PARISC) += 8250_parisc.o +obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o +obj-$(CONFIG_SERIAL_8250_EXAR) += 8250_exar.o +obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o +obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o +obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o +obj-$(CONFIG_SERIAL_8250_ASPEED_VUART) += 8250_aspeed_vuart.o +obj-$(CONFIG_SERIAL_8250_BCM2835AUX) += 8250_bcm2835aux.o +obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o +obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o +obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o +obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o +obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o +obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o +obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o +obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o +obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o +obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o +obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o +obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o +obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o +obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o +obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o +obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o +obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o +obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o +obj-$(CONFIG_SERIAL_8250_PERICOM) += 8250_pericom.o +obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o +obj-$(CONFIG_SERIAL_8250_TEGRA) += 8250_tegra.o +obj-$(CONFIG_SERIAL_8250_BCM7271) += 8250_bcm7271.o +obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o + +CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c new file mode 100644 index 000000000..dc2ef05a1 --- /dev/null +++ b/drivers/tty/serial/8250/serial_cs.c @@ -0,0 +1,873 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) +/*====================================================================== + + A driver for PCMCIA serial devices + + serial_cs.c 1.134 2002/05/04 05:48:53 + + The contents of this file are subject to the Mozilla Public + License Version 1.1 (the "License"); you may not use this file + except in compliance with the License. You may obtain a copy of + the License at http://www.mozilla.org/MPL/ + + Software distributed under the License is distributed on an "AS + IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + implied. See the License for the specific language governing + rights and limitations under the License. + + The initial developer of the original code is David A. Hinds + . Portions created by David A. Hinds + are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + + Alternatively, the contents of this file may be used under the + terms of the GNU General Public License version 2 (the "GPL"), in which + case the provisions of the GPL are applicable instead of the + above. 
If you wish to allow the use of your version of this file + only under the terms of the GPL and not to allow others to use + your version of this file under the MPL, indicate your decision + by deleting the provisions above and replace them with the notice + and other provisions required by the GPL. If you do not delete + the provisions above, a recipient may use your version of this + file under either the MPL or the GPL. + +======================================================================*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "8250.h" + + +/*====================================================================*/ + +/* Parameters that can be set with 'insmod' */ + +/* Enable the speaker? */ +static int do_sound = 1; +/* Skip strict UART tests? */ +static int buggy_uart; + +module_param(do_sound, int, 0444); +module_param(buggy_uart, int, 0444); + +/*====================================================================*/ + +/* Table of multi-port card ID's */ + +struct serial_quirk { + unsigned int manfid; + unsigned int prodid; + int multi; /* 1 = multifunction, > 1 = # ports */ + void (*config)(struct pcmcia_device *); + void (*setup)(struct pcmcia_device *, struct uart_8250_port *); + void (*wakeup)(struct pcmcia_device *); + int (*post)(struct pcmcia_device *); +}; + +struct serial_info { + struct pcmcia_device *p_dev; + int ndev; + int multi; + int slave; + int manfid; + int prodid; + int c950ctrl; + int line[4]; + const struct serial_quirk *quirk; +}; + +struct serial_cfg_mem { + tuple_t tuple; + cisparse_t parse; + u_char buf[256]; +}; + +/* + * vers_1 5.0, "Brain Boxes", "2-Port RS232 card", "r6" + * manfid 0x0160, 0x0104 + * This card appears to have a 14.7456MHz clock. + */ +/* Generic Modem: MD55x (GPRS/EDGE) have + * Elan VPU16551 UART with 14.7456MHz oscillator + * manfid 0x015D, 0x4C45 + */ +static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_8250_port *uart) +{ + uart->port.uartclk = 14745600; +} + +static int quirk_post_ibm(struct pcmcia_device *link) +{ + u8 val; + int ret; + + ret = pcmcia_read_config_byte(link, 0x800, &val); + if (ret) + goto failed; + + ret = pcmcia_write_config_byte(link, 0x800, val | 1); + if (ret) + goto failed; + return 0; + + failed: + return -ENODEV; +} + +/* + * Nokia cards are not really multiport cards. Shouldn't this + * be handled by setting the quirk entry .multi = 0 | 1 ? + */ +static void quirk_config_nokia(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + + if (info->multi > 1) + info->multi = 1; +} + +static void quirk_wakeup_oxsemi(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + + if (info->c950ctrl) + outb(12, info->c950ctrl + 1); +} + +/* request_region? oxsemi branch does no request_region too... */ +/* + * This sequence is needed to properly initialize MC45 attached to OXCF950. + * I tried decreasing these msleep()s, but it worked properly (survived + * 1000 stop/start operations) with these timeouts (or bigger). 
+ */ +static void quirk_wakeup_possio_gcc(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + unsigned int ctrl = info->c950ctrl; + + outb(0xA, ctrl + 1); + msleep(100); + outb(0xE, ctrl + 1); + msleep(300); + outb(0xC, ctrl + 1); + msleep(100); + outb(0xE, ctrl + 1); + msleep(200); + outb(0xF, ctrl + 1); + msleep(100); + outb(0xE, ctrl + 1); + msleep(100); + outb(0xC, ctrl + 1); +} + +/* + * Socket Dual IO: this enables irq's for second port + */ +static void quirk_config_socket(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + + if (info->multi) + link->config_flags |= CONF_ENABLE_ESR; +} + +static const struct serial_quirk quirks[] = { + { + .manfid = 0x0160, + .prodid = 0x0104, + .multi = -1, + .setup = quirk_setup_brainboxes_0104, + }, { + .manfid = 0x015D, + .prodid = 0x4C45, + .multi = -1, + .setup = quirk_setup_brainboxes_0104, + }, { + .manfid = MANFID_IBM, + .prodid = ~0, + .multi = -1, + .post = quirk_post_ibm, + }, { + .manfid = MANFID_INTEL, + .prodid = PRODID_INTEL_DUAL_RS232, + .multi = 2, + }, { + .manfid = MANFID_NATINST, + .prodid = PRODID_NATINST_QUAD_RS232, + .multi = 4, + }, { + .manfid = MANFID_NOKIA, + .prodid = ~0, + .multi = -1, + .config = quirk_config_nokia, + }, { + .manfid = MANFID_OMEGA, + .prodid = PRODID_OMEGA_QSP_100, + .multi = 4, + }, { + .manfid = MANFID_OXSEMI, + .prodid = ~0, + .multi = -1, + .wakeup = quirk_wakeup_oxsemi, + }, { + .manfid = MANFID_POSSIO, + .prodid = PRODID_POSSIO_GCC, + .multi = -1, + .wakeup = quirk_wakeup_possio_gcc, + }, { + .manfid = MANFID_QUATECH, + .prodid = PRODID_QUATECH_DUAL_RS232, + .multi = 2, + }, { + .manfid = MANFID_QUATECH, + .prodid = PRODID_QUATECH_DUAL_RS232_D1, + .multi = 2, + }, { + .manfid = MANFID_QUATECH, + .prodid = PRODID_QUATECH_DUAL_RS232_G, + .multi = 2, + }, { + .manfid = MANFID_QUATECH, + .prodid = PRODID_QUATECH_QUAD_RS232, + .multi = 4, + }, { + .manfid = MANFID_SOCKET, + .prodid = PRODID_SOCKET_DUAL_RS232, + .multi = 2, + .config = quirk_config_socket, + }, { + .manfid = MANFID_SOCKET, + .prodid = ~0, + .multi = -1, + .config = quirk_config_socket, + } +}; + + +static int serial_config(struct pcmcia_device *link); + + +static void serial_remove(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + int i; + + dev_dbg(&link->dev, "serial_release\n"); + + /* + * Recheck to see if the device is still configured. 
+ */ + for (i = 0; i < info->ndev; i++) + serial8250_unregister_port(info->line[i]); + + if (!info->slave) + pcmcia_disable_device(link); +} + +static int serial_suspend(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + int i; + + for (i = 0; i < info->ndev; i++) + serial8250_suspend_port(info->line[i]); + + return 0; +} + +static int serial_resume(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + int i; + + for (i = 0; i < info->ndev; i++) + serial8250_resume_port(info->line[i]); + + if (info->quirk && info->quirk->wakeup) + info->quirk->wakeup(link); + + return 0; +} + +static int serial_probe(struct pcmcia_device *link) +{ + struct serial_info *info; + int ret; + + dev_dbg(&link->dev, "serial_attach()\n"); + + /* Create new serial device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + info->p_dev = link; + link->priv = info; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + if (do_sound) + link->config_flags |= CONF_ENABLE_SPKR; + + ret = serial_config(link); + if (ret) + goto free_info; + + return 0; + +free_info: + kfree(info); + return ret; +} + +static void serial_detach(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + + dev_dbg(&link->dev, "serial_detach\n"); + + /* + * Ensure that the ports have been released. + */ + serial_remove(link); + + /* free bits */ + kfree(info); +} + +/*====================================================================*/ + +static int setup_serial(struct pcmcia_device *handle, struct serial_info *info, + unsigned int iobase, int irq) +{ + struct uart_8250_port uart; + int line; + + memset(&uart, 0, sizeof(uart)); + uart.port.iobase = iobase; + uart.port.irq = irq; + uart.port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; + uart.port.uartclk = 1843200; + uart.port.dev = &handle->dev; + if (buggy_uart) + uart.port.flags |= UPF_BUGGY_UART; + + if (info->quirk && info->quirk->setup) + info->quirk->setup(handle, &uart); + + line = serial8250_register_8250_port(&uart); + if (line < 0) { + pr_err("serial_cs: serial8250_register_8250_port() at 0x%04lx, irq %d failed\n", + (unsigned long)iobase, irq); + return -EINVAL; + } + + info->line[info->ndev] = line; + info->ndev++; + + return 0; +} + +/*====================================================================*/ + +static int pfc_config(struct pcmcia_device *p_dev) +{ + unsigned int port = 0; + struct serial_info *info = p_dev->priv; + + if ((p_dev->resource[1]->end != 0) && + (resource_size(p_dev->resource[1]) == 8)) { + port = p_dev->resource[1]->start; + info->slave = 1; + } else if ((info->manfid == MANFID_OSITECH) && + (resource_size(p_dev->resource[0]) == 0x40)) { + port = p_dev->resource[0]->start + 0x28; + info->slave = 1; + } + if (info->slave) + return setup_serial(p_dev, info, port, p_dev->irq); + + dev_warn(&p_dev->dev, "no usable port range found, giving up\n"); + return -ENODEV; +} + +static int simple_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + static const int size_table[2] = { 8, 16 }; + int *try = priv_data; + + if (p_dev->resource[0]->start == 0) + return -ENODEV; + + if ((*try & 0x1) == 0) + p_dev->io_lines = 16; + + if (p_dev->resource[0]->end != size_table[(*try >> 1)]) + return -ENODEV; + + p_dev->resource[0]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + return pcmcia_request_io(p_dev); +} + +static int simple_config_check_notpicky(struct pcmcia_device *p_dev, + 
void *priv_data) +{ + static const unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; + int j; + + if (p_dev->io_lines > 3) + return -ENODEV; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 8; + + for (j = 0; j < 5; j++) { + p_dev->resource[0]->start = base[j]; + p_dev->io_lines = base[j] ? 16 : 3; + if (!pcmcia_request_io(p_dev)) + return 0; + } + return -ENODEV; +} + +static int simple_config(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + int ret, try; + + /* + * First pass: look for a config entry that looks normal. + * Two tries: without IO aliases, then with aliases. + */ + link->config_flags |= CONF_AUTO_SET_VPP; + for (try = 0; try < 4; try++) + if (!pcmcia_loop_config(link, simple_config_check, &try)) + goto found_port; + + /* + * Second pass: try to find an entry that isn't picky about + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. + */ + ret = pcmcia_loop_config(link, simple_config_check_notpicky, NULL); + if (ret) { + dev_warn(&link->dev, "no usable port range found, giving up\n"); + return ret; + } + +found_port: + if (info->multi && (info->manfid == MANFID_3COM)) + link->config_index &= ~(0x08); + + /* + * Apply any configuration quirks. + */ + if (info->quirk && info->quirk->config) + info->quirk->config(link); + + ret = pcmcia_enable_device(link); + if (ret != 0) + return ret; + return setup_serial(link, info, link->resource[0]->start, link->irq); +} + +static int multi_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + int *multi = priv_data; + + if (p_dev->resource[1]->end) + return -EINVAL; + + /* + * The quad port cards have bad CIS's, so just look for a + * window larger than 8 ports and assume it will be right. + */ + if (p_dev->resource[0]->end <= 8) + return -EINVAL; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = *multi * 8; + + if (pcmcia_request_io(p_dev)) + return -ENODEV; + return 0; +} + +static int multi_config_check_notpicky(struct pcmcia_device *p_dev, + void *priv_data) +{ + int *base2 = priv_data; + + if (!p_dev->resource[0]->end || !p_dev->resource[1]->end || + p_dev->resource[0]->start + 8 != p_dev->resource[1]->start) + return -ENODEV; + + p_dev->resource[0]->end = p_dev->resource[1]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + if (pcmcia_request_io(p_dev)) + return -ENODEV; + + *base2 = p_dev->resource[0]->start + 8; + return 0; +} + +static int multi_config(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + int i, base2 = 0; + + /* First, look for a generic full-sized window */ + if (!pcmcia_loop_config(link, multi_config_check, &info->multi)) + base2 = link->resource[0]->start + 8; + else { + /* If that didn't work, look for two windows */ + info->multi = 2; + if (pcmcia_loop_config(link, multi_config_check_notpicky, + &base2)) { + dev_warn(&link->dev, + "no usable port range found, giving up\n"); + return -ENODEV; + } + } + + if (!link->irq) + dev_warn(&link->dev, "no usable IRQ found, continuing...\n"); + + /* + * Apply any configuration quirks. 
+ */ + if (info->quirk && info->quirk->config) + info->quirk->config(link); + + i = pcmcia_enable_device(link); + if (i != 0) + return -ENODEV; + + /* The Oxford Semiconductor OXCF950 cards are in fact single-port: + * 8 registers are for the UART, the others are extra registers. + * Siemen's MC45 PCMCIA (Possio's GCC) is OXCF950 based too. + */ + if (info->manfid == MANFID_OXSEMI || (info->manfid == MANFID_POSSIO && + info->prodid == PRODID_POSSIO_GCC)) { + if (link->config_index == 1 || + link->config_index == 3) { + setup_serial(link, info, base2, link->irq); + base2 = link->resource[0]->start; + } else { + setup_serial(link, info, link->resource[0]->start, + link->irq); + } + info->c950ctrl = base2; + + /* + * FIXME: We really should wake up the port prior to + * handing it over to the serial layer. + */ + if (info->quirk && info->quirk->wakeup) + info->quirk->wakeup(link); + + return 0; + } + + setup_serial(link, info, link->resource[0]->start, link->irq); + for (i = 0; i < info->multi - 1; i++) + setup_serial(link, info, base2 + (8 * i), + link->irq); + return 0; +} + +static int serial_check_for_multi(struct pcmcia_device *p_dev, void *priv_data) +{ + struct serial_info *info = p_dev->priv; + + if (!p_dev->resource[0]->end) + return -EINVAL; + + if ((!p_dev->resource[1]->end) && (p_dev->resource[0]->end % 8 == 0)) + info->multi = p_dev->resource[0]->end >> 3; + + if ((p_dev->resource[1]->end) && (p_dev->resource[0]->end == 8) + && (p_dev->resource[1]->end == 8)) + info->multi = 2; + + return 0; /* break */ +} + + +static int serial_config(struct pcmcia_device *link) +{ + struct serial_info *info = link->priv; + int i; + + dev_dbg(&link->dev, "serial_config\n"); + + /* Is this a compliant multifunction card? */ + info->multi = (link->socket->functions > 1); + + /* Is this a multiport card? */ + info->manfid = link->manf_id; + info->prodid = link->card_id; + + for (i = 0; i < ARRAY_SIZE(quirks); i++) + if ((quirks[i].manfid == ~0 || + quirks[i].manfid == info->manfid) && + (quirks[i].prodid == ~0 || + quirks[i].prodid == info->prodid)) { + info->quirk = &quirks[i]; + break; + } + + /* + * Another check for dual-serial cards: look for either serial or + * multifunction cards that ask for appropriate IO port ranges. + */ + if ((info->multi == 0) && + (link->has_func_id) && + (link->socket->pcmcia_pfc == 0) && + ((link->func_id == CISTPL_FUNCID_MULTI) || + (link->func_id == CISTPL_FUNCID_SERIAL))) { + if (pcmcia_loop_config(link, serial_check_for_multi, info)) + goto failed; + } + + /* + * Apply any multi-port quirk. + */ + if (info->quirk && info->quirk->multi != -1) + info->multi = info->quirk->multi; + + dev_info(&link->dev, + "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n", + link->manf_id, link->card_id, + link->socket->pcmcia_pfc, info->multi, info->quirk); + if (link->socket->pcmcia_pfc) + i = pfc_config(link); + else if (info->multi > 1) + i = multi_config(link); + else + i = simple_config(link); + + if (i || info->ndev == 0) + goto failed; + + /* + * Apply any post-init quirk. FIXME: This should really happen + * before we register the port, since it might already be in use. 
+ */ + if (info->quirk && info->quirk->post) + if (info->quirk->post(link)) + goto failed; + + return 0; + +failed: + dev_warn(&link->dev, "failed to initialize\n"); + serial_remove(link); + return -ENODEV; +} + +static const struct pcmcia_device_id serial_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0057, 0x0021), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0089, 0x110a), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0104, 0x000a), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0x0d0a), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0x0e0a), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0xea15), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0109, 0x0501), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0138, 0x110a), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0140, 0x000a), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0143, 0x3341), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0143, 0xc0ab), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x016c, 0x0081), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x021b, 0x0101), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x08a1, 0xc0ab), + PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63), + PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "CC/XJEM3336", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x0143b773, 0x46a52d63), + PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "EM1144T", "PCMCIA MODEM", 0xf510db04, 0x856d66c8, 0xbd6c43ef), + PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "XJEM1144/CCEM1144", "PCMCIA MODEM", 0xf510db04, 0x52d21e1e, 0xbd6c43ef), + PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea), + PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM33", 0x2e3ee845, 0x80609023), + PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM56", 0x2e3ee845, 0xa650c32a), + PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), + PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001", 0x18df0ba0, 0x831b1064), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), + PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 
0x0070), + PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), + PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0104, 0x0070), + PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x016c, 0x0020), + PCMCIA_MFC_DEVICE_PROD_ID123(1, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f), + PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c), + PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3), + PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 0xae911c15), + PCMCIA_MFC_DEVICE_PROD_ID1(1, "Motorola MARQUIS", 0xf03e4e77), + PCMCIA_MFC_DEVICE_PROD_ID2(1, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302), + PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0301), + PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x0276), + PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0039), + PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0006), + PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0101), /* TDK DF2814 */ + PCMCIA_DEVICE_MANF_CARD(0x0105, 0x100a), /* Xircom CM-56G */ + PCMCIA_DEVICE_MANF_CARD(0x0105, 0x3e0a), /* TDK DF5660 */ + PCMCIA_DEVICE_MANF_CARD(0x0105, 0x410a), + PCMCIA_DEVICE_MANF_CARD(0x0107, 0x0002), /* USRobotics 14,400 */ + PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d50), + PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d51), + PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d52), + PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d53), + PCMCIA_DEVICE_MANF_CARD(0x010b, 0xd180), + PCMCIA_DEVICE_MANF_CARD(0x0115, 0x3330), /* USRobotics/SUN 14,400 */ + PCMCIA_DEVICE_MANF_CARD(0x0124, 0x0100), /* Nokia DTP-2 ver II */ + PCMCIA_DEVICE_MANF_CARD(0x0134, 0x5600), /* LASAT COMMUNICATIONS A/S */ + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x000e), + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x001b), + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0025), + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0045), + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0052), + PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0006), /* Psion 56K+Fax */ + PCMCIA_DEVICE_MANF_CARD(0x0200, 0x0001), /* MultiMobile */ + PCMCIA_DEVICE_PROD_ID134("ADV", "TECH", "COMpad-32/85", 0x67459937, 0x916d02ba, 0x8fbe92ae), + PCMCIA_DEVICE_PROD_ID124("GATEWAY2000", "CC3144", "PCMCIA MODEM", 0x506bccae, 0xcb3685f1, 0xbd6c43ef), + PCMCIA_DEVICE_PROD_ID14("MEGAHERTZ", "PCMCIA MODEM", 0xf510db04, 0xbd6c43ef), + PCMCIA_DEVICE_PROD_ID124("TOSHIBA", "T144PF", "PCMCIA MODEM", 0xb4585a1a, 0x7271409c, 0xbd6c43ef), + PCMCIA_DEVICE_PROD_ID123("FUJITSU", "FC14F ", "MBH10213", 0x6ee5a3d8, 0x30ead12b, 0xb00f05a0), + PCMCIA_DEVICE_PROD_ID123("Novatel Wireless", "Merlin UMTS Modem", "U630", 0x32607776, 0xd9e73b13, 0xe87332e), + PCMCIA_DEVICE_PROD_ID13("MEGAHERTZ", "V.34 PCMCIA MODEM", 0xf510db04, 0xbb2cce4a), + PCMCIA_DEVICE_PROD_ID12("Brain Boxes", "Bluetooth PC Card", 0xee138382, 0xd4ce9b02), + PCMCIA_DEVICE_PROD_ID12("CIRRUS LOGIC", "FAX MODEM", 0xe625f451, 0xcecd6dfa), + PCMCIA_DEVICE_PROD_ID12("COMPAQ", "PCMCIA 28800 FAX/DATA MODEM", 0xa3a3062c, 0x8cbd7c76), + PCMCIA_DEVICE_PROD_ID12("COMPAQ", "PCMCIA 33600 FAX/DATA MODEM", 0xa3a3062c, 0x5a00ce95), + PCMCIA_DEVICE_PROD_ID12("Computerboards, Inc.", "PCM-COM422", 0xd0b78f51, 0x7e2d49ed), + PCMCIA_DEVICE_PROD_ID12("Dr. 
Neuhaus", "FURY CARD 14K4", 0x76942813, 0x8b96ce65), + PCMCIA_DEVICE_PROD_ID12("IBM", "ISDN/56K/GSM", 0xb569a6e5, 0xfee5297b), + PCMCIA_DEVICE_PROD_ID12("Intelligent", "ANGIA FAX/MODEM", 0xb496e65e, 0xf31602a6), + PCMCIA_DEVICE_PROD_ID12("Intel", "MODEM 2400+", 0x816cc815, 0x412729fb), + PCMCIA_DEVICE_PROD_ID12("Intertex", "IX34-PCMCIA", 0xf8a097e3, 0x97880447), + PCMCIA_DEVICE_PROD_ID12("IOTech Inc ", "PCMCIA Dual RS-232 Serial Port Card", 0x3bd2d898, 0x92abc92f), + PCMCIA_DEVICE_PROD_ID12("MACRONIX", "FAX/MODEM", 0x668388b3, 0x3f9bdf2f), + PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT1432LT", 0x5f73be51, 0x0b3e2383), + PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e), + PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a), + PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41), + PCMCIA_DEVICE_PROD_ID12("Option International", "GSM-Ready 56K/ISDN", 0x9d7cd6f5, 0xb23844aa), + PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab), + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f), + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d), + PCMCIA_DEVICE_PROD_ID12("Telia", "SurfinBird 560P/A+", 0xe2cdd5e, 0xc9314b38), + PCMCIA_DEVICE_PROD_ID1("Smart Serial Port", 0x2d8ce292), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ + PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */ + PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ + PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ + PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ + PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), + 
PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), + PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), + PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100 1.00.", 0x19ca78af, 0xf964f42b), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232 1.00.", 0x19ca78af, 0x69fb7490), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232", 0x19ca78af, 0xb6bc0235), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.", "SERIAL CARD: CF232", 0x63f2e0bd, 0xb9e175d3), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.", "SERIAL CARD: CF232-5", 0x63f2e0bd, 0xfce33442), + PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF232", 0x3beb8cf2, 0x171e7190), + PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF232-5", 0x3beb8cf2, 0x20da4262), + PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF428", 0x3beb8cf2, 0xea5dd57d), + PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF500", 0x3beb8cf2, 0xd77255fa), + PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: IC232", 0x3beb8cf2, 0x6a709903), + PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: SL232", 0x3beb8cf2, 0x18430676), + PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: XL232", 0x3beb8cf2, 0x6f933767), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: CF332", 0x3beb8cf2, 0x16dc1ba7), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL332", 0x3beb8cf2, 0x19816c41), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL385", 0x3beb8cf2, 0x64112029), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial+Parallel Port: SP230", 0x3beb8cf2, 0xdb9e58bc), + PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: CF332", 0x3beb8cf2, 0x16dc1ba7), + PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL332", 0x3beb8cf2, 0x19816c41), + PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL385", 0x3beb8cf2, 0x64112029), + PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4), + PCMCIA_MFC_DEVICE_PROD_ID12(2, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4), + PCMCIA_MFC_DEVICE_PROD_ID12(3, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4), + PCMCIA_DEVICE_MANF_CARD(0x0279, 0x950b), + /* too generic */ + /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */ + /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */ + PCMCIA_DEVICE_FUNC_ID(2), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, serial_ids); + +MODULE_FIRMWARE("cis/PCMLM28.cis"); +MODULE_FIRMWARE("cis/DP83903.cis"); +MODULE_FIRMWARE("cis/3CCFEM556.cis"); +MODULE_FIRMWARE("cis/3CXEM556.cis"); +MODULE_FIRMWARE("cis/SW_8xx_SER.cis"); +MODULE_FIRMWARE("cis/SW_7xx_SER.cis"); +MODULE_FIRMWARE("cis/SW_555_SER.cis"); +MODULE_FIRMWARE("cis/MT5634ZLX.cis"); +MODULE_FIRMWARE("cis/COMpad2.cis"); +MODULE_FIRMWARE("cis/COMpad4.cis"); +MODULE_FIRMWARE("cis/RS-COM-2P.cis"); + +static struct pcmcia_driver serial_cs_driver = { + .owner = THIS_MODULE, + .name = "serial_cs", + .probe = serial_probe, + .remove = serial_detach, + .id_table = serial_ids, + .suspend = serial_suspend, + .resume = serial_resume, +}; +module_pcmcia_driver(serial_cs_driver); + +MODULE_LICENSE("GPL"); diff --git 
a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig new file mode 100644 index 000000000..12f685168 --- /dev/null +++ b/drivers/tty/serial/Kconfig @@ -0,0 +1,1566 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Serial device configuration +# + +menu "Serial drivers" + depends on HAS_IOMEM + +config SERIAL_EARLYCON + bool + depends on SERIAL_CORE + help + Support for early consoles with the earlycon parameter. This enables + the console before standard serial driver is probed. The console is + enabled when early_param is processed. + +source "drivers/tty/serial/8250/Kconfig" + +comment "Non-8250 serial port support" + +config SERIAL_AMBA_PL010 + tristate "ARM AMBA PL010 serial port support" + depends on ARM_AMBA || COMPILE_TEST + select SERIAL_CORE + help + This selects the ARM(R) AMBA(R) PrimeCell PL010 UART. If you have + an Integrator/AP or Integrator/PP2 platform, or if you have a + Cirrus Logic EP93xx CPU, say Y or M here. + + If unsure, say N. + +config SERIAL_AMBA_PL010_CONSOLE + bool "Support for console on AMBA serial port" + depends on SERIAL_AMBA_PL010=y + select SERIAL_CORE_CONSOLE + help + Say Y here if you wish to use an AMBA PrimeCell UART as the system + console (the system console is the device which receives all kernel + messages and warnings and which allows logins in single user mode). + + Even if you say Y here, the currently visible framebuffer console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyAM0". (Try "man bootparam" or see the documentation of + your boot loader (lilo or loadlin) about how to pass options to the + kernel at boot time.) + +config SERIAL_AMBA_PL011 + tristate "ARM AMBA PL011 serial port support" + depends on ARM_AMBA + select SERIAL_CORE + help + This selects the ARM(R) AMBA(R) PrimeCell PL011 UART. If you have + an Integrator/PP2, Integrator/CP or Versatile platform, say Y or M + here. + + If unsure, say N. + +config SERIAL_AMBA_PL011_CONSOLE + bool "Support for console on AMBA serial port" + depends on SERIAL_AMBA_PL011=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Say Y here if you wish to use an AMBA PrimeCell UART as the system + console (the system console is the device which receives all kernel + messages and warnings and which allows logins in single user mode). + + Even if you say Y here, the currently visible framebuffer console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyAMA0". (Try "man bootparam" or see the documentation of + your boot loader (lilo or loadlin) about how to pass options to the + kernel at boot time.) + +config SERIAL_EARLYCON_ARM_SEMIHOST + bool "Early console using ARM semihosting" + depends on ARM64 || ARM + select SERIAL_CORE + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Support for early debug console using ARM semihosting. This enables + the console before standard serial driver is probed. This is enabled + with "earlycon=smh" on the kernel command line. The console is + enabled when early_param is processed. + +config SERIAL_EARLYCON_RISCV_SBI + bool "Early console using RISC-V SBI" + depends on RISCV_SBI_V01 + select SERIAL_CORE + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Support for early debug console using RISC-V SBI. This enables + the console before standard serial driver is probed. 
This is enabled + with "earlycon=sbi" on the kernel command line. The console is + enabled when early_param is processed. + +config SERIAL_SB1250_DUART + tristate "BCM1xxx on-chip DUART serial support" + depends on SIBYTE_SB1xxx_SOC=y + select SERIAL_CORE + default y + help + Support for the asynchronous serial interface (DUART) included in + the BCM1250 and derived System-On-a-Chip (SOC) devices. Note that + the letter D in DUART stands for "dual", which is how the device + is implemented. Depending on the SOC configuration there may be + one or more DUARTs available of which all are handled. + + If unsure, say Y. To compile this driver as a module, choose M here: + the module will be called sb1250-duart. + +config SERIAL_SB1250_DUART_CONSOLE + bool "Support for console on a BCM1xxx DUART serial port" + depends on SERIAL_SB1250_DUART=y + select SERIAL_CORE_CONSOLE + default y + help + If you say Y here, it will be possible to use a serial port as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). + + If unsure, say Y. + +config SERIAL_ATMEL + bool "AT91 on-chip serial port support" + depends on COMMON_CLK + depends on ARCH_AT91 || COMPILE_TEST + select SERIAL_CORE + select SERIAL_MCTRL_GPIO if GPIOLIB + select MFD_AT91_USART + help + This enables the driver for the on-chip UARTs of the Atmel + AT91 processors. + +config SERIAL_ATMEL_CONSOLE + bool "Support for console on AT91 serial port" + depends on SERIAL_ATMEL=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Say Y here if you wish to use an on-chip UART on a Atmel + AT91 processor as the system console (the system + console is the device which receives all kernel messages and + warnings and which allows logins in single user mode). + +config SERIAL_ATMEL_PDC + bool "Support DMA transfers on AT91 serial port" + depends on SERIAL_ATMEL + default y + help + Say Y here if you wish to use the PDC to do DMA transfers to + and from the Atmel AT91 serial port. In order to + actually use DMA transfers, make sure that the use_dma_tx + and use_dma_rx members in the atmel_uart_data struct is set + appropriately for each port. + + Note that break and error handling currently doesn't work + properly when DMA is enabled. Make sure that ports where + this matters don't use DMA. + +config SERIAL_ATMEL_TTYAT + bool "Install as device ttyATn instead of ttySn" + depends on SERIAL_ATMEL=y + help + Say Y here if you wish to have the internal AT91 UARTs + appear as /dev/ttyATn (major 204, minor starting at 154) + instead of the normal /dev/ttySn (major 4, minor starting at + 64). This is necessary if you also want other UARTs, such as + external 8250/16C550 compatible UARTs. + The ttySn nodes are legally reserved for the 8250 serial driver + but are often misused by other serial drivers. + + To use this, you should create suitable ttyATn device nodes in + /dev/, and pass "console=ttyATn" to the kernel. + + Say Y if you have an external 8250/16C550 UART. If unsure, say N. + +config SERIAL_KGDB_NMI + bool "Serial console over KGDB NMI debugger port" + depends on KGDB_SERIAL_CONSOLE + help + This special driver allows you to temporary use NMI debugger port + as a normal console (assuming that the port is attached to KGDB). + + Unlike KDB's disable_nmi command, with this driver you are always + able to go back to the debugger using KGDB escape sequence ($3#33). 
+ This is because this console driver processes the input in NMI + context, and thus is able to intercept the magic sequence. + + Note that since the console interprets input and uses polling + communication methods, for things like PPP you still must fully + detach debugger port from the KGDB NMI (i.e. disable_nmi), and + use raw console. + + If unsure, say N. + +config SERIAL_MESON + tristate "Meson serial port support" + depends on ARCH_MESON || COMPILE_TEST + select SERIAL_CORE + help + This enables the driver for the on-chip UARTs of the Amlogic + MesonX processors. + +config SERIAL_MESON_CONSOLE + bool "Support for console on meson" + depends on SERIAL_MESON + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Say Y here if you wish to use a Amlogic MesonX UART as the + system console (the system console is the device which + receives all kernel messages and warnings and which allows + logins in single user mode) as /dev/ttyAMLx. + +config SERIAL_CLPS711X + tristate "CLPS711X serial port support" + depends on ARCH_CLPS711X || COMPILE_TEST + select SERIAL_CORE + select SERIAL_MCTRL_GPIO if GPIOLIB + help + This enables the driver for the on-chip UARTs of the Cirrus + Logic EP711x/EP721x/EP731x processors. + +config SERIAL_CLPS711X_CONSOLE + bool "Support for console on CLPS711X serial port" + depends on SERIAL_CLPS711X=y + select SERIAL_CORE_CONSOLE + help + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyCL1". + +config SERIAL_SAMSUNG + tristate "Samsung SoC serial support" + depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_APPLE || ARCH_ARTPEC || COMPILE_TEST + select SERIAL_CORE + help + Support for the on-chip UARTs on the Samsung + S3C24xx/S3C64xx/S5Pv210/Exynos and Apple M1 SoCs, providing + /dev/ttySAC0, 1 and 2 (note, some machines may not provide all of + these ports, depending on how the serial port pins are configured. + Choose Y/M here only if you build for such SoC. + +config SERIAL_SAMSUNG_UARTS_4 + bool + depends on SERIAL_SAMSUNG + default y if !(CPU_S3C2410 || CPU_S3C2412 || CPU_S3C2440 || CPU_S3C2442) + help + Internal node for the common case of 4 Samsung compatible UARTs + +config SERIAL_SAMSUNG_UARTS + int + depends on SERIAL_SAMSUNG + default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416 + default 3 + help + Select the number of available UART ports for the Samsung S3C + serial driver + +config SERIAL_SAMSUNG_CONSOLE + bool "Support for console on Samsung SoC serial port" + depends on SERIAL_SAMSUNG + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Allow selection of the S3C24XX on-board serial ports for use as + an virtual console. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttySACx". (Try "man bootparam" or see the documentation of + your boot loader about how to pass options to the kernel at + boot time.) + +config SERIAL_TEGRA + tristate "NVIDIA Tegra20/30 SoC serial controller" + depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST + select SERIAL_CORE + help + Support for the on-chip UARTs on the NVIDIA Tegra series SOCs + providing /dev/ttyTHS0, 1, 2, 3 and 4 (note, some machines may not + provide all of these ports, depending on how the serial port + are enabled). 
This driver uses the APB DMA to achieve higher baudrate + and better performance. + +config SERIAL_TEGRA_TCU + tristate "NVIDIA Tegra Combined UART" + depends on MAILBOX + depends on (ARCH_TEGRA && TEGRA_HSP_MBOX) || COMPILE_TEST + select SERIAL_CORE + help + Support for the mailbox-based TCU (Tegra Combined UART) serial port. + TCU is a virtual serial port that allows multiplexing multiple data + streams into a single hardware serial port. + +config SERIAL_TEGRA_TCU_CONSOLE + bool "Support for console on a Tegra TCU serial port" + depends on SERIAL_TEGRA_TCU=y + select SERIAL_CORE_CONSOLE + default y + help + If you say Y here, it will be possible to use a the Tegra TCU as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). + + If unsure, say Y. + +config SERIAL_MAX3100 + tristate "MAX3100 support" + depends on SPI + select SERIAL_CORE + help + MAX3100 chip support + +config SERIAL_MAX310X + tristate "MAX310X support" + depends on SPI_MASTER + select SERIAL_CORE + select REGMAP_SPI if SPI_MASTER + select REGMAP_I2C if I2C + help + This selects support for an advanced UART from Maxim (Dallas). + Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830. + Each IC contains 128 words each of receive and transmit FIFO + that can be controlled through I2C or high-speed SPI. + + Say Y here if you want to support this ICs. + +config SERIAL_DZ + bool "DECstation DZ serial driver" + depends on MACH_DECSTATION && 32BIT + select SERIAL_CORE + default y + help + DZ11-family serial controllers for DECstations and VAXstations, + including the DC7085, M7814, and M7819. + +config SERIAL_DZ_CONSOLE + bool "Support console on DECstation DZ serial driver" + depends on SERIAL_DZ=y + select SERIAL_CORE_CONSOLE + default y + help + If you say Y here, it will be possible to use a serial port as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). + + Note that the firmware uses ttyS3 as the serial console on + DECstations that use this driver. + + If unsure, say Y. + +config SERIAL_ZS + tristate "DECstation Z85C30 serial support" + depends on MACH_DECSTATION + select SERIAL_CORE + default y + help + Support for the Zilog 85C350 serial communications controller used + for serial ports in newer DECstation systems. These include the + DECsystem 5900 and all models of the DECstation and DECsystem 5000 + systems except from model 200. + + If unsure, say Y. To compile this driver as a module, choose M here: + the module will be called zs. + +config SERIAL_ZS_CONSOLE + bool "Support for console on a DECstation Z85C30 serial port" + depends on SERIAL_ZS=y + select SERIAL_CORE_CONSOLE + default y + help + If you say Y here, it will be possible to use a serial port as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). + + Note that the firmware uses ttyS1 as the serial console on the + Maxine and ttyS3 on the others using this driver. + + If unsure, say Y. + +config SERIAL_21285 + tristate "DC21285 serial port support" + depends on FOOTBRIDGE + select SERIAL_CORE + help + If you have a machine based on a 21285 (Footbridge) StrongARM(R)/ + PCI bridge you can enable its onboard serial port by enabling this + option. 
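[Editorial note] The SERIAL_MAX310X entry above selects REGMAP_SPI/REGMAP_I2C because the chip's registers sit behind a serial bus rather than MMIO. A minimal sketch of how such a bus-attached UART driver might obtain its register map is shown below; the foo_* names and the 8-bit register layout are hypothetical, this is not the actual max310x code.

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

/* Hypothetical register map: 8-bit register addresses, 8-bit values. */
static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x1f,
};

static int foo_uart_spi_probe(struct spi_device *spi)
{
	struct regmap *map;

	/* All further register access then goes through regmap_read()/regmap_write(). */
	map = devm_regmap_init_spi(spi, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... allocate the port, request the IRQ, call uart_add_one_port() ... */
	return 0;
}

The same pattern with devm_regmap_init_i2c() is what the REGMAP_I2C selection covers for the I2C-attached variants.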
+ +config SERIAL_21285_CONSOLE + bool "Console on DC21285 serial port" + depends on SERIAL_21285=y + select SERIAL_CORE_CONSOLE + help + If you have enabled the serial port on the 21285 footbridge you can + make it the console by answering Y to this option. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyFB". (Try "man bootparam" or see the documentation of + your boot loader (lilo or loadlin) about how to pass options to the + kernel at boot time.) + +config SERIAL_PXA + bool "PXA serial port support (DEPRECATED)" + depends on ARCH_PXA || ARCH_MMP + select SERIAL_CORE + select SERIAL_8250_PXA if SERIAL_8250=y + select SERIAL_PXA_NON8250 if !SERIAL_8250=y + help + If you have a machine based on an Intel XScale PXA2xx CPU you + can enable its onboard serial ports by enabling this option. + + Unless you have a specific need, you should use SERIAL_8250_PXA + instead of this. + +config SERIAL_PXA_NON8250 + bool + depends on !SERIAL_8250 || COMPILE_TEST + +config SERIAL_PXA_CONSOLE + bool "Console on PXA serial port (DEPRECATED)" + depends on SERIAL_PXA + select SERIAL_CORE_CONSOLE + select SERIAL_8250_CONSOLE if SERIAL_8250=y + help + If you have enabled the serial port on the Intel XScale PXA + CPU you can make it the console by answering Y to this option. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttySA0". (Try "man bootparam" or see the documentation of + your boot loader (lilo or loadlin) about how to pass options to the + kernel at boot time.) + + Unless you have a specific need, you should use SERIAL_8250_PXA + and SERIAL_8250_CONSOLE instead of this. + +config SERIAL_SA1100 + bool "SA1100 serial port support" + depends on ARCH_SA1100 + select SERIAL_CORE + select SERIAL_MCTRL_GPIO if GPIOLIB + help + If you have a machine based on a SA1100/SA1110 StrongARM(R) CPU you + can enable its onboard serial port by enabling this option. + Please read for further + info. + +config SERIAL_SA1100_CONSOLE + bool "Console on SA1100 serial port" + depends on SERIAL_SA1100 + select SERIAL_CORE_CONSOLE + help + If you have enabled the serial port on the SA1100/SA1110 StrongARM + CPU you can make it the console by answering Y to this option. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttySA0". (Try "man bootparam" or see the documentation of + your boot loader (lilo or loadlin) about how to pass options to the + kernel at boot time.) + +config SERIAL_IMX + tristate "IMX serial port support" + depends on ARCH_MXC || COMPILE_TEST + select SERIAL_CORE + select RATIONAL + select SERIAL_MCTRL_GPIO if GPIOLIB + help + If you have a machine based on a Motorola IMX CPU you + can enable its onboard serial port by enabling this option. + +config SERIAL_IMX_CONSOLE + tristate "Console on IMX serial port" + depends on SERIAL_IMX + select SERIAL_CORE_CONSOLE + help + If you have enabled the serial port on the Freescale IMX + CPU you can make it the console by answering Y/M to this option. 
+ + Even if you say Y/M here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttymxc0". (Try "man bootparam" or see the documentation of + your bootloader about how to pass options to the kernel at boot time.) + +config SERIAL_IMX_EARLYCON + bool "Earlycon on IMX serial port" + depends on ARCH_MXC || COMPILE_TEST + depends on OF + select SERIAL_CORE + select SERIAL_EARLYCON + select SERIAL_CORE_CONSOLE + default y if SERIAL_IMX_CONSOLE + help + If you have enabled the earlycon on the Freescale IMX + CPU you can make it the earlycon by answering Y to this option. + +config SERIAL_UARTLITE + tristate "Xilinx uartlite serial port support" + depends on HAS_IOMEM + select SERIAL_CORE + help + Say Y here if you want to use the Xilinx uartlite serial controller. + + To compile this driver as a module, choose M here: the + module will be called uartlite. + +config SERIAL_UARTLITE_CONSOLE + bool "Support for console on Xilinx uartlite serial port" + depends on SERIAL_UARTLITE=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Say Y here if you wish to use a Xilinx uartlite as the system + console (the system console is the device which receives all kernel + messages and warnings and which allows logins in single user mode). + +config SERIAL_UARTLITE_NR_UARTS + int "Maximum number of uartlite serial ports" + depends on SERIAL_UARTLITE + range 1 256 + default 1 + help + Set this to the number of uartlites in your system, or the number + you think you might implement. + +config SERIAL_SUNCORE + bool + depends on SPARC + select SERIAL_CORE + select SERIAL_CORE_CONSOLE + default y + +config SERIAL_SUNZILOG + tristate "Sun Zilog8530 serial support" + depends on SPARC + help + This driver supports the Zilog8530 serial ports found on many Sparc + systems. Say Y or M if you want to be able to these serial ports. + +config SERIAL_SUNZILOG_CONSOLE + bool "Console on Sun Zilog8530 serial port" + depends on SERIAL_SUNZILOG=y + help + If you would like to be able to use the Zilog8530 serial port + on your Sparc system as the console, you can do so by answering + Y to this option. + +config SERIAL_SUNSU + tristate "Sun SU serial support" + depends on SPARC && PCI + help + This driver supports the 8250 serial ports that run the keyboard and + mouse on (PCI) UltraSPARC systems. Say Y or M if you want to be able + to these serial ports. + +config SERIAL_SUNSU_CONSOLE + bool "Console on Sun SU serial port" + depends on SERIAL_SUNSU=y + help + If you would like to be able to use the SU serial port + on your Sparc system as the console, you can do so by answering + Y to this option. + +config SERIAL_MUX + tristate "Serial MUX support" + depends on GSC + select SERIAL_CORE + default y + help + Saying Y here will enable the hardware MUX serial driver for + the Nova, K class systems and D class with a 'remote control card'. + The hardware MUX is not 8250/16550 compatible therefore the + /dev/ttyB0 device is shared between the Serial MUX and the PDC + software console. The following steps need to be completed to use + the Serial MUX: + + 1. create the device entry (mknod /dev/ttyB0 c 11 0) + 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 + 3. Add device ttyB0 to /etc/securetty (if you want to log on as + root on this console.) + 4. 
Change the kernel command console parameter to: console=ttyB0 + +config SERIAL_MUX_CONSOLE + bool "Support for console on serial MUX" + depends on SERIAL_MUX=y + select SERIAL_CORE_CONSOLE + default y + +config SERIAL_SUNSAB + tristate "Sun Siemens SAB82532 serial support" + depends on SPARC && PCI + help + This driver supports the Siemens SAB82532 DUSCC serial ports on newer + (PCI) UltraSPARC systems. Say Y or M if you want to be able to these + serial ports. + +config SERIAL_SUNSAB_CONSOLE + bool "Console on Sun Siemens SAB82532 serial port" + depends on SERIAL_SUNSAB=y + help + If you would like to be able to use the SAB82532 serial port + on your Sparc system as the console, you can do so by answering + Y to this option. + +config SERIAL_SUNHV + bool "Sun4v Hypervisor Console support" + depends on SPARC64 + help + This driver supports the console device found on SUN4V Sparc + systems. Say Y if you want to be able to use this device. + +config SERIAL_IP22_ZILOG + tristate "SGI Zilog8530 serial support" + depends on SGI_HAS_ZILOG + select SERIAL_CORE + help + This driver supports the Zilog8530 serial ports found on SGI + systems. Say Y or M if you want to be able to these serial ports. + +config SERIAL_IP22_ZILOG_CONSOLE + bool "Console on SGI Zilog8530 serial port" + depends on SERIAL_IP22_ZILOG=y + select SERIAL_CORE_CONSOLE + +config SERIAL_SH_SCI + tristate "SuperH SCI(F) serial port support" + depends on SUPERH || ARCH_RENESAS || COMPILE_TEST + select SERIAL_CORE + select SERIAL_MCTRL_GPIO if GPIOLIB + +config SERIAL_SH_SCI_NR_UARTS + int "Maximum number of SCI(F) serial ports" if EXPERT + range 1 64 if 64BIT + range 1 32 if !64BIT + depends on SERIAL_SH_SCI + default "10" if SUPERH + default "18" if ARCH_RENESAS + default "2" + +config SERIAL_SH_SCI_CONSOLE + bool "Support for console on SuperH SCI(F)" if EXPERT + depends on SERIAL_SH_SCI=y + select SERIAL_CORE_CONSOLE + default y + +config SERIAL_SH_SCI_EARLYCON + bool "Support for early console on SuperH SCI(F)" if EXPERT + depends on SERIAL_SH_SCI=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + default ARCH_RENESAS + +config SERIAL_SH_SCI_DMA + bool "DMA support" if EXPERT + depends on SERIAL_SH_SCI && DMA_ENGINE + default ARCH_RENESAS + +config SERIAL_HS_LPC32XX + tristate "LPC32XX high speed serial port support" + depends on ARCH_LPC32XX || COMPILE_TEST + depends on OF + select SERIAL_CORE + help + Support for the LPC32XX high speed serial ports (up to 900kbps). + Those are UARTs completely different from the Standard UARTs on the + LPC32XX SoC. + Choose M or Y here to build this driver. + +config SERIAL_HS_LPC32XX_CONSOLE + bool "Enable LPC32XX high speed UART serial console" + depends on SERIAL_HS_LPC32XX=y + select SERIAL_CORE_CONSOLE + help + If you would like to be able to use one of the high speed serial + ports on the LPC32XX as the console, you can do so by answering + Y to this option. + +config SERIAL_CORE + tristate + +config SERIAL_CORE_CONSOLE + bool + +config CONSOLE_POLL + bool + +config SERIAL_MCF + bool "Coldfire serial support" + depends on COLDFIRE + select SERIAL_CORE + help + This serial driver supports the Freescale Coldfire serial ports. + +config SERIAL_MCF_BAUDRATE + int "Default baudrate for Coldfire serial ports" + depends on SERIAL_MCF + default 19200 + help + This setting lets you define what the default baudrate is for the + ColdFire serial ports. The usual default varies from board to board, + and this setting is a way of catering for that. 
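[Editorial note] The hidden SERIAL_CORE and SERIAL_CORE_CONSOLE symbols above are selected, not user-visible, because every driver in this menu builds on the serial core. Roughly, a driver registers one struct uart_driver and later attaches each discovered port to it. A sketch with hypothetical foo_* names:

#include <linux/module.h>
#include <linux/serial_core.h>

#define FOO_NR_PORTS	2

static struct uart_driver foo_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "foo_uart",
	.dev_name	= "ttyFOO",	/* nodes appear as /dev/ttyFOO0, /dev/ttyFOO1, ... */
	.nr		= FOO_NR_PORTS,
};

static int __init foo_uart_init(void)
{
	/*
	 * Registers the tty side; ports are added later, typically from a
	 * platform/PCI probe, with uart_add_one_port(&foo_uart_driver, port).
	 */
	return uart_register_driver(&foo_uart_driver);
}
module_init(foo_uart_init);

static void __exit foo_uart_exit(void)
{
	uart_unregister_driver(&foo_uart_driver);
}
module_exit(foo_uart_exit);

MODULE_LICENSE("GPL");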
+ +config SERIAL_MCF_CONSOLE + bool "Coldfire serial console support" + depends on SERIAL_MCF + select SERIAL_CORE_CONSOLE + help + Enable a ColdFire internal serial port to be the system console. + +config SERIAL_PMACZILOG + tristate "Mac or PowerMac z85c30 ESCC support" + depends on (M68K && MAC) || PPC_PMAC + select SERIAL_CORE + help + This driver supports the Zilog z85C30 serial ports found on + (Power)Mac machines. + Say Y or M if you want to be able to these serial ports. + +config SERIAL_PMACZILOG_TTYS + bool "Use ttySn device nodes for Zilog z85c30" + depends on SERIAL_PMACZILOG + help + The pmac_zilog driver for the z85C30 chip on many powermacs + historically used the device numbers for /dev/ttySn. The + 8250 serial port driver also uses these numbers, which means + the two drivers being unable to coexist; you could not use + both z85C30 and 8250 type ports at the same time. + + If this option is not selected, the pmac_zilog driver will + use the device numbers allocated for /dev/ttyPZn. This allows + the pmac_zilog and 8250 drivers to co-exist, but may cause + existing userspace setups to break. Programs that need to + access the built-in serial ports on powermacs will need to + be reconfigured to use /dev/ttyPZn instead of /dev/ttySn. + + If you enable this option, any z85c30 ports in the system will + be registered as ttyS0 onwards as in the past, and you will be + unable to use the 8250 module for PCMCIA or other 16C550-style + UARTs. + + Say N unless you need the z85c30 ports on your (Power)Mac + to appear as /dev/ttySn. + +config SERIAL_PMACZILOG_CONSOLE + bool "Console on Mac or PowerMac z85c30 serial port" + depends on SERIAL_PMACZILOG=y + select SERIAL_CORE_CONSOLE + help + If you would like to be able to use the z85c30 serial port + on your (Power)Mac as the console, you can do so by answering + Y to this option. + +config SERIAL_CPM + tristate "CPM SCC/SMC serial port support" + depends on CPM2 || CPM1 + select SERIAL_CORE + help + This driver supports the SCC and SMC serial ports on Motorola + embedded PowerPC that contain a CPM1 (8xx) or CPM2 (8xxx) + +config SERIAL_CPM_CONSOLE + bool "Support for console on CPM SCC/SMC serial port" + depends on SERIAL_CPM=y + select SERIAL_CORE_CONSOLE + help + Say Y here if you wish to use a SCC or SMC CPM UART as the system + console (the system console is the device which receives all kernel + messages and warnings and which allows logins in single user mode). + + Even if you say Y here, the currently visible framebuffer console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyCPM0". (Try "man bootparam" or see the documentation of + your boot loader (lilo or loadlin) about how to pass options to the + kernel at boot time.) + +config SERIAL_PIC32 + tristate "Microchip PIC32 serial support" + depends on MACH_PIC32 || (MIPS && COMPILE_TEST) + select SERIAL_CORE + help + If you have a PIC32, this driver supports the serial ports. + + Say Y or M to use PIC32 serial ports, otherwise say N. Note that + to use a serial port as a console, this must be included in kernel and + not as a module. + +config SERIAL_PIC32_CONSOLE + bool "PIC32 serial console support" + depends on SERIAL_PIC32=y + select SERIAL_CORE_CONSOLE + help + If you have a PIC32, this driver supports the putting a console on one + of the serial ports. + + Say Y to use the PIC32 console, otherwise say N. 
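[Editorial note] Most *_CONSOLE options in this file select SERIAL_CORE_CONSOLE, whose main helper is uart_console_write(): it expands LF to CRLF and calls a driver-supplied putchar for each byte (in v6.1 the putchar callback takes an unsigned char). A sketch of such a write path; the FOO_* register offsets and foo_console_port are illustrative only.

#include <linux/bits.h>
#include <linux/console.h>
#include <linux/io.h>
#include <linux/serial_core.h>

/* Hypothetical MMIO layout, for illustration only. */
#define FOO_TX			0x00
#define FOO_STATUS		0x04
#define FOO_STATUS_TX_EMPTY	BIT(0)

static struct uart_port foo_console_port;	/* filled in when the port probes */

static void foo_console_putchar(struct uart_port *port, unsigned char ch)
{
	/* Busy-wait until the TX holding register is empty, then write one byte. */
	while (!(readl(port->membase + FOO_STATUS) & FOO_STATUS_TX_EMPTY))
		cpu_relax();
	writel(ch, port->membase + FOO_TX);
}

static void foo_console_write(struct console *co, const char *s, unsigned int count)
{
	uart_console_write(&foo_console_port, s, count, foo_console_putchar);
}

Real drivers additionally take port->lock (with the usual oops_in_progress special case) around the write loop, as the console callbacks in the drivers added by this patch do.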
+ +config SERIAL_MPC52xx + tristate "Freescale MPC52xx/MPC512x family PSC serial support" + depends on PPC_MPC52xx || PPC_MPC512x + select SERIAL_CORE + help + This driver supports MPC52xx and MPC512x PSC serial ports. If you would + like to use them, you must answer Y or M to this option. Note that + for use as console, it must be included in kernel and not as a + module. + +config SERIAL_MPC52xx_CONSOLE + bool "Console on a Freescale MPC52xx/MPC512x family PSC serial port" + depends on SERIAL_MPC52xx=y + select SERIAL_CORE_CONSOLE + help + Select this options if you'd like to use one of the PSC serial port + of the Freescale MPC52xx family as a console. + +config SERIAL_MPC52xx_CONSOLE_BAUD + int "Freescale MPC52xx/MPC512x family PSC serial port baud" + depends on SERIAL_MPC52xx_CONSOLE=y + default "9600" + help + Select the MPC52xx console baud rate. + This value is only used if the bootloader doesn't pass in the + console baudrate + +config SERIAL_ICOM + tristate "IBM Multiport Serial Adapter" + depends on PCI + depends on PPC_PSERIES || COMPILE_TEST + select SERIAL_CORE + select FW_LOADER + help + This driver is for a family of multiport serial adapters + including 2 port RVX, 2 port internal modem, 4 port internal + modem and a split 1 port RVX and 1 port internal modem. + + This driver can also be built as a module. If so, the module + will be called icom. + +config SERIAL_TXX9 + bool "TMPTX39XX/49XX SIO support" + depends on HAS_TXX9_SERIAL + select SERIAL_CORE + default y + +config HAS_TXX9_SERIAL + bool + +config SERIAL_TXX9_NR_UARTS + int "Maximum number of TMPTX39XX/49XX SIO ports" + depends on SERIAL_TXX9 + default "6" + +config SERIAL_TXX9_CONSOLE + bool "TMPTX39XX/49XX SIO Console support" + depends on SERIAL_TXX9=y + select SERIAL_CORE_CONSOLE + +config SERIAL_TXX9_STDSERIAL + bool "TX39XX/49XX SIO act as standard serial" + depends on !SERIAL_8250 && SERIAL_TXX9 + +config SERIAL_JSM + tristate "Digi International NEO and Classic PCI Support" + depends on PCI + select SERIAL_CORE + help + This is a driver for Digi International's Neo and Classic series + of cards which provide multiple serial ports. You would need + something like this to connect more than two modems to your Linux + box, for instance in order to become a dial-in server. This driver + supports PCI boards only. + + If you have a card like this, say Y here, otherwise say N. + + To compile this driver as a module, choose M here: the + module will be called jsm. + +config SERIAL_MSM + tristate "MSM on-chip serial port support" + depends on ARCH_QCOM || COMPILE_TEST + select SERIAL_CORE + +config SERIAL_MSM_CONSOLE + bool "MSM serial console support" + depends on SERIAL_MSM=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + +config SERIAL_QCOM_GENI + tristate "QCOM on-chip GENI based serial port support" + depends on ARCH_QCOM || COMPILE_TEST + depends on QCOM_GENI_SE + select SERIAL_CORE + +config SERIAL_QCOM_GENI_CONSOLE + bool "QCOM GENI Serial Console support" + depends on SERIAL_QCOM_GENI + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Serial console driver for Qualcomm Technologies Inc's GENI based + QUP hardware. 
+ +config SERIAL_VT8500 + bool "VIA VT8500 on-chip serial port support" + depends on ARCH_VT8500 || COMPILE_TEST + select SERIAL_CORE + +config SERIAL_VT8500_CONSOLE + bool "VIA VT8500 serial console support" + depends on SERIAL_VT8500=y + select SERIAL_CORE_CONSOLE + +config SERIAL_OMAP + tristate "OMAP serial port support" + depends on (ARCH_OMAP2PLUS && !SERIAL_8250_OMAP) || COMPILE_TEST + select SERIAL_CORE + help + If you have a machine based on an Texas Instruments OMAP CPU you + can enable its onboard serial ports by enabling this option. + + By enabling this option you take advantage of dma feature available + with the omap-serial driver. DMA support can be enabled from platform + data. + +config SERIAL_OMAP_CONSOLE + bool "Console on OMAP serial port" + depends on SERIAL_OMAP=y + select SERIAL_CORE_CONSOLE + help + Select this option if you would like to use omap serial port as + console. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyOx". (Try "man bootparam" or see the documentation of + your boot loader about how to pass options to the kernel at + boot time.) + +config SERIAL_SIFIVE + tristate "SiFive UART support" + depends on OF + select SERIAL_CORE + help + Select this option if you are building a kernel for a device that + contains a SiFive UART IP block. This type of UART is present on + SiFive FU540 SoCs, among others. + +config SERIAL_SIFIVE_CONSOLE + bool "Console on SiFive UART" + depends on SERIAL_SIFIVE=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Select this option if you would like to use a SiFive UART as the + system console. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttySIFx". (Try "man bootparam" or see the documentation of + your boot loader about how to pass options to the kernel at + boot time.) + +config SERIAL_LANTIQ + tristate "Lantiq serial driver" + depends on (LANTIQ || X86) || COMPILE_TEST + select SERIAL_CORE + help + Support for UART on Lantiq and Intel SoCs. + To compile this driver as a module, select M here. The + module will be called lantiq. + +config SERIAL_LANTIQ_CONSOLE + bool "Console on Lantiq UART" + depends on SERIAL_LANTIQ=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Select this option if you would like to use a Lantiq UART as the + system console. + +config SERIAL_QE + tristate "Freescale QUICC Engine serial port support" + depends on QUICC_ENGINE + select SERIAL_CORE + select FW_LOADER + help + This driver supports the QE serial ports on Freescale embedded + PowerPC that contain a QUICC Engine. + +config SERIAL_SCCNXP + tristate "SCCNXP serial port support" + select SERIAL_CORE + help + This selects support for an advanced UART from NXP (Philips). + Supported ICs are SCC2681, SCC2691, SCC2692, SC28L91, SC28L92, + SC28L202, SCC68681 and SCC68692. + +config SERIAL_SCCNXP_CONSOLE + bool "Console on SCCNXP serial port" + depends on SERIAL_SCCNXP=y + select SERIAL_CORE_CONSOLE + help + Support for console on SCCNXP serial ports. + +config SERIAL_SC16IS7XX_CORE + tristate + +config SERIAL_SC16IS7XX + tristate "SC16IS7xx serial support" + select SERIAL_CORE + depends on (SPI_MASTER && !I2C) || I2C + help + This selects support for SC16IS7xx serial ports. 
+ Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, + SC16IS760 and SC16IS762. Select supported buses using options below. + +config SERIAL_SC16IS7XX_I2C + bool "SC16IS7xx for I2C interface" + depends on SERIAL_SC16IS7XX + depends on I2C + select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX + select REGMAP_I2C if I2C + default y + help + Enable SC16IS7xx driver on I2C bus, + If required say y, and say n to i2c if not required, + Enabled by default to support oldconfig. + You must select at least one bus for the driver to be built. + +config SERIAL_SC16IS7XX_SPI + bool "SC16IS7xx for spi interface" + depends on SERIAL_SC16IS7XX + depends on SPI_MASTER + select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX + select REGMAP_SPI if SPI_MASTER + help + Enable SC16IS7xx driver on SPI bus, + If required say y, and say n to spi if not required, + This is additional support to existing driver. + You must select at least one bus for the driver to be built. + +config SERIAL_TIMBERDALE + tristate "Support for timberdale UART" + select SERIAL_CORE + depends on X86_32 || COMPILE_TEST + help + Add support for UART controller on timberdale. + +config SERIAL_BCM63XX + tristate "Broadcom BCM63xx/BCM33xx UART support" + select SERIAL_CORE + depends on ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST + default ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC + help + This enables the driver for the onchip UART core found on + the following chipsets: + + BCM33xx (cable modem) + BCM63xx/BCM63xxx (DSL) + BCM68xx (PON) + BCM7xxx (STB) - DOCSIS console + +config SERIAL_BCM63XX_CONSOLE + bool "Console on BCM63xx serial port" + depends on SERIAL_BCM63XX=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + If you have enabled the serial port on the BCM63xx CPU + you can make it the console by answering Y to this option. + +config SERIAL_GRLIB_GAISLER_APBUART + tristate "GRLIB APBUART serial support" + depends on OF && SPARC + select SERIAL_CORE + help + Add support for the GRLIB APBUART serial port. + +config SERIAL_GRLIB_GAISLER_APBUART_CONSOLE + bool "Console on GRLIB APBUART serial port" + depends on SERIAL_GRLIB_GAISLER_APBUART=y + select SERIAL_CORE_CONSOLE + help + Support for running a console on the GRLIB APBUART + +config SERIAL_ALTERA_JTAGUART + tristate "Altera JTAG UART support" + select SERIAL_CORE + help + This driver supports the Altera JTAG UART port. + +config SERIAL_ALTERA_JTAGUART_CONSOLE + bool "Altera JTAG UART console support" + depends on SERIAL_ALTERA_JTAGUART=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Enable a Altera JTAG UART port to be the system console. + +config SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS + bool "Bypass output when no connection" + depends on SERIAL_ALTERA_JTAGUART_CONSOLE + select SERIAL_CORE_CONSOLE + help + Bypass console output and keep going even if there is no + JTAG terminal connection with the host. + +config SERIAL_ALTERA_UART + tristate "Altera UART support" + select SERIAL_CORE + help + This driver supports the Altera softcore UART port. + +config SERIAL_ALTERA_UART_MAXPORTS + int "Maximum number of Altera UART ports" + depends on SERIAL_ALTERA_UART + default 4 + help + This setting lets you define the maximum number of the Altera + UART ports. The usual default varies from board to board, and + this setting is a way of catering for that. 
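[Editorial note] To make one of these ports usable as "console=ttyFOO0", the write callback is paired with a struct console registered at console_initcall() time; the .data pointer ties it back to the uart_driver so uart_console_device() can resolve the index. A hedged sketch, reusing the hypothetical foo_console_port, foo_console_write and foo_uart_driver from the earlier sketches:

#include <linux/console.h>
#include <linux/init.h>
#include <linux/serial_core.h>

static int foo_console_setup(struct console *co, char *options)
{
	int baud = 115200, bits = 8, parity = 'n', flow = 'n';

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	return uart_set_options(&foo_console_port, co, baud, parity, bits, flow);
}

static struct console foo_console = {
	.name	= "ttyFOO",
	.write	= foo_console_write,
	.device	= uart_console_device,
	.setup	= foo_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &foo_uart_driver,
};

static int __init foo_console_init(void)
{
	register_console(&foo_console);
	return 0;
}
console_initcall(foo_console_init);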
+ +config SERIAL_ALTERA_UART_BAUDRATE + int "Default baudrate for Altera UART ports" + depends on SERIAL_ALTERA_UART + default 115200 + help + This setting lets you define what the default baudrate is for the + Altera UART ports. The usual default varies from board to board, + and this setting is a way of catering for that. + +config SERIAL_ALTERA_UART_CONSOLE + bool "Altera UART console support" + depends on SERIAL_ALTERA_UART=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Enable a Altera UART port to be the system console. + +config SERIAL_PCH_UART + tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART" + depends on PCI && (X86_32 || MIPS || COMPILE_TEST) + select SERIAL_CORE + help + This driver is for PCH(Platform controller Hub) UART of Intel EG20T + which is an IOH(Input/Output Hub) for x86 embedded processor. + Enabling PCH_DMA, this PCH UART works as DMA mode. + + This driver also can be used for LAPIS Semiconductor IOH(Input/ + Output Hub), ML7213, ML7223 and ML7831. + ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is + for MP(Media Phone) use and ML7831 IOH is for general purpose use. + ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. + +config SERIAL_PCH_UART_CONSOLE + bool "Support for console on Intel EG20T PCH UART/OKI SEMICONDUCTOR ML7213 IOH" + depends on SERIAL_PCH_UART=y + select SERIAL_CORE_CONSOLE + help + Say Y here if you wish to use the PCH UART as the system console + (the system console is the device which receives all kernel messages and + warnings and which allows logins in single user mode). + +config SERIAL_MXS_AUART + tristate "MXS AUART support" + depends on ARCH_MXS || MACH_ASM9260 || COMPILE_TEST + select SERIAL_CORE + select SERIAL_MCTRL_GPIO if GPIOLIB + help + This driver supports the MXS and Alphascale ASM9260 Application + UART (AUART) port. + +config SERIAL_MXS_AUART_CONSOLE + bool "MXS AUART console support" + depends on SERIAL_MXS_AUART=y + select SERIAL_CORE_CONSOLE + help + Enable a MXS AUART port to be the system console. + +config SERIAL_XILINX_PS_UART + tristate "Cadence (Xilinx Zynq) UART support" + depends on OF + select SERIAL_CORE + help + This driver supports the Cadence UART. It is found e.g. in Xilinx + Zynq. + +config SERIAL_XILINX_PS_UART_CONSOLE + bool "Cadence UART console support" + depends on SERIAL_XILINX_PS_UART=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Enable a Cadence UART port to be the system console. + +config SERIAL_AR933X + tristate "AR933X serial port support" + depends on (HAVE_CLK && ATH79) || (MIPS && COMPILE_TEST) + select SERIAL_CORE + select SERIAL_MCTRL_GPIO if GPIOLIB + help + If you have an Atheros AR933X SOC based board and want to use the + built-in UART of the SoC, say Y to this option. + + To compile this driver as a module, choose M here: the + module will be called ar933x_uart. + +config SERIAL_AR933X_CONSOLE + bool "Console on AR933X serial port" + depends on SERIAL_AR933X=y + select SERIAL_CORE_CONSOLE + help + Enable a built-in UART port of the AR933X to be the system console. + +config SERIAL_AR933X_NR_UARTS + int "Maximum number of AR933X serial ports" + depends on SERIAL_AR933X + default "2" + help + Set this to the number of serial ports you want the driver + to support. 
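[Editorial note] Several entries above (SERIAL_ALTERA_UART_MAXPORTS, SERIAL_AR933X_NR_UARTS and similar) are plain integers consumed at compile time to size the driver's static port table. The usual pattern looks roughly like the sketch below; CONFIG_SERIAL_FOO_NR_UARTS is a stand-in for whichever option applies.

#include <linux/serial_core.h>

/* Stand-in for a Kconfig "int" option such as CONFIG_SERIAL_AR933X_NR_UARTS. */
#ifndef CONFIG_SERIAL_FOO_NR_UARTS
#define CONFIG_SERIAL_FOO_NR_UARTS	4
#endif

static struct uart_port foo_ports[CONFIG_SERIAL_FOO_NR_UARTS];

static struct uart_port *foo_get_port(unsigned int line)
{
	/* Bounds-check the requested line before handing out a port. */
	if (line >= CONFIG_SERIAL_FOO_NR_UARTS)
		return NULL;
	return &foo_ports[line];
}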
+ +config SERIAL_MPS2_UART_CONSOLE + bool "MPS2 UART console support" + depends on SERIAL_MPS2_UART + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + +config SERIAL_MPS2_UART + bool "MPS2 UART port" + depends on ARCH_MPS2 || COMPILE_TEST + select SERIAL_CORE + help + This driver support the UART ports on ARM MPS2. + +config SERIAL_ARC + tristate "ARC UART driver support" + select SERIAL_CORE + help + Driver for on-chip UART for ARC(Synopsys) for the legacy + FPGA Boards (ML50x/ARCAngel4) + +config SERIAL_ARC_CONSOLE + bool "Console on ARC UART" + depends on SERIAL_ARC=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Enable system Console on ARC UART + +config SERIAL_ARC_NR_PORTS + int "Number of ARC UART ports" + depends on SERIAL_ARC + range 1 3 + default "1" + help + Set this to the number of serial ports you want the driver + to support. + +config SERIAL_RP2 + tristate "Comtrol RocketPort EXPRESS/INFINITY support" + depends on PCI + select SERIAL_CORE + help + This driver supports the Comtrol RocketPort EXPRESS and + RocketPort INFINITY families of PCI/PCIe multiport serial adapters. + These adapters use a "RocketPort 2" ASIC that is not compatible + with the original RocketPort driver (CONFIG_ROCKETPORT). + + To compile this driver as a module, choose M here: the + module will be called rp2. + + If you want to compile this driver into the kernel, say Y here. If + you don't have a suitable RocketPort card installed, say N. + +config SERIAL_RP2_NR_UARTS + int "Maximum number of RocketPort EXPRESS/INFINITY ports" + depends on SERIAL_RP2 + default "32" + help + If multiple cards are present, the default limit of 32 ports may + need to be increased. + +config SERIAL_FSL_LPUART + tristate "Freescale lpuart serial port support" + depends on HAS_DMA + select SERIAL_CORE + help + Support for the on-chip lpuart on some Freescale SOCs. + +config SERIAL_FSL_LPUART_CONSOLE + bool "Console on Freescale lpuart serial port" + depends on SERIAL_FSL_LPUART=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + If you have enabled the lpuart serial port on the Freescale SoCs, + you can make it the console by answering Y to this option. + +config SERIAL_FSL_LINFLEXUART + tristate "Freescale LINFlexD UART serial port support" + depends on PRINTK + select SERIAL_CORE + help + Support for the on-chip LINFlexD UART on some Freescale SOCs. + +config SERIAL_FSL_LINFLEXUART_CONSOLE + bool "Console on Freescale LINFlexD UART serial port" + depends on SERIAL_FSL_LINFLEXUART=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + If you have enabled the LINFlexD UART serial port on the Freescale + SoCs, you can make it the console by answering Y to this option. + +config SERIAL_CONEXANT_DIGICOLOR + tristate "Conexant Digicolor CX92xxx USART serial port support" + depends on OF + select SERIAL_CORE + help + Support for the on-chip USART on Conexant Digicolor SoCs. + +config SERIAL_CONEXANT_DIGICOLOR_CONSOLE + bool "Console on Conexant Digicolor serial port" + depends on SERIAL_CONEXANT_DIGICOLOR=y + select SERIAL_CORE_CONSOLE + help + If you have enabled the USART serial port on Conexant Digicolor + SoCs, you can make it the console by answering Y to this option. + +config SERIAL_ST_ASC + tristate "ST ASC serial port support" + select SERIAL_CORE + depends on ARM || COMPILE_TEST + help + This driver is for the on-chip Asynchronous Serial Controller on + STMicroelectronics STi SoCs. + ASC is embedded in ST COMMS IP block. It supports Rx & Tx functionality. 
+ It support all industry standard baud rates. + + If unsure, say N. + +config SERIAL_ST_ASC_CONSOLE + bool "Support for console on ST ASC" + depends on SERIAL_ST_ASC=y + select SERIAL_CORE_CONSOLE + +config SERIAL_MEN_Z135 + tristate "MEN 16z135 Support" + select SERIAL_CORE + depends on MCB + help + Say yes here to enable support for the MEN 16z135 High Speed UART IP-Core + on a MCB carrier. + + This driver can also be build as a module. If so, the module will be called + men_z135_uart.ko + +config SERIAL_SPRD + tristate "Support for Spreadtrum serial" + select SERIAL_CORE + depends on COMMON_CLK + help + This enables the driver for the Spreadtrum's serial. + +config SERIAL_SPRD_CONSOLE + bool "Spreadtrum UART console support" + depends on SERIAL_SPRD=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Support for early debug console using Spreadtrum's serial. This enables + the console before standard serial driver is probed. This is enabled + with "earlycon" on the kernel command line. The console is + enabled when early_param is processed. + +config SERIAL_STM32 + tristate "STMicroelectronics STM32 serial port support" + select SERIAL_CORE + depends on ARCH_STM32 || COMPILE_TEST + select SERIAL_MCTRL_GPIO if GPIOLIB + help + This driver is for the on-chip Serial Controller on + STMicroelectronics STM32 MCUs. + USART supports Rx & Tx functionality. + It support all industry standard baud rates. + + If unsure, say N. + +config SERIAL_STM32_CONSOLE + bool "Support for console on STM32" + depends on SERIAL_STM32=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + +config SERIAL_MVEBU_UART + bool "Marvell EBU serial port support" + depends on ARCH_MVEBU || COMPILE_TEST + depends on COMMON_CLK + select SERIAL_CORE + help + This driver is for Marvell EBU SoC's UART. If you have a machine + based on the Armada-3700 SoC and wish to use the on-board serial + port, + say 'Y' here. + Otherwise, say 'N'. + +config SERIAL_MVEBU_CONSOLE + bool "Console on Marvell EBU serial port" + depends on SERIAL_MVEBU_UART + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + default y + help + Say 'Y' here if you wish to use Armada-3700 UART as the system console. + (the system console is the device which receives all kernel messages + and warnings and which allows logins in single user mode) + Otherwise, say 'N'. + +config SERIAL_OWL + tristate "Actions Semi Owl serial port support" + depends on ARCH_ACTIONS || COMPILE_TEST + select SERIAL_CORE + help + This driver is for Actions Semiconductor S500/S900 SoC's UART. + Say 'Y' here if you wish to use the on-board serial port. + Otherwise, say 'N'. + +config SERIAL_OWL_CONSOLE + bool "Console on Actions Semi Owl serial port" + depends on SERIAL_OWL=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + default y + help + Say 'Y' here if you wish to use Actions Semiconductor S500/S900 UART + as the system console. + +config SERIAL_RDA + bool "RDA Micro serial port support" + depends on ARCH_RDA || COMPILE_TEST + select SERIAL_CORE + help + This driver is for RDA8810PL SoC's UART. + Say 'Y' here if you wish to use the on-board serial port. + Otherwise, say 'N'. + +config SERIAL_RDA_CONSOLE + bool "Console on RDA Micro serial port" + depends on SERIAL_RDA=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + default y + help + Say 'Y' here if you wish to use the RDA8810PL UART as the system + console. Only earlycon is implemented currently. 
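[Editorial note] Options like SERIAL_RDA_CONSOLE and the other entries that select SERIAL_EARLYCON hook into the earlycon machinery: the driver supplies a setup function that installs a polled write callback long before the real driver probes. A sketch of the usual registration; the foo names, the compatible string and the TX register at offset 0 are hypothetical.

#include <linux/console.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/serial_core.h>

static void foo_early_putc(struct uart_port *port, unsigned char c)
{
	writel(c, port->membase);	/* hypothetical TX register at offset 0 */
}

static void foo_early_write(struct console *con, const char *s, unsigned int count)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, count, foo_early_putc);
}

static int __init foo_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = foo_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(foo, "vendor,foo-uart", foo_early_console_setup);

With that in place, booting with just "earlycon" lets the core pick the node from the devicetree stdout-path and call the setup function.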
+ +config SERIAL_MILBEAUT_USIO + tristate "Milbeaut USIO/UART serial port support" + depends on ARCH_MILBEAUT || (COMPILE_TEST && OF) + default ARCH_MILBEAUT + select SERIAL_CORE + help + This selects the USIO/UART IP found in Socionext Milbeaut SoCs. + +config SERIAL_MILBEAUT_USIO_PORTS + int "Maximum number of CSIO/UART ports (1-8)" + range 1 8 + depends on SERIAL_MILBEAUT_USIO + default "4" + +config SERIAL_MILBEAUT_USIO_CONSOLE + bool "Support for console on MILBEAUT USIO/UART serial port" + depends on SERIAL_MILBEAUT_USIO=y + default y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Say 'Y' here if you wish to use a USIO/UART of Socionext Milbeaut + SoCs as the system console (the system console is the device which + receives all kernel messages and warnings and which allows logins in + single user mode). + +config SERIAL_LITEUART + tristate "LiteUART serial port support" + depends on HAS_IOMEM + depends on OF || COMPILE_TEST + depends on LITEX || COMPILE_TEST + select SERIAL_CORE + help + This driver is for the FPGA-based LiteUART serial controller from LiteX + SoC builder. + + Say 'Y' or 'M' here if you wish to use the LiteUART serial controller. + Otherwise, say 'N'. + +config SERIAL_LITEUART_MAX_PORTS + int "Maximum number of LiteUART ports" + depends on SERIAL_LITEUART + default "1" + help + Set this to the maximum number of serial ports you want the driver + to support. + +config SERIAL_LITEUART_CONSOLE + bool "LiteUART serial port console support" + depends on SERIAL_LITEUART=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Say 'Y' or 'M' here if you wish to use the FPGA-based LiteUART serial + controller from LiteX SoC builder as the system console + (the system console is the device which receives all kernel messages + and warnings and which allows logins in single user mode). + Otherwise, say 'N'. + +config SERIAL_SUNPLUS + tristate "Sunplus UART support" + depends on ARCH_SUNPLUS || COMPILE_TEST + select SERIAL_CORE + help + Select this option if you would like to use Sunplus serial port on + Sunplus SoC SP7021. + If you enable this option, Sunplus serial ports in the system will + be registered as ttySUPx. + This driver can also be built as a module. If so, the module will be + called sunplus-uart. + +config SERIAL_SUNPLUS_CONSOLE + bool "Console on Sunplus UART" + depends on SERIAL_SUNPLUS + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Select this option if you would like to use a Sunplus UART as the + system console. + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttySUPx". + +endmenu + +config SERIAL_MCTRL_GPIO + tristate diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile new file mode 100644 index 000000000..238a9557b --- /dev/null +++ b/drivers/tty/serial/Makefile @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the kernel serial device drivers. +# + +obj-$(CONFIG_SERIAL_CORE) += serial_core.o + +obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o +obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o +obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o + +# These Sparc drivers have to appear before others such as 8250 +# which share ttySx minor node space. Otherwise console device +# names change and other unplesantries. 
+obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o +obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o +obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o +obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o +obj-$(CONFIG_SERIAL_SUNSAB) += sunsab.o + +obj-$(CONFIG_SERIAL_21285) += 21285.o + +# Now bring in any enabled 8250/16450/16550 type drivers. +obj-$(CONFIG_SERIAL_8250) += 8250/ + +obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o +obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o +obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o +obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o +obj-$(CONFIG_SERIAL_SA1100) += sa1100.o +obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o +obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o +obj-$(CONFIG_SERIAL_MAX3100) += max3100.o +obj-$(CONFIG_SERIAL_MAX310X) += max310x.o +obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o +obj-$(CONFIG_SERIAL_MUX) += mux.o +obj-$(CONFIG_SERIAL_MCF) += mcf.o +obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o +obj-$(CONFIG_SERIAL_HS_LPC32XX) += lpc32xx_hs.o +obj-$(CONFIG_SERIAL_DZ) += dz.o +obj-$(CONFIG_SERIAL_ZS) += zs.o +obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o +obj-$(CONFIG_SERIAL_CPM) += cpm_uart/ +obj-$(CONFIG_SERIAL_IMX) += imx.o +obj-$(CONFIG_SERIAL_IMX_EARLYCON) += imx_earlycon.o +obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o +obj-$(CONFIG_SERIAL_ICOM) += icom.o +obj-$(CONFIG_SERIAL_MESON) += meson_uart.o +obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o +obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o +obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o +obj-$(CONFIG_SERIAL_JSM) += jsm/ +obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o +obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o +obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o +obj-$(CONFIG_SERIAL_MSM) += msm_serial.o +obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o +obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o +obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o +obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o +obj-$(CONFIG_SERIAL_QE) += ucc_uart.o +obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o +obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o +obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o +obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o +obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o +obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o +obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o +obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o +obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o +obj-$(CONFIG_SERIAL_TEGRA_TCU) += tegra-tcu.o +obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o +obj-$(CONFIG_SERIAL_ARC) += arc_uart.o +obj-$(CONFIG_SERIAL_RP2) += rp2.o +obj-$(CONFIG_SERIAL_FSL_LPUART) += fsl_lpuart.o +obj-$(CONFIG_SERIAL_FSL_LINFLEXUART) += fsl_linflexuart.o +obj-$(CONFIG_SERIAL_CONEXANT_DIGICOLOR) += digicolor-usart.o +obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o +obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o +obj-$(CONFIG_SERIAL_STM32) += stm32-usart.o +obj-$(CONFIG_SERIAL_MVEBU_UART) += mvebu-uart.o +obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o +obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o +obj-$(CONFIG_SERIAL_OWL) += owl-uart.o +obj-$(CONFIG_SERIAL_RDA) += rda-uart.o +obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o +obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o +obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o +obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o + +# GPIOLIB helpers for modem control lines +obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o + +obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o +obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c new file mode 
100644 index 000000000..c2d154d78 --- /dev/null +++ b/drivers/tty/serial/altera_jtaguart.c @@ -0,0 +1,530 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * altera_jtaguart.c -- Altera JTAG UART driver + * + * Based on mcf.c -- Freescale ColdFire UART driver + * + * (C) Copyright 2003-2007, Greg Ungerer + * (C) Copyright 2008, Thomas Chou + * (C) Copyright 2010, Tobias Klauser + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "altera_jtaguart" + +/* + * Altera JTAG UART register definitions according to the Altera JTAG UART + * datasheet: https://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf + */ + +#define ALTERA_JTAGUART_SIZE 8 + +#define ALTERA_JTAGUART_DATA_REG 0 + +#define ALTERA_JTAGUART_DATA_DATA_MSK 0x000000FF +#define ALTERA_JTAGUART_DATA_RVALID_MSK 0x00008000 +#define ALTERA_JTAGUART_DATA_RAVAIL_MSK 0xFFFF0000 +#define ALTERA_JTAGUART_DATA_RAVAIL_OFF 16 + +#define ALTERA_JTAGUART_CONTROL_REG 4 + +#define ALTERA_JTAGUART_CONTROL_RE_MSK 0x00000001 +#define ALTERA_JTAGUART_CONTROL_WE_MSK 0x00000002 +#define ALTERA_JTAGUART_CONTROL_RI_MSK 0x00000100 +#define ALTERA_JTAGUART_CONTROL_RI_OFF 8 +#define ALTERA_JTAGUART_CONTROL_WI_MSK 0x00000200 +#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400 +#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000 + +/* + * Local per-uart structure. + */ +struct altera_jtaguart { + struct uart_port port; + unsigned int sigs; /* Local copy of line sigs */ + unsigned long imr; /* Local IMR mirror */ +}; + +static unsigned int altera_jtaguart_tx_space(struct uart_port *port, u32 *ctlp) +{ + u32 ctl = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG); + + if (ctlp) + *ctlp = ctl; + + return FIELD_GET(ALTERA_JTAGUART_CONTROL_WSPACE_MSK, ctl); +} + +static unsigned int altera_jtaguart_tx_empty(struct uart_port *port) +{ + return altera_jtaguart_tx_space(port, NULL) ? 
TIOCSER_TEMT : 0; +} + +static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +static void altera_jtaguart_set_mctrl(struct uart_port *port, unsigned int sigs) +{ +} + +static void altera_jtaguart_start_tx(struct uart_port *port) +{ + struct altera_jtaguart *pp = + container_of(port, struct altera_jtaguart, port); + + pp->imr |= ALTERA_JTAGUART_CONTROL_WE_MSK; + writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); +} + +static void altera_jtaguart_stop_tx(struct uart_port *port) +{ + struct altera_jtaguart *pp = + container_of(port, struct altera_jtaguart, port); + + pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK; + writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); +} + +static void altera_jtaguart_stop_rx(struct uart_port *port) +{ + struct altera_jtaguart *pp = + container_of(port, struct altera_jtaguart, port); + + pp->imr &= ~ALTERA_JTAGUART_CONTROL_RE_MSK; + writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); +} + +static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state) +{ +} + +static void altera_jtaguart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + /* Just copy the old termios settings back */ + if (old) + tty_termios_copy_hw(termios, old); +} + +static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp) +{ + struct uart_port *port = &pp->port; + unsigned char ch, flag; + unsigned long status; + + while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) & + ALTERA_JTAGUART_DATA_RVALID_MSK) { + ch = status & ALTERA_JTAGUART_DATA_DATA_MSK; + flag = TTY_NORMAL; + port->icount.rx++; + + if (uart_handle_sysrq_char(port, ch)) + continue; + uart_insert_char(port, 0, 0, ch, flag); + } + + tty_flip_buffer_push(&port->state->port); +} + +static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp) +{ + struct uart_port *port = &pp->port; + struct circ_buf *xmit = &port->state->xmit; + unsigned int pending, count; + + if (port->x_char) { + /* Send special char - probably flow control */ + writel(port->x_char, port->membase + ALTERA_JTAGUART_DATA_REG); + port->x_char = 0; + port->icount.tx++; + return; + } + + pending = uart_circ_chars_pending(xmit); + if (pending > 0) { + count = altera_jtaguart_tx_space(port, NULL); + if (count > pending) + count = pending; + if (count > 0) { + pending -= count; + while (count--) { + writel(xmit->buf[xmit->tail], + port->membase + ALTERA_JTAGUART_DATA_REG); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + if (pending < WAKEUP_CHARS) + uart_write_wakeup(port); + } + } + + if (pending == 0) + altera_jtaguart_stop_tx(port); +} + +static irqreturn_t altera_jtaguart_interrupt(int irq, void *data) +{ + struct uart_port *port = data; + struct altera_jtaguart *pp = + container_of(port, struct altera_jtaguart, port); + unsigned int isr; + + isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >> + ALTERA_JTAGUART_CONTROL_RI_OFF) & pp->imr; + + spin_lock(&port->lock); + + if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK) + altera_jtaguart_rx_chars(pp); + if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK) + altera_jtaguart_tx_chars(pp); + + spin_unlock(&port->lock); + + return IRQ_RETVAL(isr); +} + +static void altera_jtaguart_config_port(struct uart_port *port, int flags) +{ + port->type = PORT_ALTERA_JTAGUART; + + /* Clear mask, so no surprise interrupts. 
*/ + writel(0, port->membase + ALTERA_JTAGUART_CONTROL_REG); +} + +static int altera_jtaguart_startup(struct uart_port *port) +{ + struct altera_jtaguart *pp = + container_of(port, struct altera_jtaguart, port); + unsigned long flags; + int ret; + + ret = request_irq(port->irq, altera_jtaguart_interrupt, 0, + DRV_NAME, port); + if (ret) { + pr_err(DRV_NAME ": unable to attach Altera JTAG UART %d " + "interrupt vector=%d\n", port->line, port->irq); + return ret; + } + + spin_lock_irqsave(&port->lock, flags); + + /* Enable RX interrupts now */ + pp->imr = ALTERA_JTAGUART_CONTROL_RE_MSK; + writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); + + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void altera_jtaguart_shutdown(struct uart_port *port) +{ + struct altera_jtaguart *pp = + container_of(port, struct altera_jtaguart, port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable all interrupts now */ + pp->imr = 0; + writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); + + spin_unlock_irqrestore(&port->lock, flags); + + free_irq(port->irq, port); +} + +static const char *altera_jtaguart_type(struct uart_port *port) +{ + return (port->type == PORT_ALTERA_JTAGUART) ? "Altera JTAG UART" : NULL; +} + +static int altera_jtaguart_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +static void altera_jtaguart_release_port(struct uart_port *port) +{ + /* Nothing to release... */ +} + +static int altera_jtaguart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (ser->type != PORT_UNKNOWN && ser->type != PORT_ALTERA_JTAGUART) + return -EINVAL; + return 0; +} + +/* + * Define the basic serial functions we support. + */ +static const struct uart_ops altera_jtaguart_ops = { + .tx_empty = altera_jtaguart_tx_empty, + .get_mctrl = altera_jtaguart_get_mctrl, + .set_mctrl = altera_jtaguart_set_mctrl, + .start_tx = altera_jtaguart_start_tx, + .stop_tx = altera_jtaguart_stop_tx, + .stop_rx = altera_jtaguart_stop_rx, + .break_ctl = altera_jtaguart_break_ctl, + .startup = altera_jtaguart_startup, + .shutdown = altera_jtaguart_shutdown, + .set_termios = altera_jtaguart_set_termios, + .type = altera_jtaguart_type, + .request_port = altera_jtaguart_request_port, + .release_port = altera_jtaguart_release_port, + .config_port = altera_jtaguart_config_port, + .verify_port = altera_jtaguart_verify_port, +}; + +#define ALTERA_JTAGUART_MAXPORTS 1 +static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS]; + +#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) + +#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS) +static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c) +{ + unsigned long flags; + u32 status; + + spin_lock_irqsave(&port->lock, flags); + while (!altera_jtaguart_tx_space(port, &status)) { + spin_unlock_irqrestore(&port->lock, flags); + + if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) { + return; /* no connection activity */ + } + + cpu_relax(); + spin_lock_irqsave(&port->lock, flags); + } + writel(c, port->membase + ALTERA_JTAGUART_DATA_REG); + spin_unlock_irqrestore(&port->lock, flags); +} +#else +static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + while (!altera_jtaguart_tx_space(port, NULL)) { + spin_unlock_irqrestore(&port->lock, flags); + cpu_relax(); + spin_lock_irqsave(&port->lock, flags); + } + writel(c, port->membase + 
ALTERA_JTAGUART_DATA_REG); + spin_unlock_irqrestore(&port->lock, flags); +} +#endif + +static void altera_jtaguart_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = &(altera_jtaguart_ports + co->index)->port; + + uart_console_write(port, s, count, altera_jtaguart_console_putc); +} + +static int __init altera_jtaguart_console_setup(struct console *co, + char *options) +{ + struct uart_port *port; + + if (co->index < 0 || co->index >= ALTERA_JTAGUART_MAXPORTS) + return -EINVAL; + port = &altera_jtaguart_ports[co->index].port; + if (port->membase == NULL) + return -ENODEV; + return 0; +} + +static struct uart_driver altera_jtaguart_driver; + +static struct console altera_jtaguart_console = { + .name = "ttyJ", + .write = altera_jtaguart_console_write, + .device = uart_console_device, + .setup = altera_jtaguart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &altera_jtaguart_driver, +}; + +static int __init altera_jtaguart_console_init(void) +{ + register_console(&altera_jtaguart_console); + return 0; +} + +console_initcall(altera_jtaguart_console_init); + +#define ALTERA_JTAGUART_CONSOLE (&altera_jtaguart_console) + +static void altera_jtaguart_earlycon_write(struct console *co, const char *s, + unsigned int count) +{ + struct earlycon_device *dev = co->data; + + uart_console_write(&dev->port, s, count, altera_jtaguart_console_putc); +} + +static int __init altera_jtaguart_earlycon_setup(struct earlycon_device *dev, + const char *options) +{ + if (!dev->port.membase) + return -ENODEV; + + dev->con->write = altera_jtaguart_earlycon_write; + return 0; +} + +OF_EARLYCON_DECLARE(juart, "altr,juart-1.0", altera_jtaguart_earlycon_setup); + +#else + +#define ALTERA_JTAGUART_CONSOLE NULL + +#endif /* CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE */ + +static struct uart_driver altera_jtaguart_driver = { + .owner = THIS_MODULE, + .driver_name = "altera_jtaguart", + .dev_name = "ttyJ", + .major = ALTERA_JTAGUART_MAJOR, + .minor = ALTERA_JTAGUART_MINOR, + .nr = ALTERA_JTAGUART_MAXPORTS, + .cons = ALTERA_JTAGUART_CONSOLE, +}; + +static int altera_jtaguart_probe(struct platform_device *pdev) +{ + struct altera_jtaguart_platform_uart *platp = + dev_get_platdata(&pdev->dev); + struct uart_port *port; + struct resource *res_mem; + int i = pdev->id; + int irq; + + /* -1 emphasizes that the platform must have one port, no .N suffix */ + if (i == -1) + i = 0; + + if (i >= ALTERA_JTAGUART_MAXPORTS) + return -EINVAL; + + port = &altera_jtaguart_ports[i].port; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mem) + port->mapbase = res_mem->start; + else if (platp) + port->mapbase = platp->mapbase; + else + return -ENODEV; + + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && irq != -ENXIO) + return irq; + if (irq > 0) + port->irq = irq; + else if (platp) + port->irq = platp->irq; + else + return -ENODEV; + + port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE); + if (!port->membase) + return -ENOMEM; + + port->line = i; + port->type = PORT_ALTERA_JTAGUART; + port->iotype = SERIAL_IO_MEM; + port->ops = &altera_jtaguart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->dev = &pdev->dev; + + uart_add_one_port(&altera_jtaguart_driver, port); + + return 0; +} + +static int altera_jtaguart_remove(struct platform_device *pdev) +{ + struct uart_port *port; + int i = pdev->id; + + if (i == -1) + i = 0; + + port = &altera_jtaguart_ports[i].port; + uart_remove_one_port(&altera_jtaguart_driver, port); + iounmap(port->membase); 
+ + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id altera_jtaguart_match[] = { + { .compatible = "ALTR,juart-1.0", }, + { .compatible = "altr,juart-1.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_jtaguart_match); +#endif /* CONFIG_OF */ + +static struct platform_driver altera_jtaguart_platform_driver = { + .probe = altera_jtaguart_probe, + .remove = altera_jtaguart_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(altera_jtaguart_match), + }, +}; + +static int __init altera_jtaguart_init(void) +{ + int rc; + + rc = uart_register_driver(&altera_jtaguart_driver); + if (rc) + return rc; + rc = platform_driver_register(&altera_jtaguart_platform_driver); + if (rc) + uart_unregister_driver(&altera_jtaguart_driver); + return rc; +} + +static void __exit altera_jtaguart_exit(void) +{ + platform_driver_unregister(&altera_jtaguart_platform_driver); + uart_unregister_driver(&altera_jtaguart_driver); +} + +module_init(altera_jtaguart_init); +module_exit(altera_jtaguart_exit); + +MODULE_DESCRIPTION("Altera JTAG UART driver"); +MODULE_AUTHOR("Thomas Chou "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c new file mode 100644 index 000000000..1203d1e08 --- /dev/null +++ b/drivers/tty/serial/altera_uart.c @@ -0,0 +1,674 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * altera_uart.c -- Altera UART driver + * + * Based on mcf.c -- Freescale ColdFire UART driver + * + * (C) Copyright 2003-2007, Greg Ungerer + * (C) Copyright 2008, Thomas Chou + * (C) Copyright 2010, Tobias Klauser + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "altera_uart" +#define SERIAL_ALTERA_MAJOR 204 +#define SERIAL_ALTERA_MINOR 213 + +/* + * Altera UART register definitions according to the Nios UART datasheet: + * http://www.altera.com/literature/ds/ds_nios_uart.pdf + */ + +#define ALTERA_UART_SIZE 32 + +#define ALTERA_UART_RXDATA_REG 0 +#define ALTERA_UART_TXDATA_REG 4 +#define ALTERA_UART_STATUS_REG 8 +#define ALTERA_UART_CONTROL_REG 12 +#define ALTERA_UART_DIVISOR_REG 16 +#define ALTERA_UART_EOP_REG 20 + +#define ALTERA_UART_STATUS_PE_MSK 0x0001 /* parity error */ +#define ALTERA_UART_STATUS_FE_MSK 0x0002 /* framing error */ +#define ALTERA_UART_STATUS_BRK_MSK 0x0004 /* break */ +#define ALTERA_UART_STATUS_ROE_MSK 0x0008 /* RX overrun error */ +#define ALTERA_UART_STATUS_TOE_MSK 0x0010 /* TX overrun error */ +#define ALTERA_UART_STATUS_TMT_MSK 0x0020 /* TX shift register state */ +#define ALTERA_UART_STATUS_TRDY_MSK 0x0040 /* TX ready */ +#define ALTERA_UART_STATUS_RRDY_MSK 0x0080 /* RX ready */ +#define ALTERA_UART_STATUS_E_MSK 0x0100 /* exception condition */ +#define ALTERA_UART_STATUS_DCTS_MSK 0x0400 /* CTS logic-level change */ +#define ALTERA_UART_STATUS_CTS_MSK 0x0800 /* CTS logic state */ +#define ALTERA_UART_STATUS_EOP_MSK 0x1000 /* EOP written/read */ + + /* Enable interrupt on... 
*/ +#define ALTERA_UART_CONTROL_PE_MSK 0x0001 /* ...parity error */ +#define ALTERA_UART_CONTROL_FE_MSK 0x0002 /* ...framing error */ +#define ALTERA_UART_CONTROL_BRK_MSK 0x0004 /* ...break */ +#define ALTERA_UART_CONTROL_ROE_MSK 0x0008 /* ...RX overrun */ +#define ALTERA_UART_CONTROL_TOE_MSK 0x0010 /* ...TX overrun */ +#define ALTERA_UART_CONTROL_TMT_MSK 0x0020 /* ...TX shift register empty */ +#define ALTERA_UART_CONTROL_TRDY_MSK 0x0040 /* ...TX ready */ +#define ALTERA_UART_CONTROL_RRDY_MSK 0x0080 /* ...RX ready */ +#define ALTERA_UART_CONTROL_E_MSK 0x0100 /* ...exception*/ + +#define ALTERA_UART_CONTROL_TRBK_MSK 0x0200 /* TX break */ +#define ALTERA_UART_CONTROL_DCTS_MSK 0x0400 /* Interrupt on CTS change */ +#define ALTERA_UART_CONTROL_RTS_MSK 0x0800 /* RTS signal */ +#define ALTERA_UART_CONTROL_EOP_MSK 0x1000 /* Interrupt on EOP */ + +/* + * Local per-uart structure. + */ +struct altera_uart { + struct uart_port port; + struct timer_list tmr; + unsigned int sigs; /* Local copy of line sigs */ + unsigned short imr; /* Local IMR mirror */ +}; + +static u32 altera_uart_readl(struct uart_port *port, int reg) +{ + return readl(port->membase + (reg << port->regshift)); +} + +static void altera_uart_writel(struct uart_port *port, u32 dat, int reg) +{ + writel(dat, port->membase + (reg << port->regshift)); +} + +static unsigned int altera_uart_tx_empty(struct uart_port *port) +{ + return (altera_uart_readl(port, ALTERA_UART_STATUS_REG) & + ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0; +} + +static unsigned int altera_uart_get_mctrl(struct uart_port *port) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned int sigs; + + sigs = (altera_uart_readl(port, ALTERA_UART_STATUS_REG) & + ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0; + sigs |= (pp->sigs & TIOCM_RTS); + + return sigs; +} + +static void altera_uart_update_ctrl_reg(struct altera_uart *pp) +{ + unsigned short imr = pp->imr; + + /* + * If the device doesn't have an irq, ensure that the irq bits are + * masked out to keep the irq line inactive. 
+ */ + if (!pp->port.irq) + imr &= ALTERA_UART_CONTROL_TRBK_MSK | ALTERA_UART_CONTROL_RTS_MSK; + + altera_uart_writel(&pp->port, imr, ALTERA_UART_CONTROL_REG); +} + +static void altera_uart_set_mctrl(struct uart_port *port, unsigned int sigs) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + + pp->sigs = sigs; + if (sigs & TIOCM_RTS) + pp->imr |= ALTERA_UART_CONTROL_RTS_MSK; + else + pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK; + altera_uart_update_ctrl_reg(pp); +} + +static void altera_uart_start_tx(struct uart_port *port) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + + pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK; + altera_uart_update_ctrl_reg(pp); +} + +static void altera_uart_stop_tx(struct uart_port *port) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + + pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; + altera_uart_update_ctrl_reg(pp); +} + +static void altera_uart_stop_rx(struct uart_port *port) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + + pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK; + altera_uart_update_ctrl_reg(pp); +} + +static void altera_uart_break_ctl(struct uart_port *port, int break_state) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + if (break_state == -1) + pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK; + else + pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK; + altera_uart_update_ctrl_reg(pp); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void altera_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + unsigned int baud, baudclk; + + baud = uart_get_baud_rate(port, termios, old, 0, 4000000); + baudclk = port->uartclk / baud; + + if (old) + tty_termios_copy_hw(termios, old); + tty_termios_encode_baud_rate(termios, baud, baud); + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); + altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG); + spin_unlock_irqrestore(&port->lock, flags); + + /* + * FIXME: port->read_status_mask and port->ignore_status_mask + * need to be initialized based on termios settings for + * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT + */ +} + +static void altera_uart_rx_chars(struct uart_port *port) +{ + unsigned char ch, flag; + unsigned short status; + + while ((status = altera_uart_readl(port, ALTERA_UART_STATUS_REG)) & + ALTERA_UART_STATUS_RRDY_MSK) { + ch = altera_uart_readl(port, ALTERA_UART_RXDATA_REG); + flag = TTY_NORMAL; + port->icount.rx++; + + if (status & ALTERA_UART_STATUS_E_MSK) { + altera_uart_writel(port, status, + ALTERA_UART_STATUS_REG); + + if (status & ALTERA_UART_STATUS_BRK_MSK) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } else if (status & ALTERA_UART_STATUS_PE_MSK) { + port->icount.parity++; + } else if (status & ALTERA_UART_STATUS_ROE_MSK) { + port->icount.overrun++; + } else if (status & ALTERA_UART_STATUS_FE_MSK) { + port->icount.frame++; + } + + status &= port->read_status_mask; + + if (status & ALTERA_UART_STATUS_BRK_MSK) + flag = TTY_BREAK; + else if (status & ALTERA_UART_STATUS_PE_MSK) + flag = TTY_PARITY; + else if (status & ALTERA_UART_STATUS_FE_MSK) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(port, ch)) + continue; + uart_insert_char(port, status, ALTERA_UART_STATUS_ROE_MSK, ch, + flag); + } + + tty_flip_buffer_push(&port->state->port); +} + +static void 
altera_uart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + /* Send special char - probably flow control */ + altera_uart_writel(port, port->x_char, ALTERA_UART_TXDATA_REG); + port->x_char = 0; + port->icount.tx++; + return; + } + + while (altera_uart_readl(port, ALTERA_UART_STATUS_REG) & + ALTERA_UART_STATUS_TRDY_MSK) { + if (xmit->head == xmit->tail) + break; + altera_uart_writel(port, xmit->buf[xmit->tail], + ALTERA_UART_TXDATA_REG); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + altera_uart_stop_tx(port); +} + +static irqreturn_t altera_uart_interrupt(int irq, void *data) +{ + struct uart_port *port = data; + struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned long flags; + unsigned int isr; + + isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr; + + spin_lock_irqsave(&port->lock, flags); + if (isr & ALTERA_UART_STATUS_RRDY_MSK) + altera_uart_rx_chars(port); + if (isr & ALTERA_UART_STATUS_TRDY_MSK) + altera_uart_tx_chars(port); + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_RETVAL(isr); +} + +static void altera_uart_timer(struct timer_list *t) +{ + struct altera_uart *pp = from_timer(pp, t, tmr); + struct uart_port *port = &pp->port; + + altera_uart_interrupt(0, port); + mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port)); +} + +static void altera_uart_config_port(struct uart_port *port, int flags) +{ + port->type = PORT_ALTERA_UART; + + /* Clear mask, so no surprise interrupts. */ + altera_uart_writel(port, 0, ALTERA_UART_CONTROL_REG); + /* Clear status register */ + altera_uart_writel(port, 0, ALTERA_UART_STATUS_REG); +} + +static int altera_uart_startup(struct uart_port *port) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned long flags; + + if (!port->irq) { + timer_setup(&pp->tmr, altera_uart_timer, 0); + mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port)); + } else { + int ret; + + ret = request_irq(port->irq, altera_uart_interrupt, 0, + DRV_NAME, port); + if (ret) { + pr_err(DRV_NAME ": unable to attach Altera UART %d " + "interrupt vector=%d\n", port->line, port->irq); + return ret; + } + } + + spin_lock_irqsave(&port->lock, flags); + + /* Enable RX interrupts now */ + pp->imr = ALTERA_UART_CONTROL_RRDY_MSK; + altera_uart_update_ctrl_reg(pp); + + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void altera_uart_shutdown(struct uart_port *port) +{ + struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable all interrupts now */ + pp->imr = 0; + altera_uart_update_ctrl_reg(pp); + + spin_unlock_irqrestore(&port->lock, flags); + + if (port->irq) + free_irq(port->irq, port); + else + del_timer_sync(&pp->tmr); +} + +static const char *altera_uart_type(struct uart_port *port) +{ + return (port->type == PORT_ALTERA_UART) ? "Altera UART" : NULL; +} + +static int altera_uart_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +static void altera_uart_release_port(struct uart_port *port) +{ + /* Nothing to release... 
*/ +} + +static int altera_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_ALTERA_UART)) + return -EINVAL; + return 0; +} + +#ifdef CONFIG_CONSOLE_POLL +static int altera_uart_poll_get_char(struct uart_port *port) +{ + while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) & + ALTERA_UART_STATUS_RRDY_MSK)) + cpu_relax(); + + return altera_uart_readl(port, ALTERA_UART_RXDATA_REG); +} + +static void altera_uart_poll_put_char(struct uart_port *port, unsigned char c) +{ + while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) & + ALTERA_UART_STATUS_TRDY_MSK)) + cpu_relax(); + + altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG); +} +#endif + +/* + * Define the basic serial functions we support. + */ +static const struct uart_ops altera_uart_ops = { + .tx_empty = altera_uart_tx_empty, + .get_mctrl = altera_uart_get_mctrl, + .set_mctrl = altera_uart_set_mctrl, + .start_tx = altera_uart_start_tx, + .stop_tx = altera_uart_stop_tx, + .stop_rx = altera_uart_stop_rx, + .break_ctl = altera_uart_break_ctl, + .startup = altera_uart_startup, + .shutdown = altera_uart_shutdown, + .set_termios = altera_uart_set_termios, + .type = altera_uart_type, + .request_port = altera_uart_request_port, + .release_port = altera_uart_release_port, + .config_port = altera_uart_config_port, + .verify_port = altera_uart_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = altera_uart_poll_get_char, + .poll_put_char = altera_uart_poll_put_char, +#endif +}; + +static struct altera_uart altera_uart_ports[CONFIG_SERIAL_ALTERA_UART_MAXPORTS]; + +#if defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE) + +static void altera_uart_console_putc(struct uart_port *port, unsigned char c) +{ + while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) & + ALTERA_UART_STATUS_TRDY_MSK)) + cpu_relax(); + + altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG); +} + +static void altera_uart_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = &(altera_uart_ports + co->index)->port; + + uart_console_write(port, s, count, altera_uart_console_putc); +} + +static int __init altera_uart_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = CONFIG_SERIAL_ALTERA_UART_BAUDRATE; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS) + return -EINVAL; + port = &altera_uart_ports[co->index].port; + if (!port->membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver altera_uart_driver; + +static struct console altera_uart_console = { + .name = "ttyAL", + .write = altera_uart_console_write, + .device = uart_console_device, + .setup = altera_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &altera_uart_driver, +}; + +static int __init altera_uart_console_init(void) +{ + register_console(&altera_uart_console); + return 0; +} + +console_initcall(altera_uart_console_init); + +#define ALTERA_UART_CONSOLE (&altera_uart_console) + +static void altera_uart_earlycon_write(struct console *co, const char *s, + unsigned int count) +{ + struct earlycon_device *dev = co->data; + + uart_console_write(&dev->port, s, count, altera_uart_console_putc); +} + +static int __init altera_uart_earlycon_setup(struct earlycon_device *dev, + const char *options) +{ + struct 
uart_port *port = &dev->port; + + if (!port->membase) + return -ENODEV; + + /* Enable RX interrupts now */ + altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK, + ALTERA_UART_CONTROL_REG); + + if (dev->baud) { + unsigned int baudclk = port->uartclk / dev->baud; + + altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG); + } + + dev->con->write = altera_uart_earlycon_write; + return 0; +} + +OF_EARLYCON_DECLARE(uart, "altr,uart-1.0", altera_uart_earlycon_setup); + +#else + +#define ALTERA_UART_CONSOLE NULL + +#endif /* CONFIG_SERIAL_ALTERA_UART_CONSOLE */ + +/* + * Define the altera_uart UART driver structure. + */ +static struct uart_driver altera_uart_driver = { + .owner = THIS_MODULE, + .driver_name = DRV_NAME, + .dev_name = "ttyAL", + .major = SERIAL_ALTERA_MAJOR, + .minor = SERIAL_ALTERA_MINOR, + .nr = CONFIG_SERIAL_ALTERA_UART_MAXPORTS, + .cons = ALTERA_UART_CONSOLE, +}; + +static int altera_uart_probe(struct platform_device *pdev) +{ + struct altera_uart_platform_uart *platp = dev_get_platdata(&pdev->dev); + struct uart_port *port; + struct resource *res_mem; + int i = pdev->id; + int ret; + + /* if id is -1 scan for a free id and use that one */ + if (i == -1) { + for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS; i++) + if (altera_uart_ports[i].port.mapbase == 0) + break; + } + + if (i < 0 || i >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS) + return -EINVAL; + + port = &altera_uart_ports[i].port; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mem) + port->mapbase = res_mem->start; + else if (platp) + port->mapbase = platp->mapbase; + else + return -EINVAL; + + ret = platform_get_irq_optional(pdev, 0); + if (ret < 0 && ret != -ENXIO) + return ret; + if (ret > 0) + port->irq = ret; + else if (platp) + port->irq = platp->irq; + + /* Check platform data first so we can override device node data */ + if (platp) + port->uartclk = platp->uartclk; + else { + ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", + &port->uartclk); + if (ret) + return ret; + } + + port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE); + if (!port->membase) + return -ENOMEM; + + if (platp) + port->regshift = platp->bus_shift; + else + port->regshift = 0; + + port->line = i; + port->type = PORT_ALTERA_UART; + port->iotype = SERIAL_IO_MEM; + port->ops = &altera_uart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->dev = &pdev->dev; + + platform_set_drvdata(pdev, port); + + uart_add_one_port(&altera_uart_driver, port); + + return 0; +} + +static int altera_uart_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + + if (port) { + uart_remove_one_port(&altera_uart_driver, port); + port->mapbase = 0; + iounmap(port->membase); + } + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id altera_uart_match[] = { + { .compatible = "ALTR,uart-1.0", }, + { .compatible = "altr,uart-1.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_uart_match); +#endif /* CONFIG_OF */ + +static struct platform_driver altera_uart_platform_driver = { + .probe = altera_uart_probe, + .remove = altera_uart_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(altera_uart_match), + }, +}; + +static int __init altera_uart_init(void) +{ + int rc; + + rc = uart_register_driver(&altera_uart_driver); + if (rc) + return rc; + rc = platform_driver_register(&altera_uart_platform_driver); + if (rc) + uart_unregister_driver(&altera_uart_driver); + return rc; +} + +static void __exit altera_uart_exit(void) +{ + 
platform_driver_unregister(&altera_uart_platform_driver); + uart_unregister_driver(&altera_uart_driver); +} + +module_init(altera_uart_init); +module_exit(altera_uart_exit); + +MODULE_DESCRIPTION("Altera UART driver"); +MODULE_AUTHOR("Thomas Chou "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); +MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_ALTERA_MAJOR); diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c new file mode 100644 index 000000000..af27fb8ec --- /dev/null +++ b/drivers/tty/serial/amba-pl010.c @@ -0,0 +1,825 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for AMBA serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright 1999 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This is a generic driver for ARM AMBA-type serial ports. They + * have a lot of 16550-like features, but are not register compatible. + * Note that although they do have CTS, DCD and DSR inputs, they do + * not have an RI input, nor do they have DTR or RTS outputs. If + * required, these have to be supplied via some other means (eg, GPIO) + * and hooked into this driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UART_NR 8 + +#define SERIAL_AMBA_MAJOR 204 +#define SERIAL_AMBA_MINOR 16 +#define SERIAL_AMBA_NR UART_NR + +#define AMBA_ISR_PASS_LIMIT 256 + +#define UART_RX_DATA(s) (((s) & UART01x_FR_RXFE) == 0) +#define UART_TX_READY(s) (((s) & UART01x_FR_TXFF) == 0) + +#define UART_DUMMY_RSR_RX 256 +#define UART_PORT_SIZE 64 + +/* + * We wrap our port structure around the generic uart_port. + */ +struct uart_amba_port { + struct uart_port port; + struct clk *clk; + struct amba_device *dev; + struct amba_pl010_data *data; + unsigned int old_status; +}; + +static void pl010_stop_tx(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int cr; + + cr = readb(uap->port.membase + UART010_CR); + cr &= ~UART010_CR_TIE; + writel(cr, uap->port.membase + UART010_CR); +} + +static void pl010_start_tx(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int cr; + + cr = readb(uap->port.membase + UART010_CR); + cr |= UART010_CR_TIE; + writel(cr, uap->port.membase + UART010_CR); +} + +static void pl010_stop_rx(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int cr; + + cr = readb(uap->port.membase + UART010_CR); + cr &= ~(UART010_CR_RIE | UART010_CR_RTIE); + writel(cr, uap->port.membase + UART010_CR); +} + +static void pl010_disable_ms(struct uart_port *port) +{ + struct uart_amba_port *uap = (struct uart_amba_port *)port; + unsigned int cr; + + cr = readb(uap->port.membase + UART010_CR); + cr &= ~UART010_CR_MSIE; + writel(cr, uap->port.membase + UART010_CR); +} + +static void pl010_enable_ms(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int cr; + + cr = readb(uap->port.membase + UART010_CR); + cr |= UART010_CR_MSIE; + writel(cr, uap->port.membase + UART010_CR); +} + +static void pl010_rx_chars(struct uart_port *port) +{ + unsigned int status, ch, flag, rsr, max_count = 256; + + status = readb(port->membase + UART01x_FR); + while (UART_RX_DATA(status) && max_count--) { + ch = readb(port->membase + UART01x_DR); + flag = 
TTY_NORMAL; + + port->icount.rx++; + + /* + * Note that the error handling code is + * out of the main execution path + */ + rsr = readb(port->membase + UART01x_RSR) | UART_DUMMY_RSR_RX; + if (unlikely(rsr & UART01x_RSR_ANY)) { + writel(0, port->membase + UART01x_ECR); + + if (rsr & UART01x_RSR_BE) { + rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE); + port->icount.brk++; + if (uart_handle_break(port)) + goto ignore_char; + } else if (rsr & UART01x_RSR_PE) + port->icount.parity++; + else if (rsr & UART01x_RSR_FE) + port->icount.frame++; + if (rsr & UART01x_RSR_OE) + port->icount.overrun++; + + rsr &= port->read_status_mask; + + if (rsr & UART01x_RSR_BE) + flag = TTY_BREAK; + else if (rsr & UART01x_RSR_PE) + flag = TTY_PARITY; + else if (rsr & UART01x_RSR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(port, ch)) + goto ignore_char; + + uart_insert_char(port, rsr, UART01x_RSR_OE, ch, flag); + + ignore_char: + status = readb(port->membase + UART01x_FR); + } + tty_flip_buffer_push(&port->state->port); +} + +static void pl010_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + int count; + + if (port->x_char) { + writel(port->x_char, port->membase + UART01x_DR); + port->icount.tx++; + port->x_char = 0; + return; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + pl010_stop_tx(port); + return; + } + + count = port->fifosize >> 1; + do { + writel(xmit->buf[xmit->tail], port->membase + UART01x_DR); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + pl010_stop_tx(port); +} + +static void pl010_modem_status(struct uart_amba_port *uap) +{ + struct uart_port *port = &uap->port; + unsigned int status, delta; + + writel(0, port->membase + UART010_ICR); + + status = readb(port->membase + UART01x_FR) & UART01x_FR_MODEM_ANY; + + delta = status ^ uap->old_status; + uap->old_status = status; + + if (!delta) + return; + + if (delta & UART01x_FR_DCD) + uart_handle_dcd_change(port, status & UART01x_FR_DCD); + + if (delta & UART01x_FR_DSR) + port->icount.dsr++; + + if (delta & UART01x_FR_CTS) + uart_handle_cts_change(port, status & UART01x_FR_CTS); + + wake_up_interruptible(&port->state->port.delta_msr_wait); +} + +static irqreturn_t pl010_int(int irq, void *dev_id) +{ + struct uart_amba_port *uap = dev_id; + struct uart_port *port = &uap->port; + unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT; + int handled = 0; + + spin_lock(&port->lock); + + status = readb(port->membase + UART010_IIR); + if (status) { + do { + if (status & (UART010_IIR_RTIS | UART010_IIR_RIS)) + pl010_rx_chars(port); + if (status & UART010_IIR_MIS) + pl010_modem_status(uap); + if (status & UART010_IIR_TIS) + pl010_tx_chars(port); + + if (pass_counter-- == 0) + break; + + status = readb(port->membase + UART010_IIR); + } while (status & (UART010_IIR_RTIS | UART010_IIR_RIS | + UART010_IIR_TIS)); + handled = 1; + } + + spin_unlock(&port->lock); + + return IRQ_RETVAL(handled); +} + +static unsigned int pl010_tx_empty(struct uart_port *port) +{ + unsigned int status = readb(port->membase + UART01x_FR); + + return status & UART01x_FR_BUSY ? 
0 : TIOCSER_TEMT; +} + +static unsigned int pl010_get_mctrl(struct uart_port *port) +{ + unsigned int result = 0; + unsigned int status; + + status = readb(port->membase + UART01x_FR); + if (status & UART01x_FR_DCD) + result |= TIOCM_CAR; + if (status & UART01x_FR_DSR) + result |= TIOCM_DSR; + if (status & UART01x_FR_CTS) + result |= TIOCM_CTS; + + return result; +} + +static void pl010_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + if (uap->data) + uap->data->set_mctrl(uap->dev, port->membase, mctrl); +} + +static void pl010_break_ctl(struct uart_port *port, int break_state) +{ + unsigned long flags; + unsigned int lcr_h; + + spin_lock_irqsave(&port->lock, flags); + lcr_h = readb(port->membase + UART010_LCRH); + if (break_state == -1) + lcr_h |= UART01x_LCRH_BRK; + else + lcr_h &= ~UART01x_LCRH_BRK; + writel(lcr_h, port->membase + UART010_LCRH); + spin_unlock_irqrestore(&port->lock, flags); +} + +static int pl010_startup(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + int retval; + + /* + * Try to enable the clock producer. + */ + retval = clk_prepare_enable(uap->clk); + if (retval) + goto out; + + port->uartclk = clk_get_rate(uap->clk); + + /* + * Allocate the IRQ + */ + retval = request_irq(port->irq, pl010_int, 0, "uart-pl010", uap); + if (retval) + goto clk_dis; + + /* + * initialise the old status of the modem signals + */ + uap->old_status = readb(port->membase + UART01x_FR) & UART01x_FR_MODEM_ANY; + + /* + * Finally, enable interrupts + */ + writel(UART01x_CR_UARTEN | UART010_CR_RIE | UART010_CR_RTIE, + port->membase + UART010_CR); + + return 0; + + clk_dis: + clk_disable_unprepare(uap->clk); + out: + return retval; +} + +static void pl010_shutdown(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + /* + * Free the interrupt + */ + free_irq(port->irq, uap); + + /* + * disable all interrupts, disable the port + */ + writel(0, port->membase + UART010_CR); + + /* disable break condition and fifos */ + writel(readb(port->membase + UART010_LCRH) & + ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN), + port->membase + UART010_LCRH); + + /* + * Shut down the clock producer + */ + clk_disable_unprepare(uap->clk); +} + +static void +pl010_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int lcr_h, old_cr; + unsigned long flags; + unsigned int baud, quot; + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + quot = uart_get_divisor(port, baud); + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr_h = UART01x_LCRH_WLEN_5; + break; + case CS6: + lcr_h = UART01x_LCRH_WLEN_6; + break; + case CS7: + lcr_h = UART01x_LCRH_WLEN_7; + break; + default: // CS8 + lcr_h = UART01x_LCRH_WLEN_8; + break; + } + if (termios->c_cflag & CSTOPB) + lcr_h |= UART01x_LCRH_STP2; + if (termios->c_cflag & PARENB) { + lcr_h |= UART01x_LCRH_PEN; + if (!(termios->c_cflag & PARODD)) + lcr_h |= UART01x_LCRH_EPS; + } + if (port->fifosize > 1) + lcr_h |= UART01x_LCRH_FEN; + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. 
+ */ + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = UART01x_RSR_OE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= UART01x_RSR_BE; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= UART01x_RSR_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART01x_RSR_OE; + } + + /* + * Ignore all characters if CREAD is not set. + */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_RSR_RX; + + old_cr = readb(port->membase + UART010_CR) & ~UART010_CR_MSIE; + + if (UART_ENABLE_MS(port, termios->c_cflag)) + old_cr |= UART010_CR_MSIE; + + /* Set baud rate */ + quot -= 1; + writel((quot & 0xf00) >> 8, port->membase + UART010_LCRM); + writel(quot & 0xff, port->membase + UART010_LCRL); + + /* + * ----------v----------v----------v----------v----- + * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L + * ----------^----------^----------^----------^----- + */ + writel(lcr_h, port->membase + UART010_LCRH); + writel(old_cr, port->membase + UART010_CR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios) +{ + if (termios->c_line == N_PPS) { + port->flags |= UPF_HARDPPS_CD; + spin_lock_irq(&port->lock); + pl010_enable_ms(port); + spin_unlock_irq(&port->lock); + } else { + port->flags &= ~UPF_HARDPPS_CD; + if (!UART_ENABLE_MS(port, termios->c_cflag)) { + spin_lock_irq(&port->lock); + pl010_disable_ms(port); + spin_unlock_irq(&port->lock); + } + } +} + +static const char *pl010_type(struct uart_port *port) +{ + return port->type == PORT_AMBA ? "AMBA" : NULL; +} + +/* + * Release the memory region(s) being used by 'port' + */ +static void pl010_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, UART_PORT_SIZE); +} + +/* + * Request the memory region(s) being used by 'port' + */ +static int pl010_request_port(struct uart_port *port) +{ + return request_mem_region(port->mapbase, UART_PORT_SIZE, "uart-pl010") + != NULL ? 0 : -EBUSY; +} + +/* + * Configure/autoconfigure the port. + */ +static void pl010_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_AMBA; + pl010_request_port(port); + } +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). 
+ */ +static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= nr_irqs) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +static const struct uart_ops amba_pl010_pops = { + .tx_empty = pl010_tx_empty, + .set_mctrl = pl010_set_mctrl, + .get_mctrl = pl010_get_mctrl, + .stop_tx = pl010_stop_tx, + .start_tx = pl010_start_tx, + .stop_rx = pl010_stop_rx, + .enable_ms = pl010_enable_ms, + .break_ctl = pl010_break_ctl, + .startup = pl010_startup, + .shutdown = pl010_shutdown, + .set_termios = pl010_set_termios, + .set_ldisc = pl010_set_ldisc, + .type = pl010_type, + .release_port = pl010_release_port, + .request_port = pl010_request_port, + .config_port = pl010_config_port, + .verify_port = pl010_verify_port, +}; + +static struct uart_amba_port *amba_ports[UART_NR]; + +#ifdef CONFIG_SERIAL_AMBA_PL010_CONSOLE + +static void pl010_console_putchar(struct uart_port *port, unsigned char ch) +{ + unsigned int status; + + do { + status = readb(port->membase + UART01x_FR); + barrier(); + } while (!UART_TX_READY(status)); + writel(ch, port->membase + UART01x_DR); +} + +static void +pl010_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_amba_port *uap = amba_ports[co->index]; + struct uart_port *port = &uap->port; + unsigned int status, old_cr; + + clk_enable(uap->clk); + + /* + * First save the CR then disable the interrupts + */ + old_cr = readb(port->membase + UART010_CR); + writel(UART01x_CR_UARTEN, port->membase + UART010_CR); + + uart_console_write(port, s, count, pl010_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the TCR + */ + do { + status = readb(port->membase + UART01x_FR); + barrier(); + } while (status & UART01x_FR_BUSY); + writel(old_cr, port->membase + UART010_CR); + + clk_disable(uap->clk); +} + +static void __init +pl010_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) +{ + if (readb(uap->port.membase + UART010_CR) & UART01x_CR_UARTEN) { + unsigned int lcr_h, quot; + lcr_h = readb(uap->port.membase + UART010_LCRH); + + *parity = 'n'; + if (lcr_h & UART01x_LCRH_PEN) { + if (lcr_h & UART01x_LCRH_EPS) + *parity = 'e'; + else + *parity = 'o'; + } + + if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7) + *bits = 7; + else + *bits = 8; + + quot = readb(uap->port.membase + UART010_LCRL) | + readb(uap->port.membase + UART010_LCRM) << 8; + *baud = uap->port.uartclk / (16 * (quot + 1)); + } +} + +static int __init pl010_console_setup(struct console *co, char *options) +{ + struct uart_amba_port *uap; + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
+ */ + if (co->index >= UART_NR) + co->index = 0; + uap = amba_ports[co->index]; + if (!uap) + return -ENODEV; + + ret = clk_prepare(uap->clk); + if (ret) + return ret; + + uap->port.uartclk = clk_get_rate(uap->clk); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + pl010_console_get_options(uap, &baud, &parity, &bits); + + return uart_set_options(&uap->port, co, baud, parity, bits, flow); +} + +static struct uart_driver amba_reg; +static struct console amba_console = { + .name = "ttyAM", + .write = pl010_console_write, + .device = uart_console_device, + .setup = pl010_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &amba_reg, +}; + +#define AMBA_CONSOLE &amba_console +#else +#define AMBA_CONSOLE NULL +#endif + +static DEFINE_MUTEX(amba_reg_lock); +static struct uart_driver amba_reg = { + .owner = THIS_MODULE, + .driver_name = "ttyAM", + .dev_name = "ttyAM", + .major = SERIAL_AMBA_MAJOR, + .minor = SERIAL_AMBA_MINOR, + .nr = UART_NR, + .cons = AMBA_CONSOLE, +}; + +static int pl010_probe(struct amba_device *dev, const struct amba_id *id) +{ + struct uart_amba_port *uap; + void __iomem *base; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == NULL) + break; + + if (i == ARRAY_SIZE(amba_ports)) + return -EBUSY; + + uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port), + GFP_KERNEL); + if (!uap) + return -ENOMEM; + + base = devm_ioremap(&dev->dev, dev->res.start, + resource_size(&dev->res)); + if (!base) + return -ENOMEM; + + uap->clk = devm_clk_get(&dev->dev, NULL); + if (IS_ERR(uap->clk)) + return PTR_ERR(uap->clk); + + uap->port.dev = &dev->dev; + uap->port.mapbase = dev->res.start; + uap->port.membase = base; + uap->port.iotype = UPIO_MEM; + uap->port.irq = dev->irq[0]; + uap->port.fifosize = 16; + uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL010_CONSOLE); + uap->port.ops = &amba_pl010_pops; + uap->port.flags = UPF_BOOT_AUTOCONF; + uap->port.line = i; + uap->dev = dev; + uap->data = dev_get_platdata(&dev->dev); + + amba_ports[i] = uap; + + amba_set_drvdata(dev, uap); + + mutex_lock(&amba_reg_lock); + if (!amba_reg.state) { + ret = uart_register_driver(&amba_reg); + if (ret < 0) { + mutex_unlock(&amba_reg_lock); + dev_err(uap->port.dev, + "Failed to register AMBA-PL010 driver\n"); + return ret; + } + } + mutex_unlock(&amba_reg_lock); + + ret = uart_add_one_port(&amba_reg, &uap->port); + if (ret) + amba_ports[i] = NULL; + + return ret; +} + +static void pl010_remove(struct amba_device *dev) +{ + struct uart_amba_port *uap = amba_get_drvdata(dev); + int i; + bool busy = false; + + uart_remove_one_port(&amba_reg, &uap->port); + + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + amba_ports[i] = NULL; + else if (amba_ports[i]) + busy = true; + + if (!busy) + uart_unregister_driver(&amba_reg); +} + +#ifdef CONFIG_PM_SLEEP +static int pl010_suspend(struct device *dev) +{ + struct uart_amba_port *uap = dev_get_drvdata(dev); + + if (uap) + uart_suspend_port(&amba_reg, &uap->port); + + return 0; +} + +static int pl010_resume(struct device *dev) +{ + struct uart_amba_port *uap = dev_get_drvdata(dev); + + if (uap) + uart_resume_port(&amba_reg, &uap->port); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(pl010_dev_pm_ops, pl010_suspend, pl010_resume); + +static const struct amba_id pl010_ids[] = { + { + .id = 0x00041010, + .mask = 0x000fffff, + }, + { 0, 0 }, +}; + +MODULE_DEVICE_TABLE(amba, pl010_ids); + +static struct amba_driver pl010_driver = { + .drv = { + .name 
= "uart-pl010", + .pm = &pl010_dev_pm_ops, + }, + .id_table = pl010_ids, + .probe = pl010_probe, + .remove = pl010_remove, +}; + +static int __init pl010_init(void) +{ + printk(KERN_INFO "Serial: AMBA driver\n"); + + return amba_driver_register(&pl010_driver); +} + +static void __exit pl010_exit(void) +{ + amba_driver_unregister(&pl010_driver); +} + +module_init(pl010_init); +module_exit(pl010_exit); + +MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); +MODULE_DESCRIPTION("ARM AMBA serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c new file mode 100644 index 000000000..c74eaf255 --- /dev/null +++ b/drivers/tty/serial/amba-pl011.c @@ -0,0 +1,3016 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for AMBA serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright 1999 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * Copyright (C) 2010 ST-Ericsson SA + * + * This is a generic driver for ARM AMBA-type serial ports. They + * have a lot of 16550-like features, but are not register compatible. + * Note that although they do have CTS, DCD and DSR inputs, they do + * not have an RI input, nor do they have DTR or RTS outputs. If + * required, these have to be supplied via some other means (eg, GPIO) + * and hooked into this driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UART_NR 14 + +#define SERIAL_AMBA_MAJOR 204 +#define SERIAL_AMBA_MINOR 64 +#define SERIAL_AMBA_NR UART_NR + +#define AMBA_ISR_PASS_LIMIT 256 + +#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) +#define UART_DUMMY_DR_RX (1 << 16) + +enum { + REG_DR, + REG_ST_DMAWM, + REG_ST_TIMEOUT, + REG_FR, + REG_LCRH_RX, + REG_LCRH_TX, + REG_IBRD, + REG_FBRD, + REG_CR, + REG_IFLS, + REG_IMSC, + REG_RIS, + REG_MIS, + REG_ICR, + REG_DMACR, + REG_ST_XFCR, + REG_ST_XON1, + REG_ST_XON2, + REG_ST_XOFF1, + REG_ST_XOFF2, + REG_ST_ITCR, + REG_ST_ITIP, + REG_ST_ABCR, + REG_ST_ABIMSC, + + /* The size of the array - must be last */ + REG_ARRAY_SIZE, +}; + +static u16 pl011_std_offsets[REG_ARRAY_SIZE] = { + [REG_DR] = UART01x_DR, + [REG_FR] = UART01x_FR, + [REG_LCRH_RX] = UART011_LCRH, + [REG_LCRH_TX] = UART011_LCRH, + [REG_IBRD] = UART011_IBRD, + [REG_FBRD] = UART011_FBRD, + [REG_CR] = UART011_CR, + [REG_IFLS] = UART011_IFLS, + [REG_IMSC] = UART011_IMSC, + [REG_RIS] = UART011_RIS, + [REG_MIS] = UART011_MIS, + [REG_ICR] = UART011_ICR, + [REG_DMACR] = UART011_DMACR, +}; + +/* There is by now at least one vendor with differing details, so handle it */ +struct vendor_data { + const u16 *reg_offset; + unsigned int ifls; + unsigned int fr_busy; + unsigned int fr_dsr; + unsigned int fr_cts; + unsigned int fr_ri; + unsigned int inv_fr; + bool access_32b; + bool oversampling; + bool dma_threshold; + bool cts_event_workaround; + bool always_enabled; + bool fixed_options; + + unsigned int (*get_fifosize)(struct amba_device *dev); +}; + +static unsigned int get_fifosize_arm(struct amba_device *dev) +{ + return amba_rev(dev) < 3 ? 
16 : 32; +} + +static struct vendor_data vendor_arm = { + .reg_offset = pl011_std_offsets, + .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, + .fr_busy = UART01x_FR_BUSY, + .fr_dsr = UART01x_FR_DSR, + .fr_cts = UART01x_FR_CTS, + .fr_ri = UART011_FR_RI, + .oversampling = false, + .dma_threshold = false, + .cts_event_workaround = false, + .always_enabled = false, + .fixed_options = false, + .get_fifosize = get_fifosize_arm, +}; + +static const struct vendor_data vendor_sbsa = { + .reg_offset = pl011_std_offsets, + .fr_busy = UART01x_FR_BUSY, + .fr_dsr = UART01x_FR_DSR, + .fr_cts = UART01x_FR_CTS, + .fr_ri = UART011_FR_RI, + .access_32b = true, + .oversampling = false, + .dma_threshold = false, + .cts_event_workaround = false, + .always_enabled = true, + .fixed_options = true, +}; + +#ifdef CONFIG_ACPI_SPCR_TABLE +static const struct vendor_data vendor_qdt_qdf2400_e44 = { + .reg_offset = pl011_std_offsets, + .fr_busy = UART011_FR_TXFE, + .fr_dsr = UART01x_FR_DSR, + .fr_cts = UART01x_FR_CTS, + .fr_ri = UART011_FR_RI, + .inv_fr = UART011_FR_TXFE, + .access_32b = true, + .oversampling = false, + .dma_threshold = false, + .cts_event_workaround = false, + .always_enabled = true, + .fixed_options = true, +}; +#endif + +static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { + [REG_DR] = UART01x_DR, + [REG_ST_DMAWM] = ST_UART011_DMAWM, + [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT, + [REG_FR] = UART01x_FR, + [REG_LCRH_RX] = ST_UART011_LCRH_RX, + [REG_LCRH_TX] = ST_UART011_LCRH_TX, + [REG_IBRD] = UART011_IBRD, + [REG_FBRD] = UART011_FBRD, + [REG_CR] = UART011_CR, + [REG_IFLS] = UART011_IFLS, + [REG_IMSC] = UART011_IMSC, + [REG_RIS] = UART011_RIS, + [REG_MIS] = UART011_MIS, + [REG_ICR] = UART011_ICR, + [REG_DMACR] = UART011_DMACR, + [REG_ST_XFCR] = ST_UART011_XFCR, + [REG_ST_XON1] = ST_UART011_XON1, + [REG_ST_XON2] = ST_UART011_XON2, + [REG_ST_XOFF1] = ST_UART011_XOFF1, + [REG_ST_XOFF2] = ST_UART011_XOFF2, + [REG_ST_ITCR] = ST_UART011_ITCR, + [REG_ST_ITIP] = ST_UART011_ITIP, + [REG_ST_ABCR] = ST_UART011_ABCR, + [REG_ST_ABIMSC] = ST_UART011_ABIMSC, +}; + +static unsigned int get_fifosize_st(struct amba_device *dev) +{ + return 64; +} + +static struct vendor_data vendor_st = { + .reg_offset = pl011_st_offsets, + .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF, + .fr_busy = UART01x_FR_BUSY, + .fr_dsr = UART01x_FR_DSR, + .fr_cts = UART01x_FR_CTS, + .fr_ri = UART011_FR_RI, + .oversampling = true, + .dma_threshold = true, + .cts_event_workaround = true, + .always_enabled = false, + .fixed_options = false, + .get_fifosize = get_fifosize_st, +}; + +/* Deals with DMA transactions */ + +struct pl011_dmabuf { + dma_addr_t dma; + size_t len; + char *buf; +}; + +struct pl011_dmarx_data { + struct dma_chan *chan; + struct completion complete; + bool use_buf_b; + struct pl011_dmabuf dbuf_a; + struct pl011_dmabuf dbuf_b; + dma_cookie_t cookie; + bool running; + struct timer_list timer; + unsigned int last_residue; + unsigned long last_jiffies; + bool auto_poll_rate; + unsigned int poll_rate; + unsigned int poll_timeout; +}; + +struct pl011_dmatx_data { + struct dma_chan *chan; + dma_addr_t dma; + size_t len; + char *buf; + bool queued; +}; + +/* + * We wrap our port structure around the generic uart_port. 
+ */ +struct uart_amba_port { + struct uart_port port; + const u16 *reg_offset; + struct clk *clk; + const struct vendor_data *vendor; + unsigned int dmacr; /* dma control reg */ + unsigned int im; /* interrupt mask */ + unsigned int old_status; + unsigned int fifosize; /* vendor-specific */ + unsigned int fixed_baud; /* vendor-set fixed baud rate */ + char type[12]; + bool rs485_tx_started; + unsigned int rs485_tx_drain_interval; /* usecs */ +#ifdef CONFIG_DMA_ENGINE + /* DMA stuff */ + bool using_tx_dma; + bool using_rx_dma; + struct pl011_dmarx_data dmarx; + struct pl011_dmatx_data dmatx; + bool dma_probed; +#endif +}; + +static unsigned int pl011_tx_empty(struct uart_port *port); + +static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap, + unsigned int reg) +{ + return uap->reg_offset[reg]; +} + +static unsigned int pl011_read(const struct uart_amba_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); + + return (uap->port.iotype == UPIO_MEM32) ? + readl_relaxed(addr) : readw_relaxed(addr); +} + +static void pl011_write(unsigned int val, const struct uart_amba_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); + + if (uap->port.iotype == UPIO_MEM32) + writel_relaxed(val, addr); + else + writew_relaxed(val, addr); +} + +/* + * Reads up to 256 characters from the FIFO or until it's empty and + * inserts them into the TTY layer. Returns the number of characters + * read from the FIFO. + */ +static int pl011_fifo_to_tty(struct uart_amba_port *uap) +{ + unsigned int ch, flag, fifotaken; + int sysrq; + u16 status; + + for (fifotaken = 0; fifotaken != 256; fifotaken++) { + status = pl011_read(uap, REG_FR); + if (status & UART01x_FR_RXFE) + break; + + /* Take chars from the FIFO and update status */ + ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX; + flag = TTY_NORMAL; + uap->port.icount.rx++; + + if (unlikely(ch & UART_DR_ERROR)) { + if (ch & UART011_DR_BE) { + ch &= ~(UART011_DR_FE | UART011_DR_PE); + uap->port.icount.brk++; + if (uart_handle_break(&uap->port)) + continue; + } else if (ch & UART011_DR_PE) + uap->port.icount.parity++; + else if (ch & UART011_DR_FE) + uap->port.icount.frame++; + if (ch & UART011_DR_OE) + uap->port.icount.overrun++; + + ch &= uap->port.read_status_mask; + + if (ch & UART011_DR_BE) + flag = TTY_BREAK; + else if (ch & UART011_DR_PE) + flag = TTY_PARITY; + else if (ch & UART011_DR_FE) + flag = TTY_FRAME; + } + + spin_unlock(&uap->port.lock); + sysrq = uart_handle_sysrq_char(&uap->port, ch & 255); + spin_lock(&uap->port.lock); + + if (!sysrq) + uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); + } + + return fifotaken; +} + + +/* + * All the DMA operation mode stuff goes inside this ifdef. + * This assumes that you have a generic DMA device interface, + * no custom DMA interfaces are supported. 
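pl011_read() and pl011_write() never hard-code register offsets: every access goes through the per-vendor reg_offset[] table, and the access width (16-bit or 32-bit) is chosen from the port's iotype. A rough user-space model of that double indirection is sketched below, with an in-memory byte array standing in for the ioremapped registers; all names and offset values are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { REG_DR, REG_FR, REG_CR, REG_ARRAY_SIZE };	/* trimmed register list */

static const uint16_t std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = 0x00, [REG_FR] = 0x18, [REG_CR] = 0x30,
};

struct fake_port {
	uint8_t mem[0x100];		/* stands in for the mapped register window */
	const uint16_t *reg_offset;	/* per-vendor offset table */
	int access_32b;			/* mirrors UPIO_MEM32 vs UPIO_MEM */
};

static uint32_t fake_read(const struct fake_port *p, int reg)
{
	const uint8_t *addr = p->mem + p->reg_offset[reg];
	uint32_t v32;
	uint16_t v16;

	if (p->access_32b) {
		memcpy(&v32, addr, sizeof(v32));
		return v32;
	}
	memcpy(&v16, addr, sizeof(v16));
	return v16;
}

static void fake_write(struct fake_port *p, uint32_t val, int reg)
{
	uint8_t *addr = p->mem + p->reg_offset[reg];
	uint16_t v16 = (uint16_t)val;

	if (p->access_32b)
		memcpy(addr, &val, sizeof(val));
	else
		memcpy(addr, &v16, sizeof(v16));
}

int main(void)
{
	struct fake_port p = { .reg_offset = std_offsets, .access_32b = 1 };

	fake_write(&p, 0x301, REG_CR);			/* arbitrary test value */
	printf("CR=0x%x\n", fake_read(&p, REG_CR));
	return 0;
}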
+ */ +#ifdef CONFIG_DMA_ENGINE + +#define PL011_DMA_BUFFER_SIZE PAGE_SIZE + +static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db, + enum dma_data_direction dir) +{ + db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE, + &db->dma, GFP_KERNEL); + if (!db->buf) + return -ENOMEM; + db->len = PL011_DMA_BUFFER_SIZE; + + return 0; +} + +static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db, + enum dma_data_direction dir) +{ + if (db->buf) { + dma_free_coherent(chan->device->dev, + PL011_DMA_BUFFER_SIZE, db->buf, db->dma); + } +} + +static void pl011_dma_probe(struct uart_amba_port *uap) +{ + /* DMA is the sole user of the platform data right now */ + struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev); + struct device *dev = uap->port.dev; + struct dma_slave_config tx_conf = { + .dst_addr = uap->port.mapbase + + pl011_reg_to_offset(uap, REG_DR), + .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .direction = DMA_MEM_TO_DEV, + .dst_maxburst = uap->fifosize >> 1, + .device_fc = false, + }; + struct dma_chan *chan; + dma_cap_mask_t mask; + + uap->dma_probed = true; + chan = dma_request_chan(dev, "tx"); + if (IS_ERR(chan)) { + if (PTR_ERR(chan) == -EPROBE_DEFER) { + uap->dma_probed = false; + return; + } + + /* We need platform data */ + if (!plat || !plat->dma_filter) { + dev_info(uap->port.dev, "no DMA platform data\n"); + return; + } + + /* Try to acquire a generic DMA engine slave TX channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + chan = dma_request_channel(mask, plat->dma_filter, + plat->dma_tx_param); + if (!chan) { + dev_err(uap->port.dev, "no TX DMA channel!\n"); + return; + } + } + + dmaengine_slave_config(chan, &tx_conf); + uap->dmatx.chan = chan; + + dev_info(uap->port.dev, "DMA channel TX %s\n", + dma_chan_name(uap->dmatx.chan)); + + /* Optionally make use of an RX channel as well */ + chan = dma_request_slave_channel(dev, "rx"); + + if (!chan && plat && plat->dma_rx_param) { + chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); + + if (!chan) { + dev_err(uap->port.dev, "no RX DMA channel!\n"); + return; + } + } + + if (chan) { + struct dma_slave_config rx_conf = { + .src_addr = uap->port.mapbase + + pl011_reg_to_offset(uap, REG_DR), + .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .direction = DMA_DEV_TO_MEM, + .src_maxburst = uap->fifosize >> 2, + .device_fc = false, + }; + struct dma_slave_caps caps; + + /* + * Some DMA controllers provide information on their capabilities. + * If the controller does, check for suitable residue processing + * otherwise assime all is well. + */ + if (0 == dma_get_slave_caps(chan, &caps)) { + if (caps.residue_granularity == + DMA_RESIDUE_GRANULARITY_DESCRIPTOR) { + dma_release_channel(chan); + dev_info(uap->port.dev, + "RX DMA disabled - no residue processing\n"); + return; + } + } + dmaengine_slave_config(chan, &rx_conf); + uap->dmarx.chan = chan; + + uap->dmarx.auto_poll_rate = false; + if (plat && plat->dma_rx_poll_enable) { + /* Set poll rate if specified. */ + if (plat->dma_rx_poll_rate) { + uap->dmarx.auto_poll_rate = false; + uap->dmarx.poll_rate = plat->dma_rx_poll_rate; + } else { + /* + * 100 ms defaults to poll rate if not + * specified. This will be adjusted with + * the baud rate at set_termios. + */ + uap->dmarx.auto_poll_rate = true; + uap->dmarx.poll_rate = 100; + } + /* 3 secs defaults poll_timeout if not specified. 
*/ + if (plat->dma_rx_poll_timeout) + uap->dmarx.poll_timeout = + plat->dma_rx_poll_timeout; + else + uap->dmarx.poll_timeout = 3000; + } else if (!plat && dev->of_node) { + uap->dmarx.auto_poll_rate = of_property_read_bool( + dev->of_node, "auto-poll"); + if (uap->dmarx.auto_poll_rate) { + u32 x; + + if (0 == of_property_read_u32(dev->of_node, + "poll-rate-ms", &x)) + uap->dmarx.poll_rate = x; + else + uap->dmarx.poll_rate = 100; + if (0 == of_property_read_u32(dev->of_node, + "poll-timeout-ms", &x)) + uap->dmarx.poll_timeout = x; + else + uap->dmarx.poll_timeout = 3000; + } + } + dev_info(uap->port.dev, "DMA channel RX %s\n", + dma_chan_name(uap->dmarx.chan)); + } +} + +static void pl011_dma_remove(struct uart_amba_port *uap) +{ + if (uap->dmatx.chan) + dma_release_channel(uap->dmatx.chan); + if (uap->dmarx.chan) + dma_release_channel(uap->dmarx.chan); +} + +/* Forward declare these for the refill routine */ +static int pl011_dma_tx_refill(struct uart_amba_port *uap); +static void pl011_start_tx_pio(struct uart_amba_port *uap); + +/* + * The current DMA TX buffer has been sent. + * Try to queue up another DMA buffer. + */ +static void pl011_dma_tx_callback(void *data) +{ + struct uart_amba_port *uap = data; + struct pl011_dmatx_data *dmatx = &uap->dmatx; + unsigned long flags; + u16 dmacr; + + spin_lock_irqsave(&uap->port.lock, flags); + if (uap->dmatx.queued) + dma_unmap_single(dmatx->chan->device->dev, dmatx->dma, + dmatx->len, DMA_TO_DEVICE); + + dmacr = uap->dmacr; + uap->dmacr = dmacr & ~UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + + /* + * If TX DMA was disabled, it means that we've stopped the DMA for + * some reason (eg, XOFF received, or we want to send an X-char.) + * + * Note: we need to be careful here of a potential race between DMA + * and the rest of the driver - if the driver disables TX DMA while + * a TX buffer completing, we must update the tx queued status to + * get further refills (hence we check dmacr). + */ + if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) || + uart_circ_empty(&uap->port.state->xmit)) { + uap->dmatx.queued = false; + spin_unlock_irqrestore(&uap->port.lock, flags); + return; + } + + if (pl011_dma_tx_refill(uap) <= 0) + /* + * We didn't queue a DMA buffer for some reason, but we + * have data pending to be sent. Re-enable the TX IRQ. + */ + pl011_start_tx_pio(uap); + + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +/* + * Try to refill the TX DMA buffer. + * Locking: called with port lock held and IRQs disabled. + * Returns: + * 1 if we queued up a TX DMA buffer. + * 0 if we didn't want to handle this by DMA + * <0 on error + */ +static int pl011_dma_tx_refill(struct uart_amba_port *uap) +{ + struct pl011_dmatx_data *dmatx = &uap->dmatx; + struct dma_chan *chan = dmatx->chan; + struct dma_device *dma_dev = chan->device; + struct dma_async_tx_descriptor *desc; + struct circ_buf *xmit = &uap->port.state->xmit; + unsigned int count; + + /* + * Try to avoid the overhead involved in using DMA if the + * transaction fits in the first half of the FIFO, by using + * the standard interrupt handling. This ensures that we + * issue a uart_write_wakeup() at the appropriate time. + */ + count = uart_circ_chars_pending(xmit); + if (count < (uap->fifosize >> 1)) { + uap->dmatx.queued = false; + return 0; + } + + /* + * Bodge: don't send the last character by DMA, as this + * will prevent XON from notifying us to restart DMA. 
+ */ + count -= 1; + + /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */ + if (count > PL011_DMA_BUFFER_SIZE) + count = PL011_DMA_BUFFER_SIZE; + + if (xmit->tail < xmit->head) + memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count); + else { + size_t first = UART_XMIT_SIZE - xmit->tail; + size_t second; + + if (first > count) + first = count; + second = count - first; + + memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first); + if (second) + memcpy(&dmatx->buf[first], &xmit->buf[0], second); + } + + dmatx->len = count; + dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count, + DMA_TO_DEVICE); + if (dmatx->dma == DMA_MAPPING_ERROR) { + uap->dmatx.queued = false; + dev_dbg(uap->port.dev, "unable to map TX DMA\n"); + return -EBUSY; + } + + desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE); + uap->dmatx.queued = false; + /* + * If DMA cannot be used right now, we complete this + * transaction via IRQ and let the TTY layer retry. + */ + dev_dbg(uap->port.dev, "TX DMA busy\n"); + return -EBUSY; + } + + /* Some data to go along to the callback */ + desc->callback = pl011_dma_tx_callback; + desc->callback_param = uap; + + /* All errors should happen at prepare time */ + dmaengine_submit(desc); + + /* Fire the DMA transaction */ + dma_dev->device_issue_pending(chan); + + uap->dmacr |= UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + uap->dmatx.queued = true; + + /* + * Now we know that DMA will fire, so advance the ring buffer + * with the stuff we just dispatched. + */ + xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uap->port.icount.tx += count; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uap->port); + + return 1; +} + +/* + * We received a transmit interrupt without a pending X-char but with + * pending characters. + * Locking: called with port lock held and IRQs disabled. + * Returns: + * false if we want to use PIO to transmit + * true if we queued a DMA buffer + */ +static bool pl011_dma_tx_irq(struct uart_amba_port *uap) +{ + if (!uap->using_tx_dma) + return false; + + /* + * If we already have a TX buffer queued, but received a + * TX interrupt, it will be because we've just sent an X-char. + * Ensure the TX DMA is enabled and the TX IRQ is disabled. + */ + if (uap->dmatx.queued) { + uap->dmacr |= UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + uap->im &= ~UART011_TXIM; + pl011_write(uap->im, uap, REG_IMSC); + return true; + } + + /* + * We don't have a TX buffer queued, so try to queue one. + * If we successfully queued a buffer, mask the TX IRQ. + */ + if (pl011_dma_tx_refill(uap) > 0) { + uap->im &= ~UART011_TXIM; + pl011_write(uap->im, uap, REG_IMSC); + return true; + } + return false; +} + +/* + * Stop the DMA transmit (eg, due to received XOFF). + * Locking: called with port lock held and IRQs disabled. + */ +static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) +{ + if (uap->dmatx.queued) { + uap->dmacr &= ~UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + } +} + +/* + * Try to start a DMA transmit, or in the case of an XON/OFF + * character queued for send, try to get that character out ASAP. + * Locking: called with port lock held and IRQs disabled. 
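The TX refill path above copies at most one DMA buffer's worth of pending bytes out of the circular xmit buffer, splitting the copy in two when the data wraps past the end of the ring, then advances the tail with the power-of-two mask. The standalone sketch below models just that wrap-aware copy; the ring size and names are made up (the real ring is UART_XMIT_SIZE bytes), but the ring length must be a power of two for the mask arithmetic to hold.

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16		/* stand-in for UART_XMIT_SIZE; power of two */

struct ring { char buf[RING_SIZE]; unsigned int head, tail; };

static unsigned int ring_pending(const struct ring *r)
{
	return (r->head - r->tail) & (RING_SIZE - 1);
}

/* Copy up to max pending bytes into a linear bounce buffer, like the DMA refill. */
static unsigned int ring_to_linear(struct ring *r, char *dst, unsigned int max)
{
	unsigned int count = ring_pending(r);

	if (count > max)
		count = max;

	if (r->tail < r->head) {
		memcpy(dst, &r->buf[r->tail], count);
	} else {			/* data wraps around the end of the ring */
		unsigned int first = RING_SIZE - r->tail;

		if (first > count)
			first = count;
		memcpy(dst, &r->buf[r->tail], first);
		if (count - first)
			memcpy(dst + first, &r->buf[0], count - first);
	}
	r->tail = (r->tail + count) & (RING_SIZE - 1);
	return count;
}

int main(void)
{
	struct ring r = { .head = 4, .tail = 12 };	/* 8 bytes pending, wrapped */
	char bounce[RING_SIZE];

	memcpy(&r.buf[12], "wrap", 4);
	memcpy(&r.buf[0], "data", 4);
	printf("copied %u bytes\n", ring_to_linear(&r, bounce, sizeof(bounce)));
	return 0;
}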
+ * Returns: + * false if we want the TX IRQ to be enabled + * true if we have a buffer queued + */ +static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) +{ + u16 dmacr; + + if (!uap->using_tx_dma) + return false; + + if (!uap->port.x_char) { + /* no X-char, try to push chars out in DMA mode */ + bool ret = true; + + if (!uap->dmatx.queued) { + if (pl011_dma_tx_refill(uap) > 0) { + uap->im &= ~UART011_TXIM; + pl011_write(uap->im, uap, REG_IMSC); + } else + ret = false; + } else if (!(uap->dmacr & UART011_TXDMAE)) { + uap->dmacr |= UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + } + return ret; + } + + /* + * We have an X-char to send. Disable DMA to prevent it loading + * the TX fifo, and then see if we can stuff it into the FIFO. + */ + dmacr = uap->dmacr; + uap->dmacr &= ~UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + + if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) { + /* + * No space in the FIFO, so enable the transmit interrupt + * so we know when there is space. Note that once we've + * loaded the character, we should just re-enable DMA. + */ + return false; + } + + pl011_write(uap->port.x_char, uap, REG_DR); + uap->port.icount.tx++; + uap->port.x_char = 0; + + /* Success - restore the DMA state */ + uap->dmacr = dmacr; + pl011_write(dmacr, uap, REG_DMACR); + + return true; +} + +/* + * Flush the transmit buffer. + * Locking: called with port lock held and IRQs disabled. + */ +static void pl011_dma_flush_buffer(struct uart_port *port) +__releases(&uap->port.lock) +__acquires(&uap->port.lock) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + if (!uap->using_tx_dma) + return; + + dmaengine_terminate_async(uap->dmatx.chan); + + if (uap->dmatx.queued) { + dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma, + uap->dmatx.len, DMA_TO_DEVICE); + uap->dmatx.queued = false; + uap->dmacr &= ~UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + } +} + +static void pl011_dma_rx_callback(void *data); + +static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) +{ + struct dma_chan *rxchan = uap->dmarx.chan; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_async_tx_descriptor *desc; + struct pl011_dmabuf *dbuf; + + if (!rxchan) + return -EIO; + + /* Start the RX DMA job */ + dbuf = uap->dmarx.use_buf_b ? + &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; + desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + /* + * If the DMA engine is busy and cannot prepare a + * channel, no big deal, the driver will fall back + * to interrupt mode as a result of this error code. + */ + if (!desc) { + uap->dmarx.running = false; + dmaengine_terminate_all(rxchan); + return -EBUSY; + } + + /* Some data to go along to the callback */ + desc->callback = pl011_dma_rx_callback; + desc->callback_param = uap; + dmarx->cookie = dmaengine_submit(desc); + dma_async_issue_pending(rxchan); + + uap->dmacr |= UART011_RXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + uap->dmarx.running = true; + + uap->im &= ~UART011_RXIM; + pl011_write(uap->im, uap, REG_IMSC); + + return 0; +} + +/* + * This is called when either the DMA job is complete, or + * the FIFO timeout interrupt occurred. This must be called + * with the port spinlock uap->port.lock held. 
+ */ +static void pl011_dma_rx_chars(struct uart_amba_port *uap, + u32 pending, bool use_buf_b, + bool readfifo) +{ + struct tty_port *port = &uap->port.state->port; + struct pl011_dmabuf *dbuf = use_buf_b ? + &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; + int dma_count = 0; + u32 fifotaken = 0; /* only used for vdbg() */ + + struct pl011_dmarx_data *dmarx = &uap->dmarx; + int dmataken = 0; + + if (uap->dmarx.poll_rate) { + /* The data can be taken by polling */ + dmataken = dbuf->len - dmarx->last_residue; + /* Recalculate the pending size */ + if (pending >= dmataken) + pending -= dmataken; + } + + /* Pick the remain data from the DMA */ + if (pending) { + + /* + * First take all chars in the DMA pipe, then look in the FIFO. + * Note that tty_insert_flip_buf() tries to take as many chars + * as it can. + */ + dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, + pending); + + uap->port.icount.rx += dma_count; + if (dma_count < pending) + dev_warn(uap->port.dev, + "couldn't insert all characters (TTY is full?)\n"); + } + + /* Reset the last_residue for Rx DMA poll */ + if (uap->dmarx.poll_rate) + dmarx->last_residue = dbuf->len; + + /* + * Only continue with trying to read the FIFO if all DMA chars have + * been taken first. + */ + if (dma_count == pending && readfifo) { + /* Clear any error flags */ + pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | + UART011_FEIS, uap, REG_ICR); + + /* + * If we read all the DMA'd characters, and we had an + * incomplete buffer, that could be due to an rx error, or + * maybe we just timed out. Read any pending chars and check + * the error status. + * + * Error conditions will only occur in the FIFO, these will + * trigger an immediate interrupt and stop the DMA job, so we + * will always find the error in the FIFO, never in the DMA + * buffer. + */ + fifotaken = pl011_fifo_to_tty(uap); + } + + dev_vdbg(uap->port.dev, + "Took %d chars from DMA buffer and %d chars from the FIFO\n", + dma_count, fifotaken); + tty_flip_buffer_push(port); +} + +static void pl011_dma_rx_irq(struct uart_amba_port *uap) +{ + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_chan *rxchan = dmarx->chan; + struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? + &dmarx->dbuf_b : &dmarx->dbuf_a; + size_t pending; + struct dma_tx_state state; + enum dma_status dmastat; + + /* + * Pause the transfer so we can trust the current counter, + * do this before we pause the PL011 block, else we may + * overflow the FIFO. + */ + if (dmaengine_pause(rxchan)) + dev_err(uap->port.dev, "unable to pause DMA transfer\n"); + dmastat = rxchan->device->device_tx_status(rxchan, + dmarx->cookie, &state); + if (dmastat != DMA_PAUSED) + dev_err(uap->port.dev, "unable to pause DMA transfer\n"); + + /* Disable RX DMA - incoming data will wait in the FIFO */ + uap->dmacr &= ~UART011_RXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + uap->dmarx.running = false; + + pending = dbuf->len - state.residue; + BUG_ON(pending > PL011_DMA_BUFFER_SIZE); + /* Then we terminate the transfer - we now know our residue */ + dmaengine_terminate_all(rxchan); + + /* + * This will take the chars we have so far and insert + * into the framework. 
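Both the timeout-interrupt path and the poll timer work out how much the DMA engine has actually written by subtracting the reported residue from the buffer length; when polling is active, bytes already pushed to the TTY in earlier polls (tracked via last_residue) are subtracted again so nothing is delivered twice. The small model below reproduces only that arithmetic, with invented buffer sizes and sample numbers.

#include <stdio.h>

#define DMA_BUF_LEN 4096	/* stand-in for PL011_DMA_BUFFER_SIZE */

/*
 * Given the engine's residue (bytes it has NOT yet written) and the residue
 * recorded at the previous poll, return how many new bytes may be handed to
 * the TTY and where they start inside the DMA buffer.
 */
static unsigned int new_rx_bytes(unsigned int residue,
				 unsigned int last_residue,
				 unsigned int *offset)
{
	unsigned int total_written = DMA_BUF_LEN - residue;
	unsigned int already_taken = DMA_BUF_LEN - last_residue;

	*offset = already_taken;
	return total_written - already_taken;
}

int main(void)
{
	unsigned int off;

	/* First poll: engine wrote 100 bytes (residue 3996), nothing taken yet. */
	printf("take %u bytes at offset %u\n",
	       new_rx_bytes(3996, DMA_BUF_LEN, &off), off);

	/* Next poll: 250 bytes written in total, 100 already delivered. */
	printf("take %u bytes at offset %u\n", new_rx_bytes(3846, 3996, &off), off);
	return 0;
}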
+ */ + pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true); + + /* Switch buffer & re-trigger DMA job */ + dmarx->use_buf_b = !dmarx->use_buf_b; + if (pl011_dma_rx_trigger_dma(uap)) { + dev_dbg(uap->port.dev, "could not retrigger RX DMA job " + "fall back to interrupt mode\n"); + uap->im |= UART011_RXIM; + pl011_write(uap->im, uap, REG_IMSC); + } +} + +static void pl011_dma_rx_callback(void *data) +{ + struct uart_amba_port *uap = data; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_chan *rxchan = dmarx->chan; + bool lastbuf = dmarx->use_buf_b; + struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? + &dmarx->dbuf_b : &dmarx->dbuf_a; + size_t pending; + struct dma_tx_state state; + int ret; + + /* + * This completion interrupt occurs typically when the + * RX buffer is totally stuffed but no timeout has yet + * occurred. When that happens, we just want the RX + * routine to flush out the secondary DMA buffer while + * we immediately trigger the next DMA job. + */ + spin_lock_irq(&uap->port.lock); + /* + * Rx data can be taken by the UART interrupts during + * the DMA irq handler. So we check the residue here. + */ + rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); + pending = dbuf->len - state.residue; + BUG_ON(pending > PL011_DMA_BUFFER_SIZE); + /* Then we terminate the transfer - we now know our residue */ + dmaengine_terminate_all(rxchan); + + uap->dmarx.running = false; + dmarx->use_buf_b = !lastbuf; + ret = pl011_dma_rx_trigger_dma(uap); + + pl011_dma_rx_chars(uap, pending, lastbuf, false); + spin_unlock_irq(&uap->port.lock); + /* + * Do this check after we picked the DMA chars so we don't + * get some IRQ immediately from RX. + */ + if (ret) { + dev_dbg(uap->port.dev, "could not retrigger RX DMA job " + "fall back to interrupt mode\n"); + uap->im |= UART011_RXIM; + pl011_write(uap->im, uap, REG_IMSC); + } +} + +/* + * Stop accepting received characters, when we're shutting down or + * suspending this port. + * Locking: called with port lock held and IRQs disabled. + */ +static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) +{ + if (!uap->using_rx_dma) + return; + + /* FIXME. Just disable the DMA enable */ + uap->dmacr &= ~UART011_RXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); +} + +/* + * Timer handler for Rx DMA polling. + * Every polling, It checks the residue in the dma buffer and transfer + * data to the tty. Also, last_residue is updated for the next polling. + */ +static void pl011_dma_rx_poll(struct timer_list *t) +{ + struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer); + struct tty_port *port = &uap->port.state->port; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_chan *rxchan = uap->dmarx.chan; + unsigned long flags; + unsigned int dmataken = 0; + unsigned int size = 0; + struct pl011_dmabuf *dbuf; + int dma_count; + struct dma_tx_state state; + + dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; + rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); + if (likely(state.residue < dmarx->last_residue)) { + dmataken = dbuf->len - dmarx->last_residue; + size = dmarx->last_residue - state.residue; + dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, + size); + if (dma_count == size) + dmarx->last_residue = state.residue; + dmarx->last_jiffies = jiffies; + } + tty_flip_buffer_push(port); + + /* + * If no data is received in poll_timeout, the driver will fall back + * to interrupt mode. We will retrigger DMA at the first interrupt. 
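The RX completion callback flips use_buf_b and immediately re-arms DMA into the other buffer, so the engine can keep filling one buffer while the CPU drains the one that just completed. A compact sketch of that ping-pong discipline follows; the fill and drain steps are placeholders, not kernel APIs.

#include <stdio.h>
#include <string.h>

#define NBUF 2
#define BUF_LEN 8

static char bufs[NBUF][BUF_LEN];

/* Placeholder for "the DMA engine filled this buffer": stamp it with a pattern. */
static unsigned int fake_dma_fill(char *buf, int round)
{
	unsigned int n = (unsigned int)(round % BUF_LEN) + 1;

	memset(buf, 'a' + round, n);
	return n;
}

int main(void)
{
	int use_b = 0;

	for (int round = 0; round < 4; round++) {
		char *done = bufs[use_b];	/* buffer the engine just completed */
		unsigned int got = fake_dma_fill(done, round);

		use_b = !use_b;			/* re-arm "DMA" on the other buffer */
		/* ...then hand 'done' to the consumer (tty_insert_flip_string). */
		printf("round %d: drained %u bytes ('%c') from buffer %d\n",
		       round, got, done[0], !use_b);
	}
	return 0;
}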
+ */ + if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) + > uap->dmarx.poll_timeout) { + + spin_lock_irqsave(&uap->port.lock, flags); + pl011_dma_rx_stop(uap); + uap->im |= UART011_RXIM; + pl011_write(uap->im, uap, REG_IMSC); + spin_unlock_irqrestore(&uap->port.lock, flags); + + uap->dmarx.running = false; + dmaengine_terminate_all(rxchan); + del_timer(&uap->dmarx.timer); + } else { + mod_timer(&uap->dmarx.timer, + jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); + } +} + +static void pl011_dma_startup(struct uart_amba_port *uap) +{ + int ret; + + if (!uap->dma_probed) + pl011_dma_probe(uap); + + if (!uap->dmatx.chan) + return; + + uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA); + if (!uap->dmatx.buf) { + dev_err(uap->port.dev, "no memory for DMA TX buffer\n"); + uap->port.fifosize = uap->fifosize; + return; + } + + uap->dmatx.len = PL011_DMA_BUFFER_SIZE; + + /* The DMA buffer is now the FIFO the TTY subsystem can use */ + uap->port.fifosize = PL011_DMA_BUFFER_SIZE; + uap->using_tx_dma = true; + + if (!uap->dmarx.chan) + goto skip_rx; + + /* Allocate and map DMA RX buffers */ + ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", + "RX buffer A", ret); + goto skip_rx; + } + + ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", + "RX buffer B", ret); + pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, + DMA_FROM_DEVICE); + goto skip_rx; + } + + uap->using_rx_dma = true; + +skip_rx: + /* Turn on DMA error (RX/TX will be enabled on demand) */ + uap->dmacr |= UART011_DMAONERR; + pl011_write(uap->dmacr, uap, REG_DMACR); + + /* + * ST Micro variants has some specific dma burst threshold + * compensation. Set this to 16 bytes, so burst will only + * be issued above/below 16 bytes. 
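The poll timer gives up and re-enables the RX interrupt once no data has arrived for poll_timeout milliseconds; otherwise it simply re-arms itself poll_rate milliseconds ahead. The decision reduces to comparing elapsed time against the timeout, sketched here with plain millisecond counters in place of jiffies; the numbers are just the driver's defaults.

#include <stdio.h>

struct rx_poll_state {
	unsigned long last_progress_ms;	/* last time new RX data was seen */
	unsigned long poll_rate_ms;	/* how often to poll (default 100 ms) */
	unsigned long poll_timeout_ms;	/* give up after this long (default 3000 ms) */
};

/* Returns 1 if the driver should fall back to interrupt mode, 0 to re-arm. */
static int rx_poll_expired(const struct rx_poll_state *st, unsigned long now_ms)
{
	return (now_ms - st->last_progress_ms) > st->poll_timeout_ms;
}

int main(void)
{
	struct rx_poll_state st = {
		.last_progress_ms = 1000, .poll_rate_ms = 100, .poll_timeout_ms = 3000,
	};

	printf("t=2000ms: %s\n", rx_poll_expired(&st, 2000) ?
	       "fall back to IRQ mode" : "re-arm poll timer");
	printf("t=4500ms: %s\n", rx_poll_expired(&st, 4500) ?
	       "fall back to IRQ mode" : "re-arm poll timer");
	return 0;
}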
+ */ + if (uap->vendor->dma_threshold) + pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, + uap, REG_ST_DMAWM); + + if (uap->using_rx_dma) { + if (pl011_dma_rx_trigger_dma(uap)) + dev_dbg(uap->port.dev, "could not trigger initial " + "RX DMA job, fall back to interrupt mode\n"); + if (uap->dmarx.poll_rate) { + timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0); + mod_timer(&uap->dmarx.timer, + jiffies + + msecs_to_jiffies(uap->dmarx.poll_rate)); + uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; + uap->dmarx.last_jiffies = jiffies; + } + } +} + +static void pl011_dma_shutdown(struct uart_amba_port *uap) +{ + if (!(uap->using_tx_dma || uap->using_rx_dma)) + return; + + /* Disable RX and TX DMA */ + while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy) + cpu_relax(); + + spin_lock_irq(&uap->port.lock); + uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); + pl011_write(uap->dmacr, uap, REG_DMACR); + spin_unlock_irq(&uap->port.lock); + + if (uap->using_tx_dma) { + /* In theory, this should already be done by pl011_dma_flush_buffer */ + dmaengine_terminate_all(uap->dmatx.chan); + if (uap->dmatx.queued) { + dma_unmap_single(uap->dmatx.chan->device->dev, + uap->dmatx.dma, uap->dmatx.len, + DMA_TO_DEVICE); + uap->dmatx.queued = false; + } + + kfree(uap->dmatx.buf); + uap->using_tx_dma = false; + } + + if (uap->using_rx_dma) { + dmaengine_terminate_all(uap->dmarx.chan); + /* Clean up the RX DMA */ + pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE); + pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE); + if (uap->dmarx.poll_rate) + del_timer_sync(&uap->dmarx.timer); + uap->using_rx_dma = false; + } +} + +static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) +{ + return uap->using_rx_dma; +} + +static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) +{ + return uap->using_rx_dma && uap->dmarx.running; +} + +#else +/* Blank functions if the DMA engine is not available */ +static inline void pl011_dma_remove(struct uart_amba_port *uap) +{ +} + +static inline void pl011_dma_startup(struct uart_amba_port *uap) +{ +} + +static inline void pl011_dma_shutdown(struct uart_amba_port *uap) +{ +} + +static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap) +{ + return false; +} + +static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) +{ +} + +static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) +{ + return false; +} + +static inline void pl011_dma_rx_irq(struct uart_amba_port *uap) +{ +} + +static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) +{ +} + +static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) +{ + return -EIO; +} + +static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) +{ + return false; +} + +static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) +{ + return false; +} + +#define pl011_dma_flush_buffer NULL +#endif + +static void pl011_rs485_tx_stop(struct uart_amba_port *uap) +{ + /* + * To be on the safe side only time out after twice as many iterations + * as fifo size. 
+ */ + const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2; + struct uart_port *port = &uap->port; + int i = 0; + u32 cr; + + /* Wait until hardware tx queue is empty */ + while (!pl011_tx_empty(port)) { + if (i > MAX_TX_DRAIN_ITERS) { + dev_warn(port->dev, + "timeout while draining hardware tx queue\n"); + break; + } + + udelay(uap->rs485_tx_drain_interval); + i++; + } + + if (port->rs485.delay_rts_after_send) + mdelay(port->rs485.delay_rts_after_send); + + cr = pl011_read(uap, REG_CR); + + if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + cr &= ~UART011_CR_RTS; + else + cr |= UART011_CR_RTS; + + /* Disable the transmitter and reenable the transceiver */ + cr &= ~UART011_CR_TXE; + cr |= UART011_CR_RXE; + pl011_write(cr, uap, REG_CR); + + uap->rs485_tx_started = false; +} + +static void pl011_stop_tx(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + uap->im &= ~UART011_TXIM; + pl011_write(uap->im, uap, REG_IMSC); + pl011_dma_tx_stop(uap); + + if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) + pl011_rs485_tx_stop(uap); +} + +static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq); + +/* Start TX with programmed I/O only (no DMA) */ +static void pl011_start_tx_pio(struct uart_amba_port *uap) +{ + if (pl011_tx_chars(uap, false)) { + uap->im |= UART011_TXIM; + pl011_write(uap->im, uap, REG_IMSC); + } +} + +static void pl011_start_tx(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + if (!pl011_dma_tx_start(uap)) + pl011_start_tx_pio(uap); +} + +static void pl011_stop_rx(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM| + UART011_PEIM|UART011_BEIM|UART011_OEIM); + pl011_write(uap->im, uap, REG_IMSC); + + pl011_dma_rx_stop(uap); +} + +static void pl011_throttle_rx(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + pl011_stop_rx(port); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void pl011_enable_ms(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM; + pl011_write(uap->im, uap, REG_IMSC); +} + +static void pl011_rx_chars(struct uart_amba_port *uap) +__releases(&uap->port.lock) +__acquires(&uap->port.lock) +{ + pl011_fifo_to_tty(uap); + + spin_unlock(&uap->port.lock); + tty_flip_buffer_push(&uap->port.state->port); + /* + * If we were temporarily out of DMA mode for a while, + * attempt to switch back to DMA mode again. 
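pl011_rs485_tx_stop() above refuses to spin forever: it polls pl011_tx_empty() once per character time and bails out with a warning after twice the FIFO depth worth of iterations. The bound can be modelled without hardware at all; the fifo size and the empty-check callback below are stand-ins for the real port state.

#include <stdio.h>

/* Poll until tx_empty() reports true, but never more than 2 * fifo_size times. */
static int drain_tx(int (*tx_empty)(void *ctx), void *ctx, unsigned int fifo_size)
{
	const unsigned int max_iters = fifo_size * 2;

	for (unsigned int i = 0; i <= max_iters; i++) {
		if (tx_empty(ctx))
			return 0;	/* drained in time */
		/* real driver: udelay(rs485_tx_drain_interval) between polls */
	}
	return -1;			/* timed out; warn and carry on */
}

/* Fake FIFO that needs a few polls before it reports empty. */
static int fake_tx_empty(void *ctx)
{
	int *remaining = ctx;

	return (*remaining)-- <= 0;
}

int main(void)
{
	int pending = 5;

	if (drain_tx(fake_tx_empty, &pending, 32))
		printf("timeout while draining tx queue\n");
	else
		printf("tx queue drained\n");
	return 0;
}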
+ */ + if (pl011_dma_rx_available(uap)) { + if (pl011_dma_rx_trigger_dma(uap)) { + dev_dbg(uap->port.dev, "could not trigger RX DMA job " + "fall back to interrupt mode again\n"); + uap->im |= UART011_RXIM; + pl011_write(uap->im, uap, REG_IMSC); + } else { +#ifdef CONFIG_DMA_ENGINE + /* Start Rx DMA poll */ + if (uap->dmarx.poll_rate) { + uap->dmarx.last_jiffies = jiffies; + uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; + mod_timer(&uap->dmarx.timer, + jiffies + + msecs_to_jiffies(uap->dmarx.poll_rate)); + } +#endif + } + } + spin_lock(&uap->port.lock); +} + +static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, + bool from_irq) +{ + if (unlikely(!from_irq) && + pl011_read(uap, REG_FR) & UART01x_FR_TXFF) + return false; /* unable to transmit character */ + + pl011_write(c, uap, REG_DR); + uap->port.icount.tx++; + + return true; +} + +static void pl011_rs485_tx_start(struct uart_amba_port *uap) +{ + struct uart_port *port = &uap->port; + u32 cr; + + /* Enable transmitter */ + cr = pl011_read(uap, REG_CR); + cr |= UART011_CR_TXE; + + /* Disable receiver if half-duplex */ + if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) + cr &= ~UART011_CR_RXE; + + if (port->rs485.flags & SER_RS485_RTS_ON_SEND) + cr &= ~UART011_CR_RTS; + else + cr |= UART011_CR_RTS; + + pl011_write(cr, uap, REG_CR); + + if (port->rs485.delay_rts_before_send) + mdelay(port->rs485.delay_rts_before_send); + + uap->rs485_tx_started = true; +} + +/* Returns true if tx interrupts have to be (kept) enabled */ +static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) +{ + struct circ_buf *xmit = &uap->port.state->xmit; + int count = uap->fifosize >> 1; + + if ((uap->port.rs485.flags & SER_RS485_ENABLED) && + !uap->rs485_tx_started) + pl011_rs485_tx_start(uap); + + if (uap->port.x_char) { + if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) + return true; + uap->port.x_char = 0; + --count; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { + pl011_stop_tx(&uap->port); + return false; + } + + /* If we are using DMA mode, try to send some characters. */ + if (pl011_dma_tx_irq(uap)) + return true; + + do { + if (likely(from_irq) && count-- == 0) + break; + + if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq)) + break; + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + } while (!uart_circ_empty(xmit)); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uap->port); + + if (uart_circ_empty(xmit)) { + pl011_stop_tx(&uap->port); + return false; + } + return true; +} + +static void pl011_modem_status(struct uart_amba_port *uap) +{ + unsigned int status, delta; + + status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; + + delta = status ^ uap->old_status; + uap->old_status = status; + + if (!delta) + return; + + if (delta & UART01x_FR_DCD) + uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD); + + if (delta & uap->vendor->fr_dsr) + uap->port.icount.dsr++; + + if (delta & uap->vendor->fr_cts) + uart_handle_cts_change(&uap->port, + status & uap->vendor->fr_cts); + + wake_up_interruptible(&uap->port.state->port.delta_msr_wait); +} + +static void check_apply_cts_event_workaround(struct uart_amba_port *uap) +{ + if (!uap->vendor->cts_event_workaround) + return; + + /* workaround to make sure that all bits are unlocked.. 
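pl011_modem_status() detects which modem lines changed by XOR-ing the current flag-register snapshot against the previously saved one; only lines whose delta bit is set bump counters or wake waiters. The same edge-detection idiom, reduced to a few invented bit definitions:

#include <stdio.h>

#define FR_CTS 0x01	/* illustrative bit positions, not the real FR layout */
#define FR_DSR 0x02
#define FR_DCD 0x04

static void report_modem_changes(unsigned int *old_status, unsigned int status)
{
	unsigned int delta = status ^ *old_status;	/* bits that toggled */

	*old_status = status;
	if (!delta)
		return;

	if (delta & FR_DCD)
		printf("DCD changed, now %s\n", (status & FR_DCD) ? "on" : "off");
	if (delta & FR_DSR)
		printf("DSR toggled\n");
	if (delta & FR_CTS)
		printf("CTS changed, now %s\n", (status & FR_CTS) ? "on" : "off");
}

int main(void)
{
	unsigned int old = 0;

	report_modem_changes(&old, FR_CTS);		/* CTS asserted */
	report_modem_changes(&old, FR_CTS | FR_DCD);	/* DCD asserted too */
	report_modem_changes(&old, FR_CTS | FR_DCD);	/* no change: stays silent */
	return 0;
}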
*/ + pl011_write(0x00, uap, REG_ICR); + + /* + * WA: introduce 26ns(1 uart clk) delay before W1C; + * single apb access will incur 2 pclk(133.12Mhz) delay, + * so add 2 dummy reads + */ + pl011_read(uap, REG_ICR); + pl011_read(uap, REG_ICR); +} + +static irqreturn_t pl011_int(int irq, void *dev_id) +{ + struct uart_amba_port *uap = dev_id; + unsigned long flags; + unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT; + int handled = 0; + + spin_lock_irqsave(&uap->port.lock, flags); + status = pl011_read(uap, REG_RIS) & uap->im; + if (status) { + do { + check_apply_cts_event_workaround(uap); + + pl011_write(status & ~(UART011_TXIS|UART011_RTIS| + UART011_RXIS), + uap, REG_ICR); + + if (status & (UART011_RTIS|UART011_RXIS)) { + if (pl011_dma_rx_running(uap)) + pl011_dma_rx_irq(uap); + else + pl011_rx_chars(uap); + } + if (status & (UART011_DSRMIS|UART011_DCDMIS| + UART011_CTSMIS|UART011_RIMIS)) + pl011_modem_status(uap); + if (status & UART011_TXIS) + pl011_tx_chars(uap, true); + + if (pass_counter-- == 0) + break; + + status = pl011_read(uap, REG_RIS) & uap->im; + } while (status != 0); + handled = 1; + } + + spin_unlock_irqrestore(&uap->port.lock, flags); + + return IRQ_RETVAL(handled); +} + +static unsigned int pl011_tx_empty(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + /* Allow feature register bits to be inverted to work around errata */ + unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr; + + return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ? + 0 : TIOCSER_TEMT; +} + +static unsigned int pl011_get_mctrl(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int result = 0; + unsigned int status = pl011_read(uap, REG_FR); + +#define TIOCMBIT(uartbit, tiocmbit) \ + if (status & uartbit) \ + result |= tiocmbit + + TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR); + TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR); + TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS); + TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG); +#undef TIOCMBIT + return result; +} + +static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int cr; + + cr = pl011_read(uap, REG_CR); + +#define TIOCMBIT(tiocmbit, uartbit) \ + if (mctrl & tiocmbit) \ + cr |= uartbit; \ + else \ + cr &= ~uartbit + + TIOCMBIT(TIOCM_RTS, UART011_CR_RTS); + TIOCMBIT(TIOCM_DTR, UART011_CR_DTR); + TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1); + TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2); + TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE); + + if (port->status & UPSTAT_AUTORTS) { + /* We need to disable auto-RTS if we want to turn RTS off */ + TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN); + } +#undef TIOCMBIT + + pl011_write(cr, uap, REG_CR); +} + +static void pl011_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned long flags; + unsigned int lcr_h; + + spin_lock_irqsave(&uap->port.lock, flags); + lcr_h = pl011_read(uap, REG_LCRH_TX); + if (break_state == -1) + lcr_h |= UART01x_LCRH_BRK; + else + lcr_h &= ~UART01x_LCRH_BRK; + pl011_write(lcr_h, uap, REG_LCRH_TX); + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +#ifdef CONFIG_CONSOLE_POLL + +static void pl011_quiesce_irqs(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR); + /* + * 
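pl011_get_mctrl() and pl011_set_mctrl() are pure bit translation between the flag/control registers and the TIOCM_* ioctl bits, which the driver expresses with a short local TIOCMBIT macro. The same translation can be written as a lookup table, as in the sketch below; the hardware bit values are placeholders (the real UART01x_FR_* definitions live in the amba/serial.h header), while the TIOCM_* values match the standard termios modem bits.

#include <stdio.h>

#define HW_DCD 0x01	/* placeholder flag-register bits */
#define HW_DSR 0x02
#define HW_CTS 0x04
#define HW_RI  0x08

#define TIOCM_CTS 0x020	/* subset of the standard termios modem bits */
#define TIOCM_CAR 0x040
#define TIOCM_RNG 0x080
#define TIOCM_DSR 0x100

static const struct { unsigned int hw, tiocm; } mctrl_map[] = {
	{ HW_DCD, TIOCM_CAR }, { HW_DSR, TIOCM_DSR },
	{ HW_CTS, TIOCM_CTS }, { HW_RI,  TIOCM_RNG },
};

static unsigned int fr_to_mctrl(unsigned int fr)
{
	unsigned int result = 0;

	for (unsigned int i = 0; i < sizeof(mctrl_map) / sizeof(mctrl_map[0]); i++)
		if (fr & mctrl_map[i].hw)
			result |= mctrl_map[i].tiocm;
	return result;
}

int main(void)
{
	printf("mctrl=0x%x\n", fr_to_mctrl(HW_DCD | HW_CTS));	/* CAR | CTS */
	return 0;
}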
There is no way to clear TXIM as this is "ready to transmit IRQ", so + * we simply mask it. start_tx() will unmask it. + * + * Note we can race with start_tx(), and if the race happens, the + * polling user might get another interrupt just after we clear it. + * But it should be OK and can happen even w/o the race, e.g. + * controller immediately got some new data and raised the IRQ. + * + * And whoever uses polling routines assumes that it manages the device + * (including tx queue), so we're also fine with start_tx()'s caller + * side. + */ + pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap, + REG_IMSC); +} + +static int pl011_get_poll_char(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int status; + + /* + * The caller might need IRQs lowered, e.g. if used with KDB NMI + * debugger. + */ + pl011_quiesce_irqs(port); + + status = pl011_read(uap, REG_FR); + if (status & UART01x_FR_RXFE) + return NO_POLL_CHAR; + + return pl011_read(uap, REG_DR); +} + +static void pl011_put_poll_char(struct uart_port *port, + unsigned char ch) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) + cpu_relax(); + + pl011_write(ch, uap, REG_DR); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +static int pl011_hwinit(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + int retval; + + /* Optionaly enable pins to be muxed in and configured */ + pinctrl_pm_select_default_state(port->dev); + + /* + * Try to enable the clock producer. + */ + retval = clk_prepare_enable(uap->clk); + if (retval) + return retval; + + uap->port.uartclk = clk_get_rate(uap->clk); + + /* Clear pending error and receive interrupts */ + pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | + UART011_FEIS | UART011_RTIS | UART011_RXIS, + uap, REG_ICR); + + /* + * Save interrupts enable mask, and enable RX interrupts in case if + * the interrupt is used for NMI entry. + */ + uap->im = pl011_read(uap, REG_IMSC); + pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC); + + if (dev_get_platdata(uap->port.dev)) { + struct amba_pl011_data *plat; + + plat = dev_get_platdata(uap->port.dev); + if (plat->init) + plat->init(); + } + return 0; +} + +static bool pl011_split_lcrh(const struct uart_amba_port *uap) +{ + return pl011_reg_to_offset(uap, REG_LCRH_RX) != + pl011_reg_to_offset(uap, REG_LCRH_TX); +} + +static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h) +{ + pl011_write(lcr_h, uap, REG_LCRH_RX); + if (pl011_split_lcrh(uap)) { + int i; + /* + * Wait 10 PCLKs before writing LCRH_TX register, + * to get this delay write read only register 10 times + */ + for (i = 0; i < 10; ++i) + pl011_write(0xff, uap, REG_MIS); + pl011_write(lcr_h, uap, REG_LCRH_TX); + } +} + +static int pl011_allocate_irq(struct uart_amba_port *uap) +{ + pl011_write(uap->im, uap, REG_IMSC); + + return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap); +} + +/* + * Enable interrupts, only timeouts when using DMA + * if initial RX DMA job failed, start in interrupt mode + * as well. 
+ */ +static void pl011_enable_interrupts(struct uart_amba_port *uap) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&uap->port.lock, flags); + + /* Clear out any spuriously appearing RX interrupts */ + pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); + + /* + * RXIS is asserted only when the RX FIFO transitions from below + * to above the trigger threshold. If the RX FIFO is already + * full to the threshold this can't happen and RXIS will now be + * stuck off. Drain the RX FIFO explicitly to fix this: + */ + for (i = 0; i < uap->fifosize * 2; ++i) { + if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE) + break; + + pl011_read(uap, REG_DR); + } + + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; + pl011_write(uap->im, uap, REG_IMSC); + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +static void pl011_unthrottle_rx(struct uart_port *port) +{ + struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + unsigned long flags; + + spin_lock_irqsave(&uap->port.lock, flags); + + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; + + pl011_write(uap->im, uap, REG_IMSC); + + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +static int pl011_startup(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int cr; + int retval; + + retval = pl011_hwinit(port); + if (retval) + goto clk_dis; + + retval = pl011_allocate_irq(uap); + if (retval) + goto clk_dis; + + pl011_write(uap->vendor->ifls, uap, REG_IFLS); + + spin_lock_irq(&uap->port.lock); + + cr = pl011_read(uap, REG_CR); + cr &= UART011_CR_RTS | UART011_CR_DTR; + cr |= UART01x_CR_UARTEN | UART011_CR_RXE; + + if (!(port->rs485.flags & SER_RS485_ENABLED)) + cr |= UART011_CR_TXE; + + pl011_write(cr, uap, REG_CR); + + spin_unlock_irq(&uap->port.lock); + + /* + * initialise the old status of the modem signals + */ + uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; + + /* Startup DMA */ + pl011_dma_startup(uap); + + pl011_enable_interrupts(uap); + + return 0; + + clk_dis: + clk_disable_unprepare(uap->clk); + return retval; +} + +static int sbsa_uart_startup(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + int retval; + + retval = pl011_hwinit(port); + if (retval) + return retval; + + retval = pl011_allocate_irq(uap); + if (retval) + return retval; + + /* The SBSA UART does not support any modem status lines. */ + uap->old_status = 0; + + pl011_enable_interrupts(uap); + + return 0; +} + +static void pl011_shutdown_channel(struct uart_amba_port *uap, + unsigned int lcrh) +{ + unsigned long val; + + val = pl011_read(uap, lcrh); + val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); + pl011_write(val, uap, lcrh); +} + +/* + * disable the port. It should not disable RTS and DTR. + * Also RTS and DTR state should be preserved to restore + * it during startup(). 
+ */ +static void pl011_disable_uart(struct uart_amba_port *uap) +{ + unsigned int cr; + + uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + spin_lock_irq(&uap->port.lock); + cr = pl011_read(uap, REG_CR); + cr &= UART011_CR_RTS | UART011_CR_DTR; + cr |= UART01x_CR_UARTEN | UART011_CR_TXE; + pl011_write(cr, uap, REG_CR); + spin_unlock_irq(&uap->port.lock); + + /* + * disable break condition and fifos + */ + pl011_shutdown_channel(uap, REG_LCRH_RX); + if (pl011_split_lcrh(uap)) + pl011_shutdown_channel(uap, REG_LCRH_TX); +} + +static void pl011_disable_interrupts(struct uart_amba_port *uap) +{ + spin_lock_irq(&uap->port.lock); + + /* mask all interrupts and clear all pending ones */ + uap->im = 0; + pl011_write(uap->im, uap, REG_IMSC); + pl011_write(0xffff, uap, REG_ICR); + + spin_unlock_irq(&uap->port.lock); +} + +static void pl011_shutdown(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + pl011_disable_interrupts(uap); + + pl011_dma_shutdown(uap); + + if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) + pl011_rs485_tx_stop(uap); + + free_irq(uap->port.irq, uap); + + pl011_disable_uart(uap); + + /* + * Shut down the clock producer + */ + clk_disable_unprepare(uap->clk); + /* Optionally let pins go into sleep states */ + pinctrl_pm_select_sleep_state(port->dev); + + if (dev_get_platdata(uap->port.dev)) { + struct amba_pl011_data *plat; + + plat = dev_get_platdata(uap->port.dev); + if (plat->exit) + plat->exit(); + } + + if (uap->port.ops->flush_buffer) + uap->port.ops->flush_buffer(port); +} + +static void sbsa_uart_shutdown(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + pl011_disable_interrupts(uap); + + free_irq(uap->port.irq, uap); + + if (uap->port.ops->flush_buffer) + uap->port.ops->flush_buffer(port); +} + +static void +pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios) +{ + port->read_status_mask = UART011_DR_OE | 255; + if (termios->c_iflag & INPCK) + port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= UART011_DR_BE; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= UART011_DR_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART011_DR_OE; + } + + /* + * Ignore all characters if CREAD is not set. + */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_DR_RX; +} + +static void +pl011_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned int lcr_h, old_cr; + unsigned long flags; + unsigned int baud, quot, clkdiv; + unsigned int bits; + + if (uap->vendor->oversampling) + clkdiv = 8; + else + clkdiv = 16; + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 0, + port->uartclk / clkdiv); +#ifdef CONFIG_DMA_ENGINE + /* + * Adjust RX DMA polling rate with baud rate if not specified. 
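pl011_setup_status_masks() turns the termios input flags into two bitmasks: read_status_mask selects which error conditions are reported upward, and ignore_status_mask selects which received characters are dropped outright (everything, when CREAD is clear). A condensed user-space model of that mapping follows; the flag and error-bit values are invented, and a few of the less common flags (BRKINT, PARMRK) are omitted for brevity.

#include <stdio.h>

#define ERR_OE 0x800	/* invented stand-ins for the DR error bits */
#define ERR_BE 0x400
#define ERR_PE 0x200
#define ERR_FE 0x100
#define DUMMY_RX 0x10000	/* marks "ignore every character" (CREAD off) */

#define F_INPCK  0x01	/* invented stand-ins for the termios iflags/cflags */
#define F_IGNBRK 0x02
#define F_IGNPAR 0x04
#define F_CREAD  0x08

struct masks { unsigned int read, ignore; };

static struct masks setup_status_masks(unsigned int flags)
{
	struct masks m = { .read = ERR_OE | 255, .ignore = 0 };

	if (flags & F_INPCK)
		m.read |= ERR_FE | ERR_PE;	/* report framing/parity errors */
	if (flags & F_IGNPAR)
		m.ignore |= ERR_FE | ERR_PE;
	if (flags & F_IGNBRK) {
		m.ignore |= ERR_BE;
		if (flags & F_IGNPAR)
			m.ignore |= ERR_OE;	/* raw mode: drop overruns too */
	}
	if (!(flags & F_CREAD))
		m.ignore |= DUMMY_RX;
	return m;
}

int main(void)
{
	struct masks m = setup_status_masks(F_IGNBRK | F_IGNPAR);

	printf("read=0x%x ignore=0x%x\n", m.read, m.ignore);
	return 0;
}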
+ */ + if (uap->dmarx.auto_poll_rate) + uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); +#endif + + if (baud > port->uartclk/16) + quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); + else + quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr_h = UART01x_LCRH_WLEN_5; + break; + case CS6: + lcr_h = UART01x_LCRH_WLEN_6; + break; + case CS7: + lcr_h = UART01x_LCRH_WLEN_7; + break; + default: // CS8 + lcr_h = UART01x_LCRH_WLEN_8; + break; + } + if (termios->c_cflag & CSTOPB) + lcr_h |= UART01x_LCRH_STP2; + if (termios->c_cflag & PARENB) { + lcr_h |= UART01x_LCRH_PEN; + if (!(termios->c_cflag & PARODD)) + lcr_h |= UART01x_LCRH_EPS; + if (termios->c_cflag & CMSPAR) + lcr_h |= UART011_LCRH_SPS; + } + if (uap->fifosize > 1) + lcr_h |= UART01x_LCRH_FEN; + + bits = tty_get_frame_size(termios->c_cflag); + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* + * Calculate the approximated time it takes to transmit one character + * with the given baud rate. We use this as the poll interval when we + * wait for the tx queue to empty. + */ + uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); + + pl011_setup_status_masks(port, termios); + + if (UART_ENABLE_MS(port, termios->c_cflag)) + pl011_enable_ms(port); + + if (port->rs485.flags & SER_RS485_ENABLED) + termios->c_cflag &= ~CRTSCTS; + + old_cr = pl011_read(uap, REG_CR); + + if (termios->c_cflag & CRTSCTS) { + if (old_cr & UART011_CR_RTS) + old_cr |= UART011_CR_RTSEN; + + old_cr |= UART011_CR_CTSEN; + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + } else { + old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN); + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + } + + if (uap->vendor->oversampling) { + if (baud > port->uartclk / 16) + old_cr |= ST_UART011_CR_OVSFACT; + else + old_cr &= ~ST_UART011_CR_OVSFACT; + } + + /* + * Workaround for the ST Micro oversampling variants to + * increase the bitrate slightly, by lowering the divisor, + * to avoid delayed sampling of start bit at high speeds, + * else we see data corruption. + */ + if (uap->vendor->oversampling) { + if ((baud >= 3000000) && (baud < 3250000) && (quot > 1)) + quot -= 1; + else if ((baud > 3250000) && (quot > 2)) + quot -= 2; + } + /* Set baud rate */ + pl011_write(quot & 0x3f, uap, REG_FBRD); + pl011_write(quot >> 6, uap, REG_IBRD); + + /* + * ----------v----------v----------v----------v----- + * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER + * REG_FBRD & REG_IBRD. + * ----------^----------^----------^----------^----- + */ + pl011_write_lcr_h(uap, lcr_h); + pl011_write(old_cr, uap, REG_CR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void +sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + unsigned long flags; + + tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud); + + /* The SBSA UART only supports 8n1 without hardware flow control. 
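The PL011 baud divisor is a 16.6 fixed-point value: with the standard 16x oversampling, divisor = uartclk / (16 * baud), which set_termios computes as round(uartclk * 4 / baud) so that the low six bits become FBRD and the rest IBRD. The console-setup path later inverts the same formula, and the RS-485 drain interval is simply one character time in microseconds. A small stand-alone calculation, assuming a 24 MHz reference clock and 10 bits per character (these numbers are illustrative, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define DIV_ROUND_UP(x, d)      (((x) + (d) - 1) / (d))

int main(void)
{
	unsigned long uartclk = 24000000;	/* assumed reference clock */
	unsigned int baud = 115200;
	unsigned int bits_per_char = 10;	/* 8N1: start + 8 data + stop */

	/* 16x oversampling: quot is the divisor in 16.6 fixed point. */
	unsigned int quot = DIV_ROUND_CLOSEST(uartclk * 4, baud);
	unsigned int ibrd = quot >> 6, fbrd = quot & 0x3f;

	/* What the console-setup path would compute back from IBRD/FBRD. */
	unsigned int baud_back = uartclk * 4 / (64 * ibrd + fbrd);

	/* Approximate time to shift out one character, in microseconds. */
	unsigned int drain_us = DIV_ROUND_UP(bits_per_char * 1000 * 1000, baud);

	printf("quot=%u IBRD=%u FBRD=%u -> ~%u baud, %u us/char\n",
	       quot, ibrd, fbrd, baud_back, drain_us);
	return 0;
}

With these assumptions the program prints IBRD=13, FBRD=1, which reconstructs to roughly 115246 baud and an 87 us character time.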
*/ + termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD); + termios->c_cflag &= ~(CMSPAR | CRTSCTS); + termios->c_cflag |= CS8 | CLOCAL; + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, CS8, uap->fixed_baud); + pl011_setup_status_masks(port, termios); + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *pl011_type(struct uart_port *port) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + return uap->port.type == PORT_AMBA ? uap->type : NULL; +} + +/* + * Configure/autoconfigure the port. + */ +static void pl011_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_AMBA; +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). + */ +static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= nr_irqs) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + if (port->mapbase != (unsigned long) ser->iomem_base) + ret = -EINVAL; + return ret; +} + +static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + if (port->rs485.flags & SER_RS485_ENABLED) + pl011_rs485_tx_stop(uap); + + /* Make sure auto RTS is disabled */ + if (rs485->flags & SER_RS485_ENABLED) { + u32 cr = pl011_read(uap, REG_CR); + + cr &= ~UART011_CR_RTSEN; + pl011_write(cr, uap, REG_CR); + port->status &= ~UPSTAT_AUTORTS; + } + + return 0; +} + +static const struct uart_ops amba_pl011_pops = { + .tx_empty = pl011_tx_empty, + .set_mctrl = pl011_set_mctrl, + .get_mctrl = pl011_get_mctrl, + .stop_tx = pl011_stop_tx, + .start_tx = pl011_start_tx, + .stop_rx = pl011_stop_rx, + .throttle = pl011_throttle_rx, + .unthrottle = pl011_unthrottle_rx, + .enable_ms = pl011_enable_ms, + .break_ctl = pl011_break_ctl, + .startup = pl011_startup, + .shutdown = pl011_shutdown, + .flush_buffer = pl011_dma_flush_buffer, + .set_termios = pl011_set_termios, + .type = pl011_type, + .config_port = pl011_config_port, + .verify_port = pl011_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_init = pl011_hwinit, + .poll_get_char = pl011_get_poll_char, + .poll_put_char = pl011_put_poll_char, +#endif +}; + +static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int sbsa_uart_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static const struct uart_ops sbsa_uart_pops = { + .tx_empty = pl011_tx_empty, + .set_mctrl = sbsa_uart_set_mctrl, + .get_mctrl = sbsa_uart_get_mctrl, + .stop_tx = pl011_stop_tx, + .start_tx = pl011_start_tx, + .stop_rx = pl011_stop_rx, + .startup = sbsa_uart_startup, + .shutdown = sbsa_uart_shutdown, + .set_termios = sbsa_uart_set_termios, + .type = pl011_type, + .config_port = pl011_config_port, + .verify_port = pl011_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_init = pl011_hwinit, + .poll_get_char = pl011_get_poll_char, + .poll_put_char = pl011_put_poll_char, +#endif +}; + +static struct uart_amba_port *amba_ports[UART_NR]; + +#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE + +static void pl011_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct uart_amba_port *uap = + container_of(port, struct uart_amba_port, port); + + while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) + cpu_relax(); + pl011_write(ch, uap, REG_DR); +} + +static void 
+pl011_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_amba_port *uap = amba_ports[co->index]; + unsigned int old_cr = 0, new_cr; + unsigned long flags; + int locked = 1; + + clk_enable(uap->clk); + + local_irq_save(flags); + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&uap->port.lock); + else + spin_lock(&uap->port.lock); + + /* + * First save the CR then disable the interrupts + */ + if (!uap->vendor->always_enabled) { + old_cr = pl011_read(uap, REG_CR); + new_cr = old_cr & ~UART011_CR_CTSEN; + new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; + pl011_write(new_cr, uap, REG_CR); + } + + uart_console_write(&uap->port, s, count, pl011_console_putchar); + + /* + * Finally, wait for transmitter to become empty and restore the + * TCR. Allow feature register bits to be inverted to work around + * errata. + */ + while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) + & uap->vendor->fr_busy) + cpu_relax(); + if (!uap->vendor->always_enabled) + pl011_write(old_cr, uap, REG_CR); + + if (locked) + spin_unlock(&uap->port.lock); + local_irq_restore(flags); + + clk_disable(uap->clk); +} + +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) +{ + if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { + unsigned int lcr_h, ibrd, fbrd; + + lcr_h = pl011_read(uap, REG_LCRH_TX); + + *parity = 'n'; + if (lcr_h & UART01x_LCRH_PEN) { + if (lcr_h & UART01x_LCRH_EPS) + *parity = 'e'; + else + *parity = 'o'; + } + + if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7) + *bits = 7; + else + *bits = 8; + + ibrd = pl011_read(uap, REG_IBRD); + fbrd = pl011_read(uap, REG_FBRD); + + *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); + + if (uap->vendor->oversampling) { + if (pl011_read(uap, REG_CR) + & ST_UART011_CR_OVSFACT) + *baud *= 2; + } + } +} + +static int pl011_console_setup(struct console *co, char *options) +{ + struct uart_amba_port *uap; + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index >= UART_NR) + co->index = 0; + uap = amba_ports[co->index]; + if (!uap) + return -ENODEV; + + /* Allow pins to be muxed in and configured */ + pinctrl_pm_select_default_state(uap->port.dev); + + ret = clk_prepare(uap->clk); + if (ret) + return ret; + + if (dev_get_platdata(uap->port.dev)) { + struct amba_pl011_data *plat; + + plat = dev_get_platdata(uap->port.dev); + if (plat->init) + plat->init(); + } + + uap->port.uartclk = clk_get_rate(uap->clk); + + if (uap->vendor->fixed_options) { + baud = uap->fixed_baud; + } else { + if (options) + uart_parse_options(options, + &baud, &parity, &bits, &flow); + else + pl011_console_get_options(uap, &baud, &parity, &bits); + } + + return uart_set_options(&uap->port, co, baud, parity, bits, flow); +} + +/** + * pl011_console_match - non-standard console matching + * @co: registering console + * @name: name from console command line + * @idx: index from console command line + * @options: ptr to option string from console command line + * + * Only attempts to match console command lines of the form: + * console=pl011,mmio|mmio32,[,] + * console=pl011,0x[,] + * This form is used to register an initial earlycon boot console and + * replace it with the amba_console at pl011 driver init. 
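pl011_console_write() must never deadlock when the kernel is already dying: if the port is in the middle of sysrq handling it skips the lock entirely, during an oops it only trylocks, and otherwise it takes the port lock normally. The same three-way policy is sketched below with a pthread mutex standing in for the spinlock; this is only a model of the locking decision, not of console output itself.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if we hold the lock and must unlock after writing, 0 otherwise. */
static int console_lock_policy(int in_sysrq, int oops_in_progress)
{
	if (in_sysrq)
		return 0;	/* lock already held by the sysrq path */
	if (oops_in_progress)
		return pthread_mutex_trylock(&port_lock) == 0;	/* best effort */
	pthread_mutex_lock(&port_lock);
	return 1;
}

int main(void)
{
	int locked = console_lock_policy(0, 1);

	printf("emit console text %s the port lock\n", locked ? "under" : "without");
	if (locked)
		pthread_mutex_unlock(&port_lock);
	return 0;
}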
+ * + * Performs console setup for a match (as required by interface) + * If no are specified, then assume the h/w is already setup. + * + * Returns 0 if console matches; otherwise non-zero to use default matching + */ +static int pl011_console_match(struct console *co, char *name, int idx, + char *options) +{ + unsigned char iotype; + resource_size_t addr; + int i; + + /* + * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum + * have a distinct console name, so make sure we check for that. + * The actual implementation of the erratum occurs in the probe + * function. + */ + if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0)) + return -ENODEV; + + if (uart_parse_earlycon(options, &iotype, &addr, &options)) + return -ENODEV; + + if (iotype != UPIO_MEM && iotype != UPIO_MEM32) + return -ENODEV; + + /* try to match the port specified on the command line */ + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { + struct uart_port *port; + + if (!amba_ports[i]) + continue; + + port = &amba_ports[i]->port; + + if (port->mapbase != addr) + continue; + + co->index = i; + port->cons = co; + return pl011_console_setup(co, options); + } + + return -ENODEV; +} + +static struct uart_driver amba_reg; +static struct console amba_console = { + .name = "ttyAMA", + .write = pl011_console_write, + .device = uart_console_device, + .setup = pl011_console_setup, + .match = pl011_console_match, + .flags = CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, + .data = &amba_reg, +}; + +#define AMBA_CONSOLE (&amba_console) + +static void qdf2400_e44_putc(struct uart_port *port, unsigned char c) +{ + while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) + cpu_relax(); + writel(c, port->membase + UART01x_DR); + while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE)) + cpu_relax(); +} + +static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, qdf2400_e44_putc); +} + +static void pl011_putc(struct uart_port *port, unsigned char c) +{ + while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) + cpu_relax(); + if (port->iotype == UPIO_MEM32) + writel(c, port->membase + UART01x_DR); + else + writeb(c, port->membase + UART01x_DR); + while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY) + cpu_relax(); +} + +static void pl011_early_write(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, pl011_putc); +} + +#ifdef CONFIG_CONSOLE_POLL +static int pl011_getc(struct uart_port *port) +{ + if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE) + return NO_POLL_CHAR; + + if (port->iotype == UPIO_MEM32) + return readl(port->membase + UART01x_DR); + else + return readb(port->membase + UART01x_DR); +} + +static int pl011_early_read(struct console *con, char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + int ch, num_read = 0; + + while (num_read < n) { + ch = pl011_getc(&dev->port); + if (ch == NO_POLL_CHAR) + break; + + s[num_read++] = ch; + } + + return num_read; +} +#else +#define pl011_early_read NULL +#endif + +/* + * On non-ACPI systems, earlycon is enabled by specifying + * "earlycon=pl011,
<address>
" on the kernel command line. + * + * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table, + * by specifying only "earlycon" on the command line. Because it requires + * SPCR, the console starts after ACPI is parsed, which is later than a + * traditional early console. + * + * To get the traditional early console that starts before ACPI is parsed, + * specify the full "earlycon=pl011,
" option. + */ +static int __init pl011_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = pl011_early_write; + device->con->read = pl011_early_read; + + return 0; +} +OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); +OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); + +/* + * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by + * Erratum 44, traditional earlycon can be enabled by specifying + * "earlycon=qdf2400_e44,
". Any options are ignored. + * + * Alternatively, you can just specify "earlycon", and the early console + * will be enabled with the information from the SPCR table. In this + * case, the SPCR code will detect the need for the E44 work-around, + * and set the console name to "qdf2400_e44". + */ +static int __init +qdf2400_e44_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = qdf2400_e44_early_write; + return 0; +} +EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup); + +#else +#define AMBA_CONSOLE NULL +#endif + +static struct uart_driver amba_reg = { + .owner = THIS_MODULE, + .driver_name = "ttyAMA", + .dev_name = "ttyAMA", + .major = SERIAL_AMBA_MAJOR, + .minor = SERIAL_AMBA_MINOR, + .nr = UART_NR, + .cons = AMBA_CONSOLE, +}; + +static int pl011_probe_dt_alias(int index, struct device *dev) +{ + struct device_node *np; + static bool seen_dev_with_alias = false; + static bool seen_dev_without_alias = false; + int ret = index; + + if (!IS_ENABLED(CONFIG_OF)) + return ret; + + np = dev->of_node; + if (!np) + return ret; + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + seen_dev_without_alias = true; + ret = index; + } else { + seen_dev_with_alias = true; + if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) { + dev_warn(dev, "requested serial port %d not available.\n", ret); + ret = index; + } + } + + if (seen_dev_with_alias && seen_dev_without_alias) + dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n"); + + return ret; +} + +/* unregisters the driver also if no more ports are left */ +static void pl011_unregister_port(struct uart_amba_port *uap) +{ + int i; + bool busy = false; + + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { + if (amba_ports[i] == uap) + amba_ports[i] = NULL; + else if (amba_ports[i]) + busy = true; + } + pl011_dma_remove(uap); + if (!busy) + uart_unregister_driver(&amba_reg); +} + +static int pl011_find_free_port(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == NULL) + return i; + + return -EBUSY; +} + +static int pl011_get_rs485_mode(struct uart_amba_port *uap) +{ + struct uart_port *port = &uap->port; + int ret; + + ret = uart_get_rs485_mode(port); + if (ret) + return ret; + + return 0; +} + +static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, + struct resource *mmiobase, int index) +{ + void __iomem *base; + int ret; + + base = devm_ioremap_resource(dev, mmiobase); + if (IS_ERR(base)) + return PTR_ERR(base); + + index = pl011_probe_dt_alias(index, dev); + + uap->port.dev = dev; + uap->port.mapbase = mmiobase->start; + uap->port.membase = base; + uap->port.fifosize = uap->fifosize; + uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE); + uap->port.flags = UPF_BOOT_AUTOCONF; + uap->port.line = index; + + ret = pl011_get_rs485_mode(uap); + if (ret) + return ret; + + amba_ports[index] = uap; + + return 0; +} + +static int pl011_register_port(struct uart_amba_port *uap) +{ + int ret, i; + + /* Ensure interrupts from this UART are masked and cleared */ + pl011_write(0, uap, REG_IMSC); + pl011_write(0xffff, uap, REG_ICR); + + if (!amba_reg.state) { + ret = uart_register_driver(&amba_reg); + if (ret < 0) { + dev_err(uap->port.dev, + "Failed to register AMBA-PL011 driver\n"); + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + amba_ports[i] = NULL; + return ret; + } + } + 
+ ret = uart_add_one_port(&amba_reg, &uap->port); + if (ret) + pl011_unregister_port(uap); + + return ret; +} + +static const struct serial_rs485 pl011_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | + SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + +static int pl011_probe(struct amba_device *dev, const struct amba_id *id) +{ + struct uart_amba_port *uap; + struct vendor_data *vendor = id->data; + int portnr, ret; + u32 val; + + portnr = pl011_find_free_port(); + if (portnr < 0) + return portnr; + + uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port), + GFP_KERNEL); + if (!uap) + return -ENOMEM; + + uap->clk = devm_clk_get(&dev->dev, NULL); + if (IS_ERR(uap->clk)) + return PTR_ERR(uap->clk); + + uap->reg_offset = vendor->reg_offset; + uap->vendor = vendor; + uap->fifosize = vendor->get_fifosize(dev); + uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; + uap->port.irq = dev->irq[0]; + uap->port.ops = &amba_pl011_pops; + uap->port.rs485_config = pl011_rs485_config; + uap->port.rs485_supported = pl011_rs485_supported; + snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); + + if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) { + switch (val) { + case 1: + uap->port.iotype = UPIO_MEM; + break; + case 4: + uap->port.iotype = UPIO_MEM32; + break; + default: + dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n", + val); + return -EINVAL; + } + } + + ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr); + if (ret) + return ret; + + amba_set_drvdata(dev, uap); + + return pl011_register_port(uap); +} + +static void pl011_remove(struct amba_device *dev) +{ + struct uart_amba_port *uap = amba_get_drvdata(dev); + + uart_remove_one_port(&amba_reg, &uap->port); + pl011_unregister_port(uap); +} + +#ifdef CONFIG_PM_SLEEP +static int pl011_suspend(struct device *dev) +{ + struct uart_amba_port *uap = dev_get_drvdata(dev); + + if (!uap) + return -EINVAL; + + return uart_suspend_port(&amba_reg, &uap->port); +} + +static int pl011_resume(struct device *dev) +{ + struct uart_amba_port *uap = dev_get_drvdata(dev); + + if (!uap) + return -EINVAL; + + return uart_resume_port(&amba_reg, &uap->port); +} +#endif + +static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume); + +static int sbsa_uart_probe(struct platform_device *pdev) +{ + struct uart_amba_port *uap; + struct resource *r; + int portnr, ret; + int baudrate; + + /* + * Check the mandatory baud rate parameter in the DT node early + * so that we can easily exit with the error. + */ + if (pdev->dev.of_node) { + struct device_node *np = pdev->dev.of_node; + + ret = of_property_read_u32(np, "current-speed", &baudrate); + if (ret) + return ret; + } else { + baudrate = 115200; + } + + portnr = pl011_find_free_port(); + if (portnr < 0) + return portnr; + + uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port), + GFP_KERNEL); + if (!uap) + return -ENOMEM; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + uap->port.irq = ret; + +#ifdef CONFIG_ACPI_SPCR_TABLE + if (qdf2400_e44_present) { + dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n"); + uap->vendor = &vendor_qdt_qdf2400_e44; + } else +#endif + uap->vendor = &vendor_sbsa; + + uap->reg_offset = uap->vendor->reg_offset; + uap->fifosize = 32; + uap->port.iotype = uap->vendor->access_32b ? 
UPIO_MEM32 : UPIO_MEM; + uap->port.ops = &sbsa_uart_pops; + uap->fixed_baud = baudrate; + + snprintf(uap->type, sizeof(uap->type), "SBSA"); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + ret = pl011_setup_port(&pdev->dev, uap, r, portnr); + if (ret) + return ret; + + platform_set_drvdata(pdev, uap); + + return pl011_register_port(uap); +} + +static int sbsa_uart_remove(struct platform_device *pdev) +{ + struct uart_amba_port *uap = platform_get_drvdata(pdev); + + uart_remove_one_port(&amba_reg, &uap->port); + pl011_unregister_port(uap); + return 0; +} + +static const struct of_device_id sbsa_uart_of_match[] = { + { .compatible = "arm,sbsa-uart", }, + {}, +}; +MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); + +static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = { + { "ARMH0011", 0 }, + { "ARMHB000", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); + +static struct platform_driver arm_sbsa_uart_platform_driver = { + .probe = sbsa_uart_probe, + .remove = sbsa_uart_remove, + .driver = { + .name = "sbsa-uart", + .pm = &pl011_dev_pm_ops, + .of_match_table = of_match_ptr(sbsa_uart_of_match), + .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match), + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), + }, +}; + +static const struct amba_id pl011_ids[] = { + { + .id = 0x00041011, + .mask = 0x000fffff, + .data = &vendor_arm, + }, + { + .id = 0x00380802, + .mask = 0x00ffffff, + .data = &vendor_st, + }, + { 0, 0 }, +}; + +MODULE_DEVICE_TABLE(amba, pl011_ids); + +static struct amba_driver pl011_driver = { + .drv = { + .name = "uart-pl011", + .pm = &pl011_dev_pm_ops, + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), + }, + .id_table = pl011_ids, + .probe = pl011_probe, + .remove = pl011_remove, +}; + +static int __init pl011_init(void) +{ + printk(KERN_INFO "Serial: AMBA PL011 UART driver\n"); + + if (platform_driver_register(&arm_sbsa_uart_platform_driver)) + pr_warn("could not register SBSA UART platform driver\n"); + return amba_driver_register(&pl011_driver); +} + +static void __exit pl011_exit(void) +{ + platform_driver_unregister(&arm_sbsa_uart_platform_driver); + amba_driver_unregister(&pl011_driver); +} + +/* + * While this can be a module, if builtin it's most likely the console + * So let's leave module_exit but move module_init to an earlier place + */ +arch_initcall(pl011_init); +module_exit(pl011_exit); + +MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); +MODULE_DESCRIPTION("ARM AMBA serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c new file mode 100644 index 000000000..450f4edfd --- /dev/null +++ b/drivers/tty/serial/apbuart.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for GRLIB serial ports (APBUART) + * + * Based on linux/drivers/serial/amba.c + * + * Copyright (C) 2000 Deep Blue Solutions Ltd. 
+ * Copyright (C) 2003 Konrad Eisele + * Copyright (C) 2006 Daniel Hellstrom , Aeroflex Gaisler AB + * Copyright (C) 2008 Gilead Kutnick + * Copyright (C) 2009 Kristoffer Glembo , Aeroflex Gaisler AB + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apbuart.h" + +#define SERIAL_APBUART_MAJOR TTY_MAJOR +#define SERIAL_APBUART_MINOR 64 +#define UART_DUMMY_RSR_RX 0x8000 /* for ignore all read */ + +static void apbuart_tx_chars(struct uart_port *port); + +static void apbuart_stop_tx(struct uart_port *port) +{ + unsigned int cr; + + cr = UART_GET_CTRL(port); + cr &= ~UART_CTRL_TI; + UART_PUT_CTRL(port, cr); +} + +static void apbuart_start_tx(struct uart_port *port) +{ + unsigned int cr; + + cr = UART_GET_CTRL(port); + cr |= UART_CTRL_TI; + UART_PUT_CTRL(port, cr); + + if (UART_GET_STATUS(port) & UART_STATUS_THE) + apbuart_tx_chars(port); +} + +static void apbuart_stop_rx(struct uart_port *port) +{ + unsigned int cr; + + cr = UART_GET_CTRL(port); + cr &= ~(UART_CTRL_RI); + UART_PUT_CTRL(port, cr); +} + +static void apbuart_rx_chars(struct uart_port *port) +{ + unsigned int status, ch, rsr, flag; + unsigned int max_chars = port->fifosize; + + status = UART_GET_STATUS(port); + + while (UART_RX_DATA(status) && (max_chars--)) { + + ch = UART_GET_CHAR(port); + flag = TTY_NORMAL; + + port->icount.rx++; + + rsr = UART_GET_STATUS(port) | UART_DUMMY_RSR_RX; + UART_PUT_STATUS(port, 0); + if (rsr & UART_STATUS_ERR) { + + if (rsr & UART_STATUS_BR) { + rsr &= ~(UART_STATUS_FE | UART_STATUS_PE); + port->icount.brk++; + if (uart_handle_break(port)) + goto ignore_char; + } else if (rsr & UART_STATUS_PE) { + port->icount.parity++; + } else if (rsr & UART_STATUS_FE) { + port->icount.frame++; + } + if (rsr & UART_STATUS_OE) + port->icount.overrun++; + + rsr &= port->read_status_mask; + + if (rsr & UART_STATUS_PE) + flag = TTY_PARITY; + else if (rsr & UART_STATUS_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(port, ch)) + goto ignore_char; + + uart_insert_char(port, rsr, UART_STATUS_OE, ch, flag); + + + ignore_char: + status = UART_GET_STATUS(port); + } + + tty_flip_buffer_push(&port->state->port); +} + +static void apbuart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + int count; + + if (port->x_char) { + UART_PUT_CHAR(port, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + apbuart_stop_tx(port); + return; + } + + /* amba: fill FIFO */ + count = port->fifosize >> 1; + do { + UART_PUT_CHAR(port, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + apbuart_stop_tx(port); +} + +static irqreturn_t apbuart_int(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned int status; + + spin_lock(&port->lock); + + status = UART_GET_STATUS(port); + if (status & UART_STATUS_DR) + apbuart_rx_chars(port); + if (status & UART_STATUS_THE) + apbuart_tx_chars(port); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static unsigned int apbuart_tx_empty(struct uart_port *port) +{ + unsigned int status = UART_GET_STATUS(port); + return status & UART_STATUS_THE ? 
TIOCSER_TEMT : 0; +} + +static unsigned int apbuart_get_mctrl(struct uart_port *port) +{ + /* The GRLIB APBUART handles flow control in hardware */ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +static void apbuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* The GRLIB APBUART handles flow control in hardware */ +} + +static void apbuart_break_ctl(struct uart_port *port, int break_state) +{ + /* We don't support sending break */ +} + +static int apbuart_startup(struct uart_port *port) +{ + int retval; + unsigned int cr; + + /* Allocate the IRQ */ + retval = request_irq(port->irq, apbuart_int, 0, "apbuart", port); + if (retval) + return retval; + + /* Finally, enable interrupts */ + cr = UART_GET_CTRL(port); + UART_PUT_CTRL(port, + cr | UART_CTRL_RE | UART_CTRL_TE | + UART_CTRL_RI | UART_CTRL_TI); + + return 0; +} + +static void apbuart_shutdown(struct uart_port *port) +{ + unsigned int cr; + + /* disable all interrupts, disable the port */ + cr = UART_GET_CTRL(port); + UART_PUT_CTRL(port, + cr & ~(UART_CTRL_RE | UART_CTRL_TE | + UART_CTRL_RI | UART_CTRL_TI)); + + /* Free the interrupt */ + free_irq(port->irq, port); +} + +static void apbuart_set_termios(struct uart_port *port, + struct ktermios *termios, const struct ktermios *old) +{ + unsigned int cr; + unsigned long flags; + unsigned int baud, quot; + + /* Ask the core to calculate the divisor for us. */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + if (baud == 0) + panic("invalid baudrate %i\n", port->uartclk / 16); + + /* uart_get_divisor calc a *16 uart freq, apbuart is *8 */ + quot = (uart_get_divisor(port, baud)) * 2; + cr = UART_GET_CTRL(port); + cr &= ~(UART_CTRL_PE | UART_CTRL_PS); + + if (termios->c_cflag & PARENB) { + cr |= UART_CTRL_PE; + if ((termios->c_cflag & PARODD)) + cr |= UART_CTRL_PS; + } + + /* Enable flow control. */ + if (termios->c_cflag & CRTSCTS) + cr |= UART_CTRL_FL; + + spin_lock_irqsave(&port->lock, flags); + + /* Update the per-port timeout. */ + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = UART_STATUS_OE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= UART_STATUS_FE | UART_STATUS_PE; + + /* Characters to ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART_STATUS_FE | UART_STATUS_PE; + + /* Ignore all characters if CREAD is not set. */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_RSR_RX; + + /* Set baud rate */ + quot -= 1; + UART_PUT_SCAL(port, quot); + UART_PUT_CTRL(port, cr); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *apbuart_type(struct uart_port *port) +{ + return port->type == PORT_APBUART ? "GRLIB/APBUART" : NULL; +} + +static void apbuart_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, 0x100); +} + +static int apbuart_request_port(struct uart_port *port) +{ + return request_mem_region(port->mapbase, 0x100, "grlib-apbuart") + != NULL ? 
0 : -EBUSY; + return 0; +} + +/* Configure/autoconfigure the port */ +static void apbuart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_APBUART; + apbuart_request_port(port); + } +} + +/* Verify the new serial_struct (for TIOCSSERIAL) */ +static int apbuart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_APBUART) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +static const struct uart_ops grlib_apbuart_ops = { + .tx_empty = apbuart_tx_empty, + .set_mctrl = apbuart_set_mctrl, + .get_mctrl = apbuart_get_mctrl, + .stop_tx = apbuart_stop_tx, + .start_tx = apbuart_start_tx, + .stop_rx = apbuart_stop_rx, + .break_ctl = apbuart_break_ctl, + .startup = apbuart_startup, + .shutdown = apbuart_shutdown, + .set_termios = apbuart_set_termios, + .type = apbuart_type, + .release_port = apbuart_release_port, + .request_port = apbuart_request_port, + .config_port = apbuart_config_port, + .verify_port = apbuart_verify_port, +}; + +static struct uart_port grlib_apbuart_ports[UART_NR]; +static struct device_node *grlib_apbuart_nodes[UART_NR]; + +static int apbuart_scan_fifo_size(struct uart_port *port, int portnumber) +{ + int ctrl, loop = 0; + int status; + int fifosize; + unsigned long flags; + + ctrl = UART_GET_CTRL(port); + + /* + * Enable the transceiver and wait for it to be ready to send data. + * Clear interrupts so that this process will not be externally + * interrupted in the middle (which can cause the transceiver to + * drain prematurely). + */ + + local_irq_save(flags); + + UART_PUT_CTRL(port, ctrl | UART_CTRL_TE); + + while (!UART_TX_READY(UART_GET_STATUS(port))) + loop++; + + /* + * Disable the transceiver so data isn't actually sent during the + * actual test. + */ + + UART_PUT_CTRL(port, ctrl & ~(UART_CTRL_TE)); + + fifosize = 1; + UART_PUT_CHAR(port, 0); + + /* + * So long as transmitting a character increments the tranceivier FIFO + * length the FIFO must be at least that big. These bytes will + * automatically drain off of the FIFO. 
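+ * (Concretely, the loop below reads the 6-bit FIFO count field at bits
+ * 25:20 of the status word and keeps writing dummy bytes for as long as
+ * each write still bumps that count in step with the local counter.)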
+ */ + + status = UART_GET_STATUS(port); + while (((status >> 20) & 0x3F) == fifosize) { + fifosize++; + UART_PUT_CHAR(port, 0); + status = UART_GET_STATUS(port); + } + + fifosize--; + + UART_PUT_CTRL(port, ctrl); + local_irq_restore(flags); + + if (fifosize == 0) + fifosize = 1; + + return fifosize; +} + +static void apbuart_flush_fifo(struct uart_port *port) +{ + int i; + + for (i = 0; i < port->fifosize; i++) + UART_GET_CHAR(port); +} + + +/* ======================================================================== */ +/* Console driver, if enabled */ +/* ======================================================================== */ + +#ifdef CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE + +static void apbuart_console_putchar(struct uart_port *port, unsigned char ch) +{ + unsigned int status; + do { + status = UART_GET_STATUS(port); + } while (!UART_TX_READY(status)); + UART_PUT_CHAR(port, ch); +} + +static void +apbuart_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_port *port = &grlib_apbuart_ports[co->index]; + unsigned int status, old_cr, new_cr; + + /* First save the CR then disable the interrupts */ + old_cr = UART_GET_CTRL(port); + new_cr = old_cr & ~(UART_CTRL_RI | UART_CTRL_TI); + UART_PUT_CTRL(port, new_cr); + + uart_console_write(port, s, count, apbuart_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the TCR + */ + do { + status = UART_GET_STATUS(port); + } while (!UART_TX_READY(status)); + UART_PUT_CTRL(port, old_cr); +} + +static void __init +apbuart_console_get_options(struct uart_port *port, int *baud, + int *parity, int *bits) +{ + if (UART_GET_CTRL(port) & (UART_CTRL_RE | UART_CTRL_TE)) { + + unsigned int quot, status; + status = UART_GET_STATUS(port); + + *parity = 'n'; + if (status & UART_CTRL_PE) { + if ((status & UART_CTRL_PS) == 0) + *parity = 'e'; + else + *parity = 'o'; + } + + *bits = 8; + quot = UART_GET_SCAL(port) / 8; + *baud = port->uartclk / (16 * (quot + 1)); + } +} + +static int __init apbuart_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + pr_debug("apbuart_console_setup co=%p, co->index=%i, options=%s\n", + co, co->index, options); + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
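+ * (grlib_apbuart_port_nr is the number of ports discovered by
+ * grlib_apbuart_configure(), which apbuart_console_init() runs before
+ * registering this console.)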
+ */ + if (co->index >= grlib_apbuart_port_nr) + co->index = 0; + + port = &grlib_apbuart_ports[co->index]; + + spin_lock_init(&port->lock); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + apbuart_console_get_options(port, &baud, &parity, &bits); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver grlib_apbuart_driver; + +static struct console grlib_apbuart_console = { + .name = "ttyS", + .write = apbuart_console_write, + .device = uart_console_device, + .setup = apbuart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &grlib_apbuart_driver, +}; + + +static int grlib_apbuart_configure(void); + +static int __init apbuart_console_init(void) +{ + if (grlib_apbuart_configure()) + return -ENODEV; + register_console(&grlib_apbuart_console); + return 0; +} + +console_initcall(apbuart_console_init); + +#define APBUART_CONSOLE (&grlib_apbuart_console) +#else +#define APBUART_CONSOLE NULL +#endif + +static struct uart_driver grlib_apbuart_driver = { + .owner = THIS_MODULE, + .driver_name = "serial", + .dev_name = "ttyS", + .major = SERIAL_APBUART_MAJOR, + .minor = SERIAL_APBUART_MINOR, + .nr = UART_NR, + .cons = APBUART_CONSOLE, +}; + + +/* ======================================================================== */ +/* OF Platform Driver */ +/* ======================================================================== */ + +static int apbuart_probe(struct platform_device *op) +{ + int i; + struct uart_port *port = NULL; + + for (i = 0; i < grlib_apbuart_port_nr; i++) { + if (op->dev.of_node == grlib_apbuart_nodes[i]) + break; + } + + port = &grlib_apbuart_ports[i]; + port->dev = &op->dev; + port->irq = op->archdata.irqs[0]; + + uart_add_one_port(&grlib_apbuart_driver, (struct uart_port *) port); + + apbuart_flush_fifo((struct uart_port *) port); + + printk(KERN_INFO "grlib-apbuart at 0x%llx, irq %d\n", + (unsigned long long) port->mapbase, port->irq); + return 0; +} + +static const struct of_device_id apbuart_match[] = { + { + .name = "GAISLER_APBUART", + }, + { + .name = "01_00c", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, apbuart_match); + +static struct platform_driver grlib_apbuart_of_driver = { + .probe = apbuart_probe, + .driver = { + .name = "grlib-apbuart", + .of_match_table = apbuart_match, + }, +}; + + +static int __init grlib_apbuart_configure(void) +{ + struct device_node *np; + int line = 0; + + for_each_matching_node(np, apbuart_match) { + const int *ampopts; + const u32 *freq_hz; + const struct amba_prom_registers *regs; + struct uart_port *port; + unsigned long addr; + + ampopts = of_get_property(np, "ampopts", NULL); + if (ampopts && (*ampopts == 0)) + continue; /* Ignore if used by another OS instance */ + regs = of_get_property(np, "reg", NULL); + /* Frequency of APB Bus is frequency of UART */ + freq_hz = of_get_property(np, "freq", NULL); + + if (!regs || !freq_hz || (*freq_hz == 0)) + continue; + + grlib_apbuart_nodes[line] = np; + + addr = regs->phys_addr; + + port = &grlib_apbuart_ports[line]; + + port->mapbase = addr; + port->membase = ioremap(addr, sizeof(struct grlib_apbuart_regs_map)); + port->irq = 0; + port->iotype = UPIO_MEM; + port->ops = &grlib_apbuart_ops; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE); + port->flags = UPF_BOOT_AUTOCONF; + port->line = line; + port->uartclk = *freq_hz; + port->fifosize = apbuart_scan_fifo_size((struct uart_port *) port, line); + line++; + + /* We support maximum UART_NR uarts ... 
*/ + if (line == UART_NR) + break; + } + + grlib_apbuart_driver.nr = grlib_apbuart_port_nr = line; + return line ? 0 : -ENODEV; +} + +static int __init grlib_apbuart_init(void) +{ + int ret; + + /* Find all APBUARTS in device the tree and initialize their ports */ + ret = grlib_apbuart_configure(); + if (ret) + return ret; + + printk(KERN_INFO "Serial: GRLIB APBUART driver\n"); + + ret = uart_register_driver(&grlib_apbuart_driver); + + if (ret) { + printk(KERN_ERR "%s: uart_register_driver failed (%i)\n", + __FILE__, ret); + return ret; + } + + ret = platform_driver_register(&grlib_apbuart_of_driver); + if (ret) { + printk(KERN_ERR + "%s: platform_driver_register failed (%i)\n", + __FILE__, ret); + uart_unregister_driver(&grlib_apbuart_driver); + return ret; + } + + return ret; +} + +static void __exit grlib_apbuart_exit(void) +{ + int i; + + for (i = 0; i < grlib_apbuart_port_nr; i++) + uart_remove_one_port(&grlib_apbuart_driver, + &grlib_apbuart_ports[i]); + + uart_unregister_driver(&grlib_apbuart_driver); + platform_driver_unregister(&grlib_apbuart_of_driver); +} + +module_init(grlib_apbuart_init); +module_exit(grlib_apbuart_exit); + +MODULE_AUTHOR("Aeroflex Gaisler AB"); +MODULE_DESCRIPTION("GRLIB APBUART serial driver"); +MODULE_VERSION("2.1"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/apbuart.h b/drivers/tty/serial/apbuart.h new file mode 100644 index 000000000..81baf0076 --- /dev/null +++ b/drivers/tty/serial/apbuart.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __GRLIB_APBUART_H__ +#define __GRLIB_APBUART_H__ + +#include + +#define UART_NR 8 +static int grlib_apbuart_port_nr; + +struct grlib_apbuart_regs_map { + u32 data; + u32 status; + u32 ctrl; + u32 scaler; +}; + +struct amba_prom_registers { + unsigned int phys_addr; + unsigned int reg_size; +}; + +/* + * The following defines the bits in the APBUART Status Registers. + */ +#define UART_STATUS_DR 0x00000001 /* Data Ready */ +#define UART_STATUS_TSE 0x00000002 /* TX Send Register Empty */ +#define UART_STATUS_THE 0x00000004 /* TX Hold Register Empty */ +#define UART_STATUS_BR 0x00000008 /* Break Error */ +#define UART_STATUS_OE 0x00000010 /* RX Overrun Error */ +#define UART_STATUS_PE 0x00000020 /* RX Parity Error */ +#define UART_STATUS_FE 0x00000040 /* RX Framing Error */ +#define UART_STATUS_ERR 0x00000078 /* Error Mask */ + +/* + * The following defines the bits in the APBUART Ctrl Registers. 
+ */ +#define UART_CTRL_RE 0x00000001 /* Receiver enable */ +#define UART_CTRL_TE 0x00000002 /* Transmitter enable */ +#define UART_CTRL_RI 0x00000004 /* Receiver interrupt enable */ +#define UART_CTRL_TI 0x00000008 /* Transmitter irq */ +#define UART_CTRL_PS 0x00000010 /* Parity select */ +#define UART_CTRL_PE 0x00000020 /* Parity enable */ +#define UART_CTRL_FL 0x00000040 /* Flow control enable */ +#define UART_CTRL_LB 0x00000080 /* Loopback enable */ + +#define APBBASE(port) ((struct grlib_apbuart_regs_map *)((port)->membase)) + +#define APBBASE_DATA_P(port) (&(APBBASE(port)->data)) +#define APBBASE_STATUS_P(port) (&(APBBASE(port)->status)) +#define APBBASE_CTRL_P(port) (&(APBBASE(port)->ctrl)) +#define APBBASE_SCALAR_P(port) (&(APBBASE(port)->scaler)) + +#define UART_GET_CHAR(port) (__raw_readl(APBBASE_DATA_P(port))) +#define UART_PUT_CHAR(port, v) (__raw_writel(v, APBBASE_DATA_P(port))) +#define UART_GET_STATUS(port) (__raw_readl(APBBASE_STATUS_P(port))) +#define UART_PUT_STATUS(port, v)(__raw_writel(v, APBBASE_STATUS_P(port))) +#define UART_GET_CTRL(port) (__raw_readl(APBBASE_CTRL_P(port))) +#define UART_PUT_CTRL(port, v) (__raw_writel(v, APBBASE_CTRL_P(port))) +#define UART_GET_SCAL(port) (__raw_readl(APBBASE_SCALAR_P(port))) +#define UART_PUT_SCAL(port, v) (__raw_writel(v, APBBASE_SCALAR_P(port))) + +#define UART_RX_DATA(s) (((s) & UART_STATUS_DR) != 0) +#define UART_TX_READY(s) (((s) & UART_STATUS_THE) != 0) + +#endif /* __GRLIB_APBUART_H__ */ diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c new file mode 100644 index 000000000..925484a42 --- /dev/null +++ b/drivers/tty/serial/ar933x_uart.c @@ -0,0 +1,890 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Atheros AR933X SoC built-in UART driver + * + * Copyright (C) 2011 Gabor Juhos + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "serial_mctrl_gpio.h" + +#define DRIVER_NAME "ar933x-uart" + +#define AR933X_UART_MAX_SCALE 0xff +#define AR933X_UART_MAX_STEP 0xffff + +#define AR933X_UART_MIN_BAUD 300 +#define AR933X_UART_MAX_BAUD 3000000 + +#define AR933X_DUMMY_STATUS_RD 0x01 + +static struct uart_driver ar933x_uart_driver; + +struct ar933x_uart_port { + struct uart_port port; + unsigned int ier; /* shadow Interrupt Enable Register */ + unsigned int min_baud; + unsigned int max_baud; + struct clk *clk; + struct mctrl_gpios *gpios; + struct gpio_desc *rts_gpiod; +}; + +static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, + int offset) +{ + return readl(up->port.membase + offset); +} + +static inline void ar933x_uart_write(struct ar933x_uart_port *up, + int offset, unsigned int value) +{ + writel(value, up->port.membase + offset); +} + +static inline void ar933x_uart_rmw(struct ar933x_uart_port *up, + unsigned int offset, + unsigned int mask, + unsigned int val) +{ + unsigned int t; + + t = ar933x_uart_read(up, offset); + t &= ~mask; + t |= val; + ar933x_uart_write(up, offset, t); +} + +static inline void ar933x_uart_rmw_set(struct ar933x_uart_port *up, + unsigned int offset, + unsigned int val) +{ + ar933x_uart_rmw(up, offset, 0, val); +} + +static inline void ar933x_uart_rmw_clear(struct ar933x_uart_port *up, + unsigned int offset, + unsigned int val) +{ + ar933x_uart_rmw(up, offset, val, 0); +} + +static inline void ar933x_uart_start_tx_interrupt(struct ar933x_uart_port *up) +{ + up->ier |= AR933X_UART_INT_TX_EMPTY; + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); +} + +static inline void ar933x_uart_stop_tx_interrupt(struct ar933x_uart_port *up) +{ + up->ier &= ~AR933X_UART_INT_TX_EMPTY; + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); +} + +static inline void ar933x_uart_start_rx_interrupt(struct ar933x_uart_port *up) +{ + up->ier |= AR933X_UART_INT_RX_VALID; + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); +} + +static inline void ar933x_uart_stop_rx_interrupt(struct ar933x_uart_port *up) +{ + up->ier &= ~AR933X_UART_INT_RX_VALID; + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); +} + +static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch) +{ + unsigned int rdata; + + rdata = ch & AR933X_UART_DATA_TX_RX_MASK; + rdata |= AR933X_UART_DATA_TX_CSR; + ar933x_uart_write(up, AR933X_UART_DATA_REG, rdata); +} + +static unsigned int ar933x_uart_tx_empty(struct uart_port *port) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + unsigned long flags; + unsigned int rdata; + + spin_lock_irqsave(&up->port.lock, flags); + rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG); + spin_unlock_irqrestore(&up->port.lock, flags); + + return (rdata & AR933X_UART_DATA_TX_CSR) ? 
0 : TIOCSER_TEMT; +} + +static unsigned int ar933x_uart_get_mctrl(struct uart_port *port) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + int ret = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; + + mctrl_gpio_get(up->gpios, &ret); + + return ret; +} + +static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + mctrl_gpio_set(up->gpios, mctrl); +} + +static void ar933x_uart_start_tx(struct uart_port *port) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + ar933x_uart_start_tx_interrupt(up); +} + +static void ar933x_uart_wait_tx_complete(struct ar933x_uart_port *up) +{ + unsigned int status; + unsigned int timeout = 60000; + + /* Wait up to 60ms for the character(s) to be sent. */ + do { + status = ar933x_uart_read(up, AR933X_UART_CS_REG); + if (--timeout == 0) + break; + udelay(1); + } while (status & AR933X_UART_CS_TX_BUSY); + + if (timeout == 0) + dev_err(up->port.dev, "waiting for TX timed out\n"); +} + +static void ar933x_uart_rx_flush(struct ar933x_uart_port *up) +{ + unsigned int status; + + /* clear RX_VALID interrupt */ + ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_RX_VALID); + + /* remove characters from the RX FIFO */ + do { + ar933x_uart_write(up, AR933X_UART_DATA_REG, AR933X_UART_DATA_RX_CSR); + status = ar933x_uart_read(up, AR933X_UART_DATA_REG); + } while (status & AR933X_UART_DATA_RX_CSR); +} + +static void ar933x_uart_stop_tx(struct uart_port *port) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + ar933x_uart_stop_tx_interrupt(up); +} + +static void ar933x_uart_stop_rx(struct uart_port *port) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + ar933x_uart_stop_rx_interrupt(up); +} + +static void ar933x_uart_break_ctl(struct uart_port *port, int break_state) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + if (break_state == -1) + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_BREAK); + else + ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_BREAK); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +/* + * baudrate = (clk / (scale + 1)) * (step * (1 / 2^17)) + */ +static unsigned long ar933x_uart_get_baud(unsigned int clk, + unsigned int scale, + unsigned int step) +{ + u64 t; + u32 div; + + div = (2 << 16) * (scale + 1); + t = clk; + t *= step; + t += (div / 2); + do_div(t, div); + + return t; +} + +static void ar933x_uart_get_scale_step(unsigned int clk, + unsigned int baud, + unsigned int *scale, + unsigned int *step) +{ + unsigned int tscale; + long min_diff; + + *scale = 0; + *step = 0; + + min_diff = baud; + for (tscale = 0; tscale < AR933X_UART_MAX_SCALE; tscale++) { + u64 tstep; + int diff; + + tstep = baud * (tscale + 1); + tstep *= (2 << 16); + do_div(tstep, clk); + + if (tstep > AR933X_UART_MAX_STEP) + break; + + diff = abs(ar933x_uart_get_baud(clk, tscale, tstep) - baud); + if (diff < min_diff) { + min_diff = diff; + *scale = tscale; + *step = tstep; + } + } +} + +static void ar933x_uart_set_termios(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + unsigned int cs; + unsigned long flags; + unsigned int baud, scale, step; + + 
/* Only CS8 is supported */ + new->c_cflag &= ~CSIZE; + new->c_cflag |= CS8; + + /* Only one stop bit is supported */ + new->c_cflag &= ~CSTOPB; + + cs = 0; + if (new->c_cflag & PARENB) { + if (!(new->c_cflag & PARODD)) + cs |= AR933X_UART_CS_PARITY_EVEN; + else + cs |= AR933X_UART_CS_PARITY_ODD; + } else { + cs |= AR933X_UART_CS_PARITY_NONE; + } + + /* Mark/space parity is not supported */ + new->c_cflag &= ~CMSPAR; + + baud = uart_get_baud_rate(port, new, old, up->min_baud, up->max_baud); + ar933x_uart_get_scale_step(port->uartclk, baud, &scale, &step); + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + spin_lock_irqsave(&up->port.lock, flags); + + /* disable the UART */ + ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG, + AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S); + + /* Update the per-port timeout. */ + uart_update_timeout(port, new->c_cflag, baud); + + up->port.ignore_status_mask = 0; + + /* ignore all characters if CREAD is not set */ + if ((new->c_cflag & CREAD) == 0) + up->port.ignore_status_mask |= AR933X_DUMMY_STATUS_RD; + + ar933x_uart_write(up, AR933X_UART_CLOCK_REG, + scale << AR933X_UART_CLOCK_SCALE_S | step); + + /* setup configuration register */ + ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_PARITY_M, cs); + + /* enable host interrupt */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_HOST_INT_EN); + + /* enable RX and TX ready overide */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); + + /* reenable the UART */ + ar933x_uart_rmw(up, AR933X_UART_CS_REG, + AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S, + AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S); + + spin_unlock_irqrestore(&up->port.lock, flags); + + if (tty_termios_baud_rate(new)) + tty_termios_encode_baud_rate(new, baud, baud); +} + +static void ar933x_uart_rx_chars(struct ar933x_uart_port *up) +{ + struct tty_port *port = &up->port.state->port; + int max_count = 256; + + do { + unsigned int rdata; + unsigned char ch; + + rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG); + if ((rdata & AR933X_UART_DATA_RX_CSR) == 0) + break; + + /* remove the character from the FIFO */ + ar933x_uart_write(up, AR933X_UART_DATA_REG, + AR933X_UART_DATA_RX_CSR); + + up->port.icount.rx++; + ch = rdata & AR933X_UART_DATA_TX_RX_MASK; + + if (uart_handle_sysrq_char(&up->port, ch)) + continue; + + if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0) + tty_insert_flip_char(port, ch, TTY_NORMAL); + } while (max_count-- > 0); + + tty_flip_buffer_push(port); +} + +static void ar933x_uart_tx_chars(struct ar933x_uart_port *up) +{ + struct circ_buf *xmit = &up->port.state->xmit; + struct serial_rs485 *rs485conf = &up->port.rs485; + int count; + bool half_duplex_send = false; + + if (uart_tx_stopped(&up->port)) + return; + + if ((rs485conf->flags & SER_RS485_ENABLED) && + (up->port.x_char || !uart_circ_empty(xmit))) { + ar933x_uart_stop_rx_interrupt(up); + gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_ON_SEND)); + half_duplex_send = true; + } + + count = up->port.fifosize; + do { + unsigned int rdata; + + rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG); + if ((rdata & AR933X_UART_DATA_TX_CSR) == 0) + break; + + if (up->port.x_char) { + ar933x_uart_putc(up, up->port.x_char); + up->port.icount.tx++; + up->port.x_char = 0; + continue; + } + + if (uart_circ_empty(xmit)) + break; + + ar933x_uart_putc(up, xmit->buf[xmit->tail]); + + xmit->tail = (xmit->tail + 1) & 
(UART_XMIT_SIZE - 1); + up->port.icount.tx++; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + if (!uart_circ_empty(xmit)) { + ar933x_uart_start_tx_interrupt(up); + } else if (half_duplex_send) { + ar933x_uart_wait_tx_complete(up); + ar933x_uart_rx_flush(up); + ar933x_uart_start_rx_interrupt(up); + gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND)); + } +} + +static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id) +{ + struct ar933x_uart_port *up = dev_id; + unsigned int status; + + status = ar933x_uart_read(up, AR933X_UART_CS_REG); + if ((status & AR933X_UART_CS_HOST_INT) == 0) + return IRQ_NONE; + + spin_lock(&up->port.lock); + + status = ar933x_uart_read(up, AR933X_UART_INT_REG); + status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG); + + if (status & AR933X_UART_INT_RX_VALID) { + ar933x_uart_write(up, AR933X_UART_INT_REG, + AR933X_UART_INT_RX_VALID); + ar933x_uart_rx_chars(up); + } + + if (status & AR933X_UART_INT_TX_EMPTY) { + ar933x_uart_write(up, AR933X_UART_INT_REG, + AR933X_UART_INT_TX_EMPTY); + ar933x_uart_stop_tx_interrupt(up); + ar933x_uart_tx_chars(up); + } + + spin_unlock(&up->port.lock); + + return IRQ_HANDLED; +} + +static int ar933x_uart_startup(struct uart_port *port) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + unsigned long flags; + int ret; + + ret = request_irq(up->port.irq, ar933x_uart_interrupt, + up->port.irqflags, dev_name(up->port.dev), up); + if (ret) + return ret; + + spin_lock_irqsave(&up->port.lock, flags); + + /* Enable HOST interrupts */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_HOST_INT_EN); + + /* enable RX and TX ready overide */ + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); + + /* Enable RX interrupts */ + ar933x_uart_start_rx_interrupt(up); + + spin_unlock_irqrestore(&up->port.lock, flags); + + return 0; +} + +static void ar933x_uart_shutdown(struct uart_port *port) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + /* Disable all interrupts */ + up->ier = 0; + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); + + /* Disable break condition */ + ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG, + AR933X_UART_CS_TX_BREAK); + + free_irq(up->port.irq, up); +} + +static const char *ar933x_uart_type(struct uart_port *port) +{ + return (port->type == PORT_AR933X) ? "AR933X UART" : NULL; +} + +static void ar933x_uart_release_port(struct uart_port *port) +{ + /* Nothing to release ... 
*/ +} + +static int ar933x_uart_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +static void ar933x_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_AR933X; +} + +static int ar933x_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + if (ser->type != PORT_UNKNOWN && + ser->type != PORT_AR933X) + return -EINVAL; + + if (ser->irq < 0 || ser->irq >= NR_IRQS) + return -EINVAL; + + if (ser->baud_base < up->min_baud || + ser->baud_base > up->max_baud) + return -EINVAL; + + return 0; +} + +static const struct uart_ops ar933x_uart_ops = { + .tx_empty = ar933x_uart_tx_empty, + .set_mctrl = ar933x_uart_set_mctrl, + .get_mctrl = ar933x_uart_get_mctrl, + .stop_tx = ar933x_uart_stop_tx, + .start_tx = ar933x_uart_start_tx, + .stop_rx = ar933x_uart_stop_rx, + .break_ctl = ar933x_uart_break_ctl, + .startup = ar933x_uart_startup, + .shutdown = ar933x_uart_shutdown, + .set_termios = ar933x_uart_set_termios, + .type = ar933x_uart_type, + .release_port = ar933x_uart_release_port, + .request_port = ar933x_uart_request_port, + .config_port = ar933x_uart_config_port, + .verify_port = ar933x_uart_verify_port, +}; + +static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485conf) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + if (port->rs485.flags & SER_RS485_ENABLED) + gpiod_set_value(up->rts_gpiod, + !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND)); + + return 0; +} + +#ifdef CONFIG_SERIAL_AR933X_CONSOLE +static struct ar933x_uart_port * +ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; + +static void ar933x_uart_wait_xmitr(struct ar933x_uart_port *up) +{ + unsigned int status; + unsigned int timeout = 60000; + + /* Wait up to 60ms for the character(s) to be sent. 
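+ * (The loop below polls the TX CSR bit via udelay(1) and gives up after
+ * 60000 iterations, which is where the ~60 ms bound comes from.)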
*/ + do { + status = ar933x_uart_read(up, AR933X_UART_DATA_REG); + if (--timeout == 0) + break; + udelay(1); + } while ((status & AR933X_UART_DATA_TX_CSR) == 0); +} + +static void ar933x_uart_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct ar933x_uart_port *up = + container_of(port, struct ar933x_uart_port, port); + + ar933x_uart_wait_xmitr(up); + ar933x_uart_putc(up, ch); +} + +static void ar933x_uart_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct ar933x_uart_port *up = ar933x_console_ports[co->index]; + unsigned long flags; + unsigned int int_en; + int locked = 1; + + local_irq_save(flags); + + if (up->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&up->port.lock); + else + spin_lock(&up->port.lock); + + /* + * First save the IER then disable the interrupts + */ + int_en = ar933x_uart_read(up, AR933X_UART_INT_EN_REG); + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, 0); + + uart_console_write(&up->port, s, count, ar933x_uart_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + ar933x_uart_wait_xmitr(up); + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, int_en); + + ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS); + + if (locked) + spin_unlock(&up->port.lock); + + local_irq_restore(flags); +} + +static int ar933x_uart_console_setup(struct console *co, char *options) +{ + struct ar933x_uart_port *up; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= CONFIG_SERIAL_AR933X_NR_UARTS) + return -EINVAL; + + up = ar933x_console_ports[co->index]; + if (!up) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&up->port, co, baud, parity, bits, flow); +} + +static struct console ar933x_uart_console = { + .name = "ttyATH", + .write = ar933x_uart_console_write, + .device = uart_console_device, + .setup = ar933x_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &ar933x_uart_driver, +}; +#endif /* CONFIG_SERIAL_AR933X_CONSOLE */ + +static struct uart_driver ar933x_uart_driver = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = "ttyATH", + .nr = CONFIG_SERIAL_AR933X_NR_UARTS, + .cons = NULL, /* filled in runtime */ +}; + +static const struct serial_rs485 ar933x_no_rs485 = {}; +static const struct serial_rs485 ar933x_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND, +}; + +static int ar933x_uart_probe(struct platform_device *pdev) +{ + struct ar933x_uart_port *up; + struct uart_port *port; + struct resource *mem_res; + struct device_node *np; + unsigned int baud; + int id; + int ret; + int irq; + + np = pdev->dev.of_node; + if (IS_ENABLED(CONFIG_OF) && np) { + id = of_alias_get_id(np, "serial"); + if (id < 0) { + dev_err(&pdev->dev, "unable to get alias id, err=%d\n", + id); + return id; + } + } else { + id = pdev->id; + if (id == -1) + id = 0; + } + + if (id >= CONFIG_SERIAL_AR933X_NR_UARTS) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + up = devm_kzalloc(&pdev->dev, sizeof(struct ar933x_uart_port), + GFP_KERNEL); + if (!up) + return -ENOMEM; + + up->clk = devm_clk_get(&pdev->dev, "uart"); + if (IS_ERR(up->clk)) { + dev_err(&pdev->dev, "unable to get UART clock\n"); + return PTR_ERR(up->clk); + } + + port = &up->port; + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + 
port->membase = devm_ioremap_resource(&pdev->dev, mem_res); + if (IS_ERR(port->membase)) + return PTR_ERR(port->membase); + + ret = clk_prepare_enable(up->clk); + if (ret) + return ret; + + port->uartclk = clk_get_rate(up->clk); + if (!port->uartclk) { + ret = -EINVAL; + goto err_disable_clk; + } + + port->mapbase = mem_res->start; + port->line = id; + port->irq = irq; + port->dev = &pdev->dev; + port->type = PORT_AR933X; + port->iotype = UPIO_MEM32; + + port->regshift = 2; + port->fifosize = AR933X_UART_FIFO_SIZE; + port->ops = &ar933x_uart_ops; + port->rs485_config = ar933x_config_rs485; + port->rs485_supported = ar933x_rs485_supported; + + baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1); + up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD); + + baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); + up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); + + ret = uart_get_rs485_mode(port); + if (ret) + goto err_disable_clk; + + up->gpios = mctrl_gpio_init(port, 0); + if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS) { + ret = PTR_ERR(up->gpios); + goto err_disable_clk; + } + + up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS); + + if (!up->rts_gpiod) { + port->rs485_supported = ar933x_no_rs485; + if (port->rs485.flags & SER_RS485_ENABLED) { + dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n"); + port->rs485.flags &= ~SER_RS485_ENABLED; + } + } + +#ifdef CONFIG_SERIAL_AR933X_CONSOLE + ar933x_console_ports[up->port.line] = up; +#endif + + ret = uart_add_one_port(&ar933x_uart_driver, &up->port); + if (ret) + goto err_disable_clk; + + platform_set_drvdata(pdev, up); + return 0; + +err_disable_clk: + clk_disable_unprepare(up->clk); + return ret; +} + +static int ar933x_uart_remove(struct platform_device *pdev) +{ + struct ar933x_uart_port *up; + + up = platform_get_drvdata(pdev); + + if (up) { + uart_remove_one_port(&ar933x_uart_driver, &up->port); + clk_disable_unprepare(up->clk); + } + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id ar933x_uart_of_ids[] = { + { .compatible = "qca,ar9330-uart" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ar933x_uart_of_ids); +#endif + +static struct platform_driver ar933x_uart_platform_driver = { + .probe = ar933x_uart_probe, + .remove = ar933x_uart_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(ar933x_uart_of_ids), + }, +}; + +static int __init ar933x_uart_init(void) +{ + int ret; + +#ifdef CONFIG_SERIAL_AR933X_CONSOLE + ar933x_uart_driver.cons = &ar933x_uart_console; +#endif + + ret = uart_register_driver(&ar933x_uart_driver); + if (ret) + goto err_out; + + ret = platform_driver_register(&ar933x_uart_platform_driver); + if (ret) + goto err_unregister_uart_driver; + + return 0; + +err_unregister_uart_driver: + uart_unregister_driver(&ar933x_uart_driver); +err_out: + return ret; +} + +static void __exit ar933x_uart_exit(void) +{ + platform_driver_unregister(&ar933x_uart_platform_driver); + uart_unregister_driver(&ar933x_uart_driver); +} + +module_init(ar933x_uart_init); +module_exit(ar933x_uart_exit); + +MODULE_DESCRIPTION("Atheros AR933X UART driver"); +MODULE_AUTHOR("Gabor Juhos "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c new file mode 100644 index 000000000..f3ccc59d8 --- /dev/null +++ b/drivers/tty/serial/arc_uart.c @@ -0,0 +1,684 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARC On-Chip(fpga) UART Driver + * + * Copyright 
(C) 2010-2012 Synopsys, Inc. (www.synopsys.com) + * + * vineetg: July 10th 2012 + * -Decoupled the driver from arch/arc + * +Using platform_get_resource() for irq/membase (thx to bfin_uart.c) + * +Using early_platform_xxx() for early console (thx to mach-shmobile/xxx) + * + * Vineetg: Aug 21st 2010 + * -Is uart_tx_stopped() not done in tty write path as it has already been + * taken care of, in serial core + * + * Vineetg: Aug 18th 2010 + * -New Serial Core based ARC UART driver + * -Derived largely from blackfin driver albiet with some major tweaks + * + * TODO: + * -check if sysreq works + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/************************************* + * ARC UART Hardware Specs + ************************************/ +#define ARC_UART_TX_FIFO_SIZE 1 + +/* + * UART Register set (this is not a Standards Compliant IP) + * Also each reg is Word aligned, but only 8 bits wide + */ +#define R_ID0 0 +#define R_ID1 4 +#define R_ID2 8 +#define R_ID3 12 +#define R_DATA 16 +#define R_STS 20 +#define R_BAUDL 24 +#define R_BAUDH 28 + +/* Bits for UART Status Reg (R/W) */ +#define RXIENB 0x04 /* Receive Interrupt Enable */ +#define TXIENB 0x40 /* Transmit Interrupt Enable */ + +#define RXEMPTY 0x20 /* Receive FIFO Empty: No char receivede */ +#define TXEMPTY 0x80 /* Transmit FIFO Empty, thus char can be written into */ + +#define RXFULL 0x08 /* Receive FIFO full */ +#define RXFULL1 0x10 /* Receive FIFO has space for 1 char (tot space=4) */ + +#define RXFERR 0x01 /* Frame Error: Stop Bit not detected */ +#define RXOERR 0x02 /* OverFlow Err: Char recv but RXFULL still set */ + +/* Uart bit fiddling helpers: lowest level */ +#define RBASE(port, reg) (port->membase + reg) +#define UART_REG_SET(u, r, v) writeb((v), RBASE(u, r)) +#define UART_REG_GET(u, r) readb(RBASE(u, r)) + +#define UART_REG_OR(u, r, v) UART_REG_SET(u, r, UART_REG_GET(u, r) | (v)) +#define UART_REG_CLR(u, r, v) UART_REG_SET(u, r, UART_REG_GET(u, r) & ~(v)) + +/* Uart bit fiddling helpers: API level */ +#define UART_SET_DATA(uart, val) UART_REG_SET(uart, R_DATA, val) +#define UART_GET_DATA(uart) UART_REG_GET(uart, R_DATA) + +#define UART_SET_BAUDH(uart, val) UART_REG_SET(uart, R_BAUDH, val) +#define UART_SET_BAUDL(uart, val) UART_REG_SET(uart, R_BAUDL, val) + +#define UART_CLR_STATUS(uart, val) UART_REG_CLR(uart, R_STS, val) +#define UART_GET_STATUS(uart) UART_REG_GET(uart, R_STS) + +#define UART_ALL_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, RXIENB|TXIENB) +#define UART_RX_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, RXIENB) +#define UART_TX_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, TXIENB) + +#define UART_ALL_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, RXIENB|TXIENB) +#define UART_RX_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, RXIENB) +#define UART_TX_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, TXIENB) + +#define ARC_SERIAL_DEV_NAME "ttyARC" + +struct arc_uart_port { + struct uart_port port; + unsigned long baud; +}; + +#define to_arc_port(uport) container_of(uport, struct arc_uart_port, port) + +static struct arc_uart_port arc_uart_ports[CONFIG_SERIAL_ARC_NR_PORTS]; + +#ifdef CONFIG_SERIAL_ARC_CONSOLE +static struct console arc_console; +#endif + +#define DRIVER_NAME "arc-uart" + +static struct uart_driver arc_uart_driver = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = ARC_SERIAL_DEV_NAME, + .major = 0, + .minor = 0, + .nr = CONFIG_SERIAL_ARC_NR_PORTS, +#ifdef CONFIG_SERIAL_ARC_CONSOLE + .cons = &arc_console, +#endif +}; + 
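+/*
+ * Baud programming (see arc_serial_set_termios() below): the divisor
+ * hw-val = (CLK / (BAUD * 4)) - 1 is split across R_BAUDH:R_BAUDL.
+ * Purely as a hypothetical illustration, a 46.08 MHz uartclk at 115200
+ * baud would give 46080000 / 460800 - 1 = 99, i.e. R_BAUDH = 0x00 and
+ * R_BAUDL = 0x63.
+ */
+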
+static void arc_serial_stop_rx(struct uart_port *port)
+{
+        UART_RX_IRQ_DISABLE(port);
+}
+
+static void arc_serial_stop_tx(struct uart_port *port)
+{
+        while (!(UART_GET_STATUS(port) & TXEMPTY))
+                cpu_relax();
+
+        UART_TX_IRQ_DISABLE(port);
+}
+
+/*
+ * Return TIOCSER_TEMT when transmitter is not busy.
+ */
+static unsigned int arc_serial_tx_empty(struct uart_port *port)
+{
+        unsigned int stat;
+
+        stat = UART_GET_STATUS(port);
+        if (stat & TXEMPTY)
+                return TIOCSER_TEMT;
+
+        return 0;
+}
+
+/*
+ * Driver internal routine, used by both tty(serial core) as well as tx-isr
+ * -Called under spinlock in either case
+ * -also tty->flow.stopped has already been checked
+ * = by uart_start( ) before calling us
+ * = tx_ist checks that too before calling
+ */
+static void arc_serial_tx_chars(struct uart_port *port)
+{
+        struct circ_buf *xmit = &port->state->xmit;
+        int sent = 0;
+        unsigned char ch;
+
+        if (unlikely(port->x_char)) {
+                UART_SET_DATA(port, port->x_char);
+                port->icount.tx++;
+                port->x_char = 0;
+                sent = 1;
+        } else if (!uart_circ_empty(xmit)) {
+                ch = xmit->buf[xmit->tail];
+                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+                port->icount.tx++;
+                while (!(UART_GET_STATUS(port) & TXEMPTY))
+                        cpu_relax();
+                UART_SET_DATA(port, ch);
+                sent = 1;
+        }
+
+        /*
+         * If num chars in xmit buffer are too few, ask tty layer for more.
+         * By Hard ISR to schedule processing in software interrupt part
+         */
+        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+                uart_write_wakeup(port);
+
+        if (sent)
+                UART_TX_IRQ_ENABLE(port);
+}
+
+/*
+ * port is locked and interrupts are disabled
+ * uart_start( ) calls us under the port spinlock irqsave
+ */
+static void arc_serial_start_tx(struct uart_port *port)
+{
+        arc_serial_tx_chars(port);
+}
+
+static void arc_serial_rx_chars(struct uart_port *port, unsigned int status)
+{
+        unsigned int ch, flg = 0;
+
+        /*
+         * UART has 4 deep RX-FIFO. Driver's recognition of this fact
+         * is very subtle. Here's how ...
+         * Upon getting a RX-Intr, such that RX-EMPTY=0, meaning data available,
+         * driver reads the DATA Reg and keeps doing that in a loop, until
+         * RX-EMPTY=1. Multiple chars being avail, with a single Interrupt,
+         * before RX-EMPTY=0, implies some sort of buffering going on in the
+         * controller, which is indeed the Rx-FIFO.
+         */
+        do {
+                /*
+                 * This could be an Rx Intr for err (no data),
+                 * so check err and clear that Intr first
+                 */
+                if (unlikely(status & (RXOERR | RXFERR))) {
+                        if (status & RXOERR) {
+                                port->icount.overrun++;
+                                flg = TTY_OVERRUN;
+                                UART_CLR_STATUS(port, RXOERR);
+                        }
+
+                        if (status & RXFERR) {
+                                port->icount.frame++;
+                                flg = TTY_FRAME;
+                                UART_CLR_STATUS(port, RXFERR);
+                        }
+                } else
+                        flg = TTY_NORMAL;
+
+                if (status & RXEMPTY)
+                        continue;
+
+                ch = UART_GET_DATA(port);
+                port->icount.rx++;
+
+                if (!(uart_handle_sysrq_char(port, ch)))
+                        uart_insert_char(port, status, RXOERR, ch, flg);
+
+                tty_flip_buffer_push(&port->state->port);
+        } while (!((status = UART_GET_STATUS(port)) & RXEMPTY));
+}
+
+/*
+ * A note on the Interrupt handling state machine of this driver
+ *
+ * kernel printk writes funnel thru the console driver framework and in order
+ * to keep things simple as well as efficient, it writes to UART in polled
+ * mode, in one shot, and exits.
+ *
+ * OTOH, Userland output (via tty layer), uses interrupt based writes as there
+ * can be non-deterministic delay between char writes.
+ *
+ * Thus Rx-interrupts are always enabled, while tx-interrupts are by default
+ * disabled.
+ * + * When tty has some data to send out, serial core calls driver's start_tx + * which + * -checks-if-tty-buffer-has-char-to-send + * -writes-data-to-uart + * -enable-tx-intr + * + * Once data bits are pushed out, controller raises the Tx-room-avail-Interrupt. + * The first thing Tx ISR does is disable further Tx interrupts (as this could + * be the last char to send, before settling down into the quiet polled mode). + * It then calls the exact routine used by tty layer write to send out any + * more char in tty buffer. In case of sending, it re-enables Tx-intr. In case + * of no data, it remains disabled. + * This is how the transmit state machine is dynamically switched on/off + */ + +static irqreturn_t arc_serial_isr(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned int status; + + status = UART_GET_STATUS(port); + + /* + * Single IRQ for both Rx (data available) Tx (room available) Interrupt + * notifications from the UART Controller. + * To demultiplex between the two, we check the relevant bits + */ + if (status & RXIENB) { + + /* already in ISR, no need of xx_irqsave */ + spin_lock(&port->lock); + arc_serial_rx_chars(port, status); + spin_unlock(&port->lock); + } + + if ((status & TXIENB) && (status & TXEMPTY)) { + + /* Unconditionally disable further Tx-Interrupts. + * will be enabled by tx_chars() if needed. + */ + UART_TX_IRQ_DISABLE(port); + + spin_lock(&port->lock); + + if (!uart_tx_stopped(port)) + arc_serial_tx_chars(port); + + spin_unlock(&port->lock); + } + + return IRQ_HANDLED; +} + +static unsigned int arc_serial_get_mctrl(struct uart_port *port) +{ + /* + * Pretend we have a Modem status reg and following bits are + * always set, to satify the serial core state machine + * (DSR) Data Set Ready + * (CTS) Clear To Send + * (CAR) Carrier Detect + */ + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +} + +static void arc_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* MCR not present */ +} + +static void arc_serial_break_ctl(struct uart_port *port, int break_state) +{ + /* ARC UART doesn't support sending Break signal */ +} + +static int arc_serial_startup(struct uart_port *port) +{ + /* Before we hook up the ISR, Disable all UART Interrupts */ + UART_ALL_IRQ_DISABLE(port); + + if (request_irq(port->irq, arc_serial_isr, 0, "arc uart rx-tx", port)) { + dev_warn(port->dev, "Unable to attach ARC UART intr\n"); + return -EBUSY; + } + + UART_RX_IRQ_ENABLE(port); /* Only Rx IRQ enabled to begin with */ + + return 0; +} + +/* This is not really needed */ +static void arc_serial_shutdown(struct uart_port *port) +{ + free_irq(port->irq, port); +} + +static void +arc_serial_set_termios(struct uart_port *port, struct ktermios *new, + const struct ktermios *old) +{ + struct arc_uart_port *uart = to_arc_port(port); + unsigned int baud, uartl, uarth, hw_val; + unsigned long flags; + + /* + * Use the generic handler so that any specially encoded baud rates + * such as SPD_xx flags or "%B0" can be handled + * Max Baud I suppose will not be more than current 115K * 4 + * Formula for ARC UART is: hw-val = ((CLK/(BAUD*4)) -1) + * spread over two 8-bit registers + */ + baud = uart_get_baud_rate(port, new, old, 0, 460800); + + hw_val = port->uartclk / (uart->baud * 4) - 1; + uartl = hw_val & 0xFF; + uarth = (hw_val >> 8) & 0xFF; + + spin_lock_irqsave(&port->lock, flags); + + UART_ALL_IRQ_DISABLE(port); + + UART_SET_BAUDL(port, uartl); + UART_SET_BAUDH(port, uarth); + + UART_RX_IRQ_ENABLE(port); + + /* + * UART doesn't support Parity/Hardware Flow Control; 
+ * Only supports 8N1 character size + */ + new->c_cflag &= ~(CMSPAR|CRTSCTS|CSIZE); + new->c_cflag |= CS8; + + if (old) + tty_termios_copy_hw(new, old); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(new)) + tty_termios_encode_baud_rate(new, baud, baud); + + uart_update_timeout(port, new->c_cflag, baud); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *arc_serial_type(struct uart_port *port) +{ + return port->type == PORT_ARC ? DRIVER_NAME : NULL; +} + +static void arc_serial_release_port(struct uart_port *port) +{ +} + +static int arc_serial_request_port(struct uart_port *port) +{ + return 0; +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). + */ +static int +arc_serial_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (port->type != PORT_UNKNOWN && ser->type != PORT_ARC) + return -EINVAL; + + return 0; +} + +/* + * Configure/autoconfigure the port. + */ +static void arc_serial_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_ARC; +} + +#ifdef CONFIG_CONSOLE_POLL + +static void arc_serial_poll_putchar(struct uart_port *port, unsigned char chr) +{ + while (!(UART_GET_STATUS(port) & TXEMPTY)) + cpu_relax(); + + UART_SET_DATA(port, chr); +} + +static int arc_serial_poll_getchar(struct uart_port *port) +{ + unsigned char chr; + + while (!(UART_GET_STATUS(port) & RXEMPTY)) + cpu_relax(); + + chr = UART_GET_DATA(port); + return chr; +} +#endif + +static const struct uart_ops arc_serial_pops = { + .tx_empty = arc_serial_tx_empty, + .set_mctrl = arc_serial_set_mctrl, + .get_mctrl = arc_serial_get_mctrl, + .stop_tx = arc_serial_stop_tx, + .start_tx = arc_serial_start_tx, + .stop_rx = arc_serial_stop_rx, + .break_ctl = arc_serial_break_ctl, + .startup = arc_serial_startup, + .shutdown = arc_serial_shutdown, + .set_termios = arc_serial_set_termios, + .type = arc_serial_type, + .release_port = arc_serial_release_port, + .request_port = arc_serial_request_port, + .config_port = arc_serial_config_port, + .verify_port = arc_serial_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_put_char = arc_serial_poll_putchar, + .poll_get_char = arc_serial_poll_getchar, +#endif +}; + +#ifdef CONFIG_SERIAL_ARC_CONSOLE + +static int arc_serial_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= CONFIG_SERIAL_ARC_NR_PORTS) + return -ENODEV; + + /* + * The uart port backing the console (e.g. ttyARC1) might not have been + * init yet. If so, defer the console setup to after the port. 
+ */ + port = &arc_uart_ports[co->index].port; + if (!port->membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + /* + * Serial core will call port->ops->set_termios( ) + * which will set the baud reg + */ + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static void arc_serial_console_putchar(struct uart_port *port, unsigned char ch) +{ + while (!(UART_GET_STATUS(port) & TXEMPTY)) + cpu_relax(); + + UART_SET_DATA(port, (unsigned char)ch); +} + +/* + * Interrupts are disabled on entering + */ +static void arc_serial_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = &arc_uart_ports[co->index].port; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + uart_console_write(port, s, count, arc_serial_console_putchar); + spin_unlock_irqrestore(&port->lock, flags); +} + +static struct console arc_console = { + .name = ARC_SERIAL_DEV_NAME, + .write = arc_serial_console_write, + .device = uart_console_device, + .setup = arc_serial_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &arc_uart_driver +}; + +static void arc_early_serial_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, arc_serial_console_putchar); +} + +static int __init arc_early_console_setup(struct earlycon_device *dev, + const char *opt) +{ + struct uart_port *port = &dev->port; + unsigned int l, h, hw_val; + + if (!dev->port.membase) + return -ENODEV; + + hw_val = port->uartclk / (dev->baud * 4) - 1; + l = hw_val & 0xFF; + h = (hw_val >> 8) & 0xFF; + + UART_SET_BAUDL(port, l); + UART_SET_BAUDH(port, h); + + dev->con->write = arc_early_serial_write; + return 0; +} +OF_EARLYCON_DECLARE(arc_uart, "snps,arc-uart", arc_early_console_setup); + +#endif /* CONFIG_SERIAL_ARC_CONSOLE */ + +static int arc_serial_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct arc_uart_port *uart; + struct uart_port *port; + int dev_id; + u32 val; + + /* no device tree device */ + if (!np) + return -ENODEV; + + dev_id = of_alias_get_id(np, "serial"); + if (dev_id < 0) + dev_id = 0; + + if (dev_id >= ARRAY_SIZE(arc_uart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", dev_id); + return -EINVAL; + } + + uart = &arc_uart_ports[dev_id]; + port = &uart->port; + + if (of_property_read_u32(np, "clock-frequency", &val)) { + dev_err(&pdev->dev, "clock-frequency property NOTset\n"); + return -EINVAL; + } + port->uartclk = val; + + if (of_property_read_u32(np, "current-speed", &val)) { + dev_err(&pdev->dev, "current-speed property NOT set\n"); + return -EINVAL; + } + uart->baud = val; + + port->membase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(port->membase)) { + /* No point of dev_err since UART itself is hosed here */ + return PTR_ERR(port->membase); + } + + port->irq = irq_of_parse_and_map(np, 0); + + port->dev = &pdev->dev; + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF; + port->line = dev_id; + port->ops = &arc_serial_pops; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ARC_CONSOLE); + + port->fifosize = ARC_UART_TX_FIFO_SIZE; + + /* + * uart_insert_char( ) uses it in decideding whether to ignore a + * char or not. 
Explicitly setting it here, removes the subtelty + */ + port->ignore_status_mask = 0; + + return uart_add_one_port(&arc_uart_driver, &arc_uart_ports[dev_id].port); +} + +static int arc_serial_remove(struct platform_device *pdev) +{ + /* This will never be called */ + return 0; +} + +static const struct of_device_id arc_uart_dt_ids[] = { + { .compatible = "snps,arc-uart" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, arc_uart_dt_ids); + +static struct platform_driver arc_platform_driver = { + .probe = arc_serial_probe, + .remove = arc_serial_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = arc_uart_dt_ids, + }, +}; + +static int __init arc_serial_init(void) +{ + int ret; + + ret = uart_register_driver(&arc_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&arc_platform_driver); + if (ret) + uart_unregister_driver(&arc_uart_driver); + + return ret; +} + +static void __exit arc_serial_exit(void) +{ + platform_driver_unregister(&arc_platform_driver); + uart_unregister_driver(&arc_uart_driver); +} + +module_init(arc_serial_init); +module_exit(arc_serial_exit); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Vineet Gupta"); +MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver"); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c new file mode 100644 index 000000000..fbce8ef20 --- /dev/null +++ b/drivers/tty/serial/atmel_serial.c @@ -0,0 +1,3072 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Atmel AT91 Serial ports + * Copyright (C) 2003 Rick Bronson + * + * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd. + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * DMA support added by Chip Coldwell. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define PDC_BUFFER_SIZE 512 +/* Revisit: We should calculate this based on the actual port settings */ +#define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */ + +/* The minium number of data FIFOs should be able to contain */ +#define ATMEL_MIN_FIFO_SIZE 8 +/* + * These two offsets are substracted from the RX FIFO size to define the RTS + * high and low thresholds + */ +#define ATMEL_RTS_HIGH_OFFSET 16 +#define ATMEL_RTS_LOW_OFFSET 20 + +#include + +#include "serial_mctrl_gpio.h" +#include "atmel_serial.h" + +static void atmel_start_rx(struct uart_port *port); +static void atmel_stop_rx(struct uart_port *port); + +#ifdef CONFIG_SERIAL_ATMEL_TTYAT + +/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we + * should coexist with the 8250 driver, such as if we have an external 16C550 + * UART. */ +#define SERIAL_ATMEL_MAJOR 204 +#define MINOR_START 154 +#define ATMEL_DEVICENAME "ttyAT" + +#else + +/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port + * name, but it is legally reserved for the 8250 driver. 
*/ +#define SERIAL_ATMEL_MAJOR TTY_MAJOR +#define MINOR_START 64 +#define ATMEL_DEVICENAME "ttyS" + +#endif + +#define ATMEL_ISR_PASS_LIMIT 256 + +struct atmel_dma_buffer { + unsigned char *buf; + dma_addr_t dma_addr; + unsigned int dma_size; + unsigned int ofs; +}; + +struct atmel_uart_char { + u16 status; + u16 ch; +}; + +/* + * Be careful, the real size of the ring buffer is + * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer + * can contain up to 1024 characters in PIO mode and up to 4096 characters in + * DMA mode. + */ +#define ATMEL_SERIAL_RINGSIZE 1024 + +/* + * at91: 6 USARTs and one DBGU port (SAM9260) + * samx7: 3 USARTs and 5 UARTs + */ +#define ATMEL_MAX_UART 8 + +/* + * We wrap our port structure around the generic uart_port. + */ +struct atmel_uart_port { + struct uart_port uart; /* uart */ + struct clk *clk; /* uart clock */ + struct clk *gclk; /* uart generic clock */ + int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */ + u32 backup_imr; /* IMR saved during suspend */ + int break_active; /* break being received */ + + bool use_dma_rx; /* enable DMA receiver */ + bool use_pdc_rx; /* enable PDC receiver */ + short pdc_rx_idx; /* current PDC RX buffer */ + struct atmel_dma_buffer pdc_rx[2]; /* PDC receier */ + + bool use_dma_tx; /* enable DMA transmitter */ + bool use_pdc_tx; /* enable PDC transmitter */ + struct atmel_dma_buffer pdc_tx; /* PDC transmitter */ + + spinlock_t lock_tx; /* port lock */ + spinlock_t lock_rx; /* port lock */ + struct dma_chan *chan_tx; + struct dma_chan *chan_rx; + struct dma_async_tx_descriptor *desc_tx; + struct dma_async_tx_descriptor *desc_rx; + dma_cookie_t cookie_tx; + dma_cookie_t cookie_rx; + struct scatterlist sg_tx; + struct scatterlist sg_rx; + struct tasklet_struct tasklet_rx; + struct tasklet_struct tasklet_tx; + atomic_t tasklet_shutdown; + unsigned int irq_status_prev; + unsigned int tx_len; + + struct circ_buf rx_ring; + + struct mctrl_gpios *gpios; + u32 backup_mode; /* MR saved during iso7816 operations */ + u32 backup_brgr; /* BRGR saved during iso7816 operations */ + unsigned int tx_done_mask; + u32 fifo_size; + u32 rts_high; + u32 rts_low; + bool ms_irq_enabled; + u32 rtor; /* address of receiver timeout register if it exists */ + bool is_usart; + bool has_frac_baudrate; + bool has_hw_timer; + struct timer_list uart_timer; + + bool tx_stopped; + bool suspended; + unsigned int pending; + unsigned int pending_status; + spinlock_t lock_suspended; + + bool hd_start_rx; /* can start RX during half-duplex operation */ + + /* ISO7816 */ + unsigned int fidi_min; + unsigned int fidi_max; + + struct { + u32 cr; + u32 mr; + u32 imr; + u32 brgr; + u32 rtor; + u32 ttgr; + u32 fmr; + u32 fimr; + } cache; + + int (*prepare_rx)(struct uart_port *port); + int (*prepare_tx)(struct uart_port *port); + void (*schedule_rx)(struct uart_port *port); + void (*schedule_tx)(struct uart_port *port); + void (*release_rx)(struct uart_port *port); + void (*release_tx)(struct uart_port *port); +}; + +static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; +static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART); + +#if defined(CONFIG_OF) +static const struct of_device_id atmel_serial_dt_ids[] = { + { .compatible = "atmel,at91rm9200-usart-serial" }, + { /* sentinel */ } +}; +#endif + +static inline struct atmel_uart_port * +to_atmel_uart_port(struct uart_port *uart) +{ + return container_of(uart, struct atmel_uart_port, uart); +} + +static inline u32 atmel_uart_readl(struct uart_port *port, 
u32 reg) +{ + return __raw_readl(port->membase + reg); +} + +static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value) +{ + __raw_writel(value, port->membase + reg); +} + +static inline u8 atmel_uart_read_char(struct uart_port *port) +{ + return __raw_readb(port->membase + ATMEL_US_RHR); +} + +static inline void atmel_uart_write_char(struct uart_port *port, u8 value) +{ + __raw_writeb(value, port->membase + ATMEL_US_THR); +} + +static inline int atmel_uart_is_half_duplex(struct uart_port *port) +{ + return ((port->rs485.flags & SER_RS485_ENABLED) && + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || + (port->iso7816.flags & SER_ISO7816_ENABLED); +} + +static inline int atmel_error_rate(int desired_value, int actual_value) +{ + return 100 - (desired_value * 100) / actual_value; +} + +#ifdef CONFIG_SERIAL_ATMEL_PDC +static bool atmel_use_pdc_rx(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + return atmel_port->use_pdc_rx; +} + +static bool atmel_use_pdc_tx(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + return atmel_port->use_pdc_tx; +} +#else +static bool atmel_use_pdc_rx(struct uart_port *port) +{ + return false; +} + +static bool atmel_use_pdc_tx(struct uart_port *port) +{ + return false; +} +#endif + +static bool atmel_use_dma_tx(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + return atmel_port->use_dma_tx; +} + +static bool atmel_use_dma_rx(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + return atmel_port->use_dma_rx; +} + +static bool atmel_use_fifo(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + return atmel_port->fifo_size; +} + +static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port, + struct tasklet_struct *t) +{ + if (!atomic_read(&atmel_port->tasklet_shutdown)) + tasklet_schedule(t); +} + +/* Enable or disable the rs485 support */ +static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485conf) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int mode; + + /* Disable interrupts */ + atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); + + mode = atmel_uart_readl(port, ATMEL_US_MR); + + if (rs485conf->flags & SER_RS485_ENABLED) { + dev_dbg(port->dev, "Setting UART to RS485\n"); + if (rs485conf->flags & SER_RS485_RX_DURING_TX) + atmel_port->tx_done_mask = ATMEL_US_TXRDY; + else + atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; + + atmel_uart_writel(port, ATMEL_US_TTGR, + rs485conf->delay_rts_after_send); + mode &= ~ATMEL_US_USMODE; + mode |= ATMEL_US_USMODE_RS485; + } else { + dev_dbg(port->dev, "Setting UART to RS232\n"); + if (atmel_use_pdc_tx(port)) + atmel_port->tx_done_mask = ATMEL_US_ENDTX | + ATMEL_US_TXBUFE; + else + atmel_port->tx_done_mask = ATMEL_US_TXRDY; + } + atmel_uart_writel(port, ATMEL_US_MR, mode); + + /* Enable interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); + + return 0; +} + +static unsigned int atmel_calc_cd(struct uart_port *port, + struct serial_iso7816 *iso7816conf) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int cd; + u64 mck_rate; + + mck_rate = (u64)clk_get_rate(atmel_port->clk); + do_div(mck_rate, iso7816conf->clk); + cd = mck_rate; + return cd; +} + +static unsigned int atmel_calc_fidi(struct uart_port *port, + struct 
serial_iso7816 *iso7816conf) +{ + u64 fidi = 0; + + if (iso7816conf->sc_fi && iso7816conf->sc_di) { + fidi = (u64)iso7816conf->sc_fi; + do_div(fidi, iso7816conf->sc_di); + } + return (u32)fidi; +} + +/* Enable or disable the iso7816 support */ +/* Called with interrupts disabled */ +static int atmel_config_iso7816(struct uart_port *port, + struct serial_iso7816 *iso7816conf) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int mode; + unsigned int cd, fidi; + int ret = 0; + + /* Disable interrupts */ + atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); + + mode = atmel_uart_readl(port, ATMEL_US_MR); + + if (iso7816conf->flags & SER_ISO7816_ENABLED) { + mode &= ~ATMEL_US_USMODE; + + if (iso7816conf->tg > 255) { + dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n"); + memset(iso7816conf, 0, sizeof(struct serial_iso7816)); + ret = -EINVAL; + goto err_out; + } + + if ((iso7816conf->flags & SER_ISO7816_T_PARAM) + == SER_ISO7816_T(0)) { + mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK; + } else if ((iso7816conf->flags & SER_ISO7816_T_PARAM) + == SER_ISO7816_T(1)) { + mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK; + } else { + dev_err(port->dev, "ISO7816: Type not supported\n"); + memset(iso7816conf, 0, sizeof(struct serial_iso7816)); + ret = -EINVAL; + goto err_out; + } + + mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR); + + /* select mck clock, and output */ + mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; + /* set parity for normal/inverse mode + max iterations */ + mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3); + + cd = atmel_calc_cd(port, iso7816conf); + fidi = atmel_calc_fidi(port, iso7816conf); + if (fidi == 0) { + dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n"); + } else if (fidi < atmel_port->fidi_min + || fidi > atmel_port->fidi_max) { + dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi); + memset(iso7816conf, 0, sizeof(struct serial_iso7816)); + ret = -EINVAL; + goto err_out; + } + + if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) { + /* port not yet in iso7816 mode: store configuration */ + atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR); + atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR); + } + + atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg); + atmel_uart_writel(port, ATMEL_US_BRGR, cd); + atmel_uart_writel(port, ATMEL_US_FIDI, fidi); + + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN); + atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION; + } else { + dev_dbg(port->dev, "Setting UART back to RS232\n"); + /* back to last RS232 settings */ + mode = atmel_port->backup_mode; + memset(iso7816conf, 0, sizeof(struct serial_iso7816)); + atmel_uart_writel(port, ATMEL_US_TTGR, 0); + atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr); + atmel_uart_writel(port, ATMEL_US_FIDI, 0x174); + + if (atmel_use_pdc_tx(port)) + atmel_port->tx_done_mask = ATMEL_US_ENDTX | + ATMEL_US_TXBUFE; + else + atmel_port->tx_done_mask = ATMEL_US_TXRDY; + } + + port->iso7816 = *iso7816conf; + + atmel_uart_writel(port, ATMEL_US_MR, mode); + +err_out: + /* Enable interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); + + return ret; +} + +/* + * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. 
+ */ +static u_int atmel_tx_empty(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (atmel_port->tx_stopped) + return TIOCSER_TEMT; + return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ? + TIOCSER_TEMT : + 0; +} + +/* + * Set state of the modem control output lines + */ +static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) +{ + unsigned int control = 0; + unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR); + unsigned int rts_paused, rts_ready; + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + /* override mode to RS485 if needed, otherwise keep the current mode */ + if (port->rs485.flags & SER_RS485_ENABLED) { + atmel_uart_writel(port, ATMEL_US_TTGR, + port->rs485.delay_rts_after_send); + mode &= ~ATMEL_US_USMODE; + mode |= ATMEL_US_USMODE_RS485; + } + + /* set the RTS line state according to the mode */ + if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { + /* force RTS line to high level */ + rts_paused = ATMEL_US_RTSEN; + + /* give the control of the RTS line back to the hardware */ + rts_ready = ATMEL_US_RTSDIS; + } else { + /* force RTS line to high level */ + rts_paused = ATMEL_US_RTSDIS; + + /* force RTS line to low level */ + rts_ready = ATMEL_US_RTSEN; + } + + if (mctrl & TIOCM_RTS) + control |= rts_ready; + else + control |= rts_paused; + + if (mctrl & TIOCM_DTR) + control |= ATMEL_US_DTREN; + else + control |= ATMEL_US_DTRDIS; + + atmel_uart_writel(port, ATMEL_US_CR, control); + + mctrl_gpio_set(atmel_port->gpios, mctrl); + + /* Local loopback mode? */ + mode &= ~ATMEL_US_CHMODE; + if (mctrl & TIOCM_LOOP) + mode |= ATMEL_US_CHMODE_LOC_LOOP; + else + mode |= ATMEL_US_CHMODE_NORMAL; + + atmel_uart_writel(port, ATMEL_US_MR, mode); +} + +/* + * Get state of the modem control input lines + */ +static u_int atmel_get_mctrl(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int ret = 0, status; + + status = atmel_uart_readl(port, ATMEL_US_CSR); + + /* + * The control signals are active low. + */ + if (!(status & ATMEL_US_DCD)) + ret |= TIOCM_CD; + if (!(status & ATMEL_US_CTS)) + ret |= TIOCM_CTS; + if (!(status & ATMEL_US_DSR)) + ret |= TIOCM_DSR; + if (!(status & ATMEL_US_RI)) + ret |= TIOCM_RI; + + return mctrl_gpio_get(atmel_port->gpios, &ret); +} + +/* + * Stop transmitting. + */ +static void atmel_stop_tx(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (atmel_use_pdc_tx(port)) { + /* disable PDC transmit */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); + } + + /* + * Disable the transmitter. + * This is mandatory when DMA is used, otherwise the DMA buffer + * is fully transmitted. + */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); + atmel_port->tx_stopped = true; + + /* Disable interrupts */ + atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); + + if (atmel_uart_is_half_duplex(port)) + if (!atomic_read(&atmel_port->tasklet_shutdown)) + atmel_start_rx(port); + +} + +/* + * Start transmitting. + */ +static void atmel_start_tx(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) + & ATMEL_PDC_TXTEN)) + /* The transmitter is already running. 
Yes, we + really need this.*/ + return; + + if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) + if (atmel_uart_is_half_duplex(port)) + atmel_stop_rx(port); + + if (atmel_use_pdc_tx(port)) + /* re-enable PDC transmit */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); + + /* Enable interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); + + /* re-enable the transmitter */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); + atmel_port->tx_stopped = false; +} + +/* + * start receiving - port is in process of being opened. + */ +static void atmel_start_rx(struct uart_port *port) +{ + /* reset status and receiver */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); + + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN); + + if (atmel_use_pdc_rx(port)) { + /* enable PDC controller */ + atmel_uart_writel(port, ATMEL_US_IER, + ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | + port->read_status_mask); + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); + } else { + atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); + } +} + +/* + * Stop receiving - port is in process of being closed. + */ +static void atmel_stop_rx(struct uart_port *port) +{ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS); + + if (atmel_use_pdc_rx(port)) { + /* disable PDC receive */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS); + atmel_uart_writel(port, ATMEL_US_IDR, + ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | + port->read_status_mask); + } else { + atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY); + } +} + +/* + * Enable modem status interrupts + */ +static void atmel_enable_ms(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + uint32_t ier = 0; + + /* + * Interrupt should not be enabled twice + */ + if (atmel_port->ms_irq_enabled) + return; + + atmel_port->ms_irq_enabled = true; + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) + ier |= ATMEL_US_CTSIC; + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) + ier |= ATMEL_US_DSRIC; + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) + ier |= ATMEL_US_RIIC; + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) + ier |= ATMEL_US_DCDIC; + + atmel_uart_writel(port, ATMEL_US_IER, ier); + + mctrl_gpio_enable_ms(atmel_port->gpios); +} + +/* + * Disable modem status interrupts + */ +static void atmel_disable_ms(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + uint32_t idr = 0; + + /* + * Interrupt should not be disabled twice + */ + if (!atmel_port->ms_irq_enabled) + return; + + atmel_port->ms_irq_enabled = false; + + mctrl_gpio_disable_ms(atmel_port->gpios); + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) + idr |= ATMEL_US_CTSIC; + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) + idr |= ATMEL_US_DSRIC; + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) + idr |= ATMEL_US_RIIC; + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) + idr |= ATMEL_US_DCDIC; + + atmel_uart_writel(port, ATMEL_US_IDR, idr); +} + +/* + * Control the transmission of a break signal + */ +static void atmel_break_ctl(struct uart_port *port, int break_state) +{ + if (break_state != 0) + /* start break */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK); + else + /* stop break */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK); +} + +/* + * Stores the incoming character in the ring buffer + */ +static void +atmel_buffer_rx_char(struct uart_port 
*port, unsigned int status, + unsigned int ch) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct circ_buf *ring = &atmel_port->rx_ring; + struct atmel_uart_char *c; + + if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) + /* Buffer overflow, ignore char */ + return; + + c = &((struct atmel_uart_char *)ring->buf)[ring->head]; + c->status = status; + c->ch = ch; + + /* Make sure the character is stored before we update head. */ + smp_wmb(); + + ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); +} + +/* + * Deal with parity, framing and overrun errors. + */ +static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) +{ + /* clear error */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); + + if (status & ATMEL_US_RXBRK) { + /* ignore side-effect */ + status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); + port->icount.brk++; + } + if (status & ATMEL_US_PARE) + port->icount.parity++; + if (status & ATMEL_US_FRAME) + port->icount.frame++; + if (status & ATMEL_US_OVRE) + port->icount.overrun++; +} + +/* + * Characters received (called from interrupt handler) + */ +static void atmel_rx_chars(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int status, ch; + + status = atmel_uart_readl(port, ATMEL_US_CSR); + while (status & ATMEL_US_RXRDY) { + ch = atmel_uart_read_char(port); + + /* + * note that the error handling code is + * out of the main execution path + */ + if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME + | ATMEL_US_OVRE | ATMEL_US_RXBRK) + || atmel_port->break_active)) { + + /* clear error */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); + + if (status & ATMEL_US_RXBRK + && !atmel_port->break_active) { + atmel_port->break_active = 1; + atmel_uart_writel(port, ATMEL_US_IER, + ATMEL_US_RXBRK); + } else { + /* + * This is either the end-of-break + * condition or we've received at + * least one character without RXBRK + * being set. In both cases, the next + * RXBRK will indicate start-of-break. 
+ */ + atmel_uart_writel(port, ATMEL_US_IDR, + ATMEL_US_RXBRK); + status &= ~ATMEL_US_RXBRK; + atmel_port->break_active = 0; + } + } + + atmel_buffer_rx_char(port, status, ch); + status = atmel_uart_readl(port, ATMEL_US_CSR); + } + + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); +} + +/* + * Transmit characters (called from tasklet with TXRDY interrupt + * disabled) + */ +static void atmel_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (port->x_char && + (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) { + atmel_uart_write_char(port, port->x_char); + port->icount.tx++; + port->x_char = 0; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + return; + + while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) { + atmel_uart_write_char(port, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (!uart_circ_empty(xmit)) { + /* we still have characters to transmit, so we should continue + * transmitting them when TX is ready, regardless of + * mode or duplexity + */ + atmel_port->tx_done_mask |= ATMEL_US_TXRDY; + + /* Enable interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, + atmel_port->tx_done_mask); + } else { + if (atmel_uart_is_half_duplex(port)) + atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY; + } +} + +static void atmel_complete_tx_dma(void *arg) +{ + struct atmel_uart_port *atmel_port = arg; + struct uart_port *port = &atmel_port->uart; + struct circ_buf *xmit = &port->state->xmit; + struct dma_chan *chan = atmel_port->chan_tx; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + if (chan) + dmaengine_terminate_all(chan); + xmit->tail += atmel_port->tx_len; + xmit->tail &= UART_XMIT_SIZE - 1; + + port->icount.tx += atmel_port->tx_len; + + spin_lock(&atmel_port->lock_tx); + async_tx_ack(atmel_port->desc_tx); + atmel_port->cookie_tx = -EINVAL; + atmel_port->desc_tx = NULL; + spin_unlock(&atmel_port->lock_tx); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + /* + * xmit is a circular buffer so, if we have just send data from + * xmit->tail to the end of xmit->buf, now we have to transmit the + * remaining data from the beginning of xmit->buf to xmit->head. + */ + if (!uart_circ_empty(xmit)) + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); + else if (atmel_uart_is_half_duplex(port)) { + /* + * DMA done, re-enable TXEMPTY and signal that we can stop + * TX and start RX for RS485 + */ + atmel_port->hd_start_rx = true; + atmel_uart_writel(port, ATMEL_US_IER, + atmel_port->tx_done_mask); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void atmel_release_tx_dma(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct dma_chan *chan = atmel_port->chan_tx; + + if (chan) { + dmaengine_terminate_all(chan); + dma_release_channel(chan); + dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1, + DMA_TO_DEVICE); + } + + atmel_port->desc_tx = NULL; + atmel_port->chan_tx = NULL; + atmel_port->cookie_tx = -EINVAL; +} + +/* + * Called from tasklet with TXRDY interrupt is disabled. 
+ */ +static void atmel_tx_dma(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct circ_buf *xmit = &port->state->xmit; + struct dma_chan *chan = atmel_port->chan_tx; + struct dma_async_tx_descriptor *desc; + struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx; + unsigned int tx_len, part1_len, part2_len, sg_len; + dma_addr_t phys_addr; + + /* Make sure we have an idle channel */ + if (atmel_port->desc_tx != NULL) + return; + + if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { + /* + * DMA is idle now. + * Port xmit buffer is already mapped, + * and it is one page... Just adjust + * offsets and lengths. Since it is a circular buffer, + * we have to transmit till the end, and then the rest. + * Take the port lock to get a + * consistent xmit buffer state. + */ + tx_len = CIRC_CNT_TO_END(xmit->head, + xmit->tail, + UART_XMIT_SIZE); + + if (atmel_port->fifo_size) { + /* multi data mode */ + part1_len = (tx_len & ~0x3); /* DWORD access */ + part2_len = (tx_len & 0x3); /* BYTE access */ + } else { + /* single data (legacy) mode */ + part1_len = 0; + part2_len = tx_len; /* BYTE access only */ + } + + sg_init_table(sgl, 2); + sg_len = 0; + phys_addr = sg_dma_address(sg_tx) + xmit->tail; + if (part1_len) { + sg = &sgl[sg_len++]; + sg_dma_address(sg) = phys_addr; + sg_dma_len(sg) = part1_len; + + phys_addr += part1_len; + } + + if (part2_len) { + sg = &sgl[sg_len++]; + sg_dma_address(sg) = phys_addr; + sg_dma_len(sg) = part2_len; + } + + /* + * save tx_len so atmel_complete_tx_dma() will increase + * xmit->tail correctly + */ + atmel_port->tx_len = tx_len; + + desc = dmaengine_prep_slave_sg(chan, + sgl, + sg_len, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + if (!desc) { + dev_err(port->dev, "Failed to send via dma!\n"); + return; + } + + dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE); + + atmel_port->desc_tx = desc; + desc->callback = atmel_complete_tx_dma; + desc->callback_param = atmel_port; + atmel_port->cookie_tx = dmaengine_submit(desc); + if (dma_submit_error(atmel_port->cookie_tx)) { + dev_err(port->dev, "dma_submit_error %d\n", + atmel_port->cookie_tx); + return; + } + + dma_async_issue_pending(chan); + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static int atmel_prepare_tx_dma(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct device *mfd_dev = port->dev->parent; + dma_cap_mask_t mask; + struct dma_slave_config config; + int ret, nent; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx"); + if (atmel_port->chan_tx == NULL) + goto chan_err; + dev_info(port->dev, "using %s for tx DMA transfers\n", + dma_chan_name(atmel_port->chan_tx)); + + spin_lock_init(&atmel_port->lock_tx); + sg_init_table(&atmel_port->sg_tx, 1); + /* UART circular tx buffer is an aligned page. 
*/ + BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); + sg_set_page(&atmel_port->sg_tx, + virt_to_page(port->state->xmit.buf), + UART_XMIT_SIZE, + offset_in_page(port->state->xmit.buf)); + nent = dma_map_sg(port->dev, + &atmel_port->sg_tx, + 1, + DMA_TO_DEVICE); + + if (!nent) { + dev_dbg(port->dev, "need to release resource of dma\n"); + goto chan_err; + } else { + dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, + sg_dma_len(&atmel_port->sg_tx), + port->state->xmit.buf, + &sg_dma_address(&atmel_port->sg_tx)); + } + + /* Configure the slave DMA */ + memset(&config, 0, sizeof(config)); + config.direction = DMA_MEM_TO_DEV; + config.dst_addr_width = (atmel_port->fifo_size) ? + DMA_SLAVE_BUSWIDTH_4_BYTES : + DMA_SLAVE_BUSWIDTH_1_BYTE; + config.dst_addr = port->mapbase + ATMEL_US_THR; + config.dst_maxburst = 1; + + ret = dmaengine_slave_config(atmel_port->chan_tx, + &config); + if (ret) { + dev_err(port->dev, "DMA tx slave configuration failed\n"); + goto chan_err; + } + + return 0; + +chan_err: + dev_err(port->dev, "TX channel not available, switch to pio\n"); + atmel_port->use_dma_tx = false; + if (atmel_port->chan_tx) + atmel_release_tx_dma(port); + return -EINVAL; +} + +static void atmel_complete_rx_dma(void *arg) +{ + struct uart_port *port = arg; + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); +} + +static void atmel_release_rx_dma(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct dma_chan *chan = atmel_port->chan_rx; + + if (chan) { + dmaengine_terminate_all(chan); + dma_release_channel(chan); + dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1, + DMA_FROM_DEVICE); + } + + atmel_port->desc_rx = NULL; + atmel_port->chan_rx = NULL; + atmel_port->cookie_rx = -EINVAL; +} + +static void atmel_rx_from_dma(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct tty_port *tport = &port->state->port; + struct circ_buf *ring = &atmel_port->rx_ring; + struct dma_chan *chan = atmel_port->chan_rx; + struct dma_tx_state state; + enum dma_status dmastat; + size_t count; + + + /* Reset the UART timeout early so that we don't miss one */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); + dmastat = dmaengine_tx_status(chan, + atmel_port->cookie_rx, + &state); + /* Restart a new tasklet if DMA status is error */ + if (dmastat == DMA_ERROR) { + dev_dbg(port->dev, "Get residue error, restart tasklet\n"); + atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); + return; + } + + /* CPU claims ownership of RX DMA buffer */ + dma_sync_sg_for_cpu(port->dev, + &atmel_port->sg_rx, + 1, + DMA_FROM_DEVICE); + + /* + * ring->head points to the end of data already written by the DMA. + * ring->tail points to the beginning of data to be read by the + * framework. + * The current transfer size should not be larger than the dma buffer + * length. + */ + ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue; + BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx)); + /* + * At this point ring->head may point to the first byte right after the + * last byte of the dma buffer: + * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx) + * + * However ring->tail must always points inside the dma buffer: + * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1 + * + * Since we use a ring buffer, we have to handle the case + * where head is lower than tail. 
In such a case, we first read from + * tail to the end of the buffer then reset tail. + */ + if (ring->head < ring->tail) { + count = sg_dma_len(&atmel_port->sg_rx) - ring->tail; + + tty_insert_flip_string(tport, ring->buf + ring->tail, count); + ring->tail = 0; + port->icount.rx += count; + } + + /* Finally we read data from tail to head */ + if (ring->tail < ring->head) { + count = ring->head - ring->tail; + + tty_insert_flip_string(tport, ring->buf + ring->tail, count); + /* Wrap ring->head if needed */ + if (ring->head >= sg_dma_len(&atmel_port->sg_rx)) + ring->head = 0; + ring->tail = ring->head; + port->icount.rx += count; + } + + /* USART retreives ownership of RX DMA buffer */ + dma_sync_sg_for_device(port->dev, + &atmel_port->sg_rx, + 1, + DMA_FROM_DEVICE); + + tty_flip_buffer_push(tport); + + atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); +} + +static int atmel_prepare_rx_dma(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct device *mfd_dev = port->dev->parent; + struct dma_async_tx_descriptor *desc; + dma_cap_mask_t mask; + struct dma_slave_config config; + struct circ_buf *ring; + int ret, nent; + + ring = &atmel_port->rx_ring; + + dma_cap_zero(mask); + dma_cap_set(DMA_CYCLIC, mask); + + atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx"); + if (atmel_port->chan_rx == NULL) + goto chan_err; + dev_info(port->dev, "using %s for rx DMA transfers\n", + dma_chan_name(atmel_port->chan_rx)); + + spin_lock_init(&atmel_port->lock_rx); + sg_init_table(&atmel_port->sg_rx, 1); + /* UART circular rx buffer is an aligned page. */ + BUG_ON(!PAGE_ALIGNED(ring->buf)); + sg_set_page(&atmel_port->sg_rx, + virt_to_page(ring->buf), + sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, + offset_in_page(ring->buf)); + nent = dma_map_sg(port->dev, + &atmel_port->sg_rx, + 1, + DMA_FROM_DEVICE); + + if (!nent) { + dev_dbg(port->dev, "need to release resource of dma\n"); + goto chan_err; + } else { + dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, + sg_dma_len(&atmel_port->sg_rx), + ring->buf, + &sg_dma_address(&atmel_port->sg_rx)); + } + + /* Configure the slave DMA */ + memset(&config, 0, sizeof(config)); + config.direction = DMA_DEV_TO_MEM; + config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + config.src_addr = port->mapbase + ATMEL_US_RHR; + config.src_maxburst = 1; + + ret = dmaengine_slave_config(atmel_port->chan_rx, + &config); + if (ret) { + dev_err(port->dev, "DMA rx slave configuration failed\n"); + goto chan_err; + } + /* + * Prepare a cyclic dma transfer, assign 2 descriptors, + * each one is half ring buffer size + */ + desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx, + sg_dma_address(&atmel_port->sg_rx), + sg_dma_len(&atmel_port->sg_rx), + sg_dma_len(&atmel_port->sg_rx)/2, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(port->dev, "Preparing DMA cyclic failed\n"); + goto chan_err; + } + desc->callback = atmel_complete_rx_dma; + desc->callback_param = port; + atmel_port->desc_rx = desc; + atmel_port->cookie_rx = dmaengine_submit(desc); + if (dma_submit_error(atmel_port->cookie_rx)) { + dev_err(port->dev, "dma_submit_error %d\n", + atmel_port->cookie_rx); + goto chan_err; + } + + dma_async_issue_pending(atmel_port->chan_rx); + + return 0; + +chan_err: + dev_err(port->dev, "RX channel not available, switch to pio\n"); + atmel_port->use_dma_rx = false; + if (atmel_port->chan_rx) + atmel_release_rx_dma(port); + return -EINVAL; +} + +static void atmel_uart_timer_callback(struct 
timer_list *t) +{ + struct atmel_uart_port *atmel_port = from_timer(atmel_port, t, + uart_timer); + struct uart_port *port = &atmel_port->uart; + + if (!atomic_read(&atmel_port->tasklet_shutdown)) { + tasklet_schedule(&atmel_port->tasklet_rx); + mod_timer(&atmel_port->uart_timer, + jiffies + uart_poll_timeout(port)); + } +} + +/* + * receive interrupt handler. + */ +static void +atmel_handle_receive(struct uart_port *port, unsigned int pending) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (atmel_use_pdc_rx(port)) { + /* + * PDC receive. Just schedule the tasklet and let it + * figure out the details. + * + * TODO: We're not handling error flags correctly at + * the moment. + */ + if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { + atmel_uart_writel(port, ATMEL_US_IDR, + (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)); + atmel_tasklet_schedule(atmel_port, + &atmel_port->tasklet_rx); + } + + if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | + ATMEL_US_FRAME | ATMEL_US_PARE)) + atmel_pdc_rxerr(port, pending); + } + + if (atmel_use_dma_rx(port)) { + if (pending & ATMEL_US_TIMEOUT) { + atmel_uart_writel(port, ATMEL_US_IDR, + ATMEL_US_TIMEOUT); + atmel_tasklet_schedule(atmel_port, + &atmel_port->tasklet_rx); + } + } + + /* Interrupt receive */ + if (pending & ATMEL_US_RXRDY) + atmel_rx_chars(port); + else if (pending & ATMEL_US_RXBRK) { + /* + * End of break detected. If it came along with a + * character, atmel_rx_chars will handle it. + */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); + atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK); + atmel_port->break_active = 0; + } +} + +/* + * transmit interrupt handler. (Transmit is IRQF_NODELAY safe) + */ +static void +atmel_handle_transmit(struct uart_port *port, unsigned int pending) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (pending & atmel_port->tx_done_mask) { + atmel_uart_writel(port, ATMEL_US_IDR, + atmel_port->tx_done_mask); + + /* Start RX if flag was set and FIFO is empty */ + if (atmel_port->hd_start_rx) { + if (!(atmel_uart_readl(port, ATMEL_US_CSR) + & ATMEL_US_TXEMPTY)) + dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); + + atmel_port->hd_start_rx = false; + atmel_start_rx(port); + } + + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); + } +} + +/* + * status flags interrupt handler. + */ +static void +atmel_handle_status(struct uart_port *port, unsigned int pending, + unsigned int status) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int status_change; + + if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC + | ATMEL_US_CTSIC)) { + status_change = status ^ atmel_port->irq_status_prev; + atmel_port->irq_status_prev = status; + + if (status_change & (ATMEL_US_RI | ATMEL_US_DSR + | ATMEL_US_DCD | ATMEL_US_CTS)) { + /* TODO: All reads to CSR will clear these interrupts! 
*/ + if (status_change & ATMEL_US_RI) + port->icount.rng++; + if (status_change & ATMEL_US_DSR) + port->icount.dsr++; + if (status_change & ATMEL_US_DCD) + uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); + if (status_change & ATMEL_US_CTS) + uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); + + wake_up_interruptible(&port->state->port.delta_msr_wait); + } + } + + if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION)) + dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending); +} + +/* + * Interrupt handler + */ +static irqreturn_t atmel_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int status, pending, mask, pass_counter = 0; + + spin_lock(&atmel_port->lock_suspended); + + do { + status = atmel_uart_readl(port, ATMEL_US_CSR); + mask = atmel_uart_readl(port, ATMEL_US_IMR); + pending = status & mask; + if (!pending) + break; + + if (atmel_port->suspended) { + atmel_port->pending |= pending; + atmel_port->pending_status = status; + atmel_uart_writel(port, ATMEL_US_IDR, mask); + pm_system_wakeup(); + break; + } + + atmel_handle_receive(port, pending); + atmel_handle_status(port, pending, status); + atmel_handle_transmit(port, pending); + } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); + + spin_unlock(&atmel_port->lock_suspended); + + return pass_counter ? IRQ_HANDLED : IRQ_NONE; +} + +static void atmel_release_tx_pdc(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; + + dma_unmap_single(port->dev, + pdc->dma_addr, + pdc->dma_size, + DMA_TO_DEVICE); +} + +/* + * Called from tasklet with ENDTX and TXBUFE interrupts disabled. + */ +static void atmel_tx_pdc(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct circ_buf *xmit = &port->state->xmit; + struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; + int count; + + /* nothing left to transmit? 
*/ + if (atmel_uart_readl(port, ATMEL_PDC_TCR)) + return; + + xmit->tail += pdc->ofs; + xmit->tail &= UART_XMIT_SIZE - 1; + + port->icount.tx += pdc->ofs; + pdc->ofs = 0; + + /* more to transmit - setup next transfer */ + + /* disable PDC transmit */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); + + if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { + dma_sync_single_for_device(port->dev, + pdc->dma_addr, + pdc->dma_size, + DMA_TO_DEVICE); + + count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + pdc->ofs = count; + + atmel_uart_writel(port, ATMEL_PDC_TPR, + pdc->dma_addr + xmit->tail); + atmel_uart_writel(port, ATMEL_PDC_TCR, count); + /* re-enable PDC transmit */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); + /* Enable interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, + atmel_port->tx_done_mask); + } else { + if (atmel_uart_is_half_duplex(port)) { + /* DMA done, stop TX, start RX for RS485 */ + atmel_start_rx(port); + } + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static int atmel_prepare_tx_pdc(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; + struct circ_buf *xmit = &port->state->xmit; + + pdc->buf = xmit->buf; + pdc->dma_addr = dma_map_single(port->dev, + pdc->buf, + UART_XMIT_SIZE, + DMA_TO_DEVICE); + pdc->dma_size = UART_XMIT_SIZE; + pdc->ofs = 0; + + return 0; +} + +static void atmel_rx_from_ring(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct circ_buf *ring = &atmel_port->rx_ring; + unsigned int flg; + unsigned int status; + + while (ring->head != ring->tail) { + struct atmel_uart_char c; + + /* Make sure c is loaded after head. 
*/ + smp_rmb(); + + c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; + + ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); + + port->icount.rx++; + status = c.status; + flg = TTY_NORMAL; + + /* + * note that the error handling code is + * out of the main execution path + */ + if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME + | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { + if (status & ATMEL_US_RXBRK) { + /* ignore side-effect */ + status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); + + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } + if (status & ATMEL_US_PARE) + port->icount.parity++; + if (status & ATMEL_US_FRAME) + port->icount.frame++; + if (status & ATMEL_US_OVRE) + port->icount.overrun++; + + status &= port->read_status_mask; + + if (status & ATMEL_US_RXBRK) + flg = TTY_BREAK; + else if (status & ATMEL_US_PARE) + flg = TTY_PARITY; + else if (status & ATMEL_US_FRAME) + flg = TTY_FRAME; + } + + + if (uart_handle_sysrq_char(port, c.ch)) + continue; + + uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); + } + + tty_flip_buffer_push(&port->state->port); +} + +static void atmel_release_rx_pdc(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + int i; + + for (i = 0; i < 2; i++) { + struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; + + dma_unmap_single(port->dev, + pdc->dma_addr, + pdc->dma_size, + DMA_FROM_DEVICE); + kfree(pdc->buf); + } +} + +static void atmel_rx_from_pdc(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct tty_port *tport = &port->state->port; + struct atmel_dma_buffer *pdc; + int rx_idx = atmel_port->pdc_rx_idx; + unsigned int head; + unsigned int tail; + unsigned int count; + + do { + /* Reset the UART timeout early so that we don't miss one */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); + + pdc = &atmel_port->pdc_rx[rx_idx]; + head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr; + tail = pdc->ofs; + + /* If the PDC has switched buffers, RPR won't contain + * any address within the current buffer. Since head + * is unsigned, we just need a one-way comparison to + * find out. + * + * In this case, we just need to consume the entire + * buffer and resubmit it for DMA. This will clear the + * ENDRX bit as well, so that we can safely re-enable + * all interrupts below. + */ + head = min(head, pdc->dma_size); + + if (likely(head != tail)) { + dma_sync_single_for_cpu(port->dev, pdc->dma_addr, + pdc->dma_size, DMA_FROM_DEVICE); + + /* + * head will only wrap around when we recycle + * the DMA buffer, and when that happens, we + * explicitly set tail to 0. So head will + * always be greater than tail. + */ + count = head - tail; + + tty_insert_flip_string(tport, pdc->buf + pdc->ofs, + count); + + dma_sync_single_for_device(port->dev, pdc->dma_addr, + pdc->dma_size, DMA_FROM_DEVICE); + + port->icount.rx += count; + pdc->ofs = head; + } + + /* + * If the current buffer is full, we need to check if + * the next one contains any additional data. 
+ */ + if (head >= pdc->dma_size) { + pdc->ofs = 0; + atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr); + atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size); + + rx_idx = !rx_idx; + atmel_port->pdc_rx_idx = rx_idx; + } + } while (head >= pdc->dma_size); + + tty_flip_buffer_push(tport); + + atmel_uart_writel(port, ATMEL_US_IER, + ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); +} + +static int atmel_prepare_rx_pdc(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + int i; + + for (i = 0; i < 2; i++) { + struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; + + pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); + if (pdc->buf == NULL) { + if (i != 0) { + dma_unmap_single(port->dev, + atmel_port->pdc_rx[0].dma_addr, + PDC_BUFFER_SIZE, + DMA_FROM_DEVICE); + kfree(atmel_port->pdc_rx[0].buf); + } + atmel_port->use_pdc_rx = false; + return -ENOMEM; + } + pdc->dma_addr = dma_map_single(port->dev, + pdc->buf, + PDC_BUFFER_SIZE, + DMA_FROM_DEVICE); + pdc->dma_size = PDC_BUFFER_SIZE; + pdc->ofs = 0; + } + + atmel_port->pdc_rx_idx = 0; + + atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr); + atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE); + + atmel_uart_writel(port, ATMEL_PDC_RNPR, + atmel_port->pdc_rx[1].dma_addr); + atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE); + + return 0; +} + +/* + * tasklet handling tty stuff outside the interrupt handler. + */ +static void atmel_tasklet_rx_func(struct tasklet_struct *t) +{ + struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t, + tasklet_rx); + struct uart_port *port = &atmel_port->uart; + + /* The interrupt handler does not take the lock */ + spin_lock(&port->lock); + atmel_port->schedule_rx(port); + spin_unlock(&port->lock); +} + +static void atmel_tasklet_tx_func(struct tasklet_struct *t) +{ + struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t, + tasklet_tx); + struct uart_port *port = &atmel_port->uart; + + /* The interrupt handler does not take the lock */ + spin_lock(&port->lock); + atmel_port->schedule_tx(port); + spin_unlock(&port->lock); +} + +static void atmel_init_property(struct atmel_uart_port *atmel_port, + struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + + /* DMA/PDC usage specification */ + if (of_property_read_bool(np, "atmel,use-dma-rx")) { + if (of_property_read_bool(np, "dmas")) { + atmel_port->use_dma_rx = true; + atmel_port->use_pdc_rx = false; + } else { + atmel_port->use_dma_rx = false; + atmel_port->use_pdc_rx = true; + } + } else { + atmel_port->use_dma_rx = false; + atmel_port->use_pdc_rx = false; + } + + if (of_property_read_bool(np, "atmel,use-dma-tx")) { + if (of_property_read_bool(np, "dmas")) { + atmel_port->use_dma_tx = true; + atmel_port->use_pdc_tx = false; + } else { + atmel_port->use_dma_tx = false; + atmel_port->use_pdc_tx = true; + } + } else { + atmel_port->use_dma_tx = false; + atmel_port->use_pdc_tx = false; + } +} + +static void atmel_set_ops(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (atmel_use_dma_rx(port)) { + atmel_port->prepare_rx = &atmel_prepare_rx_dma; + atmel_port->schedule_rx = &atmel_rx_from_dma; + atmel_port->release_rx = &atmel_release_rx_dma; + } else if (atmel_use_pdc_rx(port)) { + atmel_port->prepare_rx = &atmel_prepare_rx_pdc; + atmel_port->schedule_rx = &atmel_rx_from_pdc; + atmel_port->release_rx = &atmel_release_rx_pdc; + } else { + atmel_port->prepare_rx = NULL; + atmel_port->schedule_rx = 
&atmel_rx_from_ring; + atmel_port->release_rx = NULL; + } + + if (atmel_use_dma_tx(port)) { + atmel_port->prepare_tx = &atmel_prepare_tx_dma; + atmel_port->schedule_tx = &atmel_tx_dma; + atmel_port->release_tx = &atmel_release_tx_dma; + } else if (atmel_use_pdc_tx(port)) { + atmel_port->prepare_tx = &atmel_prepare_tx_pdc; + atmel_port->schedule_tx = &atmel_tx_pdc; + atmel_port->release_tx = &atmel_release_tx_pdc; + } else { + atmel_port->prepare_tx = NULL; + atmel_port->schedule_tx = &atmel_tx_chars; + atmel_port->release_tx = NULL; + } +} + +/* + * Get ip name usart or uart + */ +static void atmel_get_ip_name(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + int name = atmel_uart_readl(port, ATMEL_US_NAME); + u32 version; + u32 usart, dbgu_uart, new_uart; + /* ASCII decoding for IP version */ + usart = 0x55534152; /* USAR(T) */ + dbgu_uart = 0x44424755; /* DBGU */ + new_uart = 0x55415254; /* UART */ + + /* + * Only USART devices from at91sam9260 SOC implement fractional + * baudrate. It is available for all asynchronous modes, with the + * following restriction: the sampling clock's duty cycle is not + * constant. + */ + atmel_port->has_frac_baudrate = false; + atmel_port->has_hw_timer = false; + atmel_port->is_usart = false; + + if (name == new_uart) { + dev_dbg(port->dev, "Uart with hw timer"); + atmel_port->has_hw_timer = true; + atmel_port->rtor = ATMEL_UA_RTOR; + } else if (name == usart) { + dev_dbg(port->dev, "Usart\n"); + atmel_port->has_frac_baudrate = true; + atmel_port->has_hw_timer = true; + atmel_port->is_usart = true; + atmel_port->rtor = ATMEL_US_RTOR; + version = atmel_uart_readl(port, ATMEL_US_VERSION); + switch (version) { + case 0x814: /* sama5d2 */ + fallthrough; + case 0x701: /* sama5d4 */ + atmel_port->fidi_min = 3; + atmel_port->fidi_max = 65535; + break; + case 0x502: /* sam9x5, sama5d3 */ + atmel_port->fidi_min = 3; + atmel_port->fidi_max = 2047; + break; + default: + atmel_port->fidi_min = 1; + atmel_port->fidi_max = 2047; + } + } else if (name == dbgu_uart) { + dev_dbg(port->dev, "Dbgu or uart without hw timer\n"); + } else { + /* fallback for older SoCs: use version field */ + version = atmel_uart_readl(port, ATMEL_US_VERSION); + switch (version) { + case 0x302: + case 0x10213: + case 0x10302: + dev_dbg(port->dev, "This version is usart\n"); + atmel_port->has_frac_baudrate = true; + atmel_port->has_hw_timer = true; + atmel_port->is_usart = true; + atmel_port->rtor = ATMEL_US_RTOR; + break; + case 0x203: + case 0x10202: + dev_dbg(port->dev, "This version is uart\n"); + break; + default: + dev_err(port->dev, "Not supported ip name nor version, set to uart\n"); + } + } +} + +/* + * Perform initialization and enable port for reception + */ +static int atmel_startup(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + int retval; + + /* + * Ensure that no interrupts are enabled otherwise when + * request_irq() is called we could get stuck trying to + * handle an unexpected interrupt + */ + atmel_uart_writel(port, ATMEL_US_IDR, -1); + atmel_port->ms_irq_enabled = false; + + /* + * Allocate the IRQ + */ + retval = request_irq(port->irq, atmel_interrupt, + IRQF_SHARED | IRQF_COND_SUSPEND, + dev_name(&pdev->dev), port); + if (retval) { + dev_err(port->dev, "atmel_startup - Can't get irq\n"); + return retval; + } + + atomic_set(&atmel_port->tasklet_shutdown, 0); + tasklet_setup(&atmel_port->tasklet_rx, 
atmel_tasklet_rx_func); + tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func); + + /* + * Initialize DMA (if necessary) + */ + atmel_init_property(atmel_port, pdev); + atmel_set_ops(port); + + if (atmel_port->prepare_rx) { + retval = atmel_port->prepare_rx(port); + if (retval < 0) + atmel_set_ops(port); + } + + if (atmel_port->prepare_tx) { + retval = atmel_port->prepare_tx(port); + if (retval < 0) + atmel_set_ops(port); + } + + /* + * Enable FIFO when available + */ + if (atmel_port->fifo_size) { + unsigned int txrdym = ATMEL_US_ONE_DATA; + unsigned int rxrdym = ATMEL_US_ONE_DATA; + unsigned int fmr; + + atmel_uart_writel(port, ATMEL_US_CR, + ATMEL_US_FIFOEN | + ATMEL_US_RXFCLR | + ATMEL_US_TXFLCLR); + + if (atmel_use_dma_tx(port)) + txrdym = ATMEL_US_FOUR_DATA; + + fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym); + if (atmel_port->rts_high && + atmel_port->rts_low) + fmr |= ATMEL_US_FRTSC | + ATMEL_US_RXFTHRES(atmel_port->rts_high) | + ATMEL_US_RXFTHRES2(atmel_port->rts_low); + + atmel_uart_writel(port, ATMEL_US_FMR, fmr); + } + + /* Save current CSR for comparison in atmel_tasklet_func() */ + atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR); + + /* + * Finally, enable the serial port + */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); + /* enable xmit & rcvr */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); + atmel_port->tx_stopped = false; + + timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0); + + if (atmel_use_pdc_rx(port)) { + /* set UART timeout */ + if (!atmel_port->has_hw_timer) { + mod_timer(&atmel_port->uart_timer, + jiffies + uart_poll_timeout(port)); + /* set USART timeout */ + } else { + atmel_uart_writel(port, atmel_port->rtor, + PDC_RX_TIMEOUT); + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); + + atmel_uart_writel(port, ATMEL_US_IER, + ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); + } + /* enable PDC controller */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); + } else if (atmel_use_dma_rx(port)) { + /* set UART timeout */ + if (!atmel_port->has_hw_timer) { + mod_timer(&atmel_port->uart_timer, + jiffies + uart_poll_timeout(port)); + /* set USART timeout */ + } else { + atmel_uart_writel(port, atmel_port->rtor, + PDC_RX_TIMEOUT); + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); + + atmel_uart_writel(port, ATMEL_US_IER, + ATMEL_US_TIMEOUT); + } + } else { + /* enable receive only */ + atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); + } + + return 0; +} + +/* + * Flush any TX data submitted for DMA. Called when the TX circular + * buffer is reset. + */ +static void atmel_flush_buffer(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (atmel_use_pdc_tx(port)) { + atmel_uart_writel(port, ATMEL_PDC_TCR, 0); + atmel_port->pdc_tx.ofs = 0; + } + /* + * in uart_flush_buffer(), the xmit circular buffer has just + * been cleared, so we have to reset tx_len accordingly. 
+ */ + atmel_port->tx_len = 0; +} + +/* + * Disable the port + */ +static void atmel_shutdown(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + /* Disable modem control lines interrupts */ + atmel_disable_ms(port); + + /* Disable interrupts at device level */ + atmel_uart_writel(port, ATMEL_US_IDR, -1); + + /* Prevent spurious interrupts from scheduling the tasklet */ + atomic_inc(&atmel_port->tasklet_shutdown); + + /* + * Prevent any tasklets being scheduled during + * cleanup + */ + del_timer_sync(&atmel_port->uart_timer); + + /* Make sure that no interrupt is on the fly */ + synchronize_irq(port->irq); + + /* + * Clear out any scheduled tasklets before + * we destroy the buffers + */ + tasklet_kill(&atmel_port->tasklet_rx); + tasklet_kill(&atmel_port->tasklet_tx); + + /* + * Ensure everything is stopped and + * disable port and break condition. + */ + atmel_stop_rx(port); + atmel_stop_tx(port); + + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); + + /* + * Shut-down the DMA. + */ + if (atmel_port->release_rx) + atmel_port->release_rx(port); + if (atmel_port->release_tx) + atmel_port->release_tx(port); + + /* + * Reset ring buffer pointers + */ + atmel_port->rx_ring.head = 0; + atmel_port->rx_ring.tail = 0; + + /* + * Free the interrupts + */ + free_irq(port->irq, port); + + atmel_flush_buffer(port); +} + +/* + * Power / Clock management. + */ +static void atmel_serial_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + switch (state) { + case UART_PM_STATE_ON: + /* + * Enable the peripheral clock for this serial port. + * This is called on uart_open() or a resume event. + */ + clk_prepare_enable(atmel_port->clk); + + /* re-enable interrupts if we disabled some on suspend */ + atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); + break; + case UART_PM_STATE_OFF: + /* Back up the interrupt mask and disable all interrupts */ + atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); + atmel_uart_writel(port, ATMEL_US_IDR, -1); + + /* + * Disable the peripheral clock for this serial port. + * This is called on uart_close() or a suspend event. 
+ */ + clk_disable_unprepare(atmel_port->clk); + if (__clk_is_enabled(atmel_port->gclk)) + clk_disable_unprepare(atmel_port->gclk); + break; + default: + dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); + } +} + +/* + * Change the port parameters + */ +static void atmel_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned long flags; + unsigned int old_mode, mode, imr, quot, div, cd, fp = 0; + unsigned int baud, actual_baud, gclk_rate; + int ret; + + /* save the current mode register */ + mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR); + + /* reset the mode, clock divisor, parity, stop bits and data size */ + if (atmel_port->is_usart) + mode &= ~(ATMEL_US_NBSTOP | ATMEL_US_PAR | ATMEL_US_CHRL | + ATMEL_US_USCLKS | ATMEL_US_USMODE); + else + mode &= ~(ATMEL_UA_BRSRCCK | ATMEL_US_PAR | ATMEL_UA_FILTER); + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + + /* byte size */ + switch (termios->c_cflag & CSIZE) { + case CS5: + mode |= ATMEL_US_CHRL_5; + break; + case CS6: + mode |= ATMEL_US_CHRL_6; + break; + case CS7: + mode |= ATMEL_US_CHRL_7; + break; + default: + mode |= ATMEL_US_CHRL_8; + break; + } + + /* stop bits */ + if (termios->c_cflag & CSTOPB) + mode |= ATMEL_US_NBSTOP_2; + + /* parity */ + if (termios->c_cflag & PARENB) { + /* Mark or Space parity */ + if (termios->c_cflag & CMSPAR) { + if (termios->c_cflag & PARODD) + mode |= ATMEL_US_PAR_MARK; + else + mode |= ATMEL_US_PAR_SPACE; + } else if (termios->c_cflag & PARODD) + mode |= ATMEL_US_PAR_ODD; + else + mode |= ATMEL_US_PAR_EVEN; + } else + mode |= ATMEL_US_PAR_NONE; + + spin_lock_irqsave(&port->lock, flags); + + port->read_status_mask = ATMEL_US_OVRE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= ATMEL_US_RXBRK; + + if (atmel_use_pdc_rx(port)) + /* need to enable error interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask); + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= ATMEL_US_RXBRK; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= ATMEL_US_OVRE; + } + /* TODO: Ignore all characters if CREAD is set.*/ + + /* update the per-port timeout */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* + * save/disable interrupts. The tty layer will ensure that the + * transmitter is empty if requested by the caller, so there's + * no need to wait for it here. 
+ */
+ imr = atmel_uart_readl(port, ATMEL_US_IMR);
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
+
+ /* disable receiver and transmitter */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+ atmel_port->tx_stopped = true;
+
+ /* mode */
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ atmel_uart_writel(port, ATMEL_US_TTGR,
+ port->rs485.delay_rts_after_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
+ atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
+ /* select mck clock, and output */
+ mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
+ /* set max iterations */
+ mode |= ATMEL_US_MAX_ITER(3);
+ if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
+ == SER_ISO7816_T(0))
+ mode |= ATMEL_US_USMODE_ISO7816_T0;
+ else
+ mode |= ATMEL_US_USMODE_ISO7816_T1;
+ } else if (termios->c_cflag & CRTSCTS) {
+ /* RS232 with hardware handshake (RTS/CTS) */
+ if (atmel_use_fifo(port) &&
+ !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+ /*
+ * with ATMEL_US_USMODE_HWHS set, the controller will
+ * be able to drive the RTS pin high/low when the RX
+ * FIFO is above RXFTHRES/below RXFTHRES2.
+ * It will also disable the transmitter when the CTS
+ * pin is high.
+ * This mode is not activated if the CTS pin is a GPIO
+ * because in this case, the transmitter is always
+ * disabled (there must be an internal pull-up
+ * responsible for this behaviour).
+ * If the RTS pin is a GPIO, the controller won't be
+ * able to drive it according to the FIFO thresholds,
+ * but it will be handled by the driver.
+ */
+ mode |= ATMEL_US_USMODE_HWHS;
+ } else {
+ /*
+ * For platforms without FIFO, the flow control is
+ * handled by the driver.
+ */
+ mode |= ATMEL_US_USMODE_NORMAL;
+ }
+ } else {
+ /* RS232 without hardware handshake */
+ mode |= ATMEL_US_USMODE_NORMAL;
+ }
+
+ /*
+ * Set the baud rate:
+ * Fractional baudrate allows setting up the output frequency more
+ * accurately. This feature is enabled only when using normal mode.
+ * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
+ * Currently, OVER is always set to 0 so we get
+ * baudrate = selected clock / (16 * (CD + FP / 8))
+ * then
+ * 8 CD + FP = selected clock / (2 * baudrate)
+ */
+ if (atmel_port->has_frac_baudrate) {
+ div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
+ cd = div >> 3;
+ fp = div & ATMEL_US_FP_MASK;
+ } else {
+ cd = uart_get_divisor(port, baud);
+ }
+
+ /*
+ * If the current value of the Clock Divisor surpasses the 16 bit
+ * ATMEL_US_CD mask and the IP is USART, switch to the Peripheral
+ * Clock implicitly divided by 8.
+ * If the IP is UART however, keep the highest possible value for
+ * the CD and avoid needless division of CD, since UART IPs do not
+ * support implicit division of the Peripheral Clock.
+ */
+ if (atmel_port->is_usart && cd > ATMEL_US_CD) {
+ cd /= 8;
+ mode |= ATMEL_US_USCLKS_MCK_DIV8;
+ } else {
+ cd = min_t(unsigned int, cd, ATMEL_US_CD);
+ }
+
+ /*
+ * If there is no Fractional Part, there is a high chance that
+ * we may be able to generate a baudrate closer to the desired one
+ * if we use the GCLK as the clock source driving the baudrate
+ * generator.
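+ * Worked example with assumed figures (an 83 MHz peripheral clock is
+ * illustrative, not a datasheet value): for a requested 921600 baud,
+ * cd = DIV_ROUND_CLOSEST(83000000, 16 * 921600) = 6, which yields
+ * 83000000 / (16 * 6) = 864583 baud, about 6.2 % slow. If the GCLK
+ * can be rounded to 16 * 921600 = 14745600 Hz, gclk_rate / 16 matches
+ * the request exactly, so the code below switches the baud rate
+ * generator to GCLK and programs CD = 1.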
+ */ + if (!atmel_port->has_frac_baudrate) { + if (__clk_is_enabled(atmel_port->gclk)) + clk_disable_unprepare(atmel_port->gclk); + gclk_rate = clk_round_rate(atmel_port->gclk, 16 * baud); + actual_baud = clk_get_rate(atmel_port->clk) / (16 * cd); + if (gclk_rate && abs(atmel_error_rate(baud, actual_baud)) > + abs(atmel_error_rate(baud, gclk_rate / 16))) { + clk_set_rate(atmel_port->gclk, 16 * baud); + ret = clk_prepare_enable(atmel_port->gclk); + if (ret) + goto gclk_fail; + + if (atmel_port->is_usart) { + mode &= ~ATMEL_US_USCLKS; + mode |= ATMEL_US_USCLKS_GCLK; + } else { + mode |= ATMEL_UA_BRSRCCK; + } + + /* + * Set the Clock Divisor for GCLK to 1. + * Since we were able to generate the smallest + * multiple of the desired baudrate times 16, + * then we surely can generate a bigger multiple + * with the exact error rate for an equally increased + * CD. Thus no need to take into account + * a higher value for CD. + */ + cd = 1; + } + } + +gclk_fail: + quot = cd | fp << ATMEL_US_FP_OFFSET; + + if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) + atmel_uart_writel(port, ATMEL_US_BRGR, quot); + + /* set the mode, clock divisor, parity, stop bits and data size */ + atmel_uart_writel(port, ATMEL_US_MR, mode); + + /* + * when switching the mode, set the RTS line state according to the + * new mode, otherwise keep the former state + */ + if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { + unsigned int rts_state; + + if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { + /* let the hardware control the RTS line */ + rts_state = ATMEL_US_RTSDIS; + } else { + /* force RTS line to low level */ + rts_state = ATMEL_US_RTSEN; + } + + atmel_uart_writel(port, ATMEL_US_CR, rts_state); + } + + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); + atmel_port->tx_stopped = false; + + /* restore interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, imr); + + /* CTS flow-control and modem-status interrupts */ + if (UART_ENABLE_MS(port, termios->c_cflag)) + atmel_enable_ms(port); + else + atmel_disable_ms(port); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios) +{ + if (termios->c_line == N_PPS) { + port->flags |= UPF_HARDPPS_CD; + spin_lock_irq(&port->lock); + atmel_enable_ms(port); + spin_unlock_irq(&port->lock); + } else { + port->flags &= ~UPF_HARDPPS_CD; + if (!UART_ENABLE_MS(port, termios->c_cflag)) { + spin_lock_irq(&port->lock); + atmel_disable_ms(port); + spin_unlock_irq(&port->lock); + } + } +} + +/* + * Return string describing the specified port + */ +static const char *atmel_type(struct uart_port *port) +{ + return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL; +} + +/* + * Release the memory region(s) being used by 'port'. + */ +static void atmel_release_port(struct uart_port *port) +{ + struct platform_device *mpdev = to_platform_device(port->dev->parent); + int size = resource_size(mpdev->resource); + + release_mem_region(port->mapbase, size); + + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } +} + +/* + * Request the memory region(s) being used by 'port'. 
+ */ +static int atmel_request_port(struct uart_port *port) +{ + struct platform_device *mpdev = to_platform_device(port->dev->parent); + int size = resource_size(mpdev->resource); + + if (!request_mem_region(port->mapbase, size, "atmel_serial")) + return -EBUSY; + + if (port->flags & UPF_IOREMAP) { + port->membase = ioremap(port->mapbase, size); + if (port->membase == NULL) { + release_mem_region(port->mapbase, size); + return -ENOMEM; + } + } + + return 0; +} + +/* + * Configure/autoconfigure the port. + */ +static void atmel_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_ATMEL; + atmel_request_port(port); + } +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). + */ +static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) + ret = -EINVAL; + if (port->irq != ser->irq) + ret = -EINVAL; + if (ser->io_type != SERIAL_IO_MEM) + ret = -EINVAL; + if (port->uartclk / 16 != ser->baud_base) + ret = -EINVAL; + if (port->mapbase != (unsigned long)ser->iomem_base) + ret = -EINVAL; + if (port->iobase != ser->port) + ret = -EINVAL; + if (ser->hub6 != 0) + ret = -EINVAL; + return ret; +} + +#ifdef CONFIG_CONSOLE_POLL +static int atmel_poll_get_char(struct uart_port *port) +{ + while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY)) + cpu_relax(); + + return atmel_uart_read_char(port); +} + +static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) +{ + while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) + cpu_relax(); + + atmel_uart_write_char(port, ch); +} +#endif + +static const struct uart_ops atmel_pops = { + .tx_empty = atmel_tx_empty, + .set_mctrl = atmel_set_mctrl, + .get_mctrl = atmel_get_mctrl, + .stop_tx = atmel_stop_tx, + .start_tx = atmel_start_tx, + .stop_rx = atmel_stop_rx, + .enable_ms = atmel_enable_ms, + .break_ctl = atmel_break_ctl, + .startup = atmel_startup, + .shutdown = atmel_shutdown, + .flush_buffer = atmel_flush_buffer, + .set_termios = atmel_set_termios, + .set_ldisc = atmel_set_ldisc, + .type = atmel_type, + .release_port = atmel_release_port, + .request_port = atmel_request_port, + .config_port = atmel_config_port, + .verify_port = atmel_verify_port, + .pm = atmel_serial_pm, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = atmel_poll_get_char, + .poll_put_char = atmel_poll_put_char, +#endif +}; + +static const struct serial_rs485 atmel_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + +/* + * Configure the port from the platform device resource info. 
+ */ +static int atmel_init_port(struct atmel_uart_port *atmel_port, + struct platform_device *pdev) +{ + int ret; + struct uart_port *port = &atmel_port->uart; + struct platform_device *mpdev = to_platform_device(pdev->dev.parent); + + atmel_init_property(atmel_port, pdev); + atmel_set_ops(port); + + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; + port->ops = &atmel_pops; + port->fifosize = 1; + port->dev = &pdev->dev; + port->mapbase = mpdev->resource[0].start; + port->irq = platform_get_irq(mpdev, 0); + port->rs485_config = atmel_config_rs485; + port->rs485_supported = atmel_rs485_supported; + port->iso7816_config = atmel_config_iso7816; + port->membase = NULL; + + memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); + + ret = uart_get_rs485_mode(port); + if (ret) + return ret; + + port->uartclk = clk_get_rate(atmel_port->clk); + + /* + * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or + * ENDTX|TXBUFE + */ + if (atmel_uart_is_half_duplex(port)) + atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; + else if (atmel_use_pdc_tx(port)) { + port->fifosize = PDC_BUFFER_SIZE; + atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; + } else { + atmel_port->tx_done_mask = ATMEL_US_TXRDY; + } + + return 0; +} + +#ifdef CONFIG_SERIAL_ATMEL_CONSOLE +static void atmel_console_putchar(struct uart_port *port, unsigned char ch) +{ + while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) + cpu_relax(); + atmel_uart_write_char(port, ch); +} + +/* + * Interrupts are disabled on entering + */ +static void atmel_console_write(struct console *co, const char *s, u_int count) +{ + struct uart_port *port = &atmel_ports[co->index].uart; + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int status, imr; + unsigned int pdc_tx; + + /* + * First, save IMR and then disable interrupts + */ + imr = atmel_uart_readl(port, ATMEL_US_IMR); + atmel_uart_writel(port, ATMEL_US_IDR, + ATMEL_US_RXRDY | atmel_port->tx_done_mask); + + /* Store PDC transmit status and disable it */ + pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); + + /* Make sure that tx path is actually able to send characters */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); + atmel_port->tx_stopped = false; + + uart_console_write(port, s, count, atmel_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore IMR + */ + do { + status = atmel_uart_readl(port, ATMEL_US_CSR); + } while (!(status & ATMEL_US_TXRDY)); + + /* Restore PDC transmit status */ + if (pdc_tx) + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); + + /* set interrupts back the way they were */ + atmel_uart_writel(port, ATMEL_US_IER, imr); +} + +/* + * If the port was already initialised (eg, by a boot loader), + * try to determine the current setup. + */ +static void __init atmel_console_get_options(struct uart_port *port, int *baud, + int *parity, int *bits) +{ + unsigned int mr, quot; + + /* + * If the baud rate generator isn't running, the port wasn't + * initialized by the boot loader. 
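+ * As an illustration (assumed numbers, not from a datasheet): with a
+ * 66 MHz uartclk, a CD of 0 read below means there is nothing to
+ * recover, while CD = 36 would be reported as
+ * 66000000 / (16 * 36) = 114583 baud, i.e. a boot loader setting of
+ * 115200 within divider granularity.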
+ */ + quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD; + if (!quot) + return; + + mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; + if (mr == ATMEL_US_CHRL_8) + *bits = 8; + else + *bits = 7; + + mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; + if (mr == ATMEL_US_PAR_EVEN) + *parity = 'e'; + else if (mr == ATMEL_US_PAR_ODD) + *parity = 'o'; + + *baud = port->uartclk / (16 * quot); +} + +static int __init atmel_console_setup(struct console *co, char *options) +{ + struct uart_port *port = &atmel_ports[co->index].uart; + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (port->membase == NULL) { + /* Port not initialized yet - delay setup */ + return -ENODEV; + } + + atmel_uart_writel(port, ATMEL_US_IDR, -1); + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); + atmel_port->tx_stopped = false; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + atmel_console_get_options(port, &baud, &parity, &bits); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver atmel_uart; + +static struct console atmel_console = { + .name = ATMEL_DEVICENAME, + .write = atmel_console_write, + .device = uart_console_device, + .setup = atmel_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &atmel_uart, +}; + +static void atmel_serial_early_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, atmel_console_putchar); +} + +static int __init atmel_early_console_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = atmel_serial_early_write; + + return 0; +} + +OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91rm9200-usart", + atmel_early_console_setup); +OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91sam9260-usart", + atmel_early_console_setup); + +#define ATMEL_CONSOLE_DEVICE (&atmel_console) + +#else +#define ATMEL_CONSOLE_DEVICE NULL +#endif + +static struct uart_driver atmel_uart = { + .owner = THIS_MODULE, + .driver_name = "atmel_serial", + .dev_name = ATMEL_DEVICENAME, + .major = SERIAL_ATMEL_MAJOR, + .minor = MINOR_START, + .nr = ATMEL_MAX_UART, + .cons = ATMEL_CONSOLE_DEVICE, +}; + +static bool atmel_serial_clk_will_stop(void) +{ +#ifdef CONFIG_ARCH_AT91 + return at91_suspend_entering_slow_clock(); +#else + return false; +#endif +} + +static int __maybe_unused atmel_serial_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + if (uart_console(port) && console_suspend_enabled) { + /* Drain the TX shifter */ + while (!(atmel_uart_readl(port, ATMEL_US_CSR) & + ATMEL_US_TXEMPTY)) + cpu_relax(); + } + + if (uart_console(port) && !console_suspend_enabled) { + /* Cache register values as we won't get a full shutdown/startup + * cycle + */ + atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR); + atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR); + atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR); + atmel_port->cache.rtor = atmel_uart_readl(port, + atmel_port->rtor); + atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR); + atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR); + atmel_port->cache.fimr = 
atmel_uart_readl(port, ATMEL_US_FIMR);
+ }
+
+ /* we cannot wake up if we're running on the slow clock */
+ atmel_port->may_wakeup = device_may_wakeup(dev);
+ if (atmel_serial_clk_will_stop()) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+ atmel_port->suspended = true;
+ spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
+ device_set_wakeup_enable(dev, 0);
+ }
+
+ uart_suspend_port(&atmel_uart, port);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_serial_resume(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned long flags;
+
+ if (uart_console(port) && !console_suspend_enabled) {
+ atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
+ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
+ atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
+ atmel_uart_writel(port, atmel_port->rtor,
+ atmel_port->cache.rtor);
+ atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
+
+ if (atmel_port->fifo_size) {
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
+ ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
+ atmel_uart_writel(port, ATMEL_US_FMR,
+ atmel_port->cache.fmr);
+ atmel_uart_writel(port, ATMEL_US_FIER,
+ atmel_port->cache.fimr);
+ }
+ atmel_start_rx(port);
+ }
+
+ spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+ if (atmel_port->pending) {
+ atmel_handle_receive(port, atmel_port->pending);
+ atmel_handle_status(port, atmel_port->pending,
+ atmel_port->pending_status);
+ atmel_handle_transmit(port, atmel_port->pending);
+ atmel_port->pending = 0;
+ }
+ atmel_port->suspended = false;
+ spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
+
+ uart_resume_port(&atmel_uart, port);
+ device_set_wakeup_enable(dev, atmel_port->may_wakeup);
+
+ return 0;
+}
+
+static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
+ struct platform_device *pdev)
+{
+ atmel_port->fifo_size = 0;
+ atmel_port->rts_low = 0;
+ atmel_port->rts_high = 0;
+
+ if (of_property_read_u32(pdev->dev.of_node,
+ "atmel,fifo-size",
+ &atmel_port->fifo_size))
+ return;
+
+ if (!atmel_port->fifo_size)
+ return;
+
+ if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
+ atmel_port->fifo_size = 0;
+ dev_err(&pdev->dev, "Invalid FIFO size\n");
+ return;
+ }
+
+ /*
+ * 0 <= rts_low <= rts_high <= fifo_size
+ * Once their CTS line is asserted by the remote peer, some x86 UARTs
+ * tend to flush their internal TX FIFO, commonly up to 16 data, before
+ * they actually stop sending new data. So we try to set the RTS High
+ * Threshold to a reasonably high value respecting this empirical
+ * 16-data rule when possible.
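+ * As a worked example (assuming the driver's ATMEL_RTS_HIGH_OFFSET of
+ * 16 and ATMEL_RTS_LOW_OFFSET of 20 data): with a 32-data FIFO the
+ * formulas below give rts_high = max(16, 32 - 16) = 16 and
+ * rts_low = max(8, 32 - 20) = 12, so RTS is deasserted once 16 data
+ * are queued, leaving 16 entries of headroom for such a late-stopping
+ * sender, and asserted again when the FIFO drains below 12.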
+ */ + atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1, + atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET); + atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2, + atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET); + + dev_info(&pdev->dev, "Using FIFO (%u data)\n", + atmel_port->fifo_size); + dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n", + atmel_port->rts_high); + dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n", + atmel_port->rts_low); +} + +static int atmel_serial_probe(struct platform_device *pdev) +{ + struct atmel_uart_port *atmel_port; + struct device_node *np = pdev->dev.parent->of_node; + void *data; + int ret; + bool rs485_enabled; + + BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); + + /* + * In device tree there is no node with "atmel,at91rm9200-usart-serial" + * as compatible string. This driver is probed by at91-usart mfd driver + * which is just a wrapper over the atmel_serial driver and + * spi-at91-usart driver. All attributes needed by this driver are + * found in of_node of parent. + */ + pdev->dev.of_node = np; + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) + /* port id not found in platform data nor device-tree aliases: + * auto-enumerate it */ + ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); + + if (ret >= ATMEL_MAX_UART) { + ret = -ENODEV; + goto err; + } + + if (test_and_set_bit(ret, atmel_ports_in_use)) { + /* port already in use */ + ret = -EBUSY; + goto err; + } + + atmel_port = &atmel_ports[ret]; + atmel_port->backup_imr = 0; + atmel_port->uart.line = ret; + atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE); + atmel_serial_probe_fifos(atmel_port, pdev); + + atomic_set(&atmel_port->tasklet_shutdown, 0); + spin_lock_init(&atmel_port->lock_suspended); + + atmel_port->clk = devm_clk_get(&pdev->dev, "usart"); + if (IS_ERR(atmel_port->clk)) { + ret = PTR_ERR(atmel_port->clk); + goto err; + } + ret = clk_prepare_enable(atmel_port->clk); + if (ret) + goto err; + + atmel_port->gclk = devm_clk_get_optional(&pdev->dev, "gclk"); + if (IS_ERR(atmel_port->gclk)) { + ret = PTR_ERR(atmel_port->gclk); + goto err_clk_disable_unprepare; + } + + ret = atmel_init_port(atmel_port, pdev); + if (ret) + goto err_clk_disable_unprepare; + + atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0); + if (IS_ERR(atmel_port->gpios)) { + ret = PTR_ERR(atmel_port->gpios); + goto err_clk_disable_unprepare; + } + + if (!atmel_use_pdc_rx(&atmel_port->uart)) { + ret = -ENOMEM; + data = kmalloc_array(ATMEL_SERIAL_RINGSIZE, + sizeof(struct atmel_uart_char), + GFP_KERNEL); + if (!data) + goto err_clk_disable_unprepare; + atmel_port->rx_ring.buf = data; + } + + rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED; + + ret = uart_add_one_port(&atmel_uart, &atmel_port->uart); + if (ret) + goto err_add_port; + + device_init_wakeup(&pdev->dev, 1); + platform_set_drvdata(pdev, atmel_port); + + if (rs485_enabled) { + atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR, + ATMEL_US_USMODE_NORMAL); + atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR, + ATMEL_US_RTSEN); + } + + /* + * Get port name of usart or uart + */ + atmel_get_ip_name(&atmel_port->uart); + + /* + * The peripheral clock can now safely be disabled till the port + * is used + */ + clk_disable_unprepare(atmel_port->clk); + + return 0; + +err_add_port: + kfree(atmel_port->rx_ring.buf); + atmel_port->rx_ring.buf = NULL; +err_clk_disable_unprepare: + clk_disable_unprepare(atmel_port->clk); + clear_bit(atmel_port->uart.line, atmel_ports_in_use); 
+err: + return ret; +} + +/* + * Even if the driver is not modular, it makes sense to be able to + * unbind a device: there can be many bound devices, and there are + * situations where dynamic binding and unbinding can be useful. + * + * For example, a connected device can require a specific firmware update + * protocol that needs bitbanging on IO lines, but use the regular serial + * port in the normal case. + */ +static int atmel_serial_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + int ret = 0; + + tasklet_kill(&atmel_port->tasklet_rx); + tasklet_kill(&atmel_port->tasklet_tx); + + device_init_wakeup(&pdev->dev, 0); + + ret = uart_remove_one_port(&atmel_uart, port); + + kfree(atmel_port->rx_ring.buf); + + /* "port" is allocated statically, so we shouldn't free it */ + + clear_bit(port->line, atmel_ports_in_use); + + pdev->dev.of_node = NULL; + + return ret; +} + +static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend, + atmel_serial_resume); + +static struct platform_driver atmel_serial_driver = { + .probe = atmel_serial_probe, + .remove = atmel_serial_remove, + .driver = { + .name = "atmel_usart_serial", + .of_match_table = of_match_ptr(atmel_serial_dt_ids), + .pm = pm_ptr(&atmel_serial_pm_ops), + }, +}; + +static int __init atmel_serial_init(void) +{ + int ret; + + ret = uart_register_driver(&atmel_uart); + if (ret) + return ret; + + ret = platform_driver_register(&atmel_serial_driver); + if (ret) + uart_unregister_driver(&atmel_uart); + + return ret; +} +device_initcall(atmel_serial_init); diff --git a/drivers/tty/serial/atmel_serial.h b/drivers/tty/serial/atmel_serial.h new file mode 100644 index 000000000..87f8f7996 --- /dev/null +++ b/drivers/tty/serial/atmel_serial.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * include/linux/atmel_serial.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * USART registers. + * Based on AT91RM9200 datasheet revision E. 
+ */ + +#include + +#ifndef ATMEL_SERIAL_H +#define ATMEL_SERIAL_H + +#define ATMEL_US_CR 0x00 /* Control Register */ +#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */ +#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */ +#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */ +#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */ +#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */ +#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */ +#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */ +#define ATMEL_US_STTBRK BIT(9) /* Start Break */ +#define ATMEL_US_STPBRK BIT(10) /* Stop Break */ +#define ATMEL_US_STTTO BIT(11) /* Start Time-out */ +#define ATMEL_US_SENDA BIT(12) /* Send Address */ +#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */ +#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */ +#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */ +#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */ +#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */ +#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */ +#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */ +#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */ +#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */ +#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */ +#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */ +#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */ + +#define ATMEL_US_MR 0x04 /* Mode Register */ +#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */ +#define ATMEL_US_USMODE_NORMAL FIELD_PREP(ATMEL_US_USMODE, 0) +#define ATMEL_US_USMODE_RS485 FIELD_PREP(ATMEL_US_USMODE, 1) +#define ATMEL_US_USMODE_HWHS FIELD_PREP(ATMEL_US_USMODE, 2) +#define ATMEL_US_USMODE_MODEM FIELD_PREP(ATMEL_US_USMODE, 3) +#define ATMEL_US_USMODE_ISO7816_T0 FIELD_PREP(ATMEL_US_USMODE, 4) +#define ATMEL_US_USMODE_ISO7816_T1 FIELD_PREP(ATMEL_US_USMODE, 6) +#define ATMEL_US_USMODE_IRDA FIELD_PREP(ATMEL_US_USMODE, 8) +#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */ +#define ATMEL_US_USCLKS_MCK FIELD_PREP(ATMEL_US_USCLKS, 0) +#define ATMEL_US_USCLKS_MCK_DIV8 FIELD_PREP(ATMEL_US_USCLKS, 1) +#define ATMEL_US_USCLKS_GCLK FIELD_PREP(ATMEL_US_USCLKS, 2) +#define ATMEL_US_USCLKS_SCK FIELD_PREP(ATMEL_US_USCLKS, 3) +#define ATMEL_UA_FILTER BIT(4) +#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */ +#define ATMEL_US_CHRL_5 FIELD_PREP(ATMEL_US_CHRL, 0) +#define ATMEL_US_CHRL_6 FIELD_PREP(ATMEL_US_CHRL, 1) +#define ATMEL_US_CHRL_7 FIELD_PREP(ATMEL_US_CHRL, 2) +#define ATMEL_US_CHRL_8 FIELD_PREP(ATMEL_US_CHRL, 3) +#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */ +#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */ +#define ATMEL_US_PAR_EVEN FIELD_PREP(ATMEL_US_PAR, 0) +#define ATMEL_US_PAR_ODD FIELD_PREP(ATMEL_US_PAR, 1) +#define ATMEL_US_PAR_SPACE FIELD_PREP(ATMEL_US_PAR, 2) +#define ATMEL_US_PAR_MARK FIELD_PREP(ATMEL_US_PAR, 3) +#define ATMEL_US_PAR_NONE FIELD_PREP(ATMEL_US_PAR, 4) +#define ATMEL_US_PAR_MULTI_DROP FIELD_PREP(ATMEL_US_PAR, 6) +#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */ +#define ATMEL_US_NBSTOP_1 FIELD_PREP(ATMEL_US_NBSTOP, 0) +#define ATMEL_US_NBSTOP_1_5 FIELD_PREP(ATMEL_US_NBSTOP, 1) +#define ATMEL_US_NBSTOP_2 FIELD_PREP(ATMEL_US_NBSTOP, 2) +#define ATMEL_UA_BRSRCCK BIT(12) /* Clock Selection for UART */ +#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */ +#define ATMEL_US_CHMODE_NORMAL FIELD_PREP(ATMEL_US_CHMODE, 0) +#define ATMEL_US_CHMODE_ECHO FIELD_PREP(ATMEL_US_CHMODE, 1) +#define 
ATMEL_US_CHMODE_LOC_LOOP FIELD_PREP(ATMEL_US_CHMODE, 2) +#define ATMEL_US_CHMODE_REM_LOOP FIELD_PREP(ATMEL_US_CHMODE, 3) +#define ATMEL_US_MSBF BIT(16) /* Bit Order */ +#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */ +#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */ +#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */ +#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */ +#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */ +#define ATMEL_US_MAX_ITER_MASK GENMASK(26, 24) /* Max Iterations */ +#define ATMEL_US_MAX_ITER(n) FIELD_PREP(ATMEL_US_MAX_ITER_MASK, (n)) +#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */ + +#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */ +#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */ +#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */ +#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */ +#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */ +#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */ +#define ATMEL_US_OVRE BIT(5) /* Overrun Error */ +#define ATMEL_US_FRAME BIT(6) /* Framing Error */ +#define ATMEL_US_PARE BIT(7) /* Parity Error */ +#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */ +#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */ +#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */ +#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */ +#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */ +#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */ +#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */ +#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */ +#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */ +#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */ +#define ATMEL_US_RI BIT(20) /* RI */ +#define ATMEL_US_DSR BIT(21) /* DSR */ +#define ATMEL_US_DCD BIT(22) /* DCD */ +#define ATMEL_US_CTS BIT(23) /* CTS */ + +#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */ +#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */ +#define ATMEL_US_CSR 0x14 /* Channel Status Register */ +#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */ +#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */ +#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */ + +#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */ +#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */ +#define ATMEL_US_FP_OFFSET 16 /* Fractional Part */ +#define ATMEL_US_FP_MASK 0x7 + +#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */ +#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */ +#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */ + +#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */ +#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */ + +#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */ +#define ATMEL_US_NER 0x44 /* Number of Errors Register */ +#define ATMEL_US_IF 0x4c /* IrDA Filter Register */ + +#define ATMEL_US_CMPR 0x90 /* Comparaison Register */ +#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */ +#define ATMEL_US_TXRDYM(data) FIELD_PREP(GENMASK(1, 0), (data)) /* TX Ready Mode */ +#define ATMEL_US_RXRDYM(data) FIELD_PREP(GENMASK(5, 4), (data)) /* RX Ready Mode */ +#define ATMEL_US_ONE_DATA 0x0 +#define ATMEL_US_TWO_DATA 0x1 +#define ATMEL_US_FOUR_DATA 0x2 +#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */ +#define ATMEL_US_TXFTHRES(thr) FIELD_PREP(GENMASK(13, 8), (thr)) /* TX FIFO Threshold */ 
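+/*
+ * Illustrative sketch (not a register definition), mirroring how
+ * atmel_startup() in atmel_serial.c composes the FIFO mode register
+ * from the field macros defined here:
+ *
+ *	fmr = ATMEL_US_TXRDYM(ATMEL_US_FOUR_DATA) |
+ *	      ATMEL_US_RXRDYM(ATMEL_US_ONE_DATA) |
+ *	      ATMEL_US_FRTSC |
+ *	      ATMEL_US_RXFTHRES(rts_high) | ATMEL_US_RXFTHRES2(rts_low);
+ *	atmel_uart_writel(port, ATMEL_US_FMR, fmr);
+ */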
+#define ATMEL_US_RXFTHRES(thr) FIELD_PREP(GENMASK(21, 16), (thr)) /* RX FIFO Threshold */ +#define ATMEL_US_RXFTHRES2(thr) FIELD_PREP(GENMASK(29, 24), (thr)) /* RX FIFO Threshold2 */ + +#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */ +#define ATMEL_US_TXFL(reg) FIELD_GET(GENMASK(5, 0), (reg)) /* TX FIFO Level */ +#define ATMEL_US_RXFL(reg) FIELD_GET(GENMASK(21, 16), (reg)) /* RX FIFO Level */ + +#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */ +#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */ +#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */ +#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */ +#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */ +#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */ +#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */ +#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */ +#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */ +#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */ +#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */ +#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */ +#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */ +#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */ + +#define ATMEL_US_NAME 0xf0 /* Ip Name */ +#define ATMEL_US_VERSION 0xfc /* Ip Version */ + +#endif diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c new file mode 100644 index 000000000..5d9737c2d --- /dev/null +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -0,0 +1,921 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Derived from many drivers using generic_serial interface. + * + * Copyright (C) 2008 Maxime Bizon + * + * Serial driver for BCM63xx integrated UART. + * + * Hardware flow control was _not_ tested since I only have RX/TX on + * my board. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BCM63XX_NR_UARTS 2 + +static struct uart_port ports[BCM63XX_NR_UARTS]; + +/* + * rx interrupt mask / stat + * + * mask: + * - rx fifo full + * - rx fifo above threshold + * - rx fifo not empty for too long + */ +#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \ + UART_IR_MASK(UART_IR_RXTHRESH) | \ + UART_IR_MASK(UART_IR_RXTIMEOUT)) + +#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \ + UART_IR_STAT(UART_IR_RXTHRESH) | \ + UART_IR_STAT(UART_IR_RXTIMEOUT)) + +/* + * tx interrupt mask / stat + * + * mask: + * - tx fifo empty + * - tx fifo below threshold + */ +#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \ + UART_IR_MASK(UART_IR_TXTRESH)) + +#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \ + UART_IR_STAT(UART_IR_TXTRESH)) + +/* + * external input interrupt + * + * mask: any edge on CTS, DCD + */ +#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \ + UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD)) + +/* + * handy uart register accessor + */ +static inline unsigned int bcm_uart_readl(struct uart_port *port, + unsigned int offset) +{ + return __raw_readl(port->membase + offset); +} + +static inline void bcm_uart_writel(struct uart_port *port, + unsigned int value, unsigned int offset) +{ + __raw_writel(value, port->membase + offset); +} + +/* + * serial core request to check if uart tx fifo is empty + */ +static unsigned int bcm_uart_tx_empty(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0; +} + +/* + * serial core request to set RTS and DTR pin state and loopback mode + */ +static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_MCTL_REG); + val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK); + /* invert of written value is reflected on the pin */ + if (!(mctrl & TIOCM_DTR)) + val |= UART_MCTL_DTR_MASK; + if (!(mctrl & TIOCM_RTS)) + val |= UART_MCTL_RTS_MASK; + bcm_uart_writel(port, val, UART_MCTL_REG); + + val = bcm_uart_readl(port, UART_CTL_REG); + if (mctrl & TIOCM_LOOP) + val |= UART_CTL_LOOPBACK_MASK; + else + val &= ~UART_CTL_LOOPBACK_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * serial core request to return RI, CTS, DCD and DSR pin state + */ +static unsigned int bcm_uart_get_mctrl(struct uart_port *port) +{ + unsigned int val, mctrl; + + mctrl = 0; + val = bcm_uart_readl(port, UART_EXTINP_REG); + if (val & UART_EXTINP_RI_MASK) + mctrl |= TIOCM_RI; + if (val & UART_EXTINP_CTS_MASK) + mctrl |= TIOCM_CTS; + if (val & UART_EXTINP_DCD_MASK) + mctrl |= TIOCM_CD; + if (val & UART_EXTINP_DSR_MASK) + mctrl |= TIOCM_DSR; + return mctrl; +} + +/* + * serial core request to disable tx ASAP (used for flow control) + */ +static void bcm_uart_stop_tx(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_CTL_REG); + val &= ~(UART_CTL_TXEN_MASK); + bcm_uart_writel(port, val, UART_CTL_REG); + + val = bcm_uart_readl(port, UART_IR_REG); + val &= ~UART_TX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); +} + +/* + * serial core request to (re)enable tx + */ +static void bcm_uart_start_tx(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + val |= UART_TX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); + + val = 
bcm_uart_readl(port, UART_CTL_REG); + val |= UART_CTL_TXEN_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * serial core request to stop rx, called before port shutdown + */ +static void bcm_uart_stop_rx(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + val &= ~UART_RX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); +} + +/* + * serial core request to enable modem status interrupt reporting + */ +static void bcm_uart_enable_ms(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + val |= UART_IR_MASK(UART_IR_EXTIP); + bcm_uart_writel(port, val, UART_IR_REG); +} + +/* + * serial core request to start/stop emitting break char + */ +static void bcm_uart_break_ctl(struct uart_port *port, int ctl) +{ + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(&port->lock, flags); + + val = bcm_uart_readl(port, UART_CTL_REG); + if (ctl) + val |= UART_CTL_XMITBRK_MASK; + else + val &= ~UART_CTL_XMITBRK_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* + * return port type in string format + */ +static const char *bcm_uart_type(struct uart_port *port) +{ + return (port->type == PORT_BCM63XX) ? "bcm63xx_uart" : NULL; +} + +/* + * read all chars in rx fifo and send them to core + */ +static void bcm_uart_do_rx(struct uart_port *port) +{ + struct tty_port *tty_port = &port->state->port; + unsigned int max_count; + + /* limit number of char read in interrupt, should not be + * higher than fifo size anyway since we're much faster than + * serial port */ + max_count = 32; + do { + unsigned int iestat, c, cstat; + char flag; + + /* get overrun/fifo empty information from ier + * register */ + iestat = bcm_uart_readl(port, UART_IR_REG); + + if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) { + unsigned int val; + + /* fifo reset is required to clear + * interrupt */ + val = bcm_uart_readl(port, UART_CTL_REG); + val |= UART_CTL_RSTRXFIFO_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); + + port->icount.overrun++; + tty_insert_flip_char(tty_port, 0, TTY_OVERRUN); + } + + if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) + break; + + cstat = c = bcm_uart_readl(port, UART_FIFO_REG); + port->icount.rx++; + flag = TTY_NORMAL; + c &= 0xff; + + if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) { + /* do stats first */ + if (cstat & UART_FIFO_BRKDET_MASK) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } + + if (cstat & UART_FIFO_PARERR_MASK) + port->icount.parity++; + if (cstat & UART_FIFO_FRAMEERR_MASK) + port->icount.frame++; + + /* update flag wrt read_status_mask */ + cstat &= port->read_status_mask; + if (cstat & UART_FIFO_BRKDET_MASK) + flag = TTY_BREAK; + if (cstat & UART_FIFO_FRAMEERR_MASK) + flag = TTY_FRAME; + if (cstat & UART_FIFO_PARERR_MASK) + flag = TTY_PARITY; + } + + if (uart_handle_sysrq_char(port, c)) + continue; + + + if ((cstat & port->ignore_status_mask) == 0) + tty_insert_flip_char(tty_port, c, flag); + + } while (--max_count); + + tty_flip_buffer_push(tty_port); +} + +/* + * fill tx fifo with chars to send, stop when fifo is about to be full + * or when all chars have been sent. 
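+ * for instance (illustrative numbers), with the 16-entry tx fifo set
+ * up at probe time and UART_MCTL_TXFIFOFILL reading 5, at most
+ * 16 - 5 = 11 more characters are written in this pass.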
+ */ +static void bcm_uart_do_tx(struct uart_port *port) +{ + struct circ_buf *xmit; + unsigned int val, max_count; + + if (port->x_char) { + bcm_uart_writel(port, port->x_char, UART_FIFO_REG); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_tx_stopped(port)) { + bcm_uart_stop_tx(port); + return; + } + + xmit = &port->state->xmit; + if (uart_circ_empty(xmit)) + goto txq_empty; + + val = bcm_uart_readl(port, UART_MCTL_REG); + val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT; + max_count = port->fifosize - val; + + while (max_count--) { + unsigned int c; + + c = xmit->buf[xmit->tail]; + bcm_uart_writel(port, c, UART_FIFO_REG); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + goto txq_empty; + return; + +txq_empty: + /* nothing to send, disable transmit interrupt */ + val = bcm_uart_readl(port, UART_IR_REG); + val &= ~UART_TX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); + return; +} + +/* + * process uart interrupt + */ +static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id) +{ + struct uart_port *port; + unsigned int irqstat; + + port = dev_id; + spin_lock(&port->lock); + + irqstat = bcm_uart_readl(port, UART_IR_REG); + if (irqstat & UART_RX_INT_STAT) + bcm_uart_do_rx(port); + + if (irqstat & UART_TX_INT_STAT) + bcm_uart_do_tx(port); + + if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) { + unsigned int estat; + + estat = bcm_uart_readl(port, UART_EXTINP_REG); + if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS)) + uart_handle_cts_change(port, + estat & UART_EXTINP_CTS_MASK); + if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD)) + uart_handle_dcd_change(port, + estat & UART_EXTINP_DCD_MASK); + } + + spin_unlock(&port->lock); + return IRQ_HANDLED; +} + +/* + * enable rx & tx operation on uart + */ +static void bcm_uart_enable(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_CTL_REG); + val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK); + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * disable rx & tx operation on uart + */ +static void bcm_uart_disable(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_CTL_REG); + val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | + UART_CTL_RXEN_MASK); + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * clear all unread data in rx fifo and unsent data in tx fifo + */ +static void bcm_uart_flush(struct uart_port *port) +{ + unsigned int val; + + /* empty rx and tx fifo */ + val = bcm_uart_readl(port, UART_CTL_REG); + val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); + + /* read any pending char to make sure all irq status are + * cleared */ + (void)bcm_uart_readl(port, UART_FIFO_REG); +} + +/* + * serial core request to initialize uart and start rx operation + */ +static int bcm_uart_startup(struct uart_port *port) +{ + unsigned int val; + int ret; + + /* mask all irq and flush port */ + bcm_uart_disable(port); + bcm_uart_writel(port, 0, UART_IR_REG); + bcm_uart_flush(port); + + /* clear any pending external input interrupt */ + (void)bcm_uart_readl(port, UART_EXTINP_REG); + + /* set rx/tx fifo thresh to fifo half size */ + val = bcm_uart_readl(port, UART_MCTL_REG); + val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK); + val |= (port->fifosize / 2) << 
UART_MCTL_RXFIFOTHRESH_SHIFT; + val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT; + bcm_uart_writel(port, val, UART_MCTL_REG); + + /* set rx fifo timeout to 1 char time */ + val = bcm_uart_readl(port, UART_CTL_REG); + val &= ~UART_CTL_RXTMOUTCNT_MASK; + val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT; + bcm_uart_writel(port, val, UART_CTL_REG); + + /* report any edge on dcd and cts */ + val = UART_EXTINP_INT_MASK; + val |= UART_EXTINP_DCD_NOSENSE_MASK; + val |= UART_EXTINP_CTS_NOSENSE_MASK; + bcm_uart_writel(port, val, UART_EXTINP_REG); + + /* register irq and enable rx interrupts */ + ret = request_irq(port->irq, bcm_uart_interrupt, 0, + dev_name(port->dev), port); + if (ret) + return ret; + bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); + bcm_uart_enable(port); + return 0; +} + +/* + * serial core request to flush & disable uart + */ +static void bcm_uart_shutdown(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + bcm_uart_writel(port, 0, UART_IR_REG); + spin_unlock_irqrestore(&port->lock, flags); + + bcm_uart_disable(port); + bcm_uart_flush(port); + free_irq(port->irq, port); +} + +/* + * serial core request to change current uart setting + */ +static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new, + const struct ktermios *old) +{ + unsigned int ctl, baud, quot, ier; + unsigned long flags; + int tries; + + spin_lock_irqsave(&port->lock, flags); + + /* Drain the hot tub fully before we power it off for the winter. */ + for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--) + mdelay(10); + + /* disable uart while changing speed */ + bcm_uart_disable(port); + bcm_uart_flush(port); + + /* update Control register */ + ctl = bcm_uart_readl(port, UART_CTL_REG); + ctl &= ~UART_CTL_BITSPERSYM_MASK; + + switch (new->c_cflag & CSIZE) { + case CS5: + ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT); + break; + case CS6: + ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT); + break; + case CS7: + ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT); + break; + default: + ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT); + break; + } + + ctl &= ~UART_CTL_STOPBITS_MASK; + if (new->c_cflag & CSTOPB) + ctl |= UART_CTL_STOPBITS_2; + else + ctl |= UART_CTL_STOPBITS_1; + + ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); + if (new->c_cflag & PARENB) + ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); + ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); + if (new->c_cflag & PARODD) + ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); + bcm_uart_writel(port, ctl, UART_CTL_REG); + + /* update Baudword register */ + baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); + quot = uart_get_divisor(port, baud) - 1; + bcm_uart_writel(port, quot, UART_BAUD_REG); + + /* update Interrupt register */ + ier = bcm_uart_readl(port, UART_IR_REG); + + ier &= ~UART_IR_MASK(UART_IR_EXTIP); + if (UART_ENABLE_MS(port, new->c_cflag)) + ier |= UART_IR_MASK(UART_IR_EXTIP); + + bcm_uart_writel(port, ier, UART_IR_REG); + + /* update read/ignore mask */ + port->read_status_mask = UART_FIFO_VALID_MASK; + if (new->c_iflag & INPCK) { + port->read_status_mask |= UART_FIFO_FRAMEERR_MASK; + port->read_status_mask |= UART_FIFO_PARERR_MASK; + } + if (new->c_iflag & (IGNBRK | BRKINT)) + port->read_status_mask |= UART_FIFO_BRKDET_MASK; + + port->ignore_status_mask = 0; + if (new->c_iflag & IGNPAR) + port->ignore_status_mask |= UART_FIFO_PARERR_MASK; + if (new->c_iflag & IGNBRK) + port->ignore_status_mask |= UART_FIFO_BRKDET_MASK; + if (!(new->c_cflag & CREAD)) 
+ port->ignore_status_mask |= UART_FIFO_VALID_MASK; + + uart_update_timeout(port, new->c_cflag, baud); + bcm_uart_enable(port); + spin_unlock_irqrestore(&port->lock, flags); +} + +/* + * serial core request to claim uart iomem + */ +static int bcm_uart_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +/* + * serial core request to release uart iomem + */ +static void bcm_uart_release_port(struct uart_port *port) +{ + /* Nothing to release ... */ +} + +/* + * serial core request to do any port required autoconfiguration + */ +static void bcm_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + if (bcm_uart_request_port(port)) + return; + port->type = PORT_BCM63XX; + } +} + +/* + * serial core request to check that port information in serinfo are + * suitable + */ +static int bcm_uart_verify_port(struct uart_port *port, + struct serial_struct *serinfo) +{ + if (port->type != PORT_BCM63XX) + return -EINVAL; + if (port->irq != serinfo->irq) + return -EINVAL; + if (port->iotype != serinfo->io_type) + return -EINVAL; + if (port->mapbase != (unsigned long)serinfo->iomem_base) + return -EINVAL; + return 0; +} + +/* serial core callbacks */ +static const struct uart_ops bcm_uart_ops = { + .tx_empty = bcm_uart_tx_empty, + .get_mctrl = bcm_uart_get_mctrl, + .set_mctrl = bcm_uart_set_mctrl, + .start_tx = bcm_uart_start_tx, + .stop_tx = bcm_uart_stop_tx, + .stop_rx = bcm_uart_stop_rx, + .enable_ms = bcm_uart_enable_ms, + .break_ctl = bcm_uart_break_ctl, + .startup = bcm_uart_startup, + .shutdown = bcm_uart_shutdown, + .set_termios = bcm_uart_set_termios, + .type = bcm_uart_type, + .release_port = bcm_uart_release_port, + .request_port = bcm_uart_request_port, + .config_port = bcm_uart_config_port, + .verify_port = bcm_uart_verify_port, +}; + + + +#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE +static void wait_for_xmitr(struct uart_port *port) +{ + unsigned int tmout; + + /* Wait up to 10ms for the character(s) to be sent. */ + tmout = 10000; + while (--tmout) { + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + if (val & UART_IR_STAT(UART_IR_TXEMPTY)) + break; + udelay(1); + } + + /* Wait up to 1s for flow control if necessary */ + if (port->flags & UPF_CONS_FLOW) { + tmout = 1000000; + while (--tmout) { + unsigned int val; + + val = bcm_uart_readl(port, UART_EXTINP_REG); + if (val & UART_EXTINP_CTS_MASK) + break; + udelay(1); + } + } +} + +/* + * output given char + */ +static void bcm_console_putchar(struct uart_port *port, unsigned char ch) +{ + wait_for_xmitr(port); + bcm_uart_writel(port, ch, UART_FIFO_REG); +} + +/* + * console core request to output given string + */ +static void bcm_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port; + unsigned long flags; + int locked; + + port = &ports[co->index]; + + local_irq_save(flags); + if (port->sysrq) { + /* bcm_uart_interrupt() already took the lock */ + locked = 0; + } else if (oops_in_progress) { + locked = spin_trylock(&port->lock); + } else { + spin_lock(&port->lock); + locked = 1; + } + + /* call helper to deal with \r\n */ + uart_console_write(port, s, count, bcm_console_putchar); + + /* and wait for char to be transmitted */ + wait_for_xmitr(port); + + if (locked) + spin_unlock(&port->lock); + local_irq_restore(flags); +} + +/* + * console core request to setup given console, find matching uart + * port and setup it. 
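Before the console setup routine below: the options string it receives has the usual "<baud><parity><bits>" shape. A simplified, self-contained sketch of how such a string decomposes into the baud/parity/bits defaults; parse_options() here is a stand-in written for the example, not the serial core's uart_parse_options().

#include <stdio.h>
#include <stdlib.h>

static void parse_options(const char *s, int *baud, int *parity, int *bits)
{
	char *end;

	*baud = (int)strtol(s, &end, 10);	/* leading digits are the baud rate */
	if (*end)
		*parity = *end++;		/* 'n', 'e' or 'o' */
	if (*end)
		*bits = *end++ - '0';		/* word length */
}

int main(void)
{
	int baud = 9600, parity = 'n', bits = 8;	/* defaults, as in the driver */

	parse_options("115200n8", &baud, &parity, &bits);
	printf("baud=%d parity=%c bits=%d\n", baud, parity, bits);
	return 0;
}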
+ */ +static int bcm_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= BCM63XX_NR_UARTS) + return -EINVAL; + port = &ports[co->index]; + if (!port->membase) + return -ENODEV; + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver bcm_uart_driver; + +static struct console bcm63xx_console = { + .name = "ttyS", + .write = bcm_console_write, + .device = uart_console_device, + .setup = bcm_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &bcm_uart_driver, +}; + +static int __init bcm63xx_console_init(void) +{ + register_console(&bcm63xx_console); + return 0; +} + +console_initcall(bcm63xx_console_init); + +static void bcm_early_write(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, bcm_console_putchar); + wait_for_xmitr(&dev->port); +} + +static int __init bcm_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = bcm_early_write; + return 0; +} + +OF_EARLYCON_DECLARE(bcm63xx_uart, "brcm,bcm6345-uart", bcm_early_console_setup); + +#define BCM63XX_CONSOLE (&bcm63xx_console) +#else +#define BCM63XX_CONSOLE NULL +#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */ + +static struct uart_driver bcm_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "bcm63xx_uart", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = BCM63XX_NR_UARTS, + .cons = BCM63XX_CONSOLE, +}; + +/* + * platform driver probe/remove callback + */ +static int bcm_uart_probe(struct platform_device *pdev) +{ + struct resource *res_mem; + struct uart_port *port; + struct clk *clk; + int ret; + + if (pdev->dev.of_node) { + pdev->id = of_alias_get_id(pdev->dev.of_node, "serial"); + + if (pdev->id < 0) + pdev->id = of_alias_get_id(pdev->dev.of_node, "uart"); + } + + if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS) + return -EINVAL; + + port = &ports[pdev->id]; + if (port->membase) + return -EBUSY; + memset(port, 0, sizeof(*port)); + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) + return -ENODEV; + + port->mapbase = res_mem->start; + port->membase = devm_ioremap_resource(&pdev->dev, res_mem); + if (IS_ERR(port->membase)) + return PTR_ERR(port->membase); + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + port->irq = ret; + + clk = clk_get(&pdev->dev, "refclk"); + if (IS_ERR(clk) && pdev->dev.of_node) + clk = of_clk_get(pdev->dev.of_node, 0); + + if (IS_ERR(clk)) + return -ENODEV; + + port->iotype = UPIO_MEM; + port->ops = &bcm_uart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->dev = &pdev->dev; + port->fifosize = 16; + port->uartclk = clk_get_rate(clk) / 2; + port->line = pdev->id; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_BCM63XX_CONSOLE); + clk_put(clk); + + ret = uart_add_one_port(&bcm_uart_driver, port); + if (ret) { + ports[pdev->id].membase = NULL; + return ret; + } + platform_set_drvdata(pdev, port); + return 0; +} + +static int bcm_uart_remove(struct platform_device *pdev) +{ + struct uart_port *port; + + port = platform_get_drvdata(pdev); + uart_remove_one_port(&bcm_uart_driver, port); + /* mark port as free */ + ports[pdev->id].membase = NULL; + return 0; +} + +static const struct of_device_id bcm63xx_of_match[] = { + { 
.compatible = "brcm,bcm6345-uart" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm63xx_of_match); + +/* + * platform driver stuff + */ +static struct platform_driver bcm_uart_platform_driver = { + .probe = bcm_uart_probe, + .remove = bcm_uart_remove, + .driver = { + .name = "bcm63xx_uart", + .of_match_table = bcm63xx_of_match, + }, +}; + +static int __init bcm_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&bcm_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&bcm_uart_platform_driver); + if (ret) + uart_unregister_driver(&bcm_uart_driver); + + return ret; +} + +static void __exit bcm_uart_exit(void) +{ + platform_driver_unregister(&bcm_uart_platform_driver); + uart_unregister_driver(&bcm_uart_driver); +} + +module_init(bcm_uart_init); +module_exit(bcm_uart_exit); + +MODULE_AUTHOR("Maxime Bizon "); +MODULE_DESCRIPTION("Broadcom 63xx integrated uart driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c new file mode 100644 index 000000000..404b43a5a --- /dev/null +++ b/drivers/tty/serial/clps711x.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for CLPS711x serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright 1999 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "serial_mctrl_gpio.h" + +#define UART_CLPS711X_DEVNAME "ttyCL" +#define UART_CLPS711X_NR 2 +#define UART_CLPS711X_MAJOR 204 +#define UART_CLPS711X_MINOR 40 + +#define UARTDR_OFFSET (0x00) +#define UBRLCR_OFFSET (0x40) + +#define UARTDR_FRMERR (1 << 8) +#define UARTDR_PARERR (1 << 9) +#define UARTDR_OVERR (1 << 10) + +#define UBRLCR_BAUD_MASK ((1 << 12) - 1) +#define UBRLCR_BREAK (1 << 12) +#define UBRLCR_PRTEN (1 << 13) +#define UBRLCR_EVENPRT (1 << 14) +#define UBRLCR_XSTOP (1 << 15) +#define UBRLCR_FIFOEN (1 << 16) +#define UBRLCR_WRDLEN5 (0 << 17) +#define UBRLCR_WRDLEN6 (1 << 17) +#define UBRLCR_WRDLEN7 (2 << 17) +#define UBRLCR_WRDLEN8 (3 << 17) +#define UBRLCR_WRDLEN_MASK (3 << 17) + +struct clps711x_port { + struct uart_port port; + unsigned int tx_enabled; + int rx_irq; + struct regmap *syscon; + struct mctrl_gpios *gpios; +}; + +static struct uart_driver clps711x_uart = { + .owner = THIS_MODULE, + .driver_name = UART_CLPS711X_DEVNAME, + .dev_name = UART_CLPS711X_DEVNAME, + .major = UART_CLPS711X_MAJOR, + .minor = UART_CLPS711X_MINOR, + .nr = UART_CLPS711X_NR, +}; + +static void uart_clps711x_stop_tx(struct uart_port *port) +{ + struct clps711x_port *s = dev_get_drvdata(port->dev); + + if (s->tx_enabled) { + disable_irq(port->irq); + s->tx_enabled = 0; + } +} + +static void uart_clps711x_start_tx(struct uart_port *port) +{ + struct clps711x_port *s = dev_get_drvdata(port->dev); + + if (!s->tx_enabled) { + s->tx_enabled = 1; + enable_irq(port->irq); + } +} + +static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct clps711x_port *s = dev_get_drvdata(port->dev); + unsigned int status, flg; + u16 ch; + + for (;;) { + u32 sysflg = 0; + + regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); + if (sysflg & SYSFLG_URXFE) + break; + + ch = readw(port->membase + UARTDR_OFFSET); + status = ch & (UARTDR_FRMERR | UARTDR_PARERR | UARTDR_OVERR); + ch &= 0xff; + + port->icount.rx++; + flg = TTY_NORMAL; + + if (unlikely(status)) { + if 
(status & UARTDR_PARERR) + port->icount.parity++; + else if (status & UARTDR_FRMERR) + port->icount.frame++; + else if (status & UARTDR_OVERR) + port->icount.overrun++; + + status &= port->read_status_mask; + + if (status & UARTDR_PARERR) + flg = TTY_PARITY; + else if (status & UARTDR_FRMERR) + flg = TTY_FRAME; + else if (status & UARTDR_OVERR) + flg = TTY_OVERRUN; + } + + if (uart_handle_sysrq_char(port, ch)) + continue; + + if (status & port->ignore_status_mask) + continue; + + uart_insert_char(port, status, UARTDR_OVERR, ch, flg); + } + + tty_flip_buffer_push(&port->state->port); + + return IRQ_HANDLED; +} + +static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct clps711x_port *s = dev_get_drvdata(port->dev); + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + writew(port->x_char, port->membase + UARTDR_OFFSET); + port->icount.tx++; + port->x_char = 0; + return IRQ_HANDLED; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (s->tx_enabled) { + disable_irq_nosync(port->irq); + s->tx_enabled = 0; + } + return IRQ_HANDLED; + } + + while (!uart_circ_empty(xmit)) { + u32 sysflg = 0; + + writew(xmit->buf[xmit->tail], port->membase + UARTDR_OFFSET); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + + regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); + if (sysflg & SYSFLG_UTXFF) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + return IRQ_HANDLED; +} + +static unsigned int uart_clps711x_tx_empty(struct uart_port *port) +{ + struct clps711x_port *s = dev_get_drvdata(port->dev); + u32 sysflg = 0; + + regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); + + return (sysflg & SYSFLG_UBUSY) ? 0 : TIOCSER_TEMT; +} + +static unsigned int uart_clps711x_get_mctrl(struct uart_port *port) +{ + unsigned int result = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR; + struct clps711x_port *s = dev_get_drvdata(port->dev); + + return mctrl_gpio_get(s->gpios, &result); +} + +static void uart_clps711x_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct clps711x_port *s = dev_get_drvdata(port->dev); + + mctrl_gpio_set(s->gpios, mctrl); +} + +static void uart_clps711x_break_ctl(struct uart_port *port, int break_state) +{ + unsigned int ubrlcr; + + ubrlcr = readl(port->membase + UBRLCR_OFFSET); + if (break_state) + ubrlcr |= UBRLCR_BREAK; + else + ubrlcr &= ~UBRLCR_BREAK; + writel(ubrlcr, port->membase + UBRLCR_OFFSET); +} + +static void uart_clps711x_set_ldisc(struct uart_port *port, + struct ktermios *termios) +{ + if (!port->line) { + struct clps711x_port *s = dev_get_drvdata(port->dev); + + regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON1_SIREN, + (termios->c_line == N_IRDA) ? 
SYSCON1_SIREN : 0); + } +} + +static int uart_clps711x_startup(struct uart_port *port) +{ + struct clps711x_port *s = dev_get_drvdata(port->dev); + + /* Disable break */ + writel(readl(port->membase + UBRLCR_OFFSET) & ~UBRLCR_BREAK, + port->membase + UBRLCR_OFFSET); + + /* Enable the port */ + return regmap_update_bits(s->syscon, SYSCON_OFFSET, + SYSCON_UARTEN, SYSCON_UARTEN); +} + +static void uart_clps711x_shutdown(struct uart_port *port) +{ + struct clps711x_port *s = dev_get_drvdata(port->dev); + + /* Disable the port */ + regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON_UARTEN, 0); +} + +static void uart_clps711x_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + u32 ubrlcr; + unsigned int baud, quot; + + /* Mask termios capabilities we don't support */ + termios->c_cflag &= ~CMSPAR; + termios->c_iflag &= ~(BRKINT | IGNBRK); + + /* Ask the core to calculate the divisor for us */ + baud = uart_get_baud_rate(port, termios, old, port->uartclk / 4096, + port->uartclk / 16); + quot = uart_get_divisor(port, baud); + + switch (termios->c_cflag & CSIZE) { + case CS5: + ubrlcr = UBRLCR_WRDLEN5; + break; + case CS6: + ubrlcr = UBRLCR_WRDLEN6; + break; + case CS7: + ubrlcr = UBRLCR_WRDLEN7; + break; + case CS8: + default: + ubrlcr = UBRLCR_WRDLEN8; + break; + } + + if (termios->c_cflag & CSTOPB) + ubrlcr |= UBRLCR_XSTOP; + + if (termios->c_cflag & PARENB) { + ubrlcr |= UBRLCR_PRTEN; + if (!(termios->c_cflag & PARODD)) + ubrlcr |= UBRLCR_EVENPRT; + } + + /* Enable FIFO */ + ubrlcr |= UBRLCR_FIFOEN; + + /* Set read status mask */ + port->read_status_mask = UARTDR_OVERR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= UARTDR_PARERR | UARTDR_FRMERR; + + /* Set status ignore mask */ + port->ignore_status_mask = 0; + if (!(termios->c_cflag & CREAD)) + port->ignore_status_mask |= UARTDR_OVERR | UARTDR_PARERR | + UARTDR_FRMERR; + + uart_update_timeout(port, termios->c_cflag, baud); + + writel(ubrlcr | (quot - 1), port->membase + UBRLCR_OFFSET); +} + +static const char *uart_clps711x_type(struct uart_port *port) +{ + return (port->type == PORT_CLPS711X) ? 
"CLPS711X" : NULL; +} + +static void uart_clps711x_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_CLPS711X; +} + +static void uart_clps711x_nop_void(struct uart_port *port) +{ +} + +static int uart_clps711x_nop_int(struct uart_port *port) +{ + return 0; +} + +static const struct uart_ops uart_clps711x_ops = { + .tx_empty = uart_clps711x_tx_empty, + .set_mctrl = uart_clps711x_set_mctrl, + .get_mctrl = uart_clps711x_get_mctrl, + .stop_tx = uart_clps711x_stop_tx, + .start_tx = uart_clps711x_start_tx, + .stop_rx = uart_clps711x_nop_void, + .break_ctl = uart_clps711x_break_ctl, + .set_ldisc = uart_clps711x_set_ldisc, + .startup = uart_clps711x_startup, + .shutdown = uart_clps711x_shutdown, + .set_termios = uart_clps711x_set_termios, + .type = uart_clps711x_type, + .config_port = uart_clps711x_config_port, + .release_port = uart_clps711x_nop_void, + .request_port = uart_clps711x_nop_int, +}; + +#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE +static void uart_clps711x_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct clps711x_port *s = dev_get_drvdata(port->dev); + u32 sysflg = 0; + + /* Wait for FIFO is not full */ + do { + regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); + } while (sysflg & SYSFLG_UTXFF); + + writew(ch, port->membase + UARTDR_OFFSET); +} + +static void uart_clps711x_console_write(struct console *co, const char *c, + unsigned n) +{ + struct uart_port *port = clps711x_uart.state[co->index].uart_port; + struct clps711x_port *s = dev_get_drvdata(port->dev); + u32 sysflg = 0; + + uart_console_write(port, c, n, uart_clps711x_console_putchar); + + /* Wait for transmitter to become empty */ + do { + regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); + } while (sysflg & SYSFLG_UBUSY); +} + +static int uart_clps711x_console_setup(struct console *co, char *options) +{ + int baud = 38400, bits = 8, parity = 'n', flow = 'n'; + int ret, index = co->index; + struct clps711x_port *s; + struct uart_port *port; + unsigned int quot; + u32 ubrlcr; + + if (index < 0 || index >= UART_CLPS711X_NR) + return -EINVAL; + + port = clps711x_uart.state[index].uart_port; + if (!port) + return -ENODEV; + + s = dev_get_drvdata(port->dev); + + if (!options) { + u32 syscon = 0; + + regmap_read(s->syscon, SYSCON_OFFSET, &syscon); + if (syscon & SYSCON_UARTEN) { + ubrlcr = readl(port->membase + UBRLCR_OFFSET); + + if (ubrlcr & UBRLCR_PRTEN) { + if (ubrlcr & UBRLCR_EVENPRT) + parity = 'e'; + else + parity = 'o'; + } + + if ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7) + bits = 7; + + quot = ubrlcr & UBRLCR_BAUD_MASK; + baud = port->uartclk / (16 * (quot + 1)); + } + } else + uart_parse_options(options, &baud, &parity, &bits, &flow); + + ret = uart_set_options(port, co, baud, parity, bits, flow); + if (ret) + return ret; + + return regmap_update_bits(s->syscon, SYSCON_OFFSET, + SYSCON_UARTEN, SYSCON_UARTEN); +} + +static struct console clps711x_console = { + .name = UART_CLPS711X_DEVNAME, + .device = uart_console_device, + .write = uart_clps711x_console_write, + .setup = uart_clps711x_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; +#endif + +static int uart_clps711x_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct clps711x_port *s; + struct resource *res; + struct clk *uart_clk; + int irq, ret; + + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + uart_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(uart_clk)) + return PTR_ERR(uart_clk); + + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + s->port.membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(s->port.membase)) + return PTR_ERR(s->port.membase); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + s->port.irq = irq; + + s->rx_irq = platform_get_irq(pdev, 1); + if (s->rx_irq < 0) + return s->rx_irq; + + s->syscon = syscon_regmap_lookup_by_phandle(np, "syscon"); + if (IS_ERR(s->syscon)) + return PTR_ERR(s->syscon); + + s->port.line = of_alias_get_id(np, "serial"); + s->port.dev = &pdev->dev; + s->port.iotype = UPIO_MEM32; + s->port.mapbase = res->start; + s->port.type = PORT_CLPS711X; + s->port.fifosize = 16; + s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_CLPS711X_CONSOLE); + s->port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE; + s->port.uartclk = clk_get_rate(uart_clk); + s->port.ops = &uart_clps711x_ops; + + platform_set_drvdata(pdev, s); + + s->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0); + if (IS_ERR(s->gpios)) + return PTR_ERR(s->gpios); + + ret = uart_add_one_port(&clps711x_uart, &s->port); + if (ret) + return ret; + + /* Disable port */ + if (!uart_console(&s->port)) + regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON_UARTEN, 0); + + s->tx_enabled = 1; + + ret = devm_request_irq(&pdev->dev, s->port.irq, uart_clps711x_int_tx, 0, + dev_name(&pdev->dev), &s->port); + if (ret) { + uart_remove_one_port(&clps711x_uart, &s->port); + return ret; + } + + ret = devm_request_irq(&pdev->dev, s->rx_irq, uart_clps711x_int_rx, 0, + dev_name(&pdev->dev), &s->port); + if (ret) + uart_remove_one_port(&clps711x_uart, &s->port); + + return ret; +} + +static int uart_clps711x_remove(struct platform_device *pdev) +{ + struct clps711x_port *s = platform_get_drvdata(pdev); + + return uart_remove_one_port(&clps711x_uart, &s->port); +} + +static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = { + { .compatible = "cirrus,ep7209-uart", }, + { } +}; +MODULE_DEVICE_TABLE(of, clps711x_uart_dt_ids); + +static struct platform_driver clps711x_uart_platform = { + .driver = { + .name = "clps711x-uart", + .of_match_table = of_match_ptr(clps711x_uart_dt_ids), + }, + .probe = uart_clps711x_probe, + .remove = uart_clps711x_remove, +}; + +static int __init uart_clps711x_init(void) +{ + int ret; + +#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE + clps711x_uart.cons = &clps711x_console; + clps711x_console.data = &clps711x_uart; +#endif + + ret = uart_register_driver(&clps711x_uart); + if (ret) + return ret; + + return platform_driver_register(&clps711x_uart_platform); +} +module_init(uart_clps711x_init); + +static void __exit uart_clps711x_exit(void) +{ + platform_driver_unregister(&clps711x_uart_platform); + uart_unregister_driver(&clps711x_uart); +} +module_exit(uart_clps711x_exit); + +MODULE_AUTHOR("Deep Blue Solutions Ltd"); +MODULE_DESCRIPTION("CLPS711X serial driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile new file mode 100644 index 000000000..3f3a6ed02 --- /dev/null +++ b/drivers/tty/serial/cpm_uart/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Motorola 8xx FEC ethernet controller +# + +obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o + +# Select the correct platform objects. 
+cpm_uart-objs-$(CONFIG_CPM2) += cpm_uart_cpm2.o +cpm_uart-objs-$(CONFIG_CPM1) += cpm_uart_cpm1.o + +cpm_uart-objs := cpm_uart_core.o $(cpm_uart-objs-y) diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h new file mode 100644 index 000000000..46c03ed71 --- /dev/null +++ b/drivers/tty/serial/cpm_uart/cpm_uart.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver for CPM (SCC/SMC) serial ports + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * + * 2006 (c) MontaVista Software, Inc. + * Vitaly Bordug + */ +#ifndef CPM_UART_H +#define CPM_UART_H + +#include +#include + +struct gpio_desc; + +#if defined(CONFIG_CPM2) +#include "cpm_uart_cpm2.h" +#elif defined(CONFIG_CPM1) +#include "cpm_uart_cpm1.h" +#endif + +#define SERIAL_CPM_MAJOR 204 +#define SERIAL_CPM_MINOR 46 + +#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC) +#define IS_DISCARDING(pinfo) (pinfo->flags & FLAG_DISCARDING) +#define FLAG_DISCARDING 0x00000004 /* when set, don't discard */ +#define FLAG_SMC 0x00000002 +#define FLAG_CONSOLE 0x00000001 + +#define UART_SMC1 fsid_smc1_uart +#define UART_SMC2 fsid_smc2_uart +#define UART_SCC1 fsid_scc1_uart +#define UART_SCC2 fsid_scc2_uart +#define UART_SCC3 fsid_scc3_uart +#define UART_SCC4 fsid_scc4_uart + +#define UART_NR fs_uart_nr + +#define RX_NUM_FIFO 4 +#define RX_BUF_SIZE 32 +#define TX_NUM_FIFO 4 +#define TX_BUF_SIZE 32 + +#define SCC_WAIT_CLOSING 100 + +#define GPIO_CTS 0 +#define GPIO_RTS 1 +#define GPIO_DCD 2 +#define GPIO_DSR 3 +#define GPIO_DTR 4 +#define GPIO_RI 5 + +#define NUM_GPIOS (GPIO_RI+1) + +struct uart_cpm_port { + struct uart_port port; + u16 rx_nrfifos; + u16 rx_fifosize; + u16 tx_nrfifos; + u16 tx_fifosize; + smc_t __iomem *smcp; + smc_uart_t __iomem *smcup; + scc_t __iomem *sccp; + scc_uart_t __iomem *sccup; + cbd_t __iomem *rx_bd_base; + cbd_t __iomem *rx_cur; + cbd_t __iomem *tx_bd_base; + cbd_t __iomem *tx_cur; + unsigned char *tx_buf; + unsigned char *rx_buf; + u32 flags; + struct clk *clk; + u8 brg; + uint dp_addr; + void *mem_addr; + dma_addr_t dma_addr; + u32 mem_size; + /* wait on close if needed */ + int wait_closing; + /* value to combine with opcode to form cpm command */ + u32 command; + struct gpio_desc *gpios[NUM_GPIOS]; +}; + +extern struct uart_cpm_port cpm_uart_ports[UART_NR]; + +/* these are located in their respective files */ +void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd); +void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port, + struct device_node *np); +void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram); +int cpm_uart_init_portdesc(void); +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con); +void cpm_uart_freebuf(struct uart_cpm_port *pinfo); + +void smc1_lineif(struct uart_cpm_port *pinfo); +void smc2_lineif(struct uart_cpm_port *pinfo); +void scc1_lineif(struct uart_cpm_port *pinfo); +void scc2_lineif(struct uart_cpm_port *pinfo); +void scc3_lineif(struct uart_cpm_port *pinfo); +void scc4_lineif(struct uart_cpm_port *pinfo); + +/* + virtual to phys transtalion +*/ +static inline unsigned long cpu2cpm_addr(void *addr, + struct uart_cpm_port *pinfo) +{ + int offset; + u32 val = (u32)addr; + u32 mem = (u32)pinfo->mem_addr; + /* sane check */ + if (likely(val >= mem && val < mem + pinfo->mem_size)) { + offset = val - mem; + return pinfo->dma_addr + offset; + } + /* something nasty happened */ + BUG(); + return 0; +} + +static inline void *cpm2cpu_addr(unsigned long addr, + struct uart_cpm_port *pinfo) +{ + int offset; 
+ u32 val = addr; + u32 dma = (u32)pinfo->dma_addr; + /* sane check */ + if (likely(val >= dma && val < dma + pinfo->mem_size)) { + offset = val - dma; + return pinfo->mem_addr + offset; + } + /* something nasty happened */ + BUG(); + return NULL; +} + + +#endif /* CPM_UART_H */ diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c new file mode 100644 index 000000000..bb25691f5 --- /dev/null +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -0,0 +1,1485 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for CPM (SCC/SMC) serial ports; core driver + * + * Based on arch/ppc/cpm2_io/uart.c by Dan Malek + * Based on ppc8xx.c by Thomas Gleixner + * Based on drivers/serial/amba.c by Russell King + * + * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004, 2007 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * (C) 2005-2006 MontaVista Software, Inc. + * Vitaly Bordug + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include "cpm_uart.h" + + +/**************************************************************/ + +static int cpm_uart_tx_pump(struct uart_port *port); +static void cpm_uart_init_smc(struct uart_cpm_port *pinfo); +static void cpm_uart_init_scc(struct uart_cpm_port *pinfo); +static void cpm_uart_initbd(struct uart_cpm_port *pinfo); + +/**************************************************************/ + +#define HW_BUF_SPD_THRESHOLD 2400 + +/* + * Check, if transmit buffers are processed +*/ +static unsigned int cpm_uart_tx_empty(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + cbd_t __iomem *bdp = pinfo->tx_bd_base; + int ret = 0; + + while (1) { + if (in_be16(&bdp->cbd_sc) & BD_SC_READY) + break; + + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) { + ret = TIOCSER_TEMT; + break; + } + bdp++; + } + + pr_debug("CPM uart[%d]:tx_empty: %d\n", port->line, ret); + + return ret; +} + +static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + + if (pinfo->gpios[GPIO_RTS]) + gpiod_set_value(pinfo->gpios[GPIO_RTS], !(mctrl & TIOCM_RTS)); + + if (pinfo->gpios[GPIO_DTR]) + gpiod_set_value(pinfo->gpios[GPIO_DTR], !(mctrl & TIOCM_DTR)); +} + +static unsigned int cpm_uart_get_mctrl(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + unsigned int mctrl = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; + + if (pinfo->gpios[GPIO_CTS]) { + if (gpiod_get_value(pinfo->gpios[GPIO_CTS])) + mctrl &= ~TIOCM_CTS; + } + + if (pinfo->gpios[GPIO_DSR]) { + if (gpiod_get_value(pinfo->gpios[GPIO_DSR])) + mctrl &= ~TIOCM_DSR; + } + + if (pinfo->gpios[GPIO_DCD]) { + if (gpiod_get_value(pinfo->gpios[GPIO_DCD])) + mctrl &= ~TIOCM_CAR; + } + + if (pinfo->gpios[GPIO_RI]) { + if (!gpiod_get_value(pinfo->gpios[GPIO_RI])) + mctrl |= TIOCM_RNG; + } + + return mctrl; +} + +/* + * Stop transmitter + */ +static void cpm_uart_stop_tx(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + smc_t __iomem *smcp = pinfo->smcp; + scc_t __iomem *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:stop tx\n", port->line); + + if 
(IS_SMC(pinfo)) + clrbits8(&smcp->smc_smcm, SMCM_TX); + else + clrbits16(&sccp->scc_sccm, UART_SCCM_TX); +} + +/* + * Start transmitter + */ +static void cpm_uart_start_tx(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + smc_t __iomem *smcp = pinfo->smcp; + scc_t __iomem *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:start tx\n", port->line); + + if (IS_SMC(pinfo)) { + if (in_8(&smcp->smc_smcm) & SMCM_TX) + return; + } else { + if (in_be16(&sccp->scc_sccm) & UART_SCCM_TX) + return; + } + + if (cpm_uart_tx_pump(port) != 0) { + if (IS_SMC(pinfo)) { + setbits8(&smcp->smc_smcm, SMCM_TX); + } else { + setbits16(&sccp->scc_sccm, UART_SCCM_TX); + } + } +} + +/* + * Stop receiver + */ +static void cpm_uart_stop_rx(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + smc_t __iomem *smcp = pinfo->smcp; + scc_t __iomem *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:stop rx\n", port->line); + + if (IS_SMC(pinfo)) + clrbits8(&smcp->smc_smcm, SMCM_RX); + else + clrbits16(&sccp->scc_sccm, UART_SCCM_RX); +} + +/* + * Generate a break. + */ +static void cpm_uart_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + + pr_debug("CPM uart[%d]:break ctrl, break_state: %d\n", port->line, + break_state); + + if (break_state) + cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); + else + cpm_line_cr_cmd(pinfo, CPM_CR_RESTART_TX); +} + +/* + * Transmit characters, refill buffer descriptor, if possible + */ +static void cpm_uart_int_tx(struct uart_port *port) +{ + pr_debug("CPM uart[%d]:TX INT\n", port->line); + + cpm_uart_tx_pump(port); +} + +#ifdef CONFIG_CONSOLE_POLL +static int serial_polled; +#endif + +/* + * Receive characters + */ +static void cpm_uart_int_rx(struct uart_port *port) +{ + int i; + unsigned char ch; + u8 *cp; + struct tty_port *tport = &port->state->port; + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + cbd_t __iomem *bdp; + u16 status; + unsigned int flg; + + pr_debug("CPM uart[%d]:RX INT\n", port->line); + + /* Just loop through the closed BDs and copy the characters into + * the buffer. + */ + bdp = pinfo->rx_cur; + for (;;) { +#ifdef CONFIG_CONSOLE_POLL + if (unlikely(serial_polled)) { + serial_polled = 0; + return; + } +#endif + /* get status */ + status = in_be16(&bdp->cbd_sc); + /* If this one is empty, return happy */ + if (status & BD_SC_EMPTY) + break; + + /* get number of characters, and check spce in flip-buffer */ + i = in_be16(&bdp->cbd_datlen); + + /* If we have not enough room in tty flip buffer, then we try + * later, which will be the next rx-interrupt or a timeout + */ + if (tty_buffer_request_room(tport, i) < i) { + printk(KERN_WARNING "No room in flip buffer\n"); + return; + } + + /* get pointer */ + cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); + + /* loop through the buffer */ + while (i-- > 0) { + ch = *cp++; + port->icount.rx++; + flg = TTY_NORMAL; + + if (status & + (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV)) + goto handle_error; + if (uart_handle_sysrq_char(port, ch)) + continue; +#ifdef CONFIG_CONSOLE_POLL + if (unlikely(serial_polled)) { + serial_polled = 0; + return; + } +#endif + error_return: + tty_insert_flip_char(tport, ch, flg); + + } /* End while (i--) */ + + /* This BD is ready to be used again. Clear status. 
get next */ + clrbits16(&bdp->cbd_sc, BD_SC_BR | BD_SC_FR | BD_SC_PR | + BD_SC_OV | BD_SC_ID); + setbits16(&bdp->cbd_sc, BD_SC_EMPTY); + + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) + bdp = pinfo->rx_bd_base; + else + bdp++; + + } /* End for (;;) */ + + /* Write back buffer pointer */ + pinfo->rx_cur = bdp; + + /* activate BH processing */ + tty_flip_buffer_push(tport); + + return; + + /* Error processing */ + + handle_error: + /* Statistics */ + if (status & BD_SC_BR) + port->icount.brk++; + if (status & BD_SC_PR) + port->icount.parity++; + if (status & BD_SC_FR) + port->icount.frame++; + if (status & BD_SC_OV) + port->icount.overrun++; + + /* Mask out ignored conditions */ + status &= port->read_status_mask; + + /* Handle the remaining ones */ + if (status & BD_SC_BR) + flg = TTY_BREAK; + else if (status & BD_SC_PR) + flg = TTY_PARITY; + else if (status & BD_SC_FR) + flg = TTY_FRAME; + + /* overrun does not affect the current character ! */ + if (status & BD_SC_OV) { + ch = 0; + flg = TTY_OVERRUN; + /* We skip this buffer */ + /* CHECK: Is really nothing senseful there */ + /* ASSUMPTION: it contains nothing valid */ + i = 0; + } + port->sysrq = 0; + goto error_return; +} + +/* + * Asynchron mode interrupt handler + */ +static irqreturn_t cpm_uart_int(int irq, void *data) +{ + u8 events; + struct uart_port *port = data; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + smc_t __iomem *smcp = pinfo->smcp; + scc_t __iomem *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:IRQ\n", port->line); + + if (IS_SMC(pinfo)) { + events = in_8(&smcp->smc_smce); + out_8(&smcp->smc_smce, events); + if (events & SMCM_BRKE) + uart_handle_break(port); + if (events & SMCM_RX) + cpm_uart_int_rx(port); + if (events & SMCM_TX) + cpm_uart_int_tx(port); + } else { + events = in_be16(&sccp->scc_scce); + out_be16(&sccp->scc_scce, events); + if (events & UART_SCCM_BRKE) + uart_handle_break(port); + if (events & UART_SCCM_RX) + cpm_uart_int_rx(port); + if (events & UART_SCCM_TX) + cpm_uart_int_tx(port); + } + return (events) ? IRQ_HANDLED : IRQ_NONE; +} + +static int cpm_uart_startup(struct uart_port *port) +{ + int retval; + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + + pr_debug("CPM uart[%d]:startup\n", port->line); + + /* If the port is not the console, make sure rx is disabled. */ + if (!(pinfo->flags & FLAG_CONSOLE)) { + /* Disable UART rx */ + if (IS_SMC(pinfo)) { + clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN); + clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX); + } else { + clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR); + clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX); + } + cpm_uart_initbd(pinfo); + if (IS_SMC(pinfo)) { + out_be32(&pinfo->smcup->smc_rstate, 0); + out_be32(&pinfo->smcup->smc_tstate, 0); + out_be16(&pinfo->smcup->smc_rbptr, + in_be16(&pinfo->smcup->smc_rbase)); + out_be16(&pinfo->smcup->smc_tbptr, + in_be16(&pinfo->smcup->smc_tbase)); + } else { + cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); + } + } + /* Install interrupt handler. 
*/ + retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port); + if (retval) + return retval; + + /* Startup rx-int */ + if (IS_SMC(pinfo)) { + setbits8(&pinfo->smcp->smc_smcm, SMCM_RX); + setbits16(&pinfo->smcp->smc_smcmr, (SMCMR_REN | SMCMR_TEN)); + } else { + setbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX); + setbits32(&pinfo->sccp->scc_gsmrl, (SCC_GSMRL_ENR | SCC_GSMRL_ENT)); + } + + return 0; +} + +inline void cpm_uart_wait_until_send(struct uart_cpm_port *pinfo) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(pinfo->wait_closing); +} + +/* + * Shutdown the uart + */ +static void cpm_uart_shutdown(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + + pr_debug("CPM uart[%d]:shutdown\n", port->line); + + /* free interrupt handler */ + free_irq(port->irq, port); + + /* If the port is not the console, disable Rx and Tx. */ + if (!(pinfo->flags & FLAG_CONSOLE)) { + /* Wait for all the BDs marked sent */ + while(!cpm_uart_tx_empty(port)) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(2); + } + + if (pinfo->wait_closing) + cpm_uart_wait_until_send(pinfo); + + /* Stop uarts */ + if (IS_SMC(pinfo)) { + smc_t __iomem *smcp = pinfo->smcp; + clrbits16(&smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); + clrbits8(&smcp->smc_smcm, SMCM_RX | SMCM_TX); + } else { + scc_t __iomem *sccp = pinfo->sccp; + clrbits32(&sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); + clrbits16(&sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); + } + + /* Shut them really down and reinit buffer descriptors */ + if (IS_SMC(pinfo)) { + out_be16(&pinfo->smcup->smc_brkcr, 0); + cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); + } else { + out_be16(&pinfo->sccup->scc_brkcr, 0); + cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX); + } + + cpm_uart_initbd(pinfo); + } +} + +static void cpm_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + int baud; + unsigned long flags; + u16 cval, scval, prev_mode; + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + smc_t __iomem *smcp = pinfo->smcp; + scc_t __iomem *sccp = pinfo->sccp; + int maxidl; + + pr_debug("CPM uart[%d]:set_termios\n", port->line); + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + if (baud < HW_BUF_SPD_THRESHOLD || port->flags & UPF_LOW_LATENCY) + pinfo->rx_fifosize = 1; + else + pinfo->rx_fifosize = RX_BUF_SIZE; + + /* MAXIDL is the timeout after which a receive buffer is closed + * when not full if no more characters are received. + * We calculate it from the baudrate so that the duration is + * always the same at standard rates: about 4ms. 
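A worked example of the MAXIDL computation that follows: maxidl = baud / 2400, clamped to 1..0x10, lands near 4 ms at common rates because one asynchronous character is roughly ten bit times. This is standalone arithmetic for illustration only.

#include <stdio.h>

int main(void)
{
	static const int rates[] = { 1200, 9600, 38400, 115200 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		int baud = rates[i];
		int maxidl = baud / 2400;

		if (maxidl < 1)
			maxidl = 1;
		if (maxidl > 0x10)
			maxidl = 0x10;

		/* idle timeout = maxidl character times, ~10 bits per character */
		double ms = maxidl * 10.0 * 1000.0 / baud;

		printf("%6d baud: maxidl=%2d -> ~%.1f ms\n", baud, maxidl, ms);
	}
	return 0;
}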
+ */ + maxidl = baud / 2400; + if (maxidl < 1) + maxidl = 1; + if (maxidl > 0x10) + maxidl = 0x10; + + cval = 0; + scval = 0; + + if (termios->c_cflag & CSTOPB) { + cval |= SMCMR_SL; /* Two stops */ + scval |= SCU_PSMR_SL; + } + + if (termios->c_cflag & PARENB) { + cval |= SMCMR_PEN; + scval |= SCU_PSMR_PEN; + if (!(termios->c_cflag & PARODD)) { + cval |= SMCMR_PM_EVEN; + scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP); + } + } + + /* + * Update the timeout + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* + * Set up parity check flag + */ + port->read_status_mask = (BD_SC_EMPTY | BD_SC_OV); + if (termios->c_iflag & INPCK) + port->read_status_mask |= BD_SC_FR | BD_SC_PR; + if ((termios->c_iflag & BRKINT) || (termios->c_iflag & PARMRK)) + port->read_status_mask |= BD_SC_BR; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_PR | BD_SC_FR; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= BD_SC_BR; + /* + * If we're ignore parity and break indicators, ignore + * overruns too. (For real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_OV; + } + /* + * !!! ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + port->read_status_mask &= ~BD_SC_EMPTY; + + spin_lock_irqsave(&port->lock, flags); + + if (IS_SMC(pinfo)) { + unsigned int bits = tty_get_frame_size(termios->c_cflag); + + /* + * MRBLR can be changed while an SMC/SCC is operating only + * if it is done in a single bus cycle with one 16-bit move + * (not two 8-bit bus cycles back-to-back). This occurs when + * the cp shifts control to the next RxBD, so the change does + * not take effect immediately. To guarantee the exact RxBD + * on which the change occurs, change MRBLR only while the + * SMC/SCC receiver is disabled. + */ + out_be16(&pinfo->smcup->smc_mrblr, pinfo->rx_fifosize); + out_be16(&pinfo->smcup->smc_maxidl, maxidl); + + /* Set the mode register. We want to keep a copy of the + * enables, because we want to put them back if they were + * present. + */ + prev_mode = in_be16(&smcp->smc_smcmr) & (SMCMR_REN | SMCMR_TEN); + /* Output in *one* operation, so we don't interrupt RX/TX if they + * were already enabled. + * Character length programmed into the register is frame bits minus 1. + */ + out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits - 1) | cval | + SMCMR_SM_UART | prev_mode); + } else { + unsigned int bits = tty_get_char_size(termios->c_cflag); + + out_be16(&pinfo->sccup->scc_genscc.scc_mrblr, pinfo->rx_fifosize); + out_be16(&pinfo->sccup->scc_maxidl, maxidl); + out_be16(&sccp->scc_psmr, (UART_LCR_WLEN(bits) << 12) | scval); + } + + if (pinfo->clk) + clk_set_rate(pinfo->clk, baud); + else + cpm_set_brg(pinfo->brg - 1, baud); + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *cpm_uart_type(struct uart_port *port) +{ + pr_debug("CPM uart[%d]:uart_type\n", port->line); + + return port->type == PORT_CPM ? "CPM UART" : NULL; +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). 
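The set_termios code above builds read_status_mask and ignore_status_mask from the termios flags, and the receive paths use them to pick a flag for each character and to decide whether it is delivered at all. A compact sketch of that filtering; the status bit values are made up for the example.

#include <stdio.h>

#define ST_FRAME	0x01
#define ST_PARITY	0x02
#define ST_BREAK	0x04

static void push_char(unsigned int status, unsigned char ch,
		      unsigned int read_mask, unsigned int ignore_mask)
{
	const char *flag = "normal";

	status &= read_mask;		/* keep only the reported conditions */

	if (status & ST_BREAK)
		flag = "break";
	else if (status & ST_PARITY)
		flag = "parity";
	else if (status & ST_FRAME)
		flag = "frame";

	if (status & ignore_mask) {
		printf("'%c' dropped (status 0x%x)\n", ch, status);
		return;
	}
	printf("'%c' delivered as %s\n", ch, flag);
}

int main(void)
{
	unsigned int read_mask = ST_PARITY | ST_FRAME;	/* as if INPCK were set */
	unsigned int ignore_mask = ST_PARITY;		/* as if IGNPAR were set */

	push_char(0, 'a', read_mask, ignore_mask);
	push_char(ST_PARITY, 'b', read_mask, ignore_mask);
	push_char(ST_BREAK, 'c', read_mask, ignore_mask);
	return 0;
}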
+ */ +static int cpm_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + + pr_debug("CPM uart[%d]:verify_port\n", port->line); + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= nr_irqs) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +/* + * Transmit characters, refill buffer descriptor, if possible + */ +static int cpm_uart_tx_pump(struct uart_port *port) +{ + cbd_t __iomem *bdp; + u8 *p; + int count; + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + struct circ_buf *xmit = &port->state->xmit; + + /* Handle xon/xoff */ + if (port->x_char) { + /* Pick next descriptor and fill from buffer */ + bdp = pinfo->tx_cur; + + p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); + + *p++ = port->x_char; + + out_be16(&bdp->cbd_datlen, 1); + setbits16(&bdp->cbd_sc, BD_SC_READY); + /* Get next BD. */ + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) + bdp = pinfo->tx_bd_base; + else + bdp++; + pinfo->tx_cur = bdp; + + port->icount.tx++; + port->x_char = 0; + return 1; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + cpm_uart_stop_tx(port); + return 0; + } + + /* Pick next descriptor and fill from buffer */ + bdp = pinfo->tx_cur; + + while (!(in_be16(&bdp->cbd_sc) & BD_SC_READY) && + xmit->tail != xmit->head) { + count = 0; + p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); + while (count < pinfo->tx_fifosize) { + *p++ = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + count++; + if (xmit->head == xmit->tail) + break; + } + out_be16(&bdp->cbd_datlen, count); + setbits16(&bdp->cbd_sc, BD_SC_READY); + /* Get next BD. */ + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) + bdp = pinfo->tx_bd_base; + else + bdp++; + } + pinfo->tx_cur = bdp; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) { + cpm_uart_stop_tx(port); + return 0; + } + + return 1; +} + +/* + * init buffer descriptors + */ +static void cpm_uart_initbd(struct uart_cpm_port *pinfo) +{ + int i; + u8 *mem_addr; + cbd_t __iomem *bdp; + + pr_debug("CPM uart[%d]:initbd\n", pinfo->port.line); + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + mem_addr = pinfo->mem_addr; + bdp = pinfo->rx_cur = pinfo->rx_bd_base; + for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) { + out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); + out_be16(&bdp->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT); + mem_addr += pinfo->rx_fifosize; + } + + out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); + out_be16(&bdp->cbd_sc, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT); + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. 
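The buffer-descriptor setup continuing below carves a single DMA block into a receive area followed by a transmit area that starts on a cache-line boundary. A small sketch of the offset arithmetic; the 32-byte line size is an assumption for the example, and the counts mirror the RX_NUM_FIFO/RX_BUF_SIZE style constants.

#include <stdio.h>

#define CACHE_LINE	32
#define ALIGN_UP(x)	(((x) + CACHE_LINE - 1) & ~(CACHE_LINE - 1))

#define RX_NUM_FIFO	4
#define RX_BUF_SIZE	32
#define TX_NUM_FIFO	4
#define TX_BUF_SIZE	32

int main(void)
{
	unsigned int rx_bytes = RX_NUM_FIFO * RX_BUF_SIZE;
	unsigned int tx_off = ALIGN_UP(rx_bytes);	/* tx area starts cache-aligned */
	unsigned int total = tx_off + TX_NUM_FIFO * TX_BUF_SIZE;

	printf("rx area: [0, %u)\n", rx_bytes);
	printf("tx area: [%u, %u)\n", tx_off, total);
	return 0;
}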
+ */ + mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize); + bdp = pinfo->tx_cur = pinfo->tx_bd_base; + for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) { + out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); + out_be16(&bdp->cbd_sc, BD_SC_INTRPT); + mem_addr += pinfo->tx_fifosize; + } + + out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); + out_be16(&bdp->cbd_sc, BD_SC_WRAP | BD_SC_INTRPT); +} + +static void cpm_uart_init_scc(struct uart_cpm_port *pinfo) +{ + scc_t __iomem *scp; + scc_uart_t __iomem *sup; + + pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line); + + scp = pinfo->sccp; + sup = pinfo->sccup; + + /* Store address */ + out_be16(&pinfo->sccup->scc_genscc.scc_rbase, + (u8 __iomem *)pinfo->rx_bd_base - DPRAM_BASE); + out_be16(&pinfo->sccup->scc_genscc.scc_tbase, + (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE); + + /* Set up the uart parameters in the + * parameter ram. + */ + + cpm_set_scc_fcr(sup); + + out_be16(&sup->scc_genscc.scc_mrblr, pinfo->rx_fifosize); + out_be16(&sup->scc_maxidl, 0x10); + out_be16(&sup->scc_brkcr, 1); + out_be16(&sup->scc_parec, 0); + out_be16(&sup->scc_frmec, 0); + out_be16(&sup->scc_nosec, 0); + out_be16(&sup->scc_brkec, 0); + out_be16(&sup->scc_uaddr1, 0); + out_be16(&sup->scc_uaddr2, 0); + out_be16(&sup->scc_toseq, 0); + out_be16(&sup->scc_char1, 0x8000); + out_be16(&sup->scc_char2, 0x8000); + out_be16(&sup->scc_char3, 0x8000); + out_be16(&sup->scc_char4, 0x8000); + out_be16(&sup->scc_char5, 0x8000); + out_be16(&sup->scc_char6, 0x8000); + out_be16(&sup->scc_char7, 0x8000); + out_be16(&sup->scc_char8, 0x8000); + out_be16(&sup->scc_rccm, 0xc0ff); + + /* Send the CPM an initialize command. + */ + cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); + + /* Set UART mode, 8 bit, no parity, one stop. + * Enable receive and transmit. + */ + out_be32(&scp->scc_gsmrh, 0); + out_be32(&scp->scc_gsmrl, + SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16); + + /* Enable rx interrupts and clear all pending events. */ + out_be16(&scp->scc_sccm, 0); + out_be16(&scp->scc_scce, 0xffff); + out_be16(&scp->scc_dsr, 0x7e7e); + out_be16(&scp->scc_psmr, 0x3000); + + setbits32(&scp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); +} + +static void cpm_uart_init_smc(struct uart_cpm_port *pinfo) +{ + smc_t __iomem *sp; + smc_uart_t __iomem *up; + + pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line); + + sp = pinfo->smcp; + up = pinfo->smcup; + + /* Store address */ + out_be16(&pinfo->smcup->smc_rbase, + (u8 __iomem *)pinfo->rx_bd_base - DPRAM_BASE); + out_be16(&pinfo->smcup->smc_tbase, + (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE); + +/* + * In case SMC is being relocated... + */ + out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase)); + out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase)); + out_be32(&up->smc_rstate, 0); + out_be32(&up->smc_tstate, 0); + out_be16(&up->smc_brkcr, 1); /* number of break chars */ + out_be16(&up->smc_brkec, 0); + + /* Set up the uart parameters in the + * parameter ram. + */ + cpm_set_smc_fcr(up); + + /* Using idle character time requires some additional tuning. */ + out_be16(&up->smc_mrblr, pinfo->rx_fifosize); + out_be16(&up->smc_maxidl, 0x10); + out_be16(&up->smc_brklen, 0); + out_be16(&up->smc_brkec, 0); + out_be16(&up->smc_brkcr, 1); + + /* Set UART mode, 8 bit, no parity, one stop. + * Enable receive and transmit. + */ + out_be16(&sp->smc_smcmr, smcr_mk_clen(9) | SMCMR_SM_UART); + + /* Enable only rx interrupts clear all pending events. 
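The interrupt handler earlier acknowledges events by writing the pending bits back to smc_smce/scc_scce, and the init code just below clears everything with 0xff; both rely on the event registers being write-one-to-clear. A tiny sketch of that pattern, with a volatile variable standing in for the register.

#include <stdio.h>

static volatile unsigned char fake_smce = 0x13;	/* pending event bits */

/* the modelled register clears whichever bits are written back (w1c) */
static void write_smce(unsigned char val)
{
	fake_smce &= (unsigned char)~val;
}

int main(void)
{
	unsigned char events = fake_smce;	/* snapshot the pending events */

	write_smce(events);			/* acknowledge exactly those */
	printf("handled 0x%02x, still pending 0x%02x\n", events, fake_smce);
	return 0;
}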
*/ + out_8(&sp->smc_smcm, 0); + out_8(&sp->smc_smce, 0xff); + + setbits16(&sp->smc_smcmr, SMCMR_REN | SMCMR_TEN); +} + +/* + * Initialize port. This is called from early_console stuff + * so we have to be careful here ! + */ +static int cpm_uart_request_port(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + int ret; + + pr_debug("CPM uart[%d]:request port\n", port->line); + + if (pinfo->flags & FLAG_CONSOLE) + return 0; + + if (IS_SMC(pinfo)) { + clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX); + clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); + } else { + clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); + clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); + } + + ret = cpm_uart_allocbuf(pinfo, 0); + + if (ret) + return ret; + + cpm_uart_initbd(pinfo); + if (IS_SMC(pinfo)) + cpm_uart_init_smc(pinfo); + else + cpm_uart_init_scc(pinfo); + + return 0; +} + +static void cpm_uart_release_port(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + + if (!(pinfo->flags & FLAG_CONSOLE)) + cpm_uart_freebuf(pinfo); +} + +/* + * Configure/autoconfigure the port. + */ +static void cpm_uart_config_port(struct uart_port *port, int flags) +{ + pr_debug("CPM uart[%d]:config_port\n", port->line); + + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_CPM; + cpm_uart_request_port(port); + } +} + +#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE) +/* + * Write a string to the serial port + * Note that this is called with interrupts already disabled + */ +static void cpm_uart_early_write(struct uart_cpm_port *pinfo, + const char *string, u_int count, bool handle_linefeed) +{ + unsigned int i; + cbd_t __iomem *bdp, *bdbase; + unsigned char *cpm_outp_addr; + + /* Get the address of the host memory buffer. + */ + bdp = pinfo->tx_cur; + bdbase = pinfo->tx_bd_base; + + /* + * Now, do each character. This is not as bad as it looks + * since this is a holding FIFO and not a transmitting FIFO. + * We could add the complexity of filling the entire transmit + * buffer, but we would just wait longer between accesses...... + */ + for (i = 0; i < count; i++, string++) { + /* Wait for transmitter fifo to empty. + * Ready indicates output is ready, and xmt is doing + * that, not that it is ready for us to send. + */ + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) + ; + + /* Send the character out. + * If the buffer address is in the CPM DPRAM, don't + * convert it. + */ + cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), + pinfo); + *cpm_outp_addr = *string; + + out_be16(&bdp->cbd_datlen, 1); + setbits16(&bdp->cbd_sc, BD_SC_READY); + + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) + bdp = bdbase; + else + bdp++; + + /* if a LF, also do CR... */ + if (handle_linefeed && *string == 10) { + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) + ; + + cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), + pinfo); + *cpm_outp_addr = 13; + + out_be16(&bdp->cbd_datlen, 1); + setbits16(&bdp->cbd_sc, BD_SC_READY); + + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) + bdp = bdbase; + else + bdp++; + } + } + + /* + * Finally, Wait for transmitter & holding register to empty + * and restore the IER + */ + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) + ; + + pinfo->tx_cur = bdp; +} +#endif + +#ifdef CONFIG_CONSOLE_POLL +/* Serial polling routines for writing and reading from the uart while + * in an interrupt or debug context. 
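The polled-read code below copies one closed receive buffer into poll_buf in a single pass and then hands it out a character per call. A self-contained sketch of that refill-then-drain flow; fake_hw_read() is a stand-in invented for the example, not a driver function.

#include <stdio.h>
#include <string.h>

#define POLL_BUF_SIZE	512
#define NO_POLL_CHAR	(-1)

static char poll_buf[POLL_BUF_SIZE];
static char *pollp;
static int poll_chars;

/* pretend the hardware delivered exactly one closed buffer of data */
static int fake_hw_read(char *dst)
{
	static int delivered;
	const char *payload = "abc";

	if (delivered)
		return 0;			/* nothing pending */
	delivered = 1;
	memcpy(dst, payload, strlen(payload));
	return (int)strlen(payload);
}

static int poll_get_char(void)
{
	if (poll_chars <= 0) {
		int n = fake_hw_read(poll_buf);

		if (n <= 0)
			return NO_POLL_CHAR;
		poll_chars = n;
		pollp = poll_buf;
	}
	poll_chars--;
	return *pollp++;
}

int main(void)
{
	int c;

	while ((c = poll_get_char()) != NO_POLL_CHAR)
		printf("got '%c'\n", c);
	return 0;
}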
+ */ + +#define GDB_BUF_SIZE 512 /* power of 2, please */ + +static char poll_buf[GDB_BUF_SIZE]; +static char *pollp; +static int poll_chars; + +static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) +{ + u_char c, *cp; + volatile cbd_t *bdp; + int i; + + /* Get the address of the host memory buffer. + */ + bdp = pinfo->rx_cur; + if (bdp->cbd_sc & BD_SC_EMPTY) + return NO_POLL_CHAR; + + /* If the buffer address is in the CPM DPRAM, don't + * convert it. + */ + cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo); + + if (obuf) { + i = c = bdp->cbd_datlen; + while (i-- > 0) + *obuf++ = *cp++; + } else + c = *cp; + bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID); + bdp->cbd_sc |= BD_SC_EMPTY; + + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = pinfo->rx_bd_base; + else + bdp++; + pinfo->rx_cur = (cbd_t *)bdp; + + return (int)c; +} + +static int cpm_get_poll_char(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + + if (!serial_polled) { + serial_polled = 1; + poll_chars = 0; + } + if (poll_chars <= 0) { + int ret = poll_wait_key(poll_buf, pinfo); + + if (ret == NO_POLL_CHAR) + return ret; + poll_chars = ret; + pollp = poll_buf; + } + poll_chars--; + return *pollp++; +} + +static void cpm_put_poll_char(struct uart_port *port, + unsigned char c) +{ + struct uart_cpm_port *pinfo = + container_of(port, struct uart_cpm_port, port); + static char ch[2]; + + ch[0] = (char)c; + cpm_uart_early_write(pinfo, ch, 1, false); +} + +#ifdef CONFIG_SERIAL_CPM_CONSOLE +static struct uart_port *udbg_port; + +static void udbg_cpm_putc(char c) +{ + if (c == '\n') + cpm_put_poll_char(udbg_port, '\r'); + cpm_put_poll_char(udbg_port, c); +} + +static int udbg_cpm_getc_poll(void) +{ + int c = cpm_get_poll_char(udbg_port); + + return c == NO_POLL_CHAR ? 
-1 : c; +} + +static int udbg_cpm_getc(void) +{ + int c; + + while ((c = udbg_cpm_getc_poll()) == -1) + cpu_relax(); + return c; +} +#endif /* CONFIG_SERIAL_CPM_CONSOLE */ + +#endif /* CONFIG_CONSOLE_POLL */ + +static const struct uart_ops cpm_uart_pops = { + .tx_empty = cpm_uart_tx_empty, + .set_mctrl = cpm_uart_set_mctrl, + .get_mctrl = cpm_uart_get_mctrl, + .stop_tx = cpm_uart_stop_tx, + .start_tx = cpm_uart_start_tx, + .stop_rx = cpm_uart_stop_rx, + .break_ctl = cpm_uart_break_ctl, + .startup = cpm_uart_startup, + .shutdown = cpm_uart_shutdown, + .set_termios = cpm_uart_set_termios, + .type = cpm_uart_type, + .release_port = cpm_uart_release_port, + .request_port = cpm_uart_request_port, + .config_port = cpm_uart_config_port, + .verify_port = cpm_uart_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = cpm_get_poll_char, + .poll_put_char = cpm_put_poll_char, +#endif +}; + +struct uart_cpm_port cpm_uart_ports[UART_NR]; + +static int cpm_uart_init_port(struct device_node *np, + struct uart_cpm_port *pinfo) +{ + const u32 *data; + void __iomem *mem, *pram; + struct device *dev = pinfo->port.dev; + int len; + int ret; + int i; + + data = of_get_property(np, "clock", NULL); + if (data) { + struct clk *clk = clk_get(NULL, (const char*)data); + if (!IS_ERR(clk)) + pinfo->clk = clk; + } + if (!pinfo->clk) { + data = of_get_property(np, "fsl,cpm-brg", &len); + if (!data || len != 4) { + printk(KERN_ERR "CPM UART %pOFn has no/invalid " + "fsl,cpm-brg property.\n", np); + return -EINVAL; + } + pinfo->brg = *data; + } + + data = of_get_property(np, "fsl,cpm-command", &len); + if (!data || len != 4) { + printk(KERN_ERR "CPM UART %pOFn has no/invalid " + "fsl,cpm-command property.\n", np); + return -EINVAL; + } + pinfo->command = *data; + + mem = of_iomap(np, 0); + if (!mem) + return -ENOMEM; + + if (of_device_is_compatible(np, "fsl,cpm1-scc-uart") || + of_device_is_compatible(np, "fsl,cpm2-scc-uart")) { + pinfo->sccp = mem; + pinfo->sccup = pram = cpm_uart_map_pram(pinfo, np); + } else if (of_device_is_compatible(np, "fsl,cpm1-smc-uart") || + of_device_is_compatible(np, "fsl,cpm2-smc-uart")) { + pinfo->flags |= FLAG_SMC; + pinfo->smcp = mem; + pinfo->smcup = pram = cpm_uart_map_pram(pinfo, np); + } else { + ret = -ENODEV; + goto out_mem; + } + + if (!pram) { + ret = -ENOMEM; + goto out_mem; + } + + pinfo->tx_nrfifos = TX_NUM_FIFO; + pinfo->tx_fifosize = TX_BUF_SIZE; + pinfo->rx_nrfifos = RX_NUM_FIFO; + pinfo->rx_fifosize = RX_BUF_SIZE; + + pinfo->port.uartclk = ppc_proc_freq; + pinfo->port.mapbase = (unsigned long)mem; + pinfo->port.type = PORT_CPM; + pinfo->port.ops = &cpm_uart_pops; + pinfo->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_CPM_CONSOLE); + pinfo->port.iotype = UPIO_MEM; + pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize; + spin_lock_init(&pinfo->port.lock); + + for (i = 0; i < NUM_GPIOS; i++) { + struct gpio_desc *gpiod; + + pinfo->gpios[i] = NULL; + + gpiod = devm_gpiod_get_index_optional(dev, NULL, i, GPIOD_ASIS); + + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + goto out_pram; + } + + if (gpiod) { + if (i == GPIO_RTS || i == GPIO_DTR) + ret = gpiod_direction_output(gpiod, 0); + else + ret = gpiod_direction_input(gpiod); + if (ret) { + pr_err("can't set direction for gpio #%d: %d\n", + i, ret); + continue; + } + pinfo->gpios[i] = gpiod; + } + } + +#ifdef CONFIG_PPC_EARLY_DEBUG_CPM +#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_CPM_CONSOLE) + if (!udbg_port) +#endif + udbg_putc = NULL; +#endif + + return cpm_uart_request_port(&pinfo->port); + +out_pram: 
+ cpm_uart_unmap_pram(pinfo, pram); +out_mem: + iounmap(mem); + return ret; +} + +#ifdef CONFIG_SERIAL_CPM_CONSOLE +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * Note that this is called with interrupts already disabled + */ +static void cpm_uart_console_write(struct console *co, const char *s, + u_int count) +{ + struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; + unsigned long flags; + + if (unlikely(oops_in_progress)) { + local_irq_save(flags); + cpm_uart_early_write(pinfo, s, count, true); + local_irq_restore(flags); + } else { + spin_lock_irqsave(&pinfo->port.lock, flags); + cpm_uart_early_write(pinfo, s, count, true); + spin_unlock_irqrestore(&pinfo->port.lock, flags); + } +} + + +static int __init cpm_uart_console_setup(struct console *co, char *options) +{ + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + struct uart_cpm_port *pinfo; + struct uart_port *port; + + struct device_node *np; + int i = 0; + + if (co->index >= UART_NR) { + printk(KERN_ERR "cpm_uart: console index %d too high\n", + co->index); + return -ENODEV; + } + + for_each_node_by_type(np, "serial") { + if (!of_device_is_compatible(np, "fsl,cpm1-smc-uart") && + !of_device_is_compatible(np, "fsl,cpm1-scc-uart") && + !of_device_is_compatible(np, "fsl,cpm2-smc-uart") && + !of_device_is_compatible(np, "fsl,cpm2-scc-uart")) + continue; + + if (i++ == co->index) + break; + } + + if (!np) + return -ENODEV; + + pinfo = &cpm_uart_ports[co->index]; + + pinfo->flags |= FLAG_CONSOLE; + port = &pinfo->port; + + ret = cpm_uart_init_port(np, pinfo); + of_node_put(np); + if (ret) + return ret; + + if (options) { + uart_parse_options(options, &baud, &parity, &bits, &flow); + } else { + if ((baud = uart_baudrate()) == -1) + baud = 9600; + } + + if (IS_SMC(pinfo)) { + out_be16(&pinfo->smcup->smc_brkcr, 0); + cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); + clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX); + clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); + } else { + out_be16(&pinfo->sccup->scc_brkcr, 0); + cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX); + clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); + clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); + } + + ret = cpm_uart_allocbuf(pinfo, 1); + + if (ret) + return ret; + + cpm_uart_initbd(pinfo); + + if (IS_SMC(pinfo)) + cpm_uart_init_smc(pinfo); + else + cpm_uart_init_scc(pinfo); + + uart_set_options(port, co, baud, parity, bits, flow); + cpm_line_cr_cmd(pinfo, CPM_CR_RESTART_TX); + +#ifdef CONFIG_CONSOLE_POLL + if (!udbg_port) { + udbg_port = &pinfo->port; + udbg_putc = udbg_cpm_putc; + udbg_getc = udbg_cpm_getc; + udbg_getc_poll = udbg_cpm_getc_poll; + } +#endif + + return 0; +} + +static struct uart_driver cpm_reg; +static struct console cpm_scc_uart_console = { + .name = "ttyCPM", + .write = cpm_uart_console_write, + .device = uart_console_device, + .setup = cpm_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &cpm_reg, +}; + +static int __init cpm_uart_console_init(void) +{ + cpm_muram_init(); + register_console(&cpm_scc_uart_console); + return 0; +} + +console_initcall(cpm_uart_console_init); + +#define CPM_UART_CONSOLE &cpm_scc_uart_console +#else +#define CPM_UART_CONSOLE NULL +#endif + +static struct uart_driver cpm_reg = { + .owner = THIS_MODULE, + .driver_name = "ttyCPM", + .dev_name = "ttyCPM", + .major = SERIAL_CPM_MAJOR, + .minor = SERIAL_CPM_MINOR, + .cons = CPM_UART_CONSOLE, + .nr = UART_NR, +}; + 
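+/*
+ * Ports are numbered in probe order: each probe of a matching node takes the
+ * next consecutive line (ttyCPM0, ttyCPM1, ...), up to UART_NR ports.
+ */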
+static int probe_index; + +static int cpm_uart_probe(struct platform_device *ofdev) +{ + int index = probe_index++; + struct uart_cpm_port *pinfo = &cpm_uart_ports[index]; + int ret; + + pinfo->port.line = index; + + if (index >= UART_NR) + return -ENODEV; + + platform_set_drvdata(ofdev, pinfo); + + /* initialize the device pointer for the port */ + pinfo->port.dev = &ofdev->dev; + + pinfo->port.irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); + if (!pinfo->port.irq) + return -EINVAL; + + ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo); + if (!ret) + return uart_add_one_port(&cpm_reg, &pinfo->port); + + irq_dispose_mapping(pinfo->port.irq); + + return ret; +} + +static int cpm_uart_remove(struct platform_device *ofdev) +{ + struct uart_cpm_port *pinfo = platform_get_drvdata(ofdev); + return uart_remove_one_port(&cpm_reg, &pinfo->port); +} + +static const struct of_device_id cpm_uart_match[] = { + { + .compatible = "fsl,cpm1-smc-uart", + }, + { + .compatible = "fsl,cpm1-scc-uart", + }, + { + .compatible = "fsl,cpm2-smc-uart", + }, + { + .compatible = "fsl,cpm2-scc-uart", + }, + {} +}; +MODULE_DEVICE_TABLE(of, cpm_uart_match); + +static struct platform_driver cpm_uart_driver = { + .driver = { + .name = "cpm_uart", + .of_match_table = cpm_uart_match, + }, + .probe = cpm_uart_probe, + .remove = cpm_uart_remove, + }; + +static int __init cpm_uart_init(void) +{ + int ret = uart_register_driver(&cpm_reg); + if (ret) + return ret; + + ret = platform_driver_register(&cpm_uart_driver); + if (ret) + uart_unregister_driver(&cpm_reg); + + return ret; +} + +static void __exit cpm_uart_exit(void) +{ + platform_driver_unregister(&cpm_uart_driver); + uart_unregister_driver(&cpm_reg); +} + +module_init(cpm_uart_init); +module_exit(cpm_uart_exit); + +MODULE_AUTHOR("Kumar Gala/Antoniou Pantelis"); +MODULE_DESCRIPTION("CPM SCC/SMC port driver $Revision: 0.01 $"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV(SERIAL_CPM_MAJOR, SERIAL_CPM_MINOR); diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c new file mode 100644 index 000000000..56fc52701 --- /dev/null +++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for CPM (SCC/SMC) serial ports; CPM1 definitions + * + * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * (C) 2006 MontaVista Software, Inc. + * Vitaly Bordug + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include "cpm_uart.h" + +/**************************************************************/ + +void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) +{ + cpm_command(port->command, cmd); +} + +void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port, + struct device_node *np) +{ + return of_iomap(np, 1); +} + +void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram) +{ + iounmap(pram); +} + +/* + * Allocate DP-Ram and memory buffers. We need to allocate a transmit and + * receive buffer descriptors from dual port ram, and a character + * buffer area from host mem. 
If we are allocating for the console we need + * to do it from bootmem + */ +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con) +{ + int dpmemsz, memsz; + u8 *dp_mem; + unsigned long dp_offset; + u8 *mem_addr; + dma_addr_t dma_addr = 0; + + pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line); + + dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); + dp_offset = cpm_dpalloc(dpmemsz, 8); + if (IS_ERR_VALUE(dp_offset)) { + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate buffer descriptors\n"); + return -ENOMEM; + } + dp_mem = cpm_dpram_addr(dp_offset); + + memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); + if (is_con) { + /* was hostalloc but changed cause it blows away the */ + /* large tlb mapping when pinning the kernel area */ + mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8)); + dma_addr = (u32)cpm_dpram_phys(mem_addr); + } else + mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr, + GFP_KERNEL); + + if (mem_addr == NULL) { + cpm_dpfree(dp_offset); + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate coherent memory\n"); + return -ENOMEM; + } + + pinfo->dp_addr = dp_offset; + pinfo->mem_addr = mem_addr; /* virtual address*/ + pinfo->dma_addr = dma_addr; /* physical address*/ + pinfo->mem_size = memsz; + + pinfo->rx_buf = mem_addr; + pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos + * pinfo->rx_fifosize); + + pinfo->rx_bd_base = (cbd_t __iomem __force *)dp_mem; + pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos; + + return 0; +} + +void cpm_uart_freebuf(struct uart_cpm_port *pinfo) +{ + dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos * + pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * + pinfo->tx_fifosize), pinfo->mem_addr, + pinfo->dma_addr); + + cpm_dpfree(pinfo->dp_addr); +} diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h new file mode 100644 index 000000000..18ec08499 --- /dev/null +++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver for CPM (SCC/SMC) serial ports + * + * definitions for cpm1 + * + */ + +#ifndef CPM_UART_CPM1_H +#define CPM_UART_CPM1_H + +#include + +static inline void cpm_set_brg(int brg, int baud) +{ + cpm_setbrg(brg, baud); +} + +static inline void cpm_set_scc_fcr(scc_uart_t __iomem * sup) +{ + out_8(&sup->scc_genscc.scc_rfcr, SMC_EB); + out_8(&sup->scc_genscc.scc_tfcr, SMC_EB); +} + +static inline void cpm_set_smc_fcr(smc_uart_t __iomem * up) +{ + out_8(&up->smc_rfcr, SMC_EB); + out_8(&up->smc_tfcr, SMC_EB); +} + +#define DPRAM_BASE ((u8 __iomem __force *)cpm_dpram_addr(0)) + +#endif diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c new file mode 100644 index 000000000..108af254e --- /dev/null +++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for CPM (SCC/SMC) serial ports; CPM2 definitions + * + * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * (C) 2006 MontaVista Software, Inc. 
+ * Vitaly Bordug + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "cpm_uart.h" + +/**************************************************************/ + +void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) +{ + cpm_command(port->command, cmd); +} + +void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port, + struct device_node *np) +{ + void __iomem *pram; + unsigned long offset; + struct resource res; + resource_size_t len; + + /* Don't remap parameter RAM if it has already been initialized + * during console setup. + */ + if (IS_SMC(port) && port->smcup) + return port->smcup; + else if (!IS_SMC(port) && port->sccup) + return port->sccup; + + if (of_address_to_resource(np, 1, &res)) + return NULL; + + len = resource_size(&res); + pram = ioremap(res.start, len); + if (!pram) + return NULL; + + if (!IS_SMC(port)) + return pram; + + if (len != 2) { + printk(KERN_WARNING "cpm_uart[%d]: device tree references " + "SMC pram, using boot loader/wrapper pram mapping. " + "Please fix your device tree to reference the pram " + "base register instead.\n", + port->port.line); + return pram; + } + + offset = cpm_dpalloc(PROFF_SMC_SIZE, 64); + out_be16(pram, offset); + iounmap(pram); + return cpm_muram_addr(offset); +} + +void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram) +{ + if (!IS_SMC(port)) + iounmap(pram); +} + +/* + * Allocate DP-Ram and memory buffers. We need to allocate a transmit and + * receive buffer descriptors from dual port ram, and a character + * buffer area from host mem. If we are allocating for the console we need + * to do it from bootmem + */ +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con) +{ + int dpmemsz, memsz; + u8 __iomem *dp_mem; + unsigned long dp_offset; + u8 *mem_addr; + dma_addr_t dma_addr = 0; + + pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line); + + dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); + dp_offset = cpm_dpalloc(dpmemsz, 8); + if (IS_ERR_VALUE(dp_offset)) { + printk(KERN_ERR + "cpm_uart_cpm.c: could not allocate buffer descriptors\n"); + return -ENOMEM; + } + + dp_mem = cpm_dpram_addr(dp_offset); + + memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); + if (is_con) { + mem_addr = kzalloc(memsz, GFP_NOWAIT); + dma_addr = virt_to_bus(mem_addr); + } + else + mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr, + GFP_KERNEL); + + if (mem_addr == NULL) { + cpm_dpfree(dp_offset); + printk(KERN_ERR + "cpm_uart_cpm.c: could not allocate coherent memory\n"); + return -ENOMEM; + } + + pinfo->dp_addr = dp_offset; + pinfo->mem_addr = mem_addr; + pinfo->dma_addr = dma_addr; + pinfo->mem_size = memsz; + + pinfo->rx_buf = mem_addr; + pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos + * pinfo->rx_fifosize); + + pinfo->rx_bd_base = (cbd_t __iomem *)dp_mem; + pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos; + + return 0; +} + +void cpm_uart_freebuf(struct uart_cpm_port *pinfo) +{ + dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos * + pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * + pinfo->tx_fifosize), (void __force *)pinfo->mem_addr, + pinfo->dma_addr); + + cpm_dpfree(pinfo->dp_addr); +} diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h new file mode 100644 index 000000000..051a8509c --- 
/dev/null +++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver for CPM (SCC/SMC) serial ports + * + * definitions for cpm2 + * + */ + +#ifndef CPM_UART_CPM2_H +#define CPM_UART_CPM2_H + +#include + +static inline void cpm_set_brg(int brg, int baud) +{ + cpm_setbrg(brg, baud); +} + +static inline void cpm_set_scc_fcr(scc_uart_t __iomem *sup) +{ + out_8(&sup->scc_genscc.scc_rfcr, CPMFCR_GBL | CPMFCR_EB); + out_8(&sup->scc_genscc.scc_tfcr, CPMFCR_GBL | CPMFCR_EB); +} + +static inline void cpm_set_smc_fcr(smc_uart_t __iomem *up) +{ + out_8(&up->smc_rfcr, CPMFCR_GBL | CPMFCR_EB); + out_8(&up->smc_tfcr, CPMFCR_GBL | CPMFCR_EB); +} + +#define DPRAM_BASE ((u8 __iomem __force *)cpm_dpram_addr(0)) + +#endif diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c new file mode 100644 index 000000000..0c0a62346 --- /dev/null +++ b/drivers/tty/serial/digicolor-usart.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Conexant Digicolor serial ports (USART) + * + * Author: Baruch Siach + * + * Copyright (C) 2014 Paradox Innovation Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UA_ENABLE 0x00 +#define UA_ENABLE_ENABLE BIT(0) + +#define UA_CONTROL 0x01 +#define UA_CONTROL_RX_ENABLE BIT(0) +#define UA_CONTROL_TX_ENABLE BIT(1) +#define UA_CONTROL_SOFT_RESET BIT(2) + +#define UA_STATUS 0x02 +#define UA_STATUS_PARITY_ERR BIT(0) +#define UA_STATUS_FRAME_ERR BIT(1) +#define UA_STATUS_OVERRUN_ERR BIT(2) +#define UA_STATUS_TX_READY BIT(6) + +#define UA_CONFIG 0x03 +#define UA_CONFIG_CHAR_LEN BIT(0) +#define UA_CONFIG_STOP_BITS BIT(1) +#define UA_CONFIG_PARITY BIT(2) +#define UA_CONFIG_ODD_PARITY BIT(4) + +#define UA_EMI_REC 0x04 + +#define UA_HBAUD_LO 0x08 +#define UA_HBAUD_HI 0x09 + +#define UA_STATUS_FIFO 0x0a +#define UA_STATUS_FIFO_RX_EMPTY BIT(2) +#define UA_STATUS_FIFO_RX_INT_ALMOST BIT(3) +#define UA_STATUS_FIFO_TX_FULL BIT(4) +#define UA_STATUS_FIFO_TX_INT_ALMOST BIT(7) + +#define UA_CONFIG_FIFO 0x0b +#define UA_CONFIG_FIFO_RX_THRESH 7 +#define UA_CONFIG_FIFO_RX_FIFO_MODE BIT(3) +#define UA_CONFIG_FIFO_TX_FIFO_MODE BIT(7) + +#define UA_INTFLAG_CLEAR 0x1c +#define UA_INTFLAG_SET 0x1d +#define UA_INT_ENABLE 0x1e +#define UA_INT_STATUS 0x1f + +#define UA_INT_TX BIT(0) +#define UA_INT_RX BIT(1) + +#define DIGICOLOR_USART_NR 3 + +/* + * We use the 16 bytes hardware FIFO to buffer Rx traffic. Rx interrupt is + * only produced when the FIFO is filled more than a certain configurable + * threshold. Unfortunately, there is no way to set this threshold below half + * FIFO. This means that we must periodically poll the FIFO status register to + * see whether there are waiting Rx bytes. 
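+ * The delayed work below re-arms itself every 100 ms and, when data is
+ * pending, simply raises the Rx interrupt flag so that the regular interrupt
+ * handler drains the FIFO.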
+ */ + +struct digicolor_port { + struct uart_port port; + struct delayed_work rx_poll_work; +}; + +static struct uart_port *digicolor_ports[DIGICOLOR_USART_NR]; + +static bool digicolor_uart_tx_full(struct uart_port *port) +{ + return !!(readb_relaxed(port->membase + UA_STATUS_FIFO) & + UA_STATUS_FIFO_TX_FULL); +} + +static bool digicolor_uart_rx_empty(struct uart_port *port) +{ + return !!(readb_relaxed(port->membase + UA_STATUS_FIFO) & + UA_STATUS_FIFO_RX_EMPTY); +} + +static void digicolor_uart_stop_tx(struct uart_port *port) +{ + u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE); + + int_enable &= ~UA_INT_TX; + writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE); +} + +static void digicolor_uart_start_tx(struct uart_port *port) +{ + u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE); + + int_enable |= UA_INT_TX; + writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE); +} + +static void digicolor_uart_stop_rx(struct uart_port *port) +{ + u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE); + + int_enable &= ~UA_INT_RX; + writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE); +} + +static void digicolor_rx_poll(struct work_struct *work) +{ + struct digicolor_port *dp = + container_of(to_delayed_work(work), + struct digicolor_port, rx_poll_work); + + if (!digicolor_uart_rx_empty(&dp->port)) + /* force RX interrupt */ + writeb_relaxed(UA_INT_RX, dp->port.membase + UA_INTFLAG_SET); + + schedule_delayed_work(&dp->rx_poll_work, msecs_to_jiffies(100)); +} + +static void digicolor_uart_rx(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + while (1) { + u8 status, ch; + unsigned int ch_flag; + + if (digicolor_uart_rx_empty(port)) + break; + + ch = readb_relaxed(port->membase + UA_EMI_REC); + status = readb_relaxed(port->membase + UA_STATUS); + + port->icount.rx++; + ch_flag = TTY_NORMAL; + + if (status) { + if (status & UA_STATUS_PARITY_ERR) + port->icount.parity++; + else if (status & UA_STATUS_FRAME_ERR) + port->icount.frame++; + else if (status & UA_STATUS_OVERRUN_ERR) + port->icount.overrun++; + + status &= port->read_status_mask; + + if (status & UA_STATUS_PARITY_ERR) + ch_flag = TTY_PARITY; + else if (status & UA_STATUS_FRAME_ERR) + ch_flag = TTY_FRAME; + else if (status & UA_STATUS_OVERRUN_ERR) + ch_flag = TTY_OVERRUN; + } + + if (status & port->ignore_status_mask) + continue; + + uart_insert_char(port, status, UA_STATUS_OVERRUN_ERR, ch, + ch_flag); + } + + spin_unlock_irqrestore(&port->lock, flags); + + tty_flip_buffer_push(&port->state->port); +} + +static void digicolor_uart_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned long flags; + + if (digicolor_uart_tx_full(port)) + return; + + spin_lock_irqsave(&port->lock, flags); + + if (port->x_char) { + writeb_relaxed(port->x_char, port->membase + UA_EMI_REC); + port->icount.tx++; + port->x_char = 0; + goto out; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + digicolor_uart_stop_tx(port); + goto out; + } + + while (!uart_circ_empty(xmit)) { + writeb(xmit->buf[xmit->tail], port->membase + UA_EMI_REC); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + + if (digicolor_uart_tx_full(port)) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + +out: + spin_unlock_irqrestore(&port->lock, flags); +} + +static irqreturn_t digicolor_uart_int(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + u8 int_status = 
readb_relaxed(port->membase + UA_INT_STATUS); + + writeb_relaxed(UA_INT_RX | UA_INT_TX, + port->membase + UA_INTFLAG_CLEAR); + + if (int_status & UA_INT_RX) + digicolor_uart_rx(port); + if (int_status & UA_INT_TX) + digicolor_uart_tx(port); + + return IRQ_HANDLED; +} + +static unsigned int digicolor_uart_tx_empty(struct uart_port *port) +{ + u8 status = readb_relaxed(port->membase + UA_STATUS); + + return (status & UA_STATUS_TX_READY) ? TIOCSER_TEMT : 0; +} + +static unsigned int digicolor_uart_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS; +} + +static void digicolor_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static void digicolor_uart_break_ctl(struct uart_port *port, int state) +{ +} + +static int digicolor_uart_startup(struct uart_port *port) +{ + struct digicolor_port *dp = + container_of(port, struct digicolor_port, port); + + writeb_relaxed(UA_ENABLE_ENABLE, port->membase + UA_ENABLE); + writeb_relaxed(UA_CONTROL_SOFT_RESET, port->membase + UA_CONTROL); + writeb_relaxed(0, port->membase + UA_CONTROL); + + writeb_relaxed(UA_CONFIG_FIFO_RX_FIFO_MODE + | UA_CONFIG_FIFO_TX_FIFO_MODE | UA_CONFIG_FIFO_RX_THRESH, + port->membase + UA_CONFIG_FIFO); + writeb_relaxed(UA_STATUS_FIFO_RX_INT_ALMOST, + port->membase + UA_STATUS_FIFO); + writeb_relaxed(UA_CONTROL_RX_ENABLE | UA_CONTROL_TX_ENABLE, + port->membase + UA_CONTROL); + writeb_relaxed(UA_INT_TX | UA_INT_RX, + port->membase + UA_INT_ENABLE); + + schedule_delayed_work(&dp->rx_poll_work, msecs_to_jiffies(100)); + + return 0; +} + +static void digicolor_uart_shutdown(struct uart_port *port) +{ + struct digicolor_port *dp = + container_of(port, struct digicolor_port, port); + + writeb_relaxed(0, port->membase + UA_ENABLE); + cancel_delayed_work_sync(&dp->rx_poll_work); +} + +static void digicolor_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud, divisor; + u8 config = 0; + unsigned long flags; + + /* Mask termios capabilities we don't support */ + termios->c_cflag &= ~CMSPAR; + termios->c_iflag &= ~(BRKINT | IGNBRK); + + /* Limit baud rates so that we don't need the fractional divider */ + baud = uart_get_baud_rate(port, termios, old, + port->uartclk / (0x10000*16), + port->uartclk / 256); + divisor = uart_get_divisor(port, baud) - 1; + + switch (termios->c_cflag & CSIZE) { + case CS7: + break; + case CS8: + default: + config |= UA_CONFIG_CHAR_LEN; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + break; + } + + if (termios->c_cflag & CSTOPB) + config |= UA_CONFIG_STOP_BITS; + + if (termios->c_cflag & PARENB) { + config |= UA_CONFIG_PARITY; + if (termios->c_cflag & PARODD) + config |= UA_CONFIG_ODD_PARITY; + } + + /* Set read status mask */ + port->read_status_mask = UA_STATUS_OVERRUN_ERR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= UA_STATUS_PARITY_ERR + | UA_STATUS_FRAME_ERR; + + /* Set status ignore mask */ + port->ignore_status_mask = 0; + if (!(termios->c_cflag & CREAD)) + port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR + | UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR; + + spin_lock_irqsave(&port->lock, flags); + + uart_update_timeout(port, termios->c_cflag, baud); + + writeb_relaxed(config, port->membase + UA_CONFIG); + writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO); + writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *digicolor_uart_type(struct uart_port *port) +{ + return (port->type == PORT_DIGICOLOR) ? 
"DIGICOLOR USART" : NULL; +} + +static void digicolor_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_DIGICOLOR; +} + +static void digicolor_uart_release_port(struct uart_port *port) +{ +} + +static int digicolor_uart_request_port(struct uart_port *port) +{ + return 0; +} + +static const struct uart_ops digicolor_uart_ops = { + .tx_empty = digicolor_uart_tx_empty, + .set_mctrl = digicolor_uart_set_mctrl, + .get_mctrl = digicolor_uart_get_mctrl, + .stop_tx = digicolor_uart_stop_tx, + .start_tx = digicolor_uart_start_tx, + .stop_rx = digicolor_uart_stop_rx, + .break_ctl = digicolor_uart_break_ctl, + .startup = digicolor_uart_startup, + .shutdown = digicolor_uart_shutdown, + .set_termios = digicolor_uart_set_termios, + .type = digicolor_uart_type, + .config_port = digicolor_uart_config_port, + .release_port = digicolor_uart_release_port, + .request_port = digicolor_uart_request_port, +}; + +static void digicolor_uart_console_putchar(struct uart_port *port, unsigned char ch) +{ + while (digicolor_uart_tx_full(port)) + cpu_relax(); + + writeb_relaxed(ch, port->membase + UA_EMI_REC); +} + +static void digicolor_uart_console_write(struct console *co, const char *c, + unsigned n) +{ + struct uart_port *port = digicolor_ports[co->index]; + u8 status; + unsigned long flags; + int locked = 1; + + if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + uart_console_write(port, c, n, digicolor_uart_console_putchar); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); + + /* Wait for transmitter to become empty */ + do { + status = readb_relaxed(port->membase + UA_STATUS); + } while ((status & UA_STATUS_TX_READY) == 0); +} + +static int digicolor_uart_console_setup(struct console *co, char *options) +{ + int baud = 115200, bits = 8, parity = 'n', flow = 'n'; + struct uart_port *port; + + if (co->index < 0 || co->index >= DIGICOLOR_USART_NR) + return -EINVAL; + + port = digicolor_ports[co->index]; + if (!port) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console digicolor_console = { + .name = "ttyS", + .device = uart_console_device, + .write = digicolor_uart_console_write, + .setup = digicolor_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +static struct uart_driver digicolor_uart = { + .driver_name = "digicolor-usart", + .dev_name = "ttyS", + .nr = DIGICOLOR_USART_NR, +}; + +static int digicolor_uart_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int irq, ret, index; + struct digicolor_port *dp; + struct resource *res; + struct clk *uart_clk; + + if (!np) { + dev_err(&pdev->dev, "Missing device tree node\n"); + return -ENXIO; + } + + index = of_alias_get_id(np, "serial"); + if (index < 0 || index >= DIGICOLOR_USART_NR) + return -EINVAL; + + dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); + if (!dp) + return -ENOMEM; + + uart_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(uart_clk)) + return PTR_ERR(uart_clk); + + dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(dp->port.membase)) + return PTR_ERR(dp->port.membase); + dp->port.mapbase = res->start; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + dp->port.irq = irq; + + dp->port.iotype = UPIO_MEM; + dp->port.uartclk = clk_get_rate(uart_clk); + 
dp->port.fifosize = 16; + dp->port.dev = &pdev->dev; + dp->port.ops = &digicolor_uart_ops; + dp->port.line = index; + dp->port.type = PORT_DIGICOLOR; + spin_lock_init(&dp->port.lock); + + digicolor_ports[index] = &dp->port; + platform_set_drvdata(pdev, &dp->port); + + INIT_DELAYED_WORK(&dp->rx_poll_work, digicolor_rx_poll); + + ret = devm_request_irq(&pdev->dev, dp->port.irq, digicolor_uart_int, 0, + dev_name(&pdev->dev), &dp->port); + if (ret) + return ret; + + return uart_add_one_port(&digicolor_uart, &dp->port); +} + +static int digicolor_uart_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + + uart_remove_one_port(&digicolor_uart, port); + + return 0; +} + +static const struct of_device_id digicolor_uart_dt_ids[] = { + { .compatible = "cnxt,cx92755-usart", }, + { } +}; +MODULE_DEVICE_TABLE(of, digicolor_uart_dt_ids); + +static struct platform_driver digicolor_uart_platform = { + .driver = { + .name = "digicolor-usart", + .of_match_table = of_match_ptr(digicolor_uart_dt_ids), + }, + .probe = digicolor_uart_probe, + .remove = digicolor_uart_remove, +}; + +static int __init digicolor_uart_init(void) +{ + int ret; + + if (IS_ENABLED(CONFIG_SERIAL_CONEXANT_DIGICOLOR_CONSOLE)) { + digicolor_uart.cons = &digicolor_console; + digicolor_console.data = &digicolor_uart; + } + + ret = uart_register_driver(&digicolor_uart); + if (ret) + return ret; + + ret = platform_driver_register(&digicolor_uart_platform); + if (ret) + uart_unregister_driver(&digicolor_uart); + + return ret; +} +module_init(digicolor_uart_init); + +static void __exit digicolor_uart_exit(void) +{ + platform_driver_unregister(&digicolor_uart_platform); + uart_unregister_driver(&digicolor_uart); +} +module_exit(digicolor_uart_exit); + +MODULE_AUTHOR("Baruch Siach "); +MODULE_DESCRIPTION("Conexant Digicolor USART serial driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c new file mode 100644 index 000000000..829b452da --- /dev/null +++ b/drivers/tty/serial/dz.c @@ -0,0 +1,948 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dz.c: Serial port driver for DECstations equipped + * with the DZ chipset. + * + * Copyright (C) 1998 Olivier A. D. Lebaillif + * + * Email: olivier.lebaillif@ifrsys.com + * + * Copyright (C) 2004, 2006, 2007 Maciej W. Rozycki + * + * [31-AUG-98] triemer + * Changed IRQ to use Harald's dec internals interrupts.h + * removed base_addr code - moving address assignment to setup.c + * Changed name of dz_init to rs_init to be consistent with tc code + * [13-NOV-98] triemer fixed code to receive characters + * after patches by harald to irq code. 
+ * [09-JAN-99] triemer minor fix for schedule - due to removal of timeout + * field from "current" - somewhere between 2.1.121 and 2.1.131 + Qua Jun 27 15:02:26 BRT 2001 + * [27-JUN-2001] Arnaldo Carvalho de Melo - cleanups + * + * Parts (C) 1999 David Airlie, airlied@linux.ie + * [07-SEP-99] Bugfixes + * + * [06-Jan-2002] Russell King + * Converted to new serial core + */ + +#undef DEBUG_DZ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "dz.h" + + +MODULE_DESCRIPTION("DECstation DZ serial driver"); +MODULE_LICENSE("GPL"); + + +static char dz_name[] __initdata = "DECstation DZ serial driver version "; +static char dz_version[] __initdata = "1.04"; + +struct dz_port { + struct dz_mux *mux; + struct uart_port port; + unsigned int cflag; +}; + +struct dz_mux { + struct dz_port dport[DZ_NB_PORT]; + atomic_t map_guard; + atomic_t irq_guard; + int initialised; +}; + +static struct dz_mux dz_mux; + +static inline struct dz_port *to_dport(struct uart_port *uport) +{ + return container_of(uport, struct dz_port, port); +} + +/* + * ------------------------------------------------------------ + * dz_in () and dz_out () + * + * These routines are used to access the registers of the DZ + * chip, hiding relocation differences between implementation. + * ------------------------------------------------------------ + */ + +static u16 dz_in(struct dz_port *dport, unsigned offset) +{ + void __iomem *addr = dport->port.membase + offset; + + return readw(addr); +} + +static void dz_out(struct dz_port *dport, unsigned offset, u16 value) +{ + void __iomem *addr = dport->port.membase + offset; + + writew(value, addr); +} + +/* + * ------------------------------------------------------------ + * rs_stop () and rs_start () + * + * These routines are called before setting or resetting + * tty->flow.stopped. They enable or disable transmitter interrupts, + * as necessary. + * ------------------------------------------------------------ + */ + +static void dz_stop_tx(struct uart_port *uport) +{ + struct dz_port *dport = to_dport(uport); + u16 tmp, mask = 1 << dport->port.line; + + tmp = dz_in(dport, DZ_TCR); /* read the TX flag */ + tmp &= ~mask; /* clear the TX flag */ + dz_out(dport, DZ_TCR, tmp); +} + +static void dz_start_tx(struct uart_port *uport) +{ + struct dz_port *dport = to_dport(uport); + u16 tmp, mask = 1 << dport->port.line; + + tmp = dz_in(dport, DZ_TCR); /* read the TX flag */ + tmp |= mask; /* set the TX flag */ + dz_out(dport, DZ_TCR, tmp); +} + +static void dz_stop_rx(struct uart_port *uport) +{ + struct dz_port *dport = to_dport(uport); + + dport->cflag &= ~DZ_RXENAB; + dz_out(dport, DZ_LPR, dport->cflag); +} + +/* + * ------------------------------------------------------------ + * + * Here start the interrupt handling routines. All of the following + * subroutines are declared as inline and are folded into + * dz_interrupt. They were separated out for readability's sake. + * + * Note: dz_interrupt() is a "fast" interrupt, which means that it + * runs with interrupts turned off. People who may want to modify + * dz_interrupt() should try to keep the interrupt handler as fast as + * possible. After you are done making modifications, it is not a bad + * idea to do: + * + * make drivers/serial/dz.s + * + * and look at the resulting assemble code in dz.s. 
+ * + * ------------------------------------------------------------ + */ + +/* + * ------------------------------------------------------------ + * receive_char () + * + * This routine deals with inputs from any lines. + * ------------------------------------------------------------ + */ +static inline void dz_receive_chars(struct dz_mux *mux) +{ + struct uart_port *uport; + struct dz_port *dport = &mux->dport[0]; + struct uart_icount *icount; + int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 }; + unsigned char ch, flag; + u16 status; + int i; + + while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) { + dport = &mux->dport[LINE(status)]; + uport = &dport->port; + + ch = UCHAR(status); /* grab the char */ + flag = TTY_NORMAL; + + icount = &uport->icount; + icount->rx++; + + if (unlikely(status & (DZ_OERR | DZ_FERR | DZ_PERR))) { + + /* + * There is no separate BREAK status bit, so treat + * null characters with framing errors as BREAKs; + * normally, otherwise. For this move the Framing + * Error bit to a simulated BREAK bit. + */ + if (!ch) { + status |= (status & DZ_FERR) >> + (ffs(DZ_FERR) - ffs(DZ_BREAK)); + status &= ~DZ_FERR; + } + + /* Handle SysRq/SAK & keep track of the statistics. */ + if (status & DZ_BREAK) { + icount->brk++; + if (uart_handle_break(uport)) + continue; + } else if (status & DZ_FERR) + icount->frame++; + else if (status & DZ_PERR) + icount->parity++; + if (status & DZ_OERR) + icount->overrun++; + + status &= uport->read_status_mask; + if (status & DZ_BREAK) + flag = TTY_BREAK; + else if (status & DZ_FERR) + flag = TTY_FRAME; + else if (status & DZ_PERR) + flag = TTY_PARITY; + + } + + if (uart_handle_sysrq_char(uport, ch)) + continue; + + uart_insert_char(uport, status, DZ_OERR, ch, flag); + lines_rx[LINE(status)] = 1; + } + for (i = 0; i < DZ_NB_PORT; i++) + if (lines_rx[i]) + tty_flip_buffer_push(&mux->dport[i].port.state->port); +} + +/* + * ------------------------------------------------------------ + * transmit_char () + * + * This routine deals with outputs to any lines. + * ------------------------------------------------------------ + */ +static inline void dz_transmit_chars(struct dz_mux *mux) +{ + struct dz_port *dport = &mux->dport[0]; + struct circ_buf *xmit; + unsigned char tmp; + u16 status; + + status = dz_in(dport, DZ_CSR); + dport = &mux->dport[LINE(status)]; + xmit = &dport->port.state->xmit; + + if (dport->port.x_char) { /* XON/XOFF chars */ + dz_out(dport, DZ_TDR, dport->port.x_char); + dport->port.icount.tx++; + dport->port.x_char = 0; + return; + } + /* If nothing to do or stopped or hardware stopped. */ + if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) { + spin_lock(&dport->port.lock); + dz_stop_tx(&dport->port); + spin_unlock(&dport->port.lock); + return; + } + + /* + * If something to do... (remember the dz has no output fifo, + * so we go one char at a time) :-< + */ + tmp = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1); + dz_out(dport, DZ_TDR, tmp); + dport->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS) + uart_write_wakeup(&dport->port); + + /* Are we are done. */ + if (uart_circ_empty(xmit)) { + spin_lock(&dport->port.lock); + dz_stop_tx(&dport->port); + spin_unlock(&dport->port.lock); + } +} + +/* + * ------------------------------------------------------------ + * check_modem_status() + * + * DS 3100 & 5100: Only valid for the MODEM line, duh! + * DS 5000/200: Valid for the MODEM and PRINTER line. 
+ * ------------------------------------------------------------ + */ +static inline void check_modem_status(struct dz_port *dport) +{ + /* + * FIXME: + * 1. No status change interrupt; use a timer. + * 2. Handle the 3100/5000 as appropriate. --macro + */ + u16 status; + + /* If not the modem line just return. */ + if (dport->port.line != DZ_MODEM) + return; + + status = dz_in(dport, DZ_MSR); + + /* it's easy, since DSR2 is the only bit in the register */ + if (status) + dport->port.icount.dsr++; +} + +/* + * ------------------------------------------------------------ + * dz_interrupt () + * + * this is the main interrupt routine for the DZ chip. + * It deals with the multiple ports. + * ------------------------------------------------------------ + */ +static irqreturn_t dz_interrupt(int irq, void *dev_id) +{ + struct dz_mux *mux = dev_id; + struct dz_port *dport = &mux->dport[0]; + u16 status; + + /* get the reason why we just got an irq */ + status = dz_in(dport, DZ_CSR); + + if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE)) + dz_receive_chars(mux); + + if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE)) + dz_transmit_chars(mux); + + return IRQ_HANDLED; +} + +/* + * ------------------------------------------------------------------- + * Here ends the DZ interrupt routines. + * ------------------------------------------------------------------- + */ + +static unsigned int dz_get_mctrl(struct uart_port *uport) +{ + /* + * FIXME: Handle the 3100/5000 as appropriate. --macro + */ + struct dz_port *dport = to_dport(uport); + unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; + + if (dport->port.line == DZ_MODEM) { + if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR) + mctrl &= ~TIOCM_DSR; + } + + return mctrl; +} + +static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl) +{ + /* + * FIXME: Handle the 3100/5000 as appropriate. --macro + */ + struct dz_port *dport = to_dport(uport); + u16 tmp; + + if (dport->port.line == DZ_MODEM) { + tmp = dz_in(dport, DZ_TCR); + if (mctrl & TIOCM_DTR) + tmp &= ~DZ_MODEM_DTR; + else + tmp |= DZ_MODEM_DTR; + dz_out(dport, DZ_TCR, tmp); + } +} + +/* + * ------------------------------------------------------------------- + * startup () + * + * various initialization tasks + * ------------------------------------------------------------------- + */ +static int dz_startup(struct uart_port *uport) +{ + struct dz_port *dport = to_dport(uport); + struct dz_mux *mux = dport->mux; + unsigned long flags; + int irq_guard; + int ret; + u16 tmp; + + irq_guard = atomic_add_return(1, &mux->irq_guard); + if (irq_guard != 1) + return 0; + + ret = request_irq(dport->port.irq, dz_interrupt, + IRQF_SHARED, "dz", mux); + if (ret) { + atomic_add(-1, &mux->irq_guard); + printk(KERN_ERR "dz: Cannot get IRQ %d!\n", dport->port.irq); + return ret; + } + + spin_lock_irqsave(&dport->port.lock, flags); + + /* Enable interrupts. */ + tmp = dz_in(dport, DZ_CSR); + tmp |= DZ_RIE | DZ_TIE; + dz_out(dport, DZ_CSR, tmp); + + spin_unlock_irqrestore(&dport->port.lock, flags); + + return 0; +} + +/* + * ------------------------------------------------------------------- + * shutdown () + * + * This routine will shutdown a serial port; interrupts are disabled, and + * DTR is dropped if the hangup on close termio flag is on. 
+ * ------------------------------------------------------------------- + */ +static void dz_shutdown(struct uart_port *uport) +{ + struct dz_port *dport = to_dport(uport); + struct dz_mux *mux = dport->mux; + unsigned long flags; + int irq_guard; + u16 tmp; + + spin_lock_irqsave(&dport->port.lock, flags); + dz_stop_tx(&dport->port); + spin_unlock_irqrestore(&dport->port.lock, flags); + + irq_guard = atomic_add_return(-1, &mux->irq_guard); + if (!irq_guard) { + /* Disable interrupts. */ + tmp = dz_in(dport, DZ_CSR); + tmp &= ~(DZ_RIE | DZ_TIE); + dz_out(dport, DZ_CSR, tmp); + + free_irq(dport->port.irq, mux); + } +} + +/* + * ------------------------------------------------------------------- + * dz_tx_empty() -- get the transmitter empty status + * + * Purpose: Let user call ioctl() to get info when the UART physically + * is emptied. On bus types like RS485, the transmitter must + * release the bus after transmitting. This must be done when + * the transmit shift register is empty, not be done when the + * transmit holding register is empty. This functionality + * allows an RS485 driver to be written in user space. + * ------------------------------------------------------------------- + */ +static unsigned int dz_tx_empty(struct uart_port *uport) +{ + struct dz_port *dport = to_dport(uport); + unsigned short tmp, mask = 1 << dport->port.line; + + tmp = dz_in(dport, DZ_TCR); + tmp &= mask; + + return tmp ? 0 : TIOCSER_TEMT; +} + +static void dz_break_ctl(struct uart_port *uport, int break_state) +{ + /* + * FIXME: Can't access BREAK bits in TDR easily; + * reuse the code for polled TX. --macro + */ + struct dz_port *dport = to_dport(uport); + unsigned long flags; + unsigned short tmp, mask = 1 << dport->port.line; + + spin_lock_irqsave(&uport->lock, flags); + tmp = dz_in(dport, DZ_TCR); + if (break_state) + tmp |= mask; + else + tmp &= ~mask; + dz_out(dport, DZ_TCR, tmp); + spin_unlock_irqrestore(&uport->lock, flags); +} + +static int dz_encode_baud_rate(unsigned int baud) +{ + switch (baud) { + case 50: + return DZ_B50; + case 75: + return DZ_B75; + case 110: + return DZ_B110; + case 134: + return DZ_B134; + case 150: + return DZ_B150; + case 300: + return DZ_B300; + case 600: + return DZ_B600; + case 1200: + return DZ_B1200; + case 1800: + return DZ_B1800; + case 2000: + return DZ_B2000; + case 2400: + return DZ_B2400; + case 3600: + return DZ_B3600; + case 4800: + return DZ_B4800; + case 7200: + return DZ_B7200; + case 9600: + return DZ_B9600; + default: + return -1; + } +} + + +static void dz_reset(struct dz_port *dport) +{ + struct dz_mux *mux = dport->mux; + + if (mux->initialised) + return; + + dz_out(dport, DZ_CSR, DZ_CLR); + while (dz_in(dport, DZ_CSR) & DZ_CLR); + iob(); + + /* Enable scanning. 
*/ + dz_out(dport, DZ_CSR, DZ_MSE); + + mux->initialised = 1; +} + +static void dz_set_termios(struct uart_port *uport, struct ktermios *termios, + const struct ktermios *old_termios) +{ + struct dz_port *dport = to_dport(uport); + unsigned long flags; + unsigned int cflag, baud; + int bflag; + + cflag = dport->port.line; + + switch (termios->c_cflag & CSIZE) { + case CS5: + cflag |= DZ_CS5; + break; + case CS6: + cflag |= DZ_CS6; + break; + case CS7: + cflag |= DZ_CS7; + break; + case CS8: + default: + cflag |= DZ_CS8; + } + + if (termios->c_cflag & CSTOPB) + cflag |= DZ_CSTOPB; + if (termios->c_cflag & PARENB) + cflag |= DZ_PARENB; + if (termios->c_cflag & PARODD) + cflag |= DZ_PARODD; + + baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600); + bflag = dz_encode_baud_rate(baud); + if (bflag < 0) { + if (old_termios) { + /* Keep unchanged. */ + baud = tty_termios_baud_rate(old_termios); + bflag = dz_encode_baud_rate(baud); + } + if (bflag < 0) { /* Resort to 9600. */ + baud = 9600; + bflag = DZ_B9600; + } + tty_termios_encode_baud_rate(termios, baud, baud); + } + cflag |= bflag; + + if (termios->c_cflag & CREAD) + cflag |= DZ_RXENAB; + + spin_lock_irqsave(&dport->port.lock, flags); + + uart_update_timeout(uport, termios->c_cflag, baud); + + dz_out(dport, DZ_LPR, cflag); + dport->cflag = cflag; + + /* setup accept flag */ + dport->port.read_status_mask = DZ_OERR; + if (termios->c_iflag & INPCK) + dport->port.read_status_mask |= DZ_FERR | DZ_PERR; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + dport->port.read_status_mask |= DZ_BREAK; + + /* characters to ignore */ + uport->ignore_status_mask = 0; + if ((termios->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK)) + dport->port.ignore_status_mask |= DZ_OERR; + if (termios->c_iflag & IGNPAR) + dport->port.ignore_status_mask |= DZ_FERR | DZ_PERR; + if (termios->c_iflag & IGNBRK) + dport->port.ignore_status_mask |= DZ_BREAK; + + spin_unlock_irqrestore(&dport->port.lock, flags); +} + +/* + * Hack alert! + * Required solely so that the initial PROM-based console + * works undisturbed in parallel with this one. 
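+ * Only the transmitter of this line is gated on the power state; the
+ * receiver and the other lines are left alone.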
+ */ +static void dz_pm(struct uart_port *uport, unsigned int state, + unsigned int oldstate) +{ + struct dz_port *dport = to_dport(uport); + unsigned long flags; + + spin_lock_irqsave(&dport->port.lock, flags); + if (state < 3) + dz_start_tx(&dport->port); + else + dz_stop_tx(&dport->port); + spin_unlock_irqrestore(&dport->port.lock, flags); +} + + +static const char *dz_type(struct uart_port *uport) +{ + return "DZ"; +} + +static void dz_release_port(struct uart_port *uport) +{ + struct dz_mux *mux = to_dport(uport)->mux; + int map_guard; + + iounmap(uport->membase); + uport->membase = NULL; + + map_guard = atomic_add_return(-1, &mux->map_guard); + if (!map_guard) + release_mem_region(uport->mapbase, dec_kn_slot_size); +} + +static int dz_map_port(struct uart_port *uport) +{ + if (!uport->membase) + uport->membase = ioremap(uport->mapbase, + dec_kn_slot_size); + if (!uport->membase) { + printk(KERN_ERR "dz: Cannot map MMIO\n"); + return -ENOMEM; + } + return 0; +} + +static int dz_request_port(struct uart_port *uport) +{ + struct dz_mux *mux = to_dport(uport)->mux; + int map_guard; + int ret; + + map_guard = atomic_add_return(1, &mux->map_guard); + if (map_guard == 1) { + if (!request_mem_region(uport->mapbase, dec_kn_slot_size, + "dz")) { + atomic_add(-1, &mux->map_guard); + printk(KERN_ERR + "dz: Unable to reserve MMIO resource\n"); + return -EBUSY; + } + } + ret = dz_map_port(uport); + if (ret) { + map_guard = atomic_add_return(-1, &mux->map_guard); + if (!map_guard) + release_mem_region(uport->mapbase, dec_kn_slot_size); + return ret; + } + return 0; +} + +static void dz_config_port(struct uart_port *uport, int flags) +{ + struct dz_port *dport = to_dport(uport); + + if (flags & UART_CONFIG_TYPE) { + if (dz_request_port(uport)) + return; + + uport->type = PORT_DZ; + + dz_reset(dport); + } +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). + */ +static int dz_verify_port(struct uart_port *uport, struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_DZ) + ret = -EINVAL; + if (ser->irq != uport->irq) + ret = -EINVAL; + return ret; +} + +static const struct uart_ops dz_ops = { + .tx_empty = dz_tx_empty, + .get_mctrl = dz_get_mctrl, + .set_mctrl = dz_set_mctrl, + .stop_tx = dz_stop_tx, + .start_tx = dz_start_tx, + .stop_rx = dz_stop_rx, + .break_ctl = dz_break_ctl, + .startup = dz_startup, + .shutdown = dz_shutdown, + .set_termios = dz_set_termios, + .pm = dz_pm, + .type = dz_type, + .release_port = dz_release_port, + .request_port = dz_request_port, + .config_port = dz_config_port, + .verify_port = dz_verify_port, +}; + +static void __init dz_init_ports(void) +{ + static int first = 1; + unsigned long base; + int line; + + if (!first) + return; + first = 0; + + if (mips_machtype == MACH_DS23100 || mips_machtype == MACH_DS5100) + base = dec_kn_slot_base + KN01_DZ11; + else + base = dec_kn_slot_base + KN02_DZ11; + + for (line = 0; line < DZ_NB_PORT; line++) { + struct dz_port *dport = &dz_mux.dport[line]; + struct uart_port *uport = &dport->port; + + dport->mux = &dz_mux; + + uport->irq = dec_interrupt[DEC_IRQ_DZ11]; + uport->fifosize = 1; + uport->iotype = UPIO_MEM; + uport->flags = UPF_BOOT_AUTOCONF; + uport->ops = &dz_ops; + uport->line = line; + uport->mapbase = base; + uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_DZ_CONSOLE); + } +} + +#ifdef CONFIG_SERIAL_DZ_CONSOLE +/* + * ------------------------------------------------------------------- + * dz_console_putchar() -- transmit a character + * + * Polled transmission. 
This is tricky. We need to mask transmit + * interrupts so that they do not interfere, enable the transmitter + * for the line requested and then wait till the transmit scanner + * requests data for this line. But it may request data for another + * line first, in which case we have to disable its transmitter and + * repeat waiting till our line pops up. Only then the character may + * be transmitted. Finally, the state of the transmitter mask is + * restored. Welcome to the world of PDP-11! + * ------------------------------------------------------------------- + */ +static void dz_console_putchar(struct uart_port *uport, unsigned char ch) +{ + struct dz_port *dport = to_dport(uport); + unsigned long flags; + unsigned short csr, tcr, trdy, mask; + int loops = 10000; + + spin_lock_irqsave(&dport->port.lock, flags); + csr = dz_in(dport, DZ_CSR); + dz_out(dport, DZ_CSR, csr & ~DZ_TIE); + tcr = dz_in(dport, DZ_TCR); + tcr |= 1 << dport->port.line; + mask = tcr; + dz_out(dport, DZ_TCR, mask); + iob(); + spin_unlock_irqrestore(&dport->port.lock, flags); + + do { + trdy = dz_in(dport, DZ_CSR); + if (!(trdy & DZ_TRDY)) + continue; + trdy = (trdy & DZ_TLINE) >> 8; + if (trdy == dport->port.line) + break; + mask &= ~(1 << trdy); + dz_out(dport, DZ_TCR, mask); + iob(); + udelay(2); + } while (--loops); + + if (loops) /* Cannot send otherwise. */ + dz_out(dport, DZ_TDR, ch); + + dz_out(dport, DZ_TCR, tcr); + dz_out(dport, DZ_CSR, csr); +} + +/* + * ------------------------------------------------------------------- + * dz_console_print () + * + * dz_console_print is registered for printk. + * The console must be locked when we get here. + * ------------------------------------------------------------------- + */ +static void dz_console_print(struct console *co, + const char *str, + unsigned int count) +{ + struct dz_port *dport = &dz_mux.dport[co->index]; +#ifdef DEBUG_DZ + prom_printf((char *) str); +#endif + uart_console_write(&dport->port, str, count, dz_console_putchar); +} + +static int __init dz_console_setup(struct console *co, char *options) +{ + struct dz_port *dport = &dz_mux.dport[co->index]; + struct uart_port *uport = &dport->port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + ret = dz_map_port(uport); + if (ret) + return ret; + + spin_lock_init(&dport->port.lock); /* For dz_pm(). 
*/ + + dz_reset(dport); + dz_pm(uport, 0, -1); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&dport->port, co, baud, parity, bits, flow); +} + +static struct uart_driver dz_reg; +static struct console dz_console = { + .name = "ttyS", + .write = dz_console_print, + .device = uart_console_device, + .setup = dz_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &dz_reg, +}; + +static int __init dz_serial_console_init(void) +{ + if (!IOASIC) { + dz_init_ports(); + register_console(&dz_console); + return 0; + } else + return -ENXIO; +} + +console_initcall(dz_serial_console_init); + +#define SERIAL_DZ_CONSOLE &dz_console +#else +#define SERIAL_DZ_CONSOLE NULL +#endif /* CONFIG_SERIAL_DZ_CONSOLE */ + +static struct uart_driver dz_reg = { + .owner = THIS_MODULE, + .driver_name = "serial", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = DZ_NB_PORT, + .cons = SERIAL_DZ_CONSOLE, +}; + +static int __init dz_init(void) +{ + int ret, i; + + if (IOASIC) + return -ENXIO; + + printk("%s%s\n", dz_name, dz_version); + + dz_init_ports(); + + ret = uart_register_driver(&dz_reg); + if (ret) + return ret; + + for (i = 0; i < DZ_NB_PORT; i++) + uart_add_one_port(&dz_reg, &dz_mux.dport[i].port); + + return 0; +} + +module_init(dz_init); diff --git a/drivers/tty/serial/dz.h b/drivers/tty/serial/dz.h new file mode 100644 index 000000000..3b3e31954 --- /dev/null +++ b/drivers/tty/serial/dz.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * dz.h: Serial port driver for DECstations equipped + * with the DZ chipset. + * + * Copyright (C) 1998 Olivier A. D. Lebaillif + * + * Email: olivier.lebaillif@ifrsys.com + * + * Copyright (C) 2004, 2006 Maciej W. Rozycki + */ +#ifndef DZ_SERIAL_H +#define DZ_SERIAL_H + +/* + * Definitions for the Control and Status Register. + */ +#define DZ_TRDY 0x8000 /* Transmitter empty */ +#define DZ_TIE 0x4000 /* Transmitter Interrupt Enbl */ +#define DZ_TLINE 0x0300 /* Transmitter Line Number */ +#define DZ_RDONE 0x0080 /* Receiver data ready */ +#define DZ_RIE 0x0040 /* Receive Interrupt Enable */ +#define DZ_MSE 0x0020 /* Master Scan Enable */ +#define DZ_CLR 0x0010 /* Master reset */ +#define DZ_MAINT 0x0008 /* Loop Back Mode */ + +/* + * Definitions for the Receiver Buffer Register. + */ +#define DZ_RBUF_MASK 0x00FF /* Data Mask */ +#define DZ_LINE_MASK 0x0300 /* Line Mask */ +#define DZ_DVAL 0x8000 /* Valid Data indicator */ +#define DZ_OERR 0x4000 /* Overrun error indicator */ +#define DZ_FERR 0x2000 /* Frame error indicator */ +#define DZ_PERR 0x1000 /* Parity error indicator */ + +#define DZ_BREAK 0x0800 /* BREAK event software flag */ + +#define LINE(x) ((x & DZ_LINE_MASK) >> 8) /* Get the line number + from the input buffer */ +#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK)) + +/* + * Definitions for the Transmit Control Register. + */ +#define DZ_LINE_KEYBOARD 0x0001 +#define DZ_LINE_MOUSE 0x0002 +#define DZ_LINE_MODEM 0x0004 +#define DZ_LINE_PRINTER 0x0008 + +#define DZ_MODEM_RTS 0x0800 /* RTS for the modem line (2) */ +#define DZ_MODEM_DTR 0x0400 /* DTR for the modem line (2) */ +#define DZ_PRINT_RTS 0x0200 /* RTS for the prntr line (3) */ +#define DZ_PRINT_DTR 0x0100 /* DTR for the prntr line (3) */ +#define DZ_LNENB 0x000f /* Transmitter Line Enable */ + +/* + * Definitions for the Modem Status Register. 
+ */ +#define DZ_MODEM_RI 0x0800 /* RI for the modem line (2) */ +#define DZ_MODEM_CD 0x0400 /* CD for the modem line (2) */ +#define DZ_MODEM_DSR 0x0200 /* DSR for the modem line (2) */ +#define DZ_MODEM_CTS 0x0100 /* CTS for the modem line (2) */ +#define DZ_PRINT_RI 0x0008 /* RI for the printer line (3) */ +#define DZ_PRINT_CD 0x0004 /* CD for the printer line (3) */ +#define DZ_PRINT_DSR 0x0002 /* DSR for the prntr line (3) */ +#define DZ_PRINT_CTS 0x0001 /* CTS for the prntr line (3) */ + +/* + * Definitions for the Transmit Data Register. + */ +#define DZ_BRK0 0x0100 /* Break assertion for line 0 */ +#define DZ_BRK1 0x0200 /* Break assertion for line 1 */ +#define DZ_BRK2 0x0400 /* Break assertion for line 2 */ +#define DZ_BRK3 0x0800 /* Break assertion for line 3 */ + +/* + * Definitions for the Line Parameter Register. + */ +#define DZ_KEYBOARD 0x0000 /* line 0 = keyboard */ +#define DZ_MOUSE 0x0001 /* line 1 = mouse */ +#define DZ_MODEM 0x0002 /* line 2 = modem */ +#define DZ_PRINTER 0x0003 /* line 3 = printer */ + +#define DZ_CSIZE 0x0018 /* Number of bits per byte (mask) */ +#define DZ_CS5 0x0000 /* 5 bits per byte */ +#define DZ_CS6 0x0008 /* 6 bits per byte */ +#define DZ_CS7 0x0010 /* 7 bits per byte */ +#define DZ_CS8 0x0018 /* 8 bits per byte */ + +#define DZ_CSTOPB 0x0020 /* 2 stop bits instead of one */ + +#define DZ_PARENB 0x0040 /* Parity enable */ +#define DZ_PARODD 0x0080 /* Odd parity instead of even */ + +#define DZ_CBAUD 0x0E00 /* Baud Rate (mask) */ +#define DZ_B50 0x0000 +#define DZ_B75 0x0100 +#define DZ_B110 0x0200 +#define DZ_B134 0x0300 +#define DZ_B150 0x0400 +#define DZ_B300 0x0500 +#define DZ_B600 0x0600 +#define DZ_B1200 0x0700 +#define DZ_B1800 0x0800 +#define DZ_B2000 0x0900 +#define DZ_B2400 0x0A00 +#define DZ_B3600 0x0B00 +#define DZ_B4800 0x0C00 +#define DZ_B7200 0x0D00 +#define DZ_B9600 0x0E00 + +#define DZ_RXENAB 0x1000 /* Receiver Enable */ + +/* + * Addresses for the DZ registers + */ +#define DZ_CSR 0x00 /* Control and Status Register */ +#define DZ_RBUF 0x08 /* Receive Buffer */ +#define DZ_LPR 0x08 /* Line Parameters Register */ +#define DZ_TCR 0x10 /* Transmitter Control Register */ +#define DZ_MSR 0x18 /* Modem Status Register */ +#define DZ_TDR 0x18 /* Transmit Data Register */ + +#define DZ_NB_PORT 4 + +#define DZ_XMIT_SIZE 4096 /* buffer size */ +#define DZ_WAKEUP_CHARS DZ_XMIT_SIZE/4 + +#endif /* DZ_SERIAL_H */ diff --git a/drivers/tty/serial/earlycon-arm-semihost.c b/drivers/tty/serial/earlycon-arm-semihost.c new file mode 100644 index 000000000..fcdec5f42 --- /dev/null +++ b/drivers/tty/serial/earlycon-arm-semihost.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier + * + * Adapted for ARM and earlycon: + * Copyright (C) 2014 Linaro Ltd. 
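Closing out dz.h, a hedged sketch of how the line-parameter fields above compose: a single LPR write sets speed, character format and receiver enable for one line, with the low bits selecting that line.

	/* e.g. 9600 baud, 8N1, receiver enabled, on the modem line (line 2): */
	dz_out(dport, DZ_LPR, DZ_B9600 | DZ_CS8 | DZ_RXENAB | DZ_MODEM);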
+ * Author: Rob Herring + */ +#include +#include +#include +#include + +#ifdef CONFIG_THUMB2_KERNEL +#define SEMIHOST_SWI "0xab" +#else +#define SEMIHOST_SWI "0x123456" +#endif + +/* + * Semihosting-based debug console + */ +static void smh_putc(struct uart_port *port, unsigned char c) +{ +#ifdef CONFIG_ARM64 + asm volatile("mov x1, %0\n" + "mov x0, #3\n" + "hlt 0xf000\n" + : : "r" (&c) : "x0", "x1", "memory"); +#else + asm volatile("mov r1, %0\n" + "mov r0, #3\n" + "svc " SEMIHOST_SWI "\n" + : : "r" (&c) : "r0", "r1", "memory"); +#endif +} + +static void smh_write(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + uart_console_write(&dev->port, s, n, smh_putc); +} + +static int +__init early_smh_setup(struct earlycon_device *device, const char *opt) +{ + device->con->write = smh_write; + return 0; +} +EARLYCON_DECLARE(smh, early_smh_setup); diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c new file mode 100644 index 000000000..27afb0b74 --- /dev/null +++ b/drivers/tty/serial/earlycon-riscv-sbi.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V SBI based earlycon + * + * Copyright (C) 2018 Anup Patel + */ +#include +#include +#include +#include +#include + +static void sbi_putc(struct uart_port *port, unsigned char c) +{ + sbi_console_putchar(c); +} + +static void sbi_console_write(struct console *con, + const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + uart_console_write(&dev->port, s, n, sbi_putc); +} + +static int __init early_sbi_setup(struct earlycon_device *device, + const char *opt) +{ + device->con->write = sbi_console_write; + return 0; +} +EARLYCON_DECLARE(sbi, early_sbi_setup); diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c new file mode 100644 index 000000000..a5f380584 --- /dev/null +++ b/drivers/tty/serial/earlycon.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014 Linaro Ltd. + * Author: Rob Herring + * + * Based on 8250 earlycon: + * (c) Copyright 2004 Hewlett-Packard Development Company, L.P. 
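As a usage aside for this file and the two earlycon drivers above (illustrative boot parameters only, not part of the patch): an early console is chosen by name on the kernel command line, optionally followed by an I/O description and options, as documented at setup_earlycon() further down. Typical forms are

	earlycon=smh					(ARM semihosting, no address needed)
	earlycon=sbi					(RISC-V SBI console, likewise)
	earlycon=uart8250,mmio32,0x44e09000,115200n8	(named driver, MMIO address, options)
	earlycon					(bare: fall back to the DT stdout-path or ACPI SPCR table)

The MMIO address above is only a placeholder for whatever the platform actually uses.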
+ * Bjorn Helgaas + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_FIX_EARLYCON_MEM +#include +#endif + +#include + +static struct console early_con = { + .name = "uart", /* fixed up at earlycon registration */ + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = 0, +}; + +static struct earlycon_device early_console_dev = { + .con = &early_con, +}; + +static void __iomem * __init earlycon_map(resource_size_t paddr, size_t size) +{ + void __iomem *base; +#ifdef CONFIG_FIX_EARLYCON_MEM + set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK); + base = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); + base += paddr & ~PAGE_MASK; +#else + base = ioremap(paddr, size); +#endif + if (!base) + pr_err("%s: Couldn't map %pa\n", __func__, &paddr); + + return base; +} + +static void __init earlycon_init(struct earlycon_device *device, + const char *name) +{ + struct console *earlycon = device->con; + const char *s; + size_t len; + + /* scan backwards from end of string for first non-numeral */ + for (s = name + strlen(name); + s > name && s[-1] >= '0' && s[-1] <= '9'; + s--) + ; + if (*s) + earlycon->index = simple_strtoul(s, NULL, 10); + len = s - name; + strscpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name))); + earlycon->data = &early_console_dev; +} + +static void __init earlycon_print_info(struct earlycon_device *device) +{ + struct console *earlycon = device->con; + struct uart_port *port = &device->port; + + if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM16 || + port->iotype == UPIO_MEM32 || port->iotype == UPIO_MEM32BE) + pr_info("%s%d at MMIO%s %pa (options '%s')\n", + earlycon->name, earlycon->index, + (port->iotype == UPIO_MEM) ? "" : + (port->iotype == UPIO_MEM16) ? "16" : + (port->iotype == UPIO_MEM32) ? 
"32" : "32be", + &port->mapbase, device->options); + else + pr_info("%s%d at I/O port 0x%lx (options '%s')\n", + earlycon->name, earlycon->index, + port->iobase, device->options); +} + +static int __init parse_options(struct earlycon_device *device, char *options) +{ + struct uart_port *port = &device->port; + int length; + resource_size_t addr; + + if (uart_parse_earlycon(options, &port->iotype, &addr, &options)) + return -EINVAL; + + switch (port->iotype) { + case UPIO_MEM: + port->mapbase = addr; + break; + case UPIO_MEM16: + port->regshift = 1; + port->mapbase = addr; + break; + case UPIO_MEM32: + case UPIO_MEM32BE: + port->regshift = 2; + port->mapbase = addr; + break; + case UPIO_PORT: + port->iobase = addr; + break; + default: + return -EINVAL; + } + + if (options) { + device->baud = simple_strtoul(options, NULL, 0); + length = min(strcspn(options, " ") + 1, + (size_t)(sizeof(device->options))); + strscpy(device->options, options, length); + } + + return 0; +} + +static int __init register_earlycon(char *buf, const struct earlycon_id *match) +{ + int err; + struct uart_port *port = &early_console_dev.port; + + /* On parsing error, pass the options buf to the setup function */ + if (buf && !parse_options(&early_console_dev, buf)) + buf = NULL; + + spin_lock_init(&port->lock); + port->uartclk = BASE_BAUD * 16; + if (port->mapbase) + port->membase = earlycon_map(port->mapbase, 64); + + earlycon_init(&early_console_dev, match->name); + err = match->setup(&early_console_dev, buf); + earlycon_print_info(&early_console_dev); + if (err < 0) + return err; + if (!early_console_dev.con->write) + return -ENODEV; + + register_console(early_console_dev.con); + return 0; +} + +/** + * setup_earlycon - match and register earlycon console + * @buf: earlycon param string + * + * Registers the earlycon console matching the earlycon specified + * in the param string @buf. Acceptable param strings are of the form + * ,io|mmio|mmio32|mmio32be,, + * ,0x, + * , + * + * + * Only for the third form does the earlycon setup() method receive the + * string in the 'options' parameter; all other forms set + * the parameter to NULL. + * + * Returns 0 if an attempt to register the earlycon was made, + * otherwise negative error code + */ +int __init setup_earlycon(char *buf) +{ + const struct earlycon_id *match; + bool empty_compatible = true; + + if (!buf || !buf[0]) + return -EINVAL; + + if (early_con.flags & CON_ENABLED) + return -EALREADY; + +again: + for (match = __earlycon_table; match < __earlycon_table_end; match++) { + size_t len = strlen(match->name); + + if (strncmp(buf, match->name, len)) + continue; + + /* prefer entries with empty compatible */ + if (empty_compatible && *match->compatible) + continue; + + if (buf[len]) { + if (buf[len] != ',') + continue; + buf += len + 1; + } else + buf = NULL; + + return register_earlycon(buf, match); + } + + if (empty_compatible) { + empty_compatible = false; + goto again; + } + + return -ENOENT; +} + +/* + * This defers the initialization of the early console until after ACPI has + * been initialized. + */ +bool earlycon_acpi_spcr_enable __initdata; + +/* early_param wrapper for setup_earlycon() */ +static int __init param_setup_earlycon(char *buf) +{ + int err; + + /* Just 'earlycon' is a valid param for devicetree and ACPI SPCR. 
*/ + if (!buf || !buf[0]) { + if (IS_ENABLED(CONFIG_ACPI_SPCR_TABLE)) { + earlycon_acpi_spcr_enable = true; + return 0; + } else if (!buf) { + return early_init_dt_scan_chosen_stdout(); + } + } + + err = setup_earlycon(buf); + if (err == -ENOENT || err == -EALREADY) + return 0; + return err; +} +early_param("earlycon", param_setup_earlycon); + +#ifdef CONFIG_OF_EARLY_FLATTREE + +int __init of_setup_earlycon(const struct earlycon_id *match, + unsigned long node, + const char *options) +{ + int err; + struct uart_port *port = &early_console_dev.port; + const __be32 *val; + bool big_endian; + u64 addr; + + if (early_con.flags & CON_ENABLED) + return -EALREADY; + + spin_lock_init(&port->lock); + port->iotype = UPIO_MEM; + addr = of_flat_dt_translate_address(node); + if (addr == OF_BAD_ADDR) { + pr_warn("[%s] bad address\n", match->name); + return -ENXIO; + } + port->mapbase = addr; + + val = of_get_flat_dt_prop(node, "reg-offset", NULL); + if (val) + port->mapbase += be32_to_cpu(*val); + port->membase = earlycon_map(port->mapbase, SZ_4K); + + val = of_get_flat_dt_prop(node, "reg-shift", NULL); + if (val) + port->regshift = be32_to_cpu(*val); + big_endian = of_get_flat_dt_prop(node, "big-endian", NULL) != NULL || + (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && + of_get_flat_dt_prop(node, "native-endian", NULL) != NULL); + val = of_get_flat_dt_prop(node, "reg-io-width", NULL); + if (val) { + switch (be32_to_cpu(*val)) { + case 1: + port->iotype = UPIO_MEM; + break; + case 2: + port->iotype = UPIO_MEM16; + break; + case 4: + port->iotype = (big_endian) ? UPIO_MEM32BE : UPIO_MEM32; + break; + default: + pr_warn("[%s] unsupported reg-io-width\n", match->name); + return -EINVAL; + } + } + + val = of_get_flat_dt_prop(node, "current-speed", NULL); + if (val) + early_console_dev.baud = be32_to_cpu(*val); + + val = of_get_flat_dt_prop(node, "clock-frequency", NULL); + if (val) + port->uartclk = be32_to_cpu(*val); + + if (options) { + early_console_dev.baud = simple_strtoul(options, NULL, 0); + strscpy(early_console_dev.options, options, + sizeof(early_console_dev.options)); + } + earlycon_init(&early_console_dev, match->name); + err = match->setup(&early_console_dev, options); + earlycon_print_info(&early_console_dev); + if (err < 0) + return err; + if (!early_console_dev.con->write) + return -ENODEV; + + + register_console(early_console_dev.con); + return 0; +} + +#endif /* CONFIG_OF_EARLY_FLATTREE */ diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c new file mode 100644 index 000000000..84e8153e5 --- /dev/null +++ b/drivers/tty/serial/fsl_linflexuart.c @@ -0,0 +1,921 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale LINFlexD UART serial port driver + * + * Copyright 2012-2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017-2019 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* All registers are 32-bit width */ + +#define LINCR1 0x0000 /* LIN control register */ +#define LINIER 0x0004 /* LIN interrupt enable register */ +#define LINSR 0x0008 /* LIN status register */ +#define LINESR 0x000C /* LIN error status register */ +#define UARTCR 0x0010 /* UART mode control register */ +#define UARTSR 0x0014 /* UART mode status register */ +#define LINTCSR 0x0018 /* LIN timeout control status register */ +#define LINOCR 0x001C /* LIN output compare register */ +#define LINTOCR 0x0020 /* LIN timeout control register */ +#define LINFBRR 0x0024 /* LIN fractional baud rate register */ +#define LINIBRR 0x0028 /* LIN integer baud rate register */ +#define LINCFR 0x002C /* LIN checksum field register */ +#define LINCR2 0x0030 /* LIN control register 2 */ +#define BIDR 0x0034 /* Buffer identifier register */ +#define BDRL 0x0038 /* Buffer data register least significant */ +#define BDRM 0x003C /* Buffer data register most significant */ +#define IFER 0x0040 /* Identifier filter enable register */ +#define IFMI 0x0044 /* Identifier filter match index */ +#define IFMR 0x0048 /* Identifier filter mode register */ +#define GCR 0x004C /* Global control register */ +#define UARTPTO 0x0050 /* UART preset timeout register */ +#define UARTCTO 0x0054 /* UART current timeout register */ + +/* + * Register field definitions + */ + +#define LINFLEXD_LINCR1_INIT BIT(0) +#define LINFLEXD_LINCR1_MME BIT(4) +#define LINFLEXD_LINCR1_BF BIT(7) + +#define LINFLEXD_LINSR_LINS_INITMODE BIT(12) +#define LINFLEXD_LINSR_LINS_MASK (0xF << 12) + +#define LINFLEXD_LINIER_SZIE BIT(15) +#define LINFLEXD_LINIER_OCIE BIT(14) +#define LINFLEXD_LINIER_BEIE BIT(13) +#define LINFLEXD_LINIER_CEIE BIT(12) +#define LINFLEXD_LINIER_HEIE BIT(11) +#define LINFLEXD_LINIER_FEIE BIT(8) +#define LINFLEXD_LINIER_BOIE BIT(7) +#define LINFLEXD_LINIER_LSIE BIT(6) +#define LINFLEXD_LINIER_WUIE BIT(5) +#define LINFLEXD_LINIER_DBFIE BIT(4) +#define LINFLEXD_LINIER_DBEIETOIE BIT(3) +#define LINFLEXD_LINIER_DRIE BIT(2) +#define LINFLEXD_LINIER_DTIE BIT(1) +#define LINFLEXD_LINIER_HRIE BIT(0) + +#define LINFLEXD_UARTCR_OSR_MASK (0xF << 24) +#define LINFLEXD_UARTCR_OSR(uartcr) (((uartcr) \ + & LINFLEXD_UARTCR_OSR_MASK) >> 24) + +#define LINFLEXD_UARTCR_ROSE BIT(23) + +#define LINFLEXD_UARTCR_RFBM BIT(9) +#define LINFLEXD_UARTCR_TFBM BIT(8) +#define LINFLEXD_UARTCR_WL1 BIT(7) +#define LINFLEXD_UARTCR_PC1 BIT(6) + +#define LINFLEXD_UARTCR_RXEN BIT(5) +#define LINFLEXD_UARTCR_TXEN BIT(4) +#define LINFLEXD_UARTCR_PC0 BIT(3) + +#define LINFLEXD_UARTCR_PCE BIT(2) +#define LINFLEXD_UARTCR_WL0 BIT(1) +#define LINFLEXD_UARTCR_UART BIT(0) + +#define LINFLEXD_UARTSR_SZF BIT(15) +#define LINFLEXD_UARTSR_OCF BIT(14) +#define LINFLEXD_UARTSR_PE3 BIT(13) +#define LINFLEXD_UARTSR_PE2 BIT(12) +#define LINFLEXD_UARTSR_PE1 BIT(11) +#define LINFLEXD_UARTSR_PE0 BIT(10) +#define LINFLEXD_UARTSR_RMB BIT(9) +#define LINFLEXD_UARTSR_FEF BIT(8) +#define LINFLEXD_UARTSR_BOF BIT(7) +#define LINFLEXD_UARTSR_RPS BIT(6) +#define LINFLEXD_UARTSR_WUF BIT(5) +#define LINFLEXD_UARTSR_4 BIT(4) + +#define LINFLEXD_UARTSR_TO BIT(3) + +#define LINFLEXD_UARTSR_DRFRFE BIT(2) +#define LINFLEXD_UARTSR_DTFTFF BIT(1) +#define LINFLEXD_UARTSR_NF BIT(0) +#define LINFLEXD_UARTSR_PE (LINFLEXD_UARTSR_PE0 |\ + LINFLEXD_UARTSR_PE1 |\ + LINFLEXD_UARTSR_PE2 |\ + LINFLEXD_UARTSR_PE3) + +#define LINFLEX_LDIV_MULTIPLIER (16) + +#define DRIVER_NAME 
"fsl-linflexuart" +#define DEV_NAME "ttyLF" +#define UART_NR 4 + +#define EARLYCON_BUFFER_INITIAL_CAP 8 + +#define PREINIT_DELAY 2000 /* us */ + +static const struct of_device_id linflex_dt_ids[] = { + { + .compatible = "fsl,s32v234-linflexuart", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, linflex_dt_ids); + +#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE +static struct uart_port *earlycon_port; +static bool linflex_earlycon_same_instance; +static DEFINE_SPINLOCK(init_lock); +static bool during_init; + +static struct { + char *content; + unsigned int len, cap; +} earlycon_buf; +#endif + +static void linflex_stop_tx(struct uart_port *port) +{ + unsigned long ier; + + ier = readl(port->membase + LINIER); + ier &= ~(LINFLEXD_LINIER_DTIE); + writel(ier, port->membase + LINIER); +} + +static void linflex_stop_rx(struct uart_port *port) +{ + unsigned long ier; + + ier = readl(port->membase + LINIER); + writel(ier & ~LINFLEXD_LINIER_DRIE, port->membase + LINIER); +} + +static void linflex_put_char(struct uart_port *sport, unsigned char c) +{ + unsigned long status; + + writeb(c, sport->membase + BDRL); + + /* Waiting for data transmission completed. */ + while (((status = readl(sport->membase + UARTSR)) & + LINFLEXD_UARTSR_DTFTFF) != + LINFLEXD_UARTSR_DTFTFF) + ; + + writel(status | LINFLEXD_UARTSR_DTFTFF, sport->membase + UARTSR); +} + +static inline void linflex_transmit_buffer(struct uart_port *sport) +{ + struct circ_buf *xmit = &sport->state->xmit; + + while (!uart_circ_empty(xmit)) { + linflex_put_char(sport, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + sport->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(sport); + + if (uart_circ_empty(xmit)) + linflex_stop_tx(sport); +} + +static void linflex_start_tx(struct uart_port *port) +{ + unsigned long ier; + + linflex_transmit_buffer(port); + ier = readl(port->membase + LINIER); + writel(ier | LINFLEXD_LINIER_DTIE, port->membase + LINIER); +} + +static irqreturn_t linflex_txint(int irq, void *dev_id) +{ + struct uart_port *sport = dev_id; + struct circ_buf *xmit = &sport->state->xmit; + unsigned long flags; + + spin_lock_irqsave(&sport->lock, flags); + + if (sport->x_char) { + linflex_put_char(sport, sport->x_char); + goto out; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(sport)) { + linflex_stop_tx(sport); + goto out; + } + + linflex_transmit_buffer(sport); +out: + spin_unlock_irqrestore(&sport->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t linflex_rxint(int irq, void *dev_id) +{ + struct uart_port *sport = dev_id; + unsigned int flg; + struct tty_port *port = &sport->state->port; + unsigned long flags, status; + unsigned char rx; + bool brk; + + spin_lock_irqsave(&sport->lock, flags); + + status = readl(sport->membase + UARTSR); + while (status & LINFLEXD_UARTSR_RMB) { + rx = readb(sport->membase + BDRM); + brk = false; + flg = TTY_NORMAL; + sport->icount.rx++; + + if (status & (LINFLEXD_UARTSR_BOF | LINFLEXD_UARTSR_FEF | + LINFLEXD_UARTSR_PE)) { + if (status & LINFLEXD_UARTSR_BOF) + sport->icount.overrun++; + if (status & LINFLEXD_UARTSR_FEF) { + if (!rx) { + brk = true; + sport->icount.brk++; + } else + sport->icount.frame++; + } + if (status & LINFLEXD_UARTSR_PE) + sport->icount.parity++; + } + + writel(status, sport->membase + UARTSR); + status = readl(sport->membase + UARTSR); + + if (brk) { + uart_handle_break(sport); + } else { + if (uart_handle_sysrq_char(sport, (unsigned char)rx)) + continue; + tty_insert_flip_char(port, 
rx, flg); + } + } + + spin_unlock_irqrestore(&sport->lock, flags); + + tty_flip_buffer_push(port); + + return IRQ_HANDLED; +} + +static irqreturn_t linflex_int(int irq, void *dev_id) +{ + struct uart_port *sport = dev_id; + unsigned long status; + + status = readl(sport->membase + UARTSR); + + if (status & LINFLEXD_UARTSR_DRFRFE) + linflex_rxint(irq, dev_id); + if (status & LINFLEXD_UARTSR_DTFTFF) + linflex_txint(irq, dev_id); + + return IRQ_HANDLED; +} + +/* return TIOCSER_TEMT when transmitter is not busy */ +static unsigned int linflex_tx_empty(struct uart_port *port) +{ + unsigned long status; + + status = readl(port->membase + UARTSR) & LINFLEXD_UARTSR_DTFTFF; + + return status ? TIOCSER_TEMT : 0; +} + +static unsigned int linflex_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void linflex_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static void linflex_break_ctl(struct uart_port *port, int break_state) +{ +} + +static void linflex_setup_watermark(struct uart_port *sport) +{ + unsigned long cr, ier, cr1; + + /* Disable transmission/reception */ + ier = readl(sport->membase + LINIER); + ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE); + writel(ier, sport->membase + LINIER); + + cr = readl(sport->membase + UARTCR); + cr &= ~(LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN); + writel(cr, sport->membase + UARTCR); + + /* Enter initialization mode by setting INIT bit */ + + /* set the Linflex in master mode and activate by-pass filter */ + cr1 = LINFLEXD_LINCR1_BF | LINFLEXD_LINCR1_MME + | LINFLEXD_LINCR1_INIT; + writel(cr1, sport->membase + LINCR1); + + /* wait for init mode entry */ + while ((readl(sport->membase + LINSR) + & LINFLEXD_LINSR_LINS_MASK) + != LINFLEXD_LINSR_LINS_INITMODE) + ; + + /* + * UART = 0x1; - Linflex working in UART mode + * TXEN = 0x1; - Enable transmission of data now + * RXEn = 0x1; - Receiver enabled + * WL0 = 0x1; - 8 bit data + * PCE = 0x0; - No parity + */ + + /* set UART bit to allow writing other bits */ + writel(LINFLEXD_UARTCR_UART, sport->membase + UARTCR); + + cr = (LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN | + LINFLEXD_UARTCR_WL0 | LINFLEXD_UARTCR_UART); + + writel(cr, sport->membase + UARTCR); + + cr1 &= ~(LINFLEXD_LINCR1_INIT); + + writel(cr1, sport->membase + LINCR1); + + ier = readl(sport->membase + LINIER); + ier |= LINFLEXD_LINIER_DRIE; + ier |= LINFLEXD_LINIER_DTIE; + + writel(ier, sport->membase + LINIER); +} + +static int linflex_startup(struct uart_port *port) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + linflex_setup_watermark(port); + + spin_unlock_irqrestore(&port->lock, flags); + + ret = devm_request_irq(port->dev, port->irq, linflex_int, 0, + DRIVER_NAME, port); + + return ret; +} + +static void linflex_shutdown(struct uart_port *port) +{ + unsigned long ier; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* disable interrupts */ + ier = readl(port->membase + LINIER); + ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE); + writel(ier, port->membase + LINIER); + + spin_unlock_irqrestore(&port->lock, flags); + + devm_free_irq(port->dev, port->irq, port); +} + +static void +linflex_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + unsigned long cr, old_cr, cr1; + unsigned int old_csize = old ? 
old->c_cflag & CSIZE : CS8; + + cr = readl(port->membase + UARTCR); + old_cr = cr; + + /* Enter initialization mode by setting INIT bit */ + cr1 = readl(port->membase + LINCR1); + cr1 |= LINFLEXD_LINCR1_INIT; + writel(cr1, port->membase + LINCR1); + + /* wait for init mode entry */ + while ((readl(port->membase + LINSR) + & LINFLEXD_LINSR_LINS_MASK) + != LINFLEXD_LINSR_LINS_INITMODE) + ; + + /* + * only support CS8 and CS7, and for CS7 must enable PE. + * supported mode: + * - (7,e/o,1) + * - (8,n,1) + * - (8,e/o,1) + */ + /* enter the UART into configuration mode */ + + while ((termios->c_cflag & CSIZE) != CS8 && + (termios->c_cflag & CSIZE) != CS7) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= old_csize; + old_csize = CS8; + } + + if ((termios->c_cflag & CSIZE) == CS7) { + /* Word length: WL1WL0:00 */ + cr = old_cr & ~LINFLEXD_UARTCR_WL1 & ~LINFLEXD_UARTCR_WL0; + } + + if ((termios->c_cflag & CSIZE) == CS8) { + /* Word length: WL1WL0:01 */ + cr = (old_cr | LINFLEXD_UARTCR_WL0) & ~LINFLEXD_UARTCR_WL1; + } + + if (termios->c_cflag & CMSPAR) { + if ((termios->c_cflag & CSIZE) != CS8) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } + /* has a space/sticky bit */ + cr |= LINFLEXD_UARTCR_WL0; + } + + if (termios->c_cflag & CSTOPB) + termios->c_cflag &= ~CSTOPB; + + /* parity must be enabled when CS7 to match 8-bits format */ + if ((termios->c_cflag & CSIZE) == CS7) + termios->c_cflag |= PARENB; + + if ((termios->c_cflag & PARENB)) { + cr |= LINFLEXD_UARTCR_PCE; + if (termios->c_cflag & PARODD) + cr = (cr | LINFLEXD_UARTCR_PC0) & + (~LINFLEXD_UARTCR_PC1); + else + cr = cr & (~LINFLEXD_UARTCR_PC1 & + ~LINFLEXD_UARTCR_PC0); + } else { + cr &= ~LINFLEXD_UARTCR_PCE; + } + + spin_lock_irqsave(&port->lock, flags); + + port->read_status_mask = 0; + + if (termios->c_iflag & INPCK) + port->read_status_mask |= (LINFLEXD_UARTSR_FEF | + LINFLEXD_UARTSR_PE0 | + LINFLEXD_UARTSR_PE1 | + LINFLEXD_UARTSR_PE2 | + LINFLEXD_UARTSR_PE3); + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= LINFLEXD_UARTSR_FEF; + + /* characters to ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= LINFLEXD_UARTSR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= LINFLEXD_UARTSR_PE; + /* + * if we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
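To make the word-length and parity handling above concrete, here is a condensed recap (derived from the code just shown, not an addition to it) of the UARTCR bits that result for the two supported formats:

	/* 8N1: WL1:WL0 = 01, parity checking disabled */
	cr = (cr | LINFLEXD_UARTCR_WL0) & ~LINFLEXD_UARTCR_WL1;
	cr &= ~LINFLEXD_UARTCR_PCE;

	/* 7E1: WL1:WL0 = 00, parity enabled, even (PC1 = PC0 = 0) */
	cr &= ~(LINFLEXD_UARTCR_WL1 | LINFLEXD_UARTCR_WL0);
	cr |= LINFLEXD_UARTCR_PCE;
	cr &= ~(LINFLEXD_UARTCR_PC1 | LINFLEXD_UARTCR_PC0);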
+ */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= LINFLEXD_UARTSR_BOF; + } + + writel(cr, port->membase + UARTCR); + + cr1 &= ~(LINFLEXD_LINCR1_INIT); + + writel(cr1, port->membase + LINCR1); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *linflex_type(struct uart_port *port) +{ + return "FSL_LINFLEX"; +} + +static void linflex_release_port(struct uart_port *port) +{ + /* nothing to do */ +} + +static int linflex_request_port(struct uart_port *port) +{ + return 0; +} + +/* configure/auto-configure the port */ +static void linflex_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_LINFLEXUART; +} + +static const struct uart_ops linflex_pops = { + .tx_empty = linflex_tx_empty, + .set_mctrl = linflex_set_mctrl, + .get_mctrl = linflex_get_mctrl, + .stop_tx = linflex_stop_tx, + .start_tx = linflex_start_tx, + .stop_rx = linflex_stop_rx, + .break_ctl = linflex_break_ctl, + .startup = linflex_startup, + .shutdown = linflex_shutdown, + .set_termios = linflex_set_termios, + .type = linflex_type, + .request_port = linflex_request_port, + .release_port = linflex_release_port, + .config_port = linflex_config_port, +}; + +static struct uart_port *linflex_ports[UART_NR]; + +#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE +static void linflex_console_putchar(struct uart_port *port, unsigned char ch) +{ + unsigned long cr; + + cr = readl(port->membase + UARTCR); + + writeb(ch, port->membase + BDRL); + + if (!(cr & LINFLEXD_UARTCR_TFBM)) + while ((readl(port->membase + UARTSR) & + LINFLEXD_UARTSR_DTFTFF) + != LINFLEXD_UARTSR_DTFTFF) + ; + else + while (readl(port->membase + UARTSR) & + LINFLEXD_UARTSR_DTFTFF) + ; + + if (!(cr & LINFLEXD_UARTCR_TFBM)) { + writel((readl(port->membase + UARTSR) | + LINFLEXD_UARTSR_DTFTFF), + port->membase + UARTSR); + } +} + +static void linflex_earlycon_putchar(struct uart_port *port, unsigned char ch) +{ + unsigned long flags; + char *ret; + + if (!linflex_earlycon_same_instance) { + linflex_console_putchar(port, ch); + return; + } + + spin_lock_irqsave(&init_lock, flags); + if (!during_init) + goto outside_init; + + if (earlycon_buf.len >= 1 << CONFIG_LOG_BUF_SHIFT) + goto init_release; + + if (!earlycon_buf.cap) { + earlycon_buf.content = kmalloc(EARLYCON_BUFFER_INITIAL_CAP, + GFP_ATOMIC); + earlycon_buf.cap = earlycon_buf.content ? 
+ EARLYCON_BUFFER_INITIAL_CAP : 0; + } else if (earlycon_buf.len == earlycon_buf.cap) { + ret = krealloc(earlycon_buf.content, earlycon_buf.cap << 1, + GFP_ATOMIC); + if (ret) { + earlycon_buf.content = ret; + earlycon_buf.cap <<= 1; + } + } + + if (earlycon_buf.len < earlycon_buf.cap) + earlycon_buf.content[earlycon_buf.len++] = ch; + + goto init_release; + +outside_init: + linflex_console_putchar(port, ch); +init_release: + spin_unlock_irqrestore(&init_lock, flags); +} + +static void linflex_string_write(struct uart_port *sport, const char *s, + unsigned int count) +{ + unsigned long cr, ier = 0; + + ier = readl(sport->membase + LINIER); + linflex_stop_tx(sport); + + cr = readl(sport->membase + UARTCR); + cr |= (LINFLEXD_UARTCR_TXEN); + writel(cr, sport->membase + UARTCR); + + uart_console_write(sport, s, count, linflex_console_putchar); + + writel(ier, sport->membase + LINIER); +} + +static void +linflex_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_port *sport = linflex_ports[co->index]; + unsigned long flags; + int locked = 1; + + if (sport->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&sport->lock, flags); + else + spin_lock_irqsave(&sport->lock, flags); + + linflex_string_write(sport, s, count); + + if (locked) + spin_unlock_irqrestore(&sport->lock, flags); +} + +/* + * if the port was already initialised (eg, by a boot loader), + * try to determine the current setup. + */ +static void __init +linflex_console_get_options(struct uart_port *sport, int *parity, int *bits) +{ + unsigned long cr; + + cr = readl(sport->membase + UARTCR); + cr &= LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN; + + if (!cr) + return; + + /* ok, the port was enabled */ + + *parity = 'n'; + if (cr & LINFLEXD_UARTCR_PCE) { + if (cr & LINFLEXD_UARTCR_PC0) + *parity = 'o'; + else + *parity = 'e'; + } + + if ((cr & LINFLEXD_UARTCR_WL0) && ((cr & LINFLEXD_UARTCR_WL1) == 0)) { + if (cr & LINFLEXD_UARTCR_PCE) + *bits = 9; + else + *bits = 8; + } +} + +static int __init linflex_console_setup(struct console *co, char *options) +{ + struct uart_port *sport; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + int i; + unsigned long flags; + /* + * check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index == -1 || co->index >= ARRAY_SIZE(linflex_ports)) + co->index = 0; + + sport = linflex_ports[co->index]; + if (!sport) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + linflex_console_get_options(sport, &parity, &bits); + + if (earlycon_port && sport->mapbase == earlycon_port->mapbase) { + linflex_earlycon_same_instance = true; + + spin_lock_irqsave(&init_lock, flags); + during_init = true; + spin_unlock_irqrestore(&init_lock, flags); + + /* Workaround for character loss or output of many invalid + * characters, when INIT mode is entered shortly after a + * character has just been printed. 
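For context, the same-instance hand-off handled here typically comes from boot arguments that name both consoles on one LINFlexD, along the lines of (illustrative only)

	earlycon console=ttyLF0,115200

While linflex_console_setup() re-initialises the controller, characters still being written through the earlycon are parked in earlycon_buf and replayed once setup finishes, which is what the buffering above and the PREINIT_DELAY below are guarding.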
+ */ + udelay(PREINIT_DELAY); + } + + linflex_setup_watermark(sport); + + ret = uart_set_options(sport, co, baud, parity, bits, flow); + + if (!linflex_earlycon_same_instance) + goto done; + + spin_lock_irqsave(&init_lock, flags); + + /* Emptying buffer */ + if (earlycon_buf.len) { + for (i = 0; i < earlycon_buf.len; i++) + linflex_console_putchar(earlycon_port, + earlycon_buf.content[i]); + + kfree(earlycon_buf.content); + earlycon_buf.len = 0; + } + + during_init = false; + spin_unlock_irqrestore(&init_lock, flags); + +done: + return ret; +} + +static struct uart_driver linflex_reg; +static struct console linflex_console = { + .name = DEV_NAME, + .write = linflex_console_write, + .device = uart_console_device, + .setup = linflex_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &linflex_reg, +}; + +static void linflex_earlycon_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, linflex_earlycon_putchar); +} + +static int __init linflex_early_console_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = linflex_earlycon_write; + earlycon_port = &device->port; + + return 0; +} + +OF_EARLYCON_DECLARE(linflex, "fsl,s32v234-linflexuart", + linflex_early_console_setup); + +#define LINFLEX_CONSOLE (&linflex_console) +#else +#define LINFLEX_CONSOLE NULL +#endif + +static struct uart_driver linflex_reg = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = DEV_NAME, + .nr = ARRAY_SIZE(linflex_ports), + .cons = LINFLEX_CONSOLE, +}; + +static int linflex_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct uart_port *sport; + struct resource *res; + int ret; + + sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); + if (!sport) + return -ENOMEM; + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); + return ret; + } + if (ret >= UART_NR) { + dev_err(&pdev->dev, "driver limited to %d serial ports\n", + UART_NR); + return -ENOMEM; + } + + sport->line = ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + sport->mapbase = res->start; + sport->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sport->membase)) + return PTR_ERR(sport->membase); + + sport->dev = &pdev->dev; + sport->type = PORT_LINFLEXUART; + sport->iotype = UPIO_MEM; + sport->irq = platform_get_irq(pdev, 0); + sport->ops = &linflex_pops; + sport->flags = UPF_BOOT_AUTOCONF; + sport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE); + + linflex_ports[sport->line] = sport; + + platform_set_drvdata(pdev, sport); + + return uart_add_one_port(&linflex_reg, sport); +} + +static int linflex_remove(struct platform_device *pdev) +{ + struct uart_port *sport = platform_get_drvdata(pdev); + + uart_remove_one_port(&linflex_reg, sport); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int linflex_suspend(struct device *dev) +{ + struct uart_port *sport = dev_get_drvdata(dev); + + uart_suspend_port(&linflex_reg, sport); + + return 0; +} + +static int linflex_resume(struct device *dev) +{ + struct uart_port *sport = dev_get_drvdata(dev); + + uart_resume_port(&linflex_reg, sport); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(linflex_pm_ops, linflex_suspend, linflex_resume); + +static struct platform_driver linflex_driver = { + .probe = linflex_probe, 
+ .remove = linflex_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = linflex_dt_ids, + .pm = &linflex_pm_ops, + }, +}; + +static int __init linflex_serial_init(void) +{ + int ret; + + ret = uart_register_driver(&linflex_reg); + if (ret) + return ret; + + ret = platform_driver_register(&linflex_driver); + if (ret) + uart_unregister_driver(&linflex_reg); + + return ret; +} + +static void __exit linflex_serial_exit(void) +{ + platform_driver_unregister(&linflex_driver); + uart_unregister_driver(&linflex_reg); +} + +module_init(linflex_serial_init); +module_exit(linflex_serial_exit); + +MODULE_DESCRIPTION("Freescale LINFlexD serial port driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c new file mode 100644 index 000000000..c5a9b89c4 --- /dev/null +++ b/drivers/tty/serial/fsl_lpuart.c @@ -0,0 +1,2961 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Freescale lpuart serial port driver + * + * Copyright 2012-2014 Freescale Semiconductor, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* All registers are 8-bit width */ +#define UARTBDH 0x00 +#define UARTBDL 0x01 +#define UARTCR1 0x02 +#define UARTCR2 0x03 +#define UARTSR1 0x04 +#define UARTCR3 0x06 +#define UARTDR 0x07 +#define UARTCR4 0x0a +#define UARTCR5 0x0b +#define UARTMODEM 0x0d +#define UARTPFIFO 0x10 +#define UARTCFIFO 0x11 +#define UARTSFIFO 0x12 +#define UARTTWFIFO 0x13 +#define UARTTCFIFO 0x14 +#define UARTRWFIFO 0x15 + +#define UARTBDH_LBKDIE 0x80 +#define UARTBDH_RXEDGIE 0x40 +#define UARTBDH_SBR_MASK 0x1f + +#define UARTCR1_LOOPS 0x80 +#define UARTCR1_RSRC 0x20 +#define UARTCR1_M 0x10 +#define UARTCR1_WAKE 0x08 +#define UARTCR1_ILT 0x04 +#define UARTCR1_PE 0x02 +#define UARTCR1_PT 0x01 + +#define UARTCR2_TIE 0x80 +#define UARTCR2_TCIE 0x40 +#define UARTCR2_RIE 0x20 +#define UARTCR2_ILIE 0x10 +#define UARTCR2_TE 0x08 +#define UARTCR2_RE 0x04 +#define UARTCR2_RWU 0x02 +#define UARTCR2_SBK 0x01 + +#define UARTSR1_TDRE 0x80 +#define UARTSR1_TC 0x40 +#define UARTSR1_RDRF 0x20 +#define UARTSR1_IDLE 0x10 +#define UARTSR1_OR 0x08 +#define UARTSR1_NF 0x04 +#define UARTSR1_FE 0x02 +#define UARTSR1_PE 0x01 + +#define UARTCR3_R8 0x80 +#define UARTCR3_T8 0x40 +#define UARTCR3_TXDIR 0x20 +#define UARTCR3_TXINV 0x10 +#define UARTCR3_ORIE 0x08 +#define UARTCR3_NEIE 0x04 +#define UARTCR3_FEIE 0x02 +#define UARTCR3_PEIE 0x01 + +#define UARTCR4_MAEN1 0x80 +#define UARTCR4_MAEN2 0x40 +#define UARTCR4_M10 0x20 +#define UARTCR4_BRFA_MASK 0x1f +#define UARTCR4_BRFA_OFF 0 + +#define UARTCR5_TDMAS 0x80 +#define UARTCR5_RDMAS 0x20 + +#define UARTMODEM_RXRTSE 0x08 +#define UARTMODEM_TXRTSPOL 0x04 +#define UARTMODEM_TXRTSE 0x02 +#define UARTMODEM_TXCTSE 0x01 + +#define UARTPFIFO_TXFE 0x80 +#define UARTPFIFO_FIFOSIZE_MASK 0x7 +#define UARTPFIFO_TXSIZE_OFF 4 +#define UARTPFIFO_RXFE 0x08 +#define UARTPFIFO_RXSIZE_OFF 0 + +#define UARTCFIFO_TXFLUSH 0x80 +#define UARTCFIFO_RXFLUSH 0x40 +#define UARTCFIFO_RXOFE 0x04 +#define UARTCFIFO_TXOFE 0x02 +#define UARTCFIFO_RXUFE 0x01 + +#define UARTSFIFO_TXEMPT 0x80 +#define UARTSFIFO_RXEMPT 0x40 +#define UARTSFIFO_RXOF 0x04 +#define UARTSFIFO_TXOF 0x02 +#define UARTSFIFO_RXUF 0x01 + +/* 32-bit global registers only for i.MX7ULP/i.MX8x + * Used to reset all internal logic and registers, except the Global Register. 
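As a worked aside on the 8-bit baud fields above (UARTBDH/UARTBDL carry a 13-bit integer divisor, UARTCR4 a 5-bit fine adjust in 1/32 steps): assuming the conventional 16x oversampling of this UART, the divisor pair for a requested rate would be derived roughly as sketched below; the driver's own calculation sits outside this excerpt, so treat names such as baud here as placeholders.

	void __iomem *base = sport->port.membase;
	unsigned int clk = sport->port.uartclk;
	u16 sbr = clk / (16 * baud);					/* integer part */
	u8 brfa = (2 * clk / baud - 32 * sbr) & UARTCR4_BRFA_MASK;	/* remainder, 1/32 units */

	writeb((readb(base + UARTBDH) & ~UARTBDH_SBR_MASK) | (sbr >> 8), base + UARTBDH);
	writeb(sbr & 0xff, base + UARTBDL);
	writeb((readb(base + UARTCR4) & ~UARTCR4_BRFA_MASK) | brfa, base + UARTCR4);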
+ */ +#define UART_GLOBAL 0x8 + +/* 32-bit register definition */ +#define UARTBAUD 0x00 +#define UARTSTAT 0x04 +#define UARTCTRL 0x08 +#define UARTDATA 0x0C +#define UARTMATCH 0x10 +#define UARTMODIR 0x14 +#define UARTFIFO 0x18 +#define UARTWATER 0x1c + +#define UARTBAUD_MAEN1 0x80000000 +#define UARTBAUD_MAEN2 0x40000000 +#define UARTBAUD_M10 0x20000000 +#define UARTBAUD_TDMAE 0x00800000 +#define UARTBAUD_RDMAE 0x00200000 +#define UARTBAUD_MATCFG 0x00400000 +#define UARTBAUD_BOTHEDGE 0x00020000 +#define UARTBAUD_RESYNCDIS 0x00010000 +#define UARTBAUD_LBKDIE 0x00008000 +#define UARTBAUD_RXEDGIE 0x00004000 +#define UARTBAUD_SBNS 0x00002000 +#define UARTBAUD_SBR 0x00000000 +#define UARTBAUD_SBR_MASK 0x1fff +#define UARTBAUD_OSR_MASK 0x1f +#define UARTBAUD_OSR_SHIFT 24 + +#define UARTSTAT_LBKDIF 0x80000000 +#define UARTSTAT_RXEDGIF 0x40000000 +#define UARTSTAT_MSBF 0x20000000 +#define UARTSTAT_RXINV 0x10000000 +#define UARTSTAT_RWUID 0x08000000 +#define UARTSTAT_BRK13 0x04000000 +#define UARTSTAT_LBKDE 0x02000000 +#define UARTSTAT_RAF 0x01000000 +#define UARTSTAT_TDRE 0x00800000 +#define UARTSTAT_TC 0x00400000 +#define UARTSTAT_RDRF 0x00200000 +#define UARTSTAT_IDLE 0x00100000 +#define UARTSTAT_OR 0x00080000 +#define UARTSTAT_NF 0x00040000 +#define UARTSTAT_FE 0x00020000 +#define UARTSTAT_PE 0x00010000 +#define UARTSTAT_MA1F 0x00008000 +#define UARTSTAT_M21F 0x00004000 + +#define UARTCTRL_R8T9 0x80000000 +#define UARTCTRL_R9T8 0x40000000 +#define UARTCTRL_TXDIR 0x20000000 +#define UARTCTRL_TXINV 0x10000000 +#define UARTCTRL_ORIE 0x08000000 +#define UARTCTRL_NEIE 0x04000000 +#define UARTCTRL_FEIE 0x02000000 +#define UARTCTRL_PEIE 0x01000000 +#define UARTCTRL_TIE 0x00800000 +#define UARTCTRL_TCIE 0x00400000 +#define UARTCTRL_RIE 0x00200000 +#define UARTCTRL_ILIE 0x00100000 +#define UARTCTRL_TE 0x00080000 +#define UARTCTRL_RE 0x00040000 +#define UARTCTRL_RWU 0x00020000 +#define UARTCTRL_SBK 0x00010000 +#define UARTCTRL_MA1IE 0x00008000 +#define UARTCTRL_MA2IE 0x00004000 +#define UARTCTRL_IDLECFG 0x00000100 +#define UARTCTRL_LOOPS 0x00000080 +#define UARTCTRL_DOZEEN 0x00000040 +#define UARTCTRL_RSRC 0x00000020 +#define UARTCTRL_M 0x00000010 +#define UARTCTRL_WAKE 0x00000008 +#define UARTCTRL_ILT 0x00000004 +#define UARTCTRL_PE 0x00000002 +#define UARTCTRL_PT 0x00000001 + +#define UARTDATA_NOISY 0x00008000 +#define UARTDATA_PARITYE 0x00004000 +#define UARTDATA_FRETSC 0x00002000 +#define UARTDATA_RXEMPT 0x00001000 +#define UARTDATA_IDLINE 0x00000800 +#define UARTDATA_MASK 0x3ff + +#define UARTMODIR_IREN 0x00020000 +#define UARTMODIR_TXCTSSRC 0x00000020 +#define UARTMODIR_TXCTSC 0x00000010 +#define UARTMODIR_RXRTSE 0x00000008 +#define UARTMODIR_TXRTSPOL 0x00000004 +#define UARTMODIR_TXRTSE 0x00000002 +#define UARTMODIR_TXCTSE 0x00000001 + +#define UARTFIFO_TXEMPT 0x00800000 +#define UARTFIFO_RXEMPT 0x00400000 +#define UARTFIFO_TXOF 0x00020000 +#define UARTFIFO_RXUF 0x00010000 +#define UARTFIFO_TXFLUSH 0x00008000 +#define UARTFIFO_RXFLUSH 0x00004000 +#define UARTFIFO_TXOFE 0x00000200 +#define UARTFIFO_RXUFE 0x00000100 +#define UARTFIFO_TXFE 0x00000080 +#define UARTFIFO_FIFOSIZE_MASK 0x7 +#define UARTFIFO_TXSIZE_OFF 4 +#define UARTFIFO_RXFE 0x00000008 +#define UARTFIFO_RXSIZE_OFF 0 +#define UARTFIFO_DEPTH(x) (0x1 << ((x) ? 
((x) + 1) : 0)) + +#define UARTWATER_COUNT_MASK 0xff +#define UARTWATER_TXCNT_OFF 8 +#define UARTWATER_RXCNT_OFF 24 +#define UARTWATER_WATER_MASK 0xff +#define UARTWATER_TXWATER_OFF 0 +#define UARTWATER_RXWATER_OFF 16 + +#define UART_GLOBAL_RST 0x2 +#define GLOBAL_RST_MIN_US 20 +#define GLOBAL_RST_MAX_US 40 + +/* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */ +#define DMA_RX_TIMEOUT (10) + +#define DRIVER_NAME "fsl-lpuart" +#define DEV_NAME "ttyLP" +#define UART_NR 6 + +/* IMX lpuart has four extra unused regs located at the beginning */ +#define IMX_REG_OFF 0x10 + +enum lpuart_type { + VF610_LPUART, + LS1021A_LPUART, + LS1028A_LPUART, + IMX7ULP_LPUART, + IMX8QXP_LPUART, + IMXRT1050_LPUART, +}; + +struct lpuart_port { + struct uart_port port; + enum lpuart_type devtype; + struct clk *ipg_clk; + struct clk *baud_clk; + unsigned int txfifo_size; + unsigned int rxfifo_size; + + u8 rx_watermark; + bool lpuart_dma_tx_use; + bool lpuart_dma_rx_use; + struct dma_chan *dma_tx_chan; + struct dma_chan *dma_rx_chan; + struct dma_async_tx_descriptor *dma_tx_desc; + struct dma_async_tx_descriptor *dma_rx_desc; + dma_cookie_t dma_tx_cookie; + dma_cookie_t dma_rx_cookie; + unsigned int dma_tx_bytes; + unsigned int dma_rx_bytes; + bool dma_tx_in_progress; + unsigned int dma_rx_timeout; + struct timer_list lpuart_timer; + struct scatterlist rx_sgl, tx_sgl[2]; + struct circ_buf rx_ring; + int rx_dma_rng_buf_len; + unsigned int dma_tx_nents; + wait_queue_head_t dma_wait; + bool is_cs7; /* Set to true when character size is 7 */ + /* and the parity is enabled */ +}; + +struct lpuart_soc_data { + enum lpuart_type devtype; + char iotype; + u8 reg_off; + u8 rx_watermark; +}; + +static const struct lpuart_soc_data vf_data = { + .devtype = VF610_LPUART, + .iotype = UPIO_MEM, + .rx_watermark = 1, +}; + +static const struct lpuart_soc_data ls1021a_data = { + .devtype = LS1021A_LPUART, + .iotype = UPIO_MEM32BE, + .rx_watermark = 1, +}; + +static const struct lpuart_soc_data ls1028a_data = { + .devtype = LS1028A_LPUART, + .iotype = UPIO_MEM32, + .rx_watermark = 0, +}; + +static struct lpuart_soc_data imx7ulp_data = { + .devtype = IMX7ULP_LPUART, + .iotype = UPIO_MEM32, + .reg_off = IMX_REG_OFF, + .rx_watermark = 1, +}; + +static struct lpuart_soc_data imx8qxp_data = { + .devtype = IMX8QXP_LPUART, + .iotype = UPIO_MEM32, + .reg_off = IMX_REG_OFF, + .rx_watermark = 1, +}; +static struct lpuart_soc_data imxrt1050_data = { + .devtype = IMXRT1050_LPUART, + .iotype = UPIO_MEM32, + .reg_off = IMX_REG_OFF, + .rx_watermark = 1, +}; + +static const struct of_device_id lpuart_dt_ids[] = { + { .compatible = "fsl,vf610-lpuart", .data = &vf_data, }, + { .compatible = "fsl,ls1021a-lpuart", .data = &ls1021a_data, }, + { .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, }, + { .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, }, + { .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, }, + { .compatible = "fsl,imxrt1050-lpuart", .data = &imxrt1050_data}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, lpuart_dt_ids); + +/* Forward declare this for the dma callbacks*/ +static void lpuart_dma_tx_complete(void *arg); + +static inline bool is_layerscape_lpuart(struct lpuart_port *sport) +{ + return (sport->devtype == LS1021A_LPUART || + sport->devtype == LS1028A_LPUART); +} + +static inline bool is_imx7ulp_lpuart(struct lpuart_port *sport) +{ + return sport->devtype == IMX7ULP_LPUART; +} + +static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport) +{ + return sport->devtype == 
IMX8QXP_LPUART; +} + +static inline u32 lpuart32_read(struct uart_port *port, u32 off) +{ + switch (port->iotype) { + case UPIO_MEM32: + return readl(port->membase + off); + case UPIO_MEM32BE: + return ioread32be(port->membase + off); + default: + return 0; + } +} + +static inline void lpuart32_write(struct uart_port *port, u32 val, + u32 off) +{ + switch (port->iotype) { + case UPIO_MEM32: + writel(val, port->membase + off); + break; + case UPIO_MEM32BE: + iowrite32be(val, port->membase + off); + break; + } +} + +static int __lpuart_enable_clks(struct lpuart_port *sport, bool is_en) +{ + int ret = 0; + + if (is_en) { + ret = clk_prepare_enable(sport->ipg_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(sport->baud_clk); + if (ret) { + clk_disable_unprepare(sport->ipg_clk); + return ret; + } + } else { + clk_disable_unprepare(sport->baud_clk); + clk_disable_unprepare(sport->ipg_clk); + } + + return 0; +} + +static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport) +{ + if (is_imx8qxp_lpuart(sport)) + return clk_get_rate(sport->baud_clk); + + return clk_get_rate(sport->ipg_clk); +} + +#define lpuart_enable_clks(x) __lpuart_enable_clks(x, true) +#define lpuart_disable_clks(x) __lpuart_enable_clks(x, false) + +static void lpuart_stop_tx(struct uart_port *port) +{ + unsigned char temp; + + temp = readb(port->membase + UARTCR2); + temp &= ~(UARTCR2_TIE | UARTCR2_TCIE); + writeb(temp, port->membase + UARTCR2); +} + +static void lpuart32_stop_tx(struct uart_port *port) +{ + unsigned long temp; + + temp = lpuart32_read(port, UARTCTRL); + temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE); + lpuart32_write(port, temp, UARTCTRL); +} + +static void lpuart_stop_rx(struct uart_port *port) +{ + unsigned char temp; + + temp = readb(port->membase + UARTCR2); + writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2); +} + +static void lpuart32_stop_rx(struct uart_port *port) +{ + unsigned long temp; + + temp = lpuart32_read(port, UARTCTRL); + lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL); +} + +static void lpuart_dma_tx(struct lpuart_port *sport) +{ + struct circ_buf *xmit = &sport->port.state->xmit; + struct scatterlist *sgl = sport->tx_sgl; + struct device *dev = sport->port.dev; + struct dma_chan *chan = sport->dma_tx_chan; + int ret; + + if (sport->dma_tx_in_progress) + return; + + sport->dma_tx_bytes = uart_circ_chars_pending(xmit); + + if (xmit->tail < xmit->head || xmit->head == 0) { + sport->dma_tx_nents = 1; + sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes); + } else { + sport->dma_tx_nents = 2; + sg_init_table(sgl, 2); + sg_set_buf(sgl, xmit->buf + xmit->tail, + UART_XMIT_SIZE - xmit->tail); + sg_set_buf(sgl + 1, xmit->buf, xmit->head); + } + + ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents, + DMA_TO_DEVICE); + if (!ret) { + dev_err(dev, "DMA mapping error for TX.\n"); + return; + } + + sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl, + ret, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT); + if (!sport->dma_tx_desc) { + dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents, + DMA_TO_DEVICE); + dev_err(dev, "Cannot prepare TX slave DMA!\n"); + return; + } + + sport->dma_tx_desc->callback = lpuart_dma_tx_complete; + sport->dma_tx_desc->callback_param = sport; + sport->dma_tx_in_progress = true; + sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc); + dma_async_issue_pending(chan); +} + +static bool lpuart_stopped_or_empty(struct uart_port *port) +{ + return uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port); +} + +static void 
lpuart_dma_tx_complete(void *arg) +{ + struct lpuart_port *sport = arg; + struct scatterlist *sgl = &sport->tx_sgl[0]; + struct circ_buf *xmit = &sport->port.state->xmit; + struct dma_chan *chan = sport->dma_tx_chan; + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); + if (!sport->dma_tx_in_progress) { + spin_unlock_irqrestore(&sport->port.lock, flags); + return; + } + + dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents, + DMA_TO_DEVICE); + + xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1); + + sport->port.icount.tx += sport->dma_tx_bytes; + sport->dma_tx_in_progress = false; + spin_unlock_irqrestore(&sport->port.lock, flags); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&sport->port); + + if (waitqueue_active(&sport->dma_wait)) { + wake_up(&sport->dma_wait); + return; + } + + spin_lock_irqsave(&sport->port.lock, flags); + + if (!lpuart_stopped_or_empty(&sport->port)) + lpuart_dma_tx(sport); + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport) +{ + switch (sport->port.iotype) { + case UPIO_MEM32: + return sport->port.mapbase + UARTDATA; + case UPIO_MEM32BE: + return sport->port.mapbase + UARTDATA + sizeof(u32) - 1; + } + return sport->port.mapbase + UARTDR; +} + +static int lpuart_dma_tx_request(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + struct dma_slave_config dma_tx_sconfig = {}; + int ret; + + dma_tx_sconfig.dst_addr = lpuart_dma_datareg_addr(sport); + dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_tx_sconfig.dst_maxburst = 1; + dma_tx_sconfig.direction = DMA_MEM_TO_DEV; + ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig); + + if (ret) { + dev_err(sport->port.dev, + "DMA slave config failed, err = %d\n", ret); + return ret; + } + + return 0; +} + +static bool lpuart_is_32(struct lpuart_port *sport) +{ + return sport->port.iotype == UPIO_MEM32 || + sport->port.iotype == UPIO_MEM32BE; +} + +static void lpuart_flush_buffer(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + struct dma_chan *chan = sport->dma_tx_chan; + u32 val; + + if (sport->lpuart_dma_tx_use) { + if (sport->dma_tx_in_progress) { + dma_unmap_sg(chan->device->dev, &sport->tx_sgl[0], + sport->dma_tx_nents, DMA_TO_DEVICE); + sport->dma_tx_in_progress = false; + } + dmaengine_terminate_async(chan); + } + + if (lpuart_is_32(sport)) { + val = lpuart32_read(&sport->port, UARTFIFO); + val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; + lpuart32_write(&sport->port, val, UARTFIFO); + } else { + val = readb(sport->port.membase + UARTCFIFO); + val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH; + writeb(val, sport->port.membase + UARTCFIFO); + } +} + +static void lpuart_wait_bit_set(struct uart_port *port, unsigned int offset, + u8 bit) +{ + while (!(readb(port->membase + offset) & bit)) + cpu_relax(); +} + +static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset, + u32 bit) +{ + while (!(lpuart32_read(port, offset) & bit)) + cpu_relax(); +} + +#if defined(CONFIG_CONSOLE_POLL) + +static int lpuart_poll_init(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + unsigned long flags; + unsigned char temp; + + sport->port.fifosize = 0; + + spin_lock_irqsave(&sport->port.lock, flags); + /* Disable Rx & Tx */ + writeb(0, sport->port.membase + UARTCR2); + + temp = 
readb(sport->port.membase + UARTPFIFO); + /* Enable Rx and Tx FIFO */ + writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE, + sport->port.membase + UARTPFIFO); + + /* flush Tx and Rx FIFO */ + writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, + sport->port.membase + UARTCFIFO); + + /* explicitly clear RDRF */ + if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) { + readb(sport->port.membase + UARTDR); + writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO); + } + + writeb(0, sport->port.membase + UARTTWFIFO); + writeb(1, sport->port.membase + UARTRWFIFO); + + /* Enable Rx and Tx */ + writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2); + spin_unlock_irqrestore(&sport->port.lock, flags); + + return 0; +} + +static void lpuart_poll_put_char(struct uart_port *port, unsigned char c) +{ + /* drain */ + lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE); + writeb(c, port->membase + UARTDR); +} + +static int lpuart_poll_get_char(struct uart_port *port) +{ + if (!(readb(port->membase + UARTSR1) & UARTSR1_RDRF)) + return NO_POLL_CHAR; + + return readb(port->membase + UARTDR); +} + +static int lpuart32_poll_init(struct uart_port *port) +{ + unsigned long flags; + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + u32 temp; + + sport->port.fifosize = 0; + + spin_lock_irqsave(&sport->port.lock, flags); + + /* Disable Rx & Tx */ + lpuart32_write(&sport->port, 0, UARTCTRL); + + temp = lpuart32_read(&sport->port, UARTFIFO); + + /* Enable Rx and Tx FIFO */ + lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO); + + /* flush Tx and Rx FIFO */ + lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO); + + /* explicitly clear RDRF */ + if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) { + lpuart32_read(&sport->port, UARTDATA); + lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO); + } + + /* Enable Rx and Tx */ + lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL); + spin_unlock_irqrestore(&sport->port.lock, flags); + + return 0; +} + +static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c) +{ + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE); + lpuart32_write(port, c, UARTDATA); +} + +static int lpuart32_poll_get_char(struct uart_port *port) +{ + if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF)) + return NO_POLL_CHAR; + + return lpuart32_read(port, UARTDATA); +} +#endif + +static inline void lpuart_transmit_buffer(struct lpuart_port *sport) +{ + struct circ_buf *xmit = &sport->port.state->xmit; + + if (sport->port.x_char) { + writeb(sport->port.x_char, sport->port.membase + UARTDR); + sport->port.icount.tx++; + sport->port.x_char = 0; + return; + } + + if (lpuart_stopped_or_empty(&sport->port)) { + lpuart_stop_tx(&sport->port); + return; + } + + while (!uart_circ_empty(xmit) && + (readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size)) { + writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + sport->port.icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&sport->port); + + if (uart_circ_empty(xmit)) + lpuart_stop_tx(&sport->port); +} + +static inline void lpuart32_transmit_buffer(struct lpuart_port *sport) +{ + struct circ_buf *xmit = &sport->port.state->xmit; + unsigned long txcnt; + + if (sport->port.x_char) { + lpuart32_write(&sport->port, sport->port.x_char, UARTDATA); + sport->port.icount.tx++; + sport->port.x_char = 0; + return; + } + + if 
(lpuart_stopped_or_empty(&sport->port)) { + lpuart32_stop_tx(&sport->port); + return; + } + + txcnt = lpuart32_read(&sport->port, UARTWATER); + txcnt = txcnt >> UARTWATER_TXCNT_OFF; + txcnt &= UARTWATER_COUNT_MASK; + while (!uart_circ_empty(xmit) && (txcnt < sport->txfifo_size)) { + lpuart32_write(&sport->port, xmit->buf[xmit->tail], UARTDATA); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + sport->port.icount.tx++; + txcnt = lpuart32_read(&sport->port, UARTWATER); + txcnt = txcnt >> UARTWATER_TXCNT_OFF; + txcnt &= UARTWATER_COUNT_MASK; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&sport->port); + + if (uart_circ_empty(xmit)) + lpuart32_stop_tx(&sport->port); +} + +static void lpuart_start_tx(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + unsigned char temp; + + temp = readb(port->membase + UARTCR2); + writeb(temp | UARTCR2_TIE, port->membase + UARTCR2); + + if (sport->lpuart_dma_tx_use) { + if (!lpuart_stopped_or_empty(port)) + lpuart_dma_tx(sport); + } else { + if (readb(port->membase + UARTSR1) & UARTSR1_TDRE) + lpuart_transmit_buffer(sport); + } +} + +static void lpuart32_start_tx(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + unsigned long temp; + + if (sport->lpuart_dma_tx_use) { + if (!lpuart_stopped_or_empty(port)) + lpuart_dma_tx(sport); + } else { + temp = lpuart32_read(port, UARTCTRL); + lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL); + + if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE) + lpuart32_transmit_buffer(sport); + } +} + +/* return TIOCSER_TEMT when transmitter is not busy */ +static unsigned int lpuart_tx_empty(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + unsigned char sr1 = readb(port->membase + UARTSR1); + unsigned char sfifo = readb(port->membase + UARTSFIFO); + + if (sport->dma_tx_in_progress) + return 0; + + if (sr1 & UARTSR1_TC && sfifo & UARTSFIFO_TXEMPT) + return TIOCSER_TEMT; + + return 0; +} + +static unsigned int lpuart32_tx_empty(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + unsigned long stat = lpuart32_read(port, UARTSTAT); + unsigned long sfifo = lpuart32_read(port, UARTFIFO); + unsigned long ctrl = lpuart32_read(port, UARTCTRL); + + if (sport->dma_tx_in_progress) + return 0; + + /* + * LPUART Transmission Complete Flag may never be set while queuing a break + * character, so avoid checking for transmission complete when UARTCTRL_SBK + * is asserted. 
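A brief note on why this check matters, with a hedged userspace illustration: the serial core polls tx_empty() when draining a port (for instance from uart_wait_until_sent() on close or tcdrain()), so reporting TIOCSER_TEMT while a break is queued keeps such waits from stalling. Seen from an application the path is simply:

	#include <termios.h>
	#include <unistd.h>

	/* after write(fd, buf, len) on e.g. /dev/ttyLP0 ... */
	tcdrain(fd);	/* returns once the driver reports the transmitter empty */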
+ */ + if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK) + return TIOCSER_TEMT; + + return 0; +} + +static void lpuart_txint(struct lpuart_port *sport) +{ + spin_lock(&sport->port.lock); + lpuart_transmit_buffer(sport); + spin_unlock(&sport->port.lock); +} + +static void lpuart_rxint(struct lpuart_port *sport) +{ + unsigned int flg, ignored = 0, overrun = 0; + struct tty_port *port = &sport->port.state->port; + unsigned char rx, sr; + + spin_lock(&sport->port.lock); + + while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) { + flg = TTY_NORMAL; + sport->port.icount.rx++; + /* + * to clear the FE, OR, NF, FE, PE flags, + * read SR1 then read DR + */ + sr = readb(sport->port.membase + UARTSR1); + rx = readb(sport->port.membase + UARTDR); + + if (uart_prepare_sysrq_char(&sport->port, rx)) + continue; + + if (sr & (UARTSR1_PE | UARTSR1_OR | UARTSR1_FE)) { + if (sr & UARTSR1_PE) + sport->port.icount.parity++; + else if (sr & UARTSR1_FE) + sport->port.icount.frame++; + + if (sr & UARTSR1_OR) + overrun++; + + if (sr & sport->port.ignore_status_mask) { + if (++ignored > 100) + goto out; + continue; + } + + sr &= sport->port.read_status_mask; + + if (sr & UARTSR1_PE) + flg = TTY_PARITY; + else if (sr & UARTSR1_FE) + flg = TTY_FRAME; + + if (sr & UARTSR1_OR) + flg = TTY_OVERRUN; + + sport->port.sysrq = 0; + } + + if (tty_insert_flip_char(port, rx, flg) == 0) + sport->port.icount.buf_overrun++; + } + +out: + if (overrun) { + sport->port.icount.overrun += overrun; + + /* + * Overruns cause FIFO pointers to become missaligned. + * Flushing the receive FIFO reinitializes the pointers. + */ + writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO); + writeb(UARTSFIFO_RXOF, sport->port.membase + UARTSFIFO); + } + + uart_unlock_and_check_sysrq(&sport->port); + + tty_flip_buffer_push(port); +} + +static void lpuart32_txint(struct lpuart_port *sport) +{ + spin_lock(&sport->port.lock); + lpuart32_transmit_buffer(sport); + spin_unlock(&sport->port.lock); +} + +static void lpuart32_rxint(struct lpuart_port *sport) +{ + unsigned int flg, ignored = 0; + struct tty_port *port = &sport->port.state->port; + unsigned long rx, sr; + bool is_break; + + spin_lock(&sport->port.lock); + + while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) { + flg = TTY_NORMAL; + sport->port.icount.rx++; + /* + * to clear the FE, OR, NF, FE, PE flags, + * read STAT then read DATA reg + */ + sr = lpuart32_read(&sport->port, UARTSTAT); + rx = lpuart32_read(&sport->port, UARTDATA); + rx &= UARTDATA_MASK; + + /* + * The LPUART can't distinguish between a break and a framing error, + * thus we assume it is a break if the received data is zero. 
+ */ + is_break = (sr & UARTSTAT_FE) && !rx; + + if (is_break && uart_handle_break(&sport->port)) + continue; + + if (uart_prepare_sysrq_char(&sport->port, rx)) + continue; + + if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) { + if (sr & UARTSTAT_PE) { + sport->port.icount.parity++; + } else if (sr & UARTSTAT_FE) { + if (is_break) + sport->port.icount.brk++; + else + sport->port.icount.frame++; + } + + if (sr & UARTSTAT_OR) + sport->port.icount.overrun++; + + if (sr & sport->port.ignore_status_mask) { + if (++ignored > 100) + goto out; + continue; + } + + sr &= sport->port.read_status_mask; + + if (sr & UARTSTAT_PE) { + flg = TTY_PARITY; + } else if (sr & UARTSTAT_FE) { + if (is_break) + flg = TTY_BREAK; + else + flg = TTY_FRAME; + } + + if (sr & UARTSTAT_OR) + flg = TTY_OVERRUN; + } + + if (sport->is_cs7) + rx &= 0x7F; + + if (tty_insert_flip_char(port, rx, flg) == 0) + sport->port.icount.buf_overrun++; + } + +out: + uart_unlock_and_check_sysrq(&sport->port); + + tty_flip_buffer_push(port); +} + +static irqreturn_t lpuart_int(int irq, void *dev_id) +{ + struct lpuart_port *sport = dev_id; + unsigned char sts; + + sts = readb(sport->port.membase + UARTSR1); + + /* SysRq, using dma, check for linebreak by framing err. */ + if (sts & UARTSR1_FE && sport->lpuart_dma_rx_use) { + readb(sport->port.membase + UARTDR); + uart_handle_break(&sport->port); + /* linebreak produces some garbage, removing it */ + writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO); + return IRQ_HANDLED; + } + + if (sts & UARTSR1_RDRF && !sport->lpuart_dma_rx_use) + lpuart_rxint(sport); + + if (sts & UARTSR1_TDRE && !sport->lpuart_dma_tx_use) + lpuart_txint(sport); + + return IRQ_HANDLED; +} + +static irqreturn_t lpuart32_int(int irq, void *dev_id) +{ + struct lpuart_port *sport = dev_id; + unsigned long sts, rxcount; + + sts = lpuart32_read(&sport->port, UARTSTAT); + rxcount = lpuart32_read(&sport->port, UARTWATER); + rxcount = rxcount >> UARTWATER_RXCNT_OFF; + + if ((sts & UARTSTAT_RDRF || rxcount > 0) && !sport->lpuart_dma_rx_use) + lpuart32_rxint(sport); + + if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use) + lpuart32_txint(sport); + + lpuart32_write(&sport->port, sts, UARTSTAT); + return IRQ_HANDLED; +} + + +static inline void lpuart_handle_sysrq_chars(struct uart_port *port, + unsigned char *p, int count) +{ + while (count--) { + if (*p && uart_handle_sysrq_char(port, *p)) + return; + p++; + } +} + +static void lpuart_handle_sysrq(struct lpuart_port *sport) +{ + struct circ_buf *ring = &sport->rx_ring; + int count; + + if (ring->head < ring->tail) { + count = sport->rx_sgl.length - ring->tail; + lpuart_handle_sysrq_chars(&sport->port, + ring->buf + ring->tail, count); + ring->tail = 0; + } + + if (ring->head > ring->tail) { + count = ring->head - ring->tail; + lpuart_handle_sysrq_chars(&sport->port, + ring->buf + ring->tail, count); + ring->tail = ring->head; + } +} + +static int lpuart_tty_insert_flip_string(struct tty_port *port, + unsigned char *chars, size_t size, bool is_cs7) +{ + int i; + + if (is_cs7) + for (i = 0; i < size; i++) + chars[i] &= 0x7F; + return tty_insert_flip_string(port, chars, size); +} + +static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) +{ + struct tty_port *port = &sport->port.state->port; + struct dma_tx_state state; + enum dma_status dmastat; + struct dma_chan *chan = sport->dma_rx_chan; + struct circ_buf *ring = &sport->rx_ring; + unsigned long flags; + int count, copied; + + if (lpuart_is_32(sport)) { + unsigned long sr = lpuart32_read(&sport->port, 
UARTSTAT); + + if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { + /* Clear the error flags */ + lpuart32_write(&sport->port, sr, UARTSTAT); + + if (sr & UARTSTAT_PE) + sport->port.icount.parity++; + else if (sr & UARTSTAT_FE) + sport->port.icount.frame++; + } + } else { + unsigned char sr = readb(sport->port.membase + UARTSR1); + + if (sr & (UARTSR1_PE | UARTSR1_FE)) { + unsigned char cr2; + + /* Disable receiver during this operation... */ + cr2 = readb(sport->port.membase + UARTCR2); + cr2 &= ~UARTCR2_RE; + writeb(cr2, sport->port.membase + UARTCR2); + + /* Read DR to clear the error flags */ + readb(sport->port.membase + UARTDR); + + if (sr & UARTSR1_PE) + sport->port.icount.parity++; + else if (sr & UARTSR1_FE) + sport->port.icount.frame++; + /* + * At this point parity/framing error is + * cleared However, since the DMA already read + * the data register and we had to read it + * again after reading the status register to + * properly clear the flags, the FIFO actually + * underflowed... This requires a clearing of + * the FIFO... + */ + if (readb(sport->port.membase + UARTSFIFO) & + UARTSFIFO_RXUF) { + writeb(UARTSFIFO_RXUF, + sport->port.membase + UARTSFIFO); + writeb(UARTCFIFO_RXFLUSH, + sport->port.membase + UARTCFIFO); + } + + cr2 |= UARTCR2_RE; + writeb(cr2, sport->port.membase + UARTCR2); + } + } + + async_tx_ack(sport->dma_rx_desc); + + spin_lock_irqsave(&sport->port.lock, flags); + + dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state); + if (dmastat == DMA_ERROR) { + dev_err(sport->port.dev, "Rx DMA transfer failed!\n"); + spin_unlock_irqrestore(&sport->port.lock, flags); + return; + } + + /* CPU claims ownership of RX DMA buffer */ + dma_sync_sg_for_cpu(chan->device->dev, &sport->rx_sgl, 1, + DMA_FROM_DEVICE); + + /* + * ring->head points to the end of data already written by the DMA. + * ring->tail points to the beginning of data to be read by the + * framework. + * The current transfer size should not be larger than the dma buffer + * length. + */ + ring->head = sport->rx_sgl.length - state.residue; + BUG_ON(ring->head > sport->rx_sgl.length); + + /* + * Silent handling of keys pressed in the sysrq timeframe + */ + if (sport->port.sysrq) { + lpuart_handle_sysrq(sport); + goto exit; + } + + /* + * At this point ring->head may point to the first byte right after the + * last byte of the dma buffer: + * 0 <= ring->head <= sport->rx_sgl.length + * + * However ring->tail must always points inside the dma buffer: + * 0 <= ring->tail <= sport->rx_sgl.length - 1 + * + * Since we use a ring buffer, we have to handle the case + * where head is lower than tail. In such a case, we first read from + * tail to the end of the buffer then reset tail. 
+ */ + if (ring->head < ring->tail) { + count = sport->rx_sgl.length - ring->tail; + + copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail, + count, sport->is_cs7); + if (copied != count) + sport->port.icount.buf_overrun++; + ring->tail = 0; + sport->port.icount.rx += copied; + } + + /* Finally we read data from tail to head */ + if (ring->tail < ring->head) { + count = ring->head - ring->tail; + copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail, + count, sport->is_cs7); + if (copied != count) + sport->port.icount.buf_overrun++; + /* Wrap ring->head if needed */ + if (ring->head >= sport->rx_sgl.length) + ring->head = 0; + ring->tail = ring->head; + sport->port.icount.rx += copied; + } + +exit: + dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1, + DMA_FROM_DEVICE); + + spin_unlock_irqrestore(&sport->port.lock, flags); + + tty_flip_buffer_push(port); + mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout); +} + +static void lpuart_dma_rx_complete(void *arg) +{ + struct lpuart_port *sport = arg; + + lpuart_copy_rx_to_tty(sport); +} + +static void lpuart_timer_func(struct timer_list *t) +{ + struct lpuart_port *sport = from_timer(sport, t, lpuart_timer); + + lpuart_copy_rx_to_tty(sport); +} + +static inline int lpuart_start_rx_dma(struct lpuart_port *sport) +{ + struct dma_slave_config dma_rx_sconfig = {}; + struct circ_buf *ring = &sport->rx_ring; + int ret, nent; + struct tty_port *port = &sport->port.state->port; + struct tty_struct *tty = port->tty; + struct ktermios *termios = &tty->termios; + struct dma_chan *chan = sport->dma_rx_chan; + unsigned int bits = tty_get_frame_size(termios->c_cflag); + unsigned int baud = tty_get_baud_rate(tty); + + /* + * Calculate length of one DMA buffer size to keep latency below + * 10ms at any baud rate. 
+ */ + sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2; + sport->rx_dma_rng_buf_len = (1 << fls(sport->rx_dma_rng_buf_len)); + if (sport->rx_dma_rng_buf_len < 16) + sport->rx_dma_rng_buf_len = 16; + + ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC); + if (!ring->buf) + return -ENOMEM; + + sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len); + nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1, + DMA_FROM_DEVICE); + + if (!nent) { + dev_err(sport->port.dev, "DMA Rx mapping error\n"); + return -EINVAL; + } + + dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport); + dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_rx_sconfig.src_maxburst = 1; + dma_rx_sconfig.direction = DMA_DEV_TO_MEM; + ret = dmaengine_slave_config(chan, &dma_rx_sconfig); + + if (ret < 0) { + dev_err(sport->port.dev, + "DMA Rx slave config failed, err = %d\n", ret); + return ret; + } + + sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan, + sg_dma_address(&sport->rx_sgl), + sport->rx_sgl.length, + sport->rx_sgl.length / 2, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!sport->dma_rx_desc) { + dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n"); + return -EFAULT; + } + + sport->dma_rx_desc->callback = lpuart_dma_rx_complete; + sport->dma_rx_desc->callback_param = sport; + sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc); + dma_async_issue_pending(chan); + + if (lpuart_is_32(sport)) { + unsigned long temp = lpuart32_read(&sport->port, UARTBAUD); + + lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD); + } else { + writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS, + sport->port.membase + UARTCR5); + } + + return 0; +} + +static void lpuart_dma_rx_free(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + struct dma_chan *chan = sport->dma_rx_chan; + + dmaengine_terminate_sync(chan); + del_timer_sync(&sport->lpuart_timer); + dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); + kfree(sport->rx_ring.buf); + sport->rx_ring.tail = 0; + sport->rx_ring.head = 0; + sport->dma_rx_desc = NULL; + sport->dma_rx_cookie = -EINVAL; +} + +static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + + u8 modem = readb(sport->port.membase + UARTMODEM) & + ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE); + writeb(modem, sport->port.membase + UARTMODEM); + + if (rs485->flags & SER_RS485_ENABLED) { + /* Enable auto RS-485 RTS mode */ + modem |= UARTMODEM_TXRTSE; + + /* + * The hardware defaults to RTS logic HIGH while transfer. + * Switch polarity in case RTS shall be logic HIGH + * after transfer. + * Note: UART is assumed to be active high. 
+ */ + if (rs485->flags & SER_RS485_RTS_ON_SEND) + modem |= UARTMODEM_TXRTSPOL; + else if (rs485->flags & SER_RS485_RTS_AFTER_SEND) + modem &= ~UARTMODEM_TXRTSPOL; + } + + writeb(modem, sport->port.membase + UARTMODEM); + return 0; +} + +static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + + unsigned long modem = lpuart32_read(&sport->port, UARTMODIR) + & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE); + lpuart32_write(&sport->port, modem, UARTMODIR); + + if (rs485->flags & SER_RS485_ENABLED) { + /* Enable auto RS-485 RTS mode */ + modem |= UARTMODEM_TXRTSE; + + /* + * The hardware defaults to RTS logic HIGH while transfer. + * Switch polarity in case RTS shall be logic HIGH + * after transfer. + * Note: UART is assumed to be active high. + */ + if (rs485->flags & SER_RS485_RTS_ON_SEND) + modem |= UARTMODEM_TXRTSPOL; + else if (rs485->flags & SER_RS485_RTS_AFTER_SEND) + modem &= ~UARTMODEM_TXRTSPOL; + } + + lpuart32_write(&sport->port, modem, UARTMODIR); + return 0; +} + +static unsigned int lpuart_get_mctrl(struct uart_port *port) +{ + unsigned int mctrl = 0; + u8 reg; + + reg = readb(port->membase + UARTCR1); + if (reg & UARTCR1_LOOPS) + mctrl |= TIOCM_LOOP; + + return mctrl; +} + +static unsigned int lpuart32_get_mctrl(struct uart_port *port) +{ + unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; + u32 reg; + + reg = lpuart32_read(port, UARTCTRL); + if (reg & UARTCTRL_LOOPS) + mctrl |= TIOCM_LOOP; + + return mctrl; +} + +static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u8 reg; + + reg = readb(port->membase + UARTCR1); + + /* for internal loopback we need LOOPS=1 and RSRC=0 */ + reg &= ~(UARTCR1_LOOPS | UARTCR1_RSRC); + if (mctrl & TIOCM_LOOP) + reg |= UARTCR1_LOOPS; + + writeb(reg, port->membase + UARTCR1); +} + +static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 reg; + + reg = lpuart32_read(port, UARTCTRL); + + /* for internal loopback we need LOOPS=1 and RSRC=0 */ + reg &= ~(UARTCTRL_LOOPS | UARTCTRL_RSRC); + if (mctrl & TIOCM_LOOP) + reg |= UARTCTRL_LOOPS; + + lpuart32_write(port, reg, UARTCTRL); +} + +static void lpuart_break_ctl(struct uart_port *port, int break_state) +{ + unsigned char temp; + + temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK; + + if (break_state != 0) + temp |= UARTCR2_SBK; + + writeb(temp, port->membase + UARTCR2); +} + +static void lpuart32_break_ctl(struct uart_port *port, int break_state) +{ + unsigned long temp; + + temp = lpuart32_read(port, UARTCTRL); + + /* + * LPUART IP now has two known bugs, one is CTS has higher priority than the + * break signal, which causes the break signal sending through UARTCTRL_SBK + * may impacted by the CTS input if the HW flow control is enabled. It + * exists on all platforms we support in this driver. + * Another bug is i.MX8QM LPUART may have an additional break character + * being sent after SBK was cleared. + * To avoid above two bugs, we use Transmit Data Inversion function to send + * the break signal instead of UARTCTRL_SBK. + */ + if (break_state != 0) { + /* + * Disable the transmitter to prevent any data from being sent out + * during break, then invert the TX line to send break. + */ + temp &= ~UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); + } else { + /* Disable the TXINV to turn off break and re-enable transmitter. 
*/ + temp &= ~UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); + } +} + +static void lpuart_setup_watermark(struct lpuart_port *sport) +{ + unsigned char val, cr2; + unsigned char cr2_saved; + + cr2 = readb(sport->port.membase + UARTCR2); + cr2_saved = cr2; + cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_TE | + UARTCR2_RIE | UARTCR2_RE); + writeb(cr2, sport->port.membase + UARTCR2); + + val = readb(sport->port.membase + UARTPFIFO); + writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, + sport->port.membase + UARTPFIFO); + + /* flush Tx and Rx FIFO */ + writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, + sport->port.membase + UARTCFIFO); + + /* explicitly clear RDRF */ + if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) { + readb(sport->port.membase + UARTDR); + writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO); + } + + writeb(0, sport->port.membase + UARTTWFIFO); + writeb(sport->rx_watermark, sport->port.membase + UARTRWFIFO); + + /* Restore cr2 */ + writeb(cr2_saved, sport->port.membase + UARTCR2); +} + +static void lpuart_setup_watermark_enable(struct lpuart_port *sport) +{ + unsigned char cr2; + + lpuart_setup_watermark(sport); + + cr2 = readb(sport->port.membase + UARTCR2); + cr2 |= UARTCR2_RIE | UARTCR2_RE | UARTCR2_TE; + writeb(cr2, sport->port.membase + UARTCR2); +} + +static void lpuart32_setup_watermark(struct lpuart_port *sport) +{ + unsigned long val, ctrl; + unsigned long ctrl_saved; + + ctrl = lpuart32_read(&sport->port, UARTCTRL); + ctrl_saved = ctrl; + ctrl &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_TE | + UARTCTRL_RIE | UARTCTRL_RE); + lpuart32_write(&sport->port, ctrl, UARTCTRL); + + /* enable FIFO mode */ + val = lpuart32_read(&sport->port, UARTFIFO); + val |= UARTFIFO_TXFE | UARTFIFO_RXFE; + val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; + lpuart32_write(&sport->port, val, UARTFIFO); + + /* set the watermark */ + val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) | + (0x0 << UARTWATER_TXWATER_OFF); + lpuart32_write(&sport->port, val, UARTWATER); + + /* Restore cr2 */ + lpuart32_write(&sport->port, ctrl_saved, UARTCTRL); +} + +static void lpuart32_setup_watermark_enable(struct lpuart_port *sport) +{ + u32 temp; + + lpuart32_setup_watermark(sport); + + temp = lpuart32_read(&sport->port, UARTCTRL); + temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE; + lpuart32_write(&sport->port, temp, UARTCTRL); +} + +static void rx_dma_timer_init(struct lpuart_port *sport) +{ + timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0); + sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout; + add_timer(&sport->lpuart_timer); +} + +static void lpuart_request_dma(struct lpuart_port *sport) +{ + sport->dma_tx_chan = dma_request_chan(sport->port.dev, "tx"); + if (IS_ERR(sport->dma_tx_chan)) { + dev_dbg_once(sport->port.dev, + "DMA tx channel request failed, operating without tx DMA (%ld)\n", + PTR_ERR(sport->dma_tx_chan)); + sport->dma_tx_chan = NULL; + } + + sport->dma_rx_chan = dma_request_chan(sport->port.dev, "rx"); + if (IS_ERR(sport->dma_rx_chan)) { + dev_dbg_once(sport->port.dev, + "DMA rx channel request failed, operating without rx DMA (%ld)\n", + PTR_ERR(sport->dma_rx_chan)); + sport->dma_rx_chan = NULL; + } +} + +static void lpuart_tx_dma_startup(struct lpuart_port *sport) +{ + u32 uartbaud; + int ret; + + if (uart_console(&sport->port)) + goto err; + + if (!sport->dma_tx_chan) + goto err; + + ret = lpuart_dma_tx_request(&sport->port); + if (ret) + goto err; + + init_waitqueue_head(&sport->dma_wait); 
+ sport->lpuart_dma_tx_use = true; + if (lpuart_is_32(sport)) { + uartbaud = lpuart32_read(&sport->port, UARTBAUD); + lpuart32_write(&sport->port, + uartbaud | UARTBAUD_TDMAE, UARTBAUD); + } else { + writeb(readb(sport->port.membase + UARTCR5) | + UARTCR5_TDMAS, sport->port.membase + UARTCR5); + } + + return; + +err: + sport->lpuart_dma_tx_use = false; +} + +static void lpuart_rx_dma_startup(struct lpuart_port *sport) +{ + int ret; + unsigned char cr3; + + if (uart_console(&sport->port)) + goto err; + + if (!sport->dma_rx_chan) + goto err; + + ret = lpuart_start_rx_dma(sport); + if (ret) + goto err; + + /* set Rx DMA timeout */ + sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT); + if (!sport->dma_rx_timeout) + sport->dma_rx_timeout = 1; + + sport->lpuart_dma_rx_use = true; + rx_dma_timer_init(sport); + + if (sport->port.has_sysrq && !lpuart_is_32(sport)) { + cr3 = readb(sport->port.membase + UARTCR3); + cr3 |= UARTCR3_FEIE; + writeb(cr3, sport->port.membase + UARTCR3); + } + + return; + +err: + sport->lpuart_dma_rx_use = false; +} + +static int lpuart_startup(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + unsigned long flags; + unsigned char temp; + + /* determine FIFO size and enable FIFO mode */ + temp = readb(sport->port.membase + UARTPFIFO); + + sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) & + UARTPFIFO_FIFOSIZE_MASK); + sport->port.fifosize = sport->txfifo_size; + + sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) & + UARTPFIFO_FIFOSIZE_MASK); + + lpuart_request_dma(sport); + + spin_lock_irqsave(&sport->port.lock, flags); + + lpuart_setup_watermark_enable(sport); + + lpuart_rx_dma_startup(sport); + lpuart_tx_dma_startup(sport); + + spin_unlock_irqrestore(&sport->port.lock, flags); + + return 0; +} + +static void lpuart32_configure(struct lpuart_port *sport) +{ + unsigned long temp; + + temp = lpuart32_read(&sport->port, UARTCTRL); + if (!sport->lpuart_dma_rx_use) + temp |= UARTCTRL_RIE; + if (!sport->lpuart_dma_tx_use) + temp |= UARTCTRL_TIE; + lpuart32_write(&sport->port, temp, UARTCTRL); +} + +static int lpuart32_startup(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + unsigned long flags; + unsigned long temp; + + /* determine FIFO size */ + temp = lpuart32_read(&sport->port, UARTFIFO); + + sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) & + UARTFIFO_FIFOSIZE_MASK); + sport->port.fifosize = sport->txfifo_size; + + sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) & + UARTFIFO_FIFOSIZE_MASK); + + /* + * The LS1021A and LS1028A have a fixed FIFO depth of 16 words. + * Although they support the RX/TXSIZE fields, their encoding is + * different. Eg the reference manual states 0b101 is 16 words. 
+ */ + if (is_layerscape_lpuart(sport)) { + sport->rxfifo_size = 16; + sport->txfifo_size = 16; + sport->port.fifosize = sport->txfifo_size; + } + + lpuart_request_dma(sport); + + spin_lock_irqsave(&sport->port.lock, flags); + + lpuart32_setup_watermark_enable(sport); + + lpuart_rx_dma_startup(sport); + lpuart_tx_dma_startup(sport); + + lpuart32_configure(sport); + + spin_unlock_irqrestore(&sport->port.lock, flags); + return 0; +} + +static void lpuart_dma_shutdown(struct lpuart_port *sport) +{ + if (sport->lpuart_dma_rx_use) { + lpuart_dma_rx_free(&sport->port); + sport->lpuart_dma_rx_use = false; + } + + if (sport->lpuart_dma_tx_use) { + if (wait_event_interruptible_timeout(sport->dma_wait, + !sport->dma_tx_in_progress, msecs_to_jiffies(300)) <= 0) { + sport->dma_tx_in_progress = false; + dmaengine_terminate_sync(sport->dma_tx_chan); + } + sport->lpuart_dma_tx_use = false; + } + + if (sport->dma_tx_chan) + dma_release_channel(sport->dma_tx_chan); + if (sport->dma_rx_chan) + dma_release_channel(sport->dma_rx_chan); +} + +static void lpuart_shutdown(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + unsigned char temp; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* disable Rx/Tx and interrupts */ + temp = readb(port->membase + UARTCR2); + temp &= ~(UARTCR2_TE | UARTCR2_RE | + UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE); + writeb(temp, port->membase + UARTCR2); + + spin_unlock_irqrestore(&port->lock, flags); + + lpuart_dma_shutdown(sport); +} + +static void lpuart32_shutdown(struct uart_port *port) +{ + struct lpuart_port *sport = + container_of(port, struct lpuart_port, port); + unsigned long temp; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* clear status */ + temp = lpuart32_read(&sport->port, UARTSTAT); + lpuart32_write(&sport->port, temp, UARTSTAT); + + /* disable Rx/Tx DMA */ + temp = lpuart32_read(port, UARTBAUD); + temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE); + lpuart32_write(port, temp, UARTBAUD); + + /* disable Rx/Tx and interrupts */ + temp = lpuart32_read(port, UARTCTRL); + temp &= ~(UARTCTRL_TE | UARTCTRL_RE | + UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE); + lpuart32_write(port, temp, UARTCTRL); + + spin_unlock_irqrestore(&port->lock, flags); + + lpuart_dma_shutdown(sport); +} + +static void +lpuart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + unsigned long flags; + unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem; + unsigned int baud; + unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; + unsigned int sbr, brfa; + + cr1 = old_cr1 = readb(sport->port.membase + UARTCR1); + old_cr2 = readb(sport->port.membase + UARTCR2); + cr3 = readb(sport->port.membase + UARTCR3); + cr4 = readb(sport->port.membase + UARTCR4); + bdh = readb(sport->port.membase + UARTBDH); + modem = readb(sport->port.membase + UARTMODEM); + /* + * only support CS8 and CS7, and for CS7 must enable PE. 
+ * supported mode: + * - (7,e/o,1) + * - (8,n,1) + * - (8,m/s,1) + * - (8,e/o,1) + */ + while ((termios->c_cflag & CSIZE) != CS8 && + (termios->c_cflag & CSIZE) != CS7) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= old_csize; + old_csize = CS8; + } + + if ((termios->c_cflag & CSIZE) == CS8 || + (termios->c_cflag & CSIZE) == CS7) + cr1 = old_cr1 & ~UARTCR1_M; + + if (termios->c_cflag & CMSPAR) { + if ((termios->c_cflag & CSIZE) != CS8) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } + cr1 |= UARTCR1_M; + } + + /* + * When auto RS-485 RTS mode is enabled, + * hardware flow control need to be disabled. + */ + if (sport->port.rs485.flags & SER_RS485_ENABLED) + termios->c_cflag &= ~CRTSCTS; + + if (termios->c_cflag & CRTSCTS) + modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE; + else + modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE); + + termios->c_cflag &= ~CSTOPB; + + /* parity must be enabled when CS7 to match 8-bits format */ + if ((termios->c_cflag & CSIZE) == CS7) + termios->c_cflag |= PARENB; + + if (termios->c_cflag & PARENB) { + if (termios->c_cflag & CMSPAR) { + cr1 &= ~UARTCR1_PE; + if (termios->c_cflag & PARODD) + cr3 |= UARTCR3_T8; + else + cr3 &= ~UARTCR3_T8; + } else { + cr1 |= UARTCR1_PE; + if ((termios->c_cflag & CSIZE) == CS8) + cr1 |= UARTCR1_M; + if (termios->c_cflag & PARODD) + cr1 |= UARTCR1_PT; + else + cr1 &= ~UARTCR1_PT; + } + } else { + cr1 &= ~UARTCR1_PE; + } + + /* ask the core to calculate the divisor */ + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); + + /* + * Need to update the Ring buffer length according to the selected + * baud rate and restart Rx DMA path. + * + * Since timer function acqures sport->port.lock, need to stop before + * acquring same lock because otherwise del_timer_sync() can deadlock. + */ + if (old && sport->lpuart_dma_rx_use) + lpuart_dma_rx_free(&sport->port); + + spin_lock_irqsave(&sport->port.lock, flags); + + sport->port.read_status_mask = 0; + if (termios->c_iflag & INPCK) + sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + sport->port.read_status_mask |= UARTSR1_FE; + + /* characters to ignore */ + sport->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= UARTSR1_PE; + if (termios->c_iflag & IGNBRK) { + sport->port.ignore_status_mask |= UARTSR1_FE; + /* + * if we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
+ */ + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= UARTSR1_OR; + } + + /* update the per-port timeout */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* wait transmit engin complete */ + lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC); + + /* disable transmit and receive */ + writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE), + sport->port.membase + UARTCR2); + + sbr = sport->port.uartclk / (16 * baud); + brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud; + bdh &= ~UARTBDH_SBR_MASK; + bdh |= (sbr >> 8) & 0x1F; + cr4 &= ~UARTCR4_BRFA_MASK; + brfa &= UARTCR4_BRFA_MASK; + writeb(cr4 | brfa, sport->port.membase + UARTCR4); + writeb(bdh, sport->port.membase + UARTBDH); + writeb(sbr & 0xFF, sport->port.membase + UARTBDL); + writeb(cr3, sport->port.membase + UARTCR3); + writeb(cr1, sport->port.membase + UARTCR1); + writeb(modem, sport->port.membase + UARTMODEM); + + /* restore control register */ + writeb(old_cr2, sport->port.membase + UARTCR2); + + if (old && sport->lpuart_dma_rx_use) { + if (!lpuart_start_rx_dma(sport)) + rx_dma_timer_init(sport); + else + sport->lpuart_dma_rx_use = false; + } + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static void __lpuart32_serial_setbrg(struct uart_port *port, + unsigned int baudrate, bool use_rx_dma, + bool use_tx_dma) +{ + u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp; + u32 clk = port->uartclk; + + /* + * The idea is to use the best OSR (over-sampling rate) possible. + * Note, OSR is typically hard-set to 16 in other LPUART instantiations. + * Loop to find the best OSR value possible, one that generates minimum + * baud_diff iterate through the rest of the supported values of OSR. + * + * Calculation Formula: + * Baud Rate = baud clock / ((OSR+1) × SBR) + */ + baud_diff = baudrate; + osr = 0; + sbr = 0; + + for (tmp_osr = 4; tmp_osr <= 32; tmp_osr++) { + /* calculate the temporary sbr value */ + tmp_sbr = (clk / (baudrate * tmp_osr)); + if (tmp_sbr == 0) + tmp_sbr = 1; + + /* + * calculate the baud rate difference based on the temporary + * osr and sbr values + */ + tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate; + + /* select best values between sbr and sbr+1 */ + tmp = clk / (tmp_osr * (tmp_sbr + 1)); + if (tmp_diff > (baudrate - tmp)) { + tmp_diff = baudrate - tmp; + tmp_sbr++; + } + + if (tmp_sbr > UARTBAUD_SBR_MASK) + continue; + + if (tmp_diff <= baud_diff) { + baud_diff = tmp_diff; + osr = tmp_osr; + sbr = tmp_sbr; + + if (!baud_diff) + break; + } + } + + /* handle buadrate outside acceptable rate */ + if (baud_diff > ((baudrate / 100) * 3)) + dev_warn(port->dev, + "unacceptable baud rate difference of more than 3%%\n"); + + tmp = lpuart32_read(port, UARTBAUD); + + if ((osr > 3) && (osr < 8)) + tmp |= UARTBAUD_BOTHEDGE; + + tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT); + tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT; + + tmp &= ~UARTBAUD_SBR_MASK; + tmp |= sbr & UARTBAUD_SBR_MASK; + + if (!use_rx_dma) + tmp &= ~UARTBAUD_RDMAE; + if (!use_tx_dma) + tmp &= ~UARTBAUD_TDMAE; + + lpuart32_write(port, tmp, UARTBAUD); +} + +static void lpuart32_serial_setbrg(struct lpuart_port *sport, + unsigned int baudrate) +{ + __lpuart32_serial_setbrg(&sport->port, baudrate, + sport->lpuart_dma_rx_use, + sport->lpuart_dma_tx_use); +} + + +static void +lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct lpuart_port *sport = container_of(port, struct lpuart_port, port); + unsigned long flags; + unsigned long ctrl, 
old_ctrl, bd, modem; + unsigned int baud; + unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; + + ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL); + bd = lpuart32_read(&sport->port, UARTBAUD); + modem = lpuart32_read(&sport->port, UARTMODIR); + sport->is_cs7 = false; + /* + * only support CS8 and CS7, and for CS7 must enable PE. + * supported mode: + * - (7,e/o,1) + * - (8,n,1) + * - (8,m/s,1) + * - (8,e/o,1) + */ + while ((termios->c_cflag & CSIZE) != CS8 && + (termios->c_cflag & CSIZE) != CS7) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= old_csize; + old_csize = CS8; + } + + if ((termios->c_cflag & CSIZE) == CS8 || + (termios->c_cflag & CSIZE) == CS7) + ctrl = old_ctrl & ~UARTCTRL_M; + + if (termios->c_cflag & CMSPAR) { + if ((termios->c_cflag & CSIZE) != CS8) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } + ctrl |= UARTCTRL_M; + } + + /* + * When auto RS-485 RTS mode is enabled, + * hardware flow control need to be disabled. + */ + if (sport->port.rs485.flags & SER_RS485_ENABLED) + termios->c_cflag &= ~CRTSCTS; + + if (termios->c_cflag & CRTSCTS) + modem |= UARTMODIR_RXRTSE | UARTMODIR_TXCTSE; + else + modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE); + + if (termios->c_cflag & CSTOPB) + bd |= UARTBAUD_SBNS; + else + bd &= ~UARTBAUD_SBNS; + + /* parity must be enabled when CS7 to match 8-bits format */ + if ((termios->c_cflag & CSIZE) == CS7) + termios->c_cflag |= PARENB; + + if ((termios->c_cflag & PARENB)) { + if (termios->c_cflag & CMSPAR) { + ctrl &= ~UARTCTRL_PE; + ctrl |= UARTCTRL_M; + } else { + ctrl |= UARTCTRL_PE; + if ((termios->c_cflag & CSIZE) == CS8) + ctrl |= UARTCTRL_M; + if (termios->c_cflag & PARODD) + ctrl |= UARTCTRL_PT; + else + ctrl &= ~UARTCTRL_PT; + } + } else { + ctrl &= ~UARTCTRL_PE; + } + + /* ask the core to calculate the divisor */ + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4); + + /* + * Need to update the Ring buffer length according to the selected + * baud rate and restart Rx DMA path. + * + * Since timer function acqures sport->port.lock, need to stop before + * acquring same lock because otherwise del_timer_sync() can deadlock. + */ + if (old && sport->lpuart_dma_rx_use) + lpuart_dma_rx_free(&sport->port); + + spin_lock_irqsave(&sport->port.lock, flags); + + sport->port.read_status_mask = 0; + if (termios->c_iflag & INPCK) + sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + sport->port.read_status_mask |= UARTSTAT_FE; + + /* characters to ignore */ + sport->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= UARTSTAT_PE; + if (termios->c_iflag & IGNBRK) { + sport->port.ignore_status_mask |= UARTSTAT_FE; + /* + * if we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= UARTSTAT_OR; + } + + /* update the per-port timeout */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* + * LPUART Transmission Complete Flag may never be set while queuing a break + * character, so skip waiting for transmission complete when UARTCTRL_SBK is + * asserted. 
+ */ + if (!(old_ctrl & UARTCTRL_SBK)) { + lpuart32_write(&sport->port, 0, UARTMODIR); + lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); + } + + /* disable transmit and receive */ + lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE), + UARTCTRL); + + lpuart32_write(&sport->port, bd, UARTBAUD); + lpuart32_serial_setbrg(sport, baud); + lpuart32_write(&sport->port, modem, UARTMODIR); + lpuart32_write(&sport->port, ctrl, UARTCTRL); + /* restore control register */ + + if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE) + sport->is_cs7 = true; + + if (old && sport->lpuart_dma_rx_use) { + if (!lpuart_start_rx_dma(sport)) + rx_dma_timer_init(sport); + else + sport->lpuart_dma_rx_use = false; + } + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static const char *lpuart_type(struct uart_port *port) +{ + return "FSL_LPUART"; +} + +static void lpuart_release_port(struct uart_port *port) +{ + /* nothing to do */ +} + +static int lpuart_request_port(struct uart_port *port) +{ + return 0; +} + +/* configure/autoconfigure the port */ +static void lpuart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_LPUART; +} + +static int lpuart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_LPUART) + ret = -EINVAL; + if (port->irq != ser->irq) + ret = -EINVAL; + if (ser->io_type != UPIO_MEM) + ret = -EINVAL; + if (port->uartclk / 16 != ser->baud_base) + ret = -EINVAL; + if (port->iobase != ser->port) + ret = -EINVAL; + if (ser->hub6 != 0) + ret = -EINVAL; + return ret; +} + +static const struct uart_ops lpuart_pops = { + .tx_empty = lpuart_tx_empty, + .set_mctrl = lpuart_set_mctrl, + .get_mctrl = lpuart_get_mctrl, + .stop_tx = lpuart_stop_tx, + .start_tx = lpuart_start_tx, + .stop_rx = lpuart_stop_rx, + .break_ctl = lpuart_break_ctl, + .startup = lpuart_startup, + .shutdown = lpuart_shutdown, + .set_termios = lpuart_set_termios, + .type = lpuart_type, + .request_port = lpuart_request_port, + .release_port = lpuart_release_port, + .config_port = lpuart_config_port, + .verify_port = lpuart_verify_port, + .flush_buffer = lpuart_flush_buffer, +#if defined(CONFIG_CONSOLE_POLL) + .poll_init = lpuart_poll_init, + .poll_get_char = lpuart_poll_get_char, + .poll_put_char = lpuart_poll_put_char, +#endif +}; + +static const struct uart_ops lpuart32_pops = { + .tx_empty = lpuart32_tx_empty, + .set_mctrl = lpuart32_set_mctrl, + .get_mctrl = lpuart32_get_mctrl, + .stop_tx = lpuart32_stop_tx, + .start_tx = lpuart32_start_tx, + .stop_rx = lpuart32_stop_rx, + .break_ctl = lpuart32_break_ctl, + .startup = lpuart32_startup, + .shutdown = lpuart32_shutdown, + .set_termios = lpuart32_set_termios, + .type = lpuart_type, + .request_port = lpuart_request_port, + .release_port = lpuart_release_port, + .config_port = lpuart_config_port, + .verify_port = lpuart_verify_port, + .flush_buffer = lpuart_flush_buffer, +#if defined(CONFIG_CONSOLE_POLL) + .poll_init = lpuart32_poll_init, + .poll_get_char = lpuart32_poll_get_char, + .poll_put_char = lpuart32_poll_put_char, +#endif +}; + +static struct lpuart_port *lpuart_ports[UART_NR]; + +#ifdef CONFIG_SERIAL_FSL_LPUART_CONSOLE +static void lpuart_console_putchar(struct uart_port *port, unsigned char ch) +{ + lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE); + writeb(ch, port->membase + UARTDR); +} + +static void lpuart32_console_putchar(struct uart_port *port, unsigned char ch) +{ + lpuart32_wait_bit_set(port, 
UARTSTAT, UARTSTAT_TDRE); + lpuart32_write(port, ch, UARTDATA); +} + +static void +lpuart_console_write(struct console *co, const char *s, unsigned int count) +{ + struct lpuart_port *sport = lpuart_ports[co->index]; + unsigned char old_cr2, cr2; + unsigned long flags; + int locked = 1; + + if (oops_in_progress) + locked = spin_trylock_irqsave(&sport->port.lock, flags); + else + spin_lock_irqsave(&sport->port.lock, flags); + + /* first save CR2 and then disable interrupts */ + cr2 = old_cr2 = readb(sport->port.membase + UARTCR2); + cr2 |= UARTCR2_TE | UARTCR2_RE; + cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE); + writeb(cr2, sport->port.membase + UARTCR2); + + uart_console_write(&sport->port, s, count, lpuart_console_putchar); + + /* wait for transmitter finish complete and restore CR2 */ + lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC); + + writeb(old_cr2, sport->port.membase + UARTCR2); + + if (locked) + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static void +lpuart32_console_write(struct console *co, const char *s, unsigned int count) +{ + struct lpuart_port *sport = lpuart_ports[co->index]; + unsigned long old_cr, cr; + unsigned long flags; + int locked = 1; + + if (oops_in_progress) + locked = spin_trylock_irqsave(&sport->port.lock, flags); + else + spin_lock_irqsave(&sport->port.lock, flags); + + /* first save CR2 and then disable interrupts */ + cr = old_cr = lpuart32_read(&sport->port, UARTCTRL); + cr |= UARTCTRL_TE | UARTCTRL_RE; + cr &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE); + lpuart32_write(&sport->port, cr, UARTCTRL); + + uart_console_write(&sport->port, s, count, lpuart32_console_putchar); + + /* wait for transmitter finish complete and restore CR2 */ + lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); + + lpuart32_write(&sport->port, old_cr, UARTCTRL); + + if (locked) + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +/* + * if the port was already initialised (eg, by a boot loader), + * try to determine the current setup. 
+ */ +static void __init +lpuart_console_get_options(struct lpuart_port *sport, int *baud, + int *parity, int *bits) +{ + unsigned char cr, bdh, bdl, brfa; + unsigned int sbr, uartclk, baud_raw; + + cr = readb(sport->port.membase + UARTCR2); + cr &= UARTCR2_TE | UARTCR2_RE; + if (!cr) + return; + + /* ok, the port was enabled */ + + cr = readb(sport->port.membase + UARTCR1); + + *parity = 'n'; + if (cr & UARTCR1_PE) { + if (cr & UARTCR1_PT) + *parity = 'o'; + else + *parity = 'e'; + } + + if (cr & UARTCR1_M) + *bits = 9; + else + *bits = 8; + + bdh = readb(sport->port.membase + UARTBDH); + bdh &= UARTBDH_SBR_MASK; + bdl = readb(sport->port.membase + UARTBDL); + sbr = bdh; + sbr <<= 8; + sbr |= bdl; + brfa = readb(sport->port.membase + UARTCR4); + brfa &= UARTCR4_BRFA_MASK; + + uartclk = lpuart_get_baud_clk_rate(sport); + /* + * baud = mod_clk / (16 * (sbr + brfa/32)) + */ + baud_raw = uartclk / (16 * (sbr + brfa / 32)); + + if (*baud != baud_raw) + dev_info(sport->port.dev, "Serial: Console lpuart rounded baud rate " + "from %d to %d\n", baud_raw, *baud); +} + +static void __init +lpuart32_console_get_options(struct lpuart_port *sport, int *baud, + int *parity, int *bits) +{ + unsigned long cr, bd; + unsigned int sbr, uartclk, baud_raw; + + cr = lpuart32_read(&sport->port, UARTCTRL); + cr &= UARTCTRL_TE | UARTCTRL_RE; + if (!cr) + return; + + /* ok, the port was enabled */ + + cr = lpuart32_read(&sport->port, UARTCTRL); + + *parity = 'n'; + if (cr & UARTCTRL_PE) { + if (cr & UARTCTRL_PT) + *parity = 'o'; + else + *parity = 'e'; + } + + if (cr & UARTCTRL_M) + *bits = 9; + else + *bits = 8; + + bd = lpuart32_read(&sport->port, UARTBAUD); + bd &= UARTBAUD_SBR_MASK; + if (!bd) + return; + + sbr = bd; + uartclk = lpuart_get_baud_clk_rate(sport); + /* + * baud = mod_clk / (16 * sbr) + */ + baud_raw = uartclk / (16 * sbr); + + if (*baud != baud_raw) + dev_info(sport->port.dev, "Serial: Console lpuart rounded baud rate " + "from %d to %d\n", baud_raw, *baud); +} + +static int __init lpuart_console_setup(struct console *co, char *options) +{ + struct lpuart_port *sport; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* + * check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support.
+ */ + if (co->index == -1 || co->index >= ARRAY_SIZE(lpuart_ports)) + co->index = 0; + + sport = lpuart_ports[co->index]; + if (sport == NULL) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + if (lpuart_is_32(sport)) + lpuart32_console_get_options(sport, &baud, &parity, &bits); + else + lpuart_console_get_options(sport, &baud, &parity, &bits); + + if (lpuart_is_32(sport)) + lpuart32_setup_watermark(sport); + else + lpuart_setup_watermark(sport); + + return uart_set_options(&sport->port, co, baud, parity, bits, flow); +} + +static struct uart_driver lpuart_reg; +static struct console lpuart_console = { + .name = DEV_NAME, + .write = lpuart_console_write, + .device = uart_console_device, + .setup = lpuart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &lpuart_reg, +}; + +static struct console lpuart32_console = { + .name = DEV_NAME, + .write = lpuart32_console_write, + .device = uart_console_device, + .setup = lpuart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &lpuart_reg, +}; + +static void lpuart_early_write(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, lpuart_console_putchar); +} + +static void lpuart32_early_write(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, lpuart32_console_putchar); +} + +static int __init lpuart_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = lpuart_early_write; + return 0; +} + +static int __init lpuart32_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + if (device->port.iotype != UPIO_MEM32) + device->port.iotype = UPIO_MEM32BE; + + device->con->write = lpuart32_early_write; + return 0; +} + +static int __init ls1028a_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + u32 cr; + + if (!device->port.membase) + return -ENODEV; + + device->port.iotype = UPIO_MEM32; + device->con->write = lpuart32_early_write; + + /* set the baudrate */ + if (device->port.uartclk && device->baud) + __lpuart32_serial_setbrg(&device->port, device->baud, + false, false); + + /* enable transmitter */ + cr = lpuart32_read(&device->port, UARTCTRL); + cr |= UARTCTRL_TE; + lpuart32_write(&device->port, cr, UARTCTRL); + + return 0; +} + +static int __init lpuart32_imx_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->port.iotype = UPIO_MEM32; + device->port.membase += IMX_REG_OFF; + device->con->write = lpuart32_early_write; + + return 0; +} +OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imxrt1050-lpuart", lpuart32_imx_early_console_setup); +EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); +EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); + 
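+/*
+ * Usage sketch (illustrative only, not part of the original file): the
+ * earlycon entries declared above are normally activated from the kernel
+ * command line.  Passing plain "earlycon" matches the early console against
+ * the device tree /chosen/stdout-path node, while naming it explicitly takes
+ * a form such as:
+ *
+ *     earlycon=lpuart32,0x5a060000
+ *
+ * where the MMIO base address is only a placeholder and must match the
+ * actual SoC.  The runtime console is requested separately, e.g.
+ * "console=ttyLP0,115200", since this driver registers its ports as ttyLPn.
+ */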
+#define LPUART_CONSOLE (&lpuart_console) +#define LPUART32_CONSOLE (&lpuart32_console) +#else +#define LPUART_CONSOLE NULL +#define LPUART32_CONSOLE NULL +#endif + +static struct uart_driver lpuart_reg = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = DEV_NAME, + .nr = ARRAY_SIZE(lpuart_ports), + .cons = LPUART_CONSOLE, +}; + +static const struct serial_rs485 lpuart_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND, + /* delay_rts_* and RX_DURING_TX are not supported */ +}; + +static int lpuart_global_reset(struct lpuart_port *sport) +{ + struct uart_port *port = &sport->port; + void __iomem *global_addr; + unsigned long ctrl, bd; + unsigned int val = 0; + int ret; + + ret = clk_prepare_enable(sport->ipg_clk); + if (ret) { + dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret); + return ret; + } + + if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) { + /* + * If the transmitter is used by earlycon, wait for transmit engine to + * complete and then reset. + */ + ctrl = lpuart32_read(port, UARTCTRL); + if (ctrl & UARTCTRL_TE) { + bd = lpuart32_read(&sport->port, UARTBAUD); + if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false, + port)) { + dev_warn(sport->port.dev, + "timeout waiting for transmit engine to complete\n"); + clk_disable_unprepare(sport->ipg_clk); + return 0; + } + } + + global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF; + writel(UART_GLOBAL_RST, global_addr); + usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US); + writel(0, global_addr); + usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US); + + /* Recover the transmitter for earlycon. */ + if (ctrl & UARTCTRL_TE) { + lpuart32_write(port, bd, UARTBAUD); + lpuart32_write(port, ctrl, UARTCTRL); + } + } + + clk_disable_unprepare(sport->ipg_clk); + return 0; +} + +static int lpuart_probe(struct platform_device *pdev) +{ + const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev); + struct device_node *np = pdev->dev.of_node; + struct lpuart_port *sport; + struct resource *res; + irq_handler_t handler; + int ret; + + sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); + if (!sport) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sport->port.membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sport->port.membase)) + return PTR_ERR(sport->port.membase); + + sport->port.membase += sdata->reg_off; + sport->port.mapbase = res->start + sdata->reg_off; + sport->port.dev = &pdev->dev; + sport->port.type = PORT_LPUART; + sport->devtype = sdata->devtype; + sport->rx_watermark = sdata->rx_watermark; + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + sport->port.irq = ret; + sport->port.iotype = sdata->iotype; + if (lpuart_is_32(sport)) + sport->port.ops = &lpuart32_pops; + else + sport->port.ops = &lpuart_pops; + sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LPUART_CONSOLE); + sport->port.flags = UPF_BOOT_AUTOCONF; + + if (lpuart_is_32(sport)) + sport->port.rs485_config = lpuart32_config_rs485; + else + sport->port.rs485_config = lpuart_config_rs485; + sport->port.rs485_supported = lpuart_rs485_supported; + + sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(sport->ipg_clk)) { + ret = PTR_ERR(sport->ipg_clk); + dev_err(&pdev->dev, "failed to get uart ipg clk: %d\n", ret); + return ret; + } + + sport->baud_clk = NULL; + if (is_imx8qxp_lpuart(sport)) { + sport->baud_clk = devm_clk_get(&pdev->dev, "baud"); + if 
(IS_ERR(sport->baud_clk)) { + ret = PTR_ERR(sport->baud_clk); + dev_err(&pdev->dev, "failed to get uart baud clk: %d\n", ret); + return ret; + } + } + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); + return ret; + } + if (ret >= ARRAY_SIZE(lpuart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", ret); + return -EINVAL; + } + sport->port.line = ret; + + ret = lpuart_enable_clks(sport); + if (ret) + return ret; + sport->port.uartclk = lpuart_get_baud_clk_rate(sport); + + lpuart_ports[sport->port.line] = sport; + + platform_set_drvdata(pdev, &sport->port); + + if (lpuart_is_32(sport)) { + lpuart_reg.cons = LPUART32_CONSOLE; + handler = lpuart32_int; + } else { + lpuart_reg.cons = LPUART_CONSOLE; + handler = lpuart_int; + } + + ret = lpuart_global_reset(sport); + if (ret) + goto failed_reset; + + ret = uart_get_rs485_mode(&sport->port); + if (ret) + goto failed_get_rs485; + + ret = uart_add_one_port(&lpuart_reg, &sport->port); + if (ret) + goto failed_attach_port; + + ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0, + DRIVER_NAME, sport); + if (ret) + goto failed_irq_request; + + return 0; + +failed_irq_request: + uart_remove_one_port(&lpuart_reg, &sport->port); +failed_attach_port: +failed_get_rs485: +failed_reset: + lpuart_disable_clks(sport); + return ret; +} + +static int lpuart_remove(struct platform_device *pdev) +{ + struct lpuart_port *sport = platform_get_drvdata(pdev); + + uart_remove_one_port(&lpuart_reg, &sport->port); + + lpuart_disable_clks(sport); + + if (sport->dma_tx_chan) + dma_release_channel(sport->dma_tx_chan); + + if (sport->dma_rx_chan) + dma_release_channel(sport->dma_rx_chan); + + return 0; +} + +static int __maybe_unused lpuart_suspend(struct device *dev) +{ + struct lpuart_port *sport = dev_get_drvdata(dev); + unsigned long temp; + bool irq_wake; + + if (lpuart_is_32(sport)) { + /* disable Rx/Tx and interrupts */ + temp = lpuart32_read(&sport->port, UARTCTRL); + temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE); + lpuart32_write(&sport->port, temp, UARTCTRL); + } else { + /* disable Rx/Tx and interrupts */ + temp = readb(sport->port.membase + UARTCR2); + temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE); + writeb(temp, sport->port.membase + UARTCR2); + } + + uart_suspend_port(&lpuart_reg, &sport->port); + + /* uart_suspend_port() might set wakeup flag */ + irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq)); + + if (sport->lpuart_dma_rx_use) { + /* + * EDMA driver during suspend will forcefully release any + * non-idle DMA channels. If port wakeup is enabled or if port + * is console port or 'no_console_suspend' is set the Rx DMA + * cannot resume as expected, hence gracefully release the + * Rx DMA path before suspend and start Rx DMA path on resume. 
+ */ + if (irq_wake) { + lpuart_dma_rx_free(&sport->port); + } + + /* Disable Rx DMA to use UART port as wakeup source */ + if (lpuart_is_32(sport)) { + temp = lpuart32_read(&sport->port, UARTBAUD); + lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE, + UARTBAUD); + } else { + writeb(readb(sport->port.membase + UARTCR5) & + ~UARTCR5_RDMAS, sport->port.membase + UARTCR5); + } + } + + if (sport->lpuart_dma_tx_use) { + sport->dma_tx_in_progress = false; + dmaengine_terminate_sync(sport->dma_tx_chan); + } + + if (sport->port.suspended && !irq_wake) + lpuart_disable_clks(sport); + + return 0; +} + +static int __maybe_unused lpuart_resume(struct device *dev) +{ + struct lpuart_port *sport = dev_get_drvdata(dev); + bool irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq)); + + if (sport->port.suspended && !irq_wake) + lpuart_enable_clks(sport); + + if (lpuart_is_32(sport)) + lpuart32_setup_watermark_enable(sport); + else + lpuart_setup_watermark_enable(sport); + + if (sport->lpuart_dma_rx_use) { + if (irq_wake) { + if (!lpuart_start_rx_dma(sport)) + rx_dma_timer_init(sport); + else + sport->lpuart_dma_rx_use = false; + } + } + + lpuart_tx_dma_startup(sport); + + if (lpuart_is_32(sport)) + lpuart32_configure(sport); + + uart_resume_port(&lpuart_reg, &sport->port); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume); + +static struct platform_driver lpuart_driver = { + .probe = lpuart_probe, + .remove = lpuart_remove, + .driver = { + .name = "fsl-lpuart", + .of_match_table = lpuart_dt_ids, + .pm = &lpuart_pm_ops, + }, +}; + +static int __init lpuart_serial_init(void) +{ + int ret = uart_register_driver(&lpuart_reg); + + if (ret) + return ret; + + ret = platform_driver_register(&lpuart_driver); + if (ret) + uart_unregister_driver(&lpuart_reg); + + return ret; +} + +static void __exit lpuart_serial_exit(void) +{ + platform_driver_unregister(&lpuart_driver); + uart_unregister_driver(&lpuart_reg); +} + +module_init(lpuart_serial_init); +module_exit(lpuart_serial_exit); + +MODULE_DESCRIPTION("Freescale lpuart serial port driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c new file mode 100644 index 000000000..819f957b6 --- /dev/null +++ b/drivers/tty/serial/icom.c @@ -0,0 +1,1876 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * icom.c + * + * Copyright (C) 2001 IBM Corporation. All rights reserved. + * + * Serial device driver. 
+ * + * Based on code from serial.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/*#define ICOM_TRACE enable port trace capabilities */ + +#define ICOM_DRIVER_NAME "icom" +#define NR_PORTS 128 + +static const unsigned int icom_acfg_baud[] = { + 300, + 600, + 900, + 1200, + 1800, + 2400, + 3600, + 4800, + 7200, + 9600, + 14400, + 19200, + 28800, + 38400, + 57600, + 76800, + 115200, + 153600, + 230400, + 307200, + 460800, +}; +#define BAUD_TABLE_LIMIT (ARRAY_SIZE(icom_acfg_baud) - 1) + +struct icom_regs { + u32 control; /* Adapter Control Register */ + u32 interrupt; /* Adapter Interrupt Register */ + u32 int_mask; /* Adapter Interrupt Mask Reg */ + u32 int_pri; /* Adapter Interrupt Priority r */ + u32 int_reg_b; /* Adapter non-masked Interrupt */ + u32 resvd01; + u32 resvd02; + u32 resvd03; + u32 control_2; /* Adapter Control Register 2 */ + u32 interrupt_2; /* Adapter Interrupt Register 2 */ + u32 int_mask_2; /* Adapter Interrupt Mask 2 */ + u32 int_pri_2; /* Adapter Interrupt Prior 2 */ + u32 int_reg_2b; /* Adapter non-masked 2 */ +}; + +struct func_dram { + u32 reserved[108]; /* 0-1B0 reserved by personality code */ + u32 RcvStatusAddr; /* 1B0-1B3 Status Address for Next rcv */ + u8 RcvStnAddr; /* 1B4 Receive Station Addr */ + u8 IdleState; /* 1B5 Idle State */ + u8 IdleMonitor; /* 1B6 Idle Monitor */ + u8 FlagFillIdleTimer; /* 1B7 Flag Fill Idle Timer */ + u32 XmitStatusAddr; /* 1B8-1BB Transmit Status Address */ + u8 StartXmitCmd; /* 1BC Start Xmit Command */ + u8 HDLCConfigReg; /* 1BD Reserved */ + u8 CauseCode; /* 1BE Cause code for fatal error */ + u8 xchar; /* 1BF High priority send */ + u32 reserved3; /* 1C0-1C3 Reserved */ + u8 PrevCmdReg; /* 1C4 Reserved */ + u8 CmdReg; /* 1C5 Command Register */ + u8 async_config2; /* 1C6 Async Config Byte 2 */ + u8 async_config3; /* 1C7 Async Config Byte 3 */ + u8 dce_resvd[20]; /* 1C8-1DB DCE Rsvd */ + u8 dce_resvd21; /* 1DC DCE Rsvd (21st byte */ + u8 misc_flags; /* 1DD misc flags */ +#define V2_HARDWARE 0x40 +#define ICOM_HDW_ACTIVE 0x01 + u8 call_length; /* 1DE Phone #/CFI buff ln */ + u8 call_length2; /* 1DF Upper byte (unused) */ + u32 call_addr; /* 1E0-1E3 Phn #/CFI buff addr */ + u16 timer_value; /* 1E4-1E5 general timer value */ + u8 timer_command; /* 1E6 general timer cmd */ + u8 dce_command; /* 1E7 dce command reg */ + u8 dce_cmd_status; /* 1E8 dce command stat */ + u8 x21_r1_ioff; /* 1E9 dce ready counter */ + u8 x21_r0_ioff; /* 1EA dce not ready ctr */ + u8 x21_ralt_ioff; /* 1EB dce CNR counter */ + u8 x21_r1_ion; /* 1EC dce ready I on ctr */ + u8 rsvd_ier; /* 1ED Rsvd for IER (if ne */ + u8 ier; /* 1EE Interrupt Enable */ + u8 isr; /* 1EF Input Signal Reg */ + u8 osr; /* 1F0 Output Signal Reg */ + u8 reset; /* 1F1 Reset/Reload Reg */ + u8 disable; /* 1F2 Disable Reg */ + u8 sync; /* 1F3 Sync Reg */ + u8 error_stat; /* 1F4 Error Status */ + u8 cable_id; /* 1F5 Cable ID */ + u8 cs_length; /* 1F6 CS Load Length */ + u8 mac_length; /* 1F7 Mac Load Length */ + u32 cs_load_addr; /* 1F8-1FB Call Load PCI Addr */ + u32 mac_load_addr; /* 1FC-1FF Mac Load PCI Addr */ +}; + +/* + * adapter defines and structures + */ +#define ICOM_CONTROL_START_A 0x00000008 +#define ICOM_CONTROL_STOP_A 0x00000004 +#define ICOM_CONTROL_START_B 0x00000002 +#define 
ICOM_CONTROL_STOP_B 0x00000001 +#define ICOM_CONTROL_START_C 0x00000008 +#define ICOM_CONTROL_STOP_C 0x00000004 +#define ICOM_CONTROL_START_D 0x00000002 +#define ICOM_CONTROL_STOP_D 0x00000001 +#define ICOM_IRAM_OFFSET 0x1000 +#define ICOM_IRAM_SIZE 0x0C00 +#define ICOM_DCE_IRAM_OFFSET 0x0A00 +#define ICOM_CABLE_ID_VALID 0x01 +#define ICOM_CABLE_ID_MASK 0xF0 +#define ICOM_DISABLE 0x80 +#define CMD_XMIT_RCV_ENABLE 0xC0 +#define CMD_XMIT_ENABLE 0x40 +#define CMD_RCV_DISABLE 0x00 +#define CMD_RCV_ENABLE 0x80 +#define CMD_RESTART 0x01 +#define CMD_HOLD_XMIT 0x02 +#define CMD_SND_BREAK 0x04 +#define RS232_CABLE 0x06 +#define V24_CABLE 0x0E +#define V35_CABLE 0x0C +#define V36_CABLE 0x02 +#define NO_CABLE 0x00 +#define START_DOWNLOAD 0x80 +#define ICOM_INT_MASK_PRC_A 0x00003FFF +#define ICOM_INT_MASK_PRC_B 0x3FFF0000 +#define ICOM_INT_MASK_PRC_C 0x00003FFF +#define ICOM_INT_MASK_PRC_D 0x3FFF0000 +#define INT_RCV_COMPLETED 0x1000 +#define INT_XMIT_COMPLETED 0x2000 +#define INT_IDLE_DETECT 0x0800 +#define INT_RCV_DISABLED 0x0400 +#define INT_XMIT_DISABLED 0x0200 +#define INT_RCV_XMIT_SHUTDOWN 0x0100 +#define INT_FATAL_ERROR 0x0080 +#define INT_CABLE_PULL 0x0020 +#define INT_SIGNAL_CHANGE 0x0010 +#define HDLC_PPP_PURE_ASYNC 0x02 +#define HDLC_FF_FILL 0x00 +#define HDLC_HDW_FLOW 0x01 +#define START_XMIT 0x80 +#define ICOM_ACFG_DRIVE1 0x20 +#define ICOM_ACFG_NO_PARITY 0x00 +#define ICOM_ACFG_PARITY_ENAB 0x02 +#define ICOM_ACFG_PARITY_ODD 0x01 +#define ICOM_ACFG_8BPC 0x00 +#define ICOM_ACFG_7BPC 0x04 +#define ICOM_ACFG_6BPC 0x08 +#define ICOM_ACFG_5BPC 0x0C +#define ICOM_ACFG_1STOP_BIT 0x00 +#define ICOM_ACFG_2STOP_BIT 0x10 +#define ICOM_DTR 0x80 +#define ICOM_RTS 0x40 +#define ICOM_RI 0x08 +#define ICOM_DSR 0x80 +#define ICOM_DCD 0x20 +#define ICOM_CTS 0x40 + +#define NUM_XBUFFS 1 +#define NUM_RBUFFS 2 +#define RCV_BUFF_SZ 0x0200 +#define XMIT_BUFF_SZ 0x1000 +struct statusArea { + /**********************************************/ + /* Transmit Status Area */ + /**********************************************/ + struct xmit_status_area{ + __le32 leNext; /* Next entry in Little Endian on Adapter */ + __le32 leNextASD; + __le32 leBuffer; /* Buffer for entry in LE for Adapter */ + __le16 leLengthASD; + __le16 leOffsetASD; + __le16 leLength; /* Length of data in segment */ + __le16 flags; +#define SA_FLAGS_DONE 0x0080 /* Done with Segment */ +#define SA_FLAGS_CONTINUED 0x8000 /* More Segments */ +#define SA_FLAGS_IDLE 0x4000 /* Mark IDLE after frm */ +#define SA_FLAGS_READY_TO_XMIT 0x0800 +#define SA_FLAGS_STAT_MASK 0x007F + } xmit[NUM_XBUFFS]; + + /**********************************************/ + /* Receive Status Area */ + /**********************************************/ + struct { + __le32 leNext; /* Next entry in Little Endian on Adapter */ + __le32 leNextASD; + __le32 leBuffer; /* Buffer for entry in LE for Adapter */ + __le16 WorkingLength; /* size of segment */ + __le16 reserv01; + __le16 leLength; /* Length of data in segment */ + __le16 flags; +#define SA_FL_RCV_DONE 0x0010 /* Data ready */ +#define SA_FLAGS_OVERRUN 0x0040 +#define SA_FLAGS_PARITY_ERROR 0x0080 +#define SA_FLAGS_FRAME_ERROR 0x0001 +#define SA_FLAGS_FRAME_TRUNC 0x0002 +#define SA_FLAGS_BREAK_DET 0x0004 /* set conditionally by device driver, not hardware */ +#define SA_FLAGS_RCV_MASK 0xFFE6 + } rcv[NUM_RBUFFS]; +}; + +struct icom_adapter; + + +#define ICOM_MAJOR 243 +#define ICOM_MINOR_START 0 + +struct icom_port { + struct uart_port uart_port; + unsigned char cable_id; + unsigned char read_status_mask; + unsigned char 
ignore_status_mask; + void __iomem * int_reg; + struct icom_regs __iomem *global_reg; + struct func_dram __iomem *dram; + int port; + struct statusArea *statStg; + dma_addr_t statStg_pci; + __le32 *xmitRestart; + dma_addr_t xmitRestart_pci; + unsigned char *xmit_buf; + dma_addr_t xmit_buf_pci; + unsigned char *recv_buf; + dma_addr_t recv_buf_pci; + int next_rcv; + int status; +#define ICOM_PORT_ACTIVE 1 /* Port exists. */ +#define ICOM_PORT_OFF 0 /* Port does not exist. */ + struct icom_adapter *adapter; +}; + +struct icom_adapter { + void __iomem * base_addr; + unsigned long base_addr_pci; + struct pci_dev *pci_dev; + struct icom_port port_info[4]; + int index; + int version; +#define ADAPTER_V1 0x0001 +#define ADAPTER_V2 0x0002 + u32 subsystem_id; +#define FOUR_PORT_MODEL 0x0252 +#define V2_TWO_PORTS_RVX 0x021A +#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM 0x0251 + int numb_ports; + struct list_head icom_adapter_entry; + struct kref kref; +}; + +/* prototype */ +extern void iCom_sercons_init(void); + +struct lookup_proc_table { + u32 __iomem *global_control_reg; + unsigned long processor_id; +}; + +struct lookup_int_table { + u32 __iomem *global_int_mask; + unsigned long processor_id; +}; + +static inline struct icom_port *to_icom_port(struct uart_port *port) +{ + return container_of(port, struct icom_port, uart_port); +} + +static const struct pci_device_id icom_pci_table[] = { + { + .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = ADAPTER_V1, + }, + { + .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, + .subvendor = PCI_VENDOR_ID_IBM, + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX, + .driver_data = ADAPTER_V2, + }, + { + .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, + .subvendor = PCI_VENDOR_ID_IBM, + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM, + .driver_data = ADAPTER_V2, + }, + { + .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, + .subvendor = PCI_VENDOR_ID_IBM, + .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL, + .driver_data = ADAPTER_V2, + }, + { + .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, + .subvendor = PCI_VENDOR_ID_IBM, + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE, + .driver_data = ADAPTER_V2, + }, + {} +}; + +static struct lookup_proc_table start_proc[4] = { + {NULL, ICOM_CONTROL_START_A}, + {NULL, ICOM_CONTROL_START_B}, + {NULL, ICOM_CONTROL_START_C}, + {NULL, ICOM_CONTROL_START_D} +}; + + +static struct lookup_proc_table stop_proc[4] = { + {NULL, ICOM_CONTROL_STOP_A}, + {NULL, ICOM_CONTROL_STOP_B}, + {NULL, ICOM_CONTROL_STOP_C}, + {NULL, ICOM_CONTROL_STOP_D} +}; + +static struct lookup_int_table int_mask_tbl[4] = { + {NULL, ICOM_INT_MASK_PRC_A}, + {NULL, ICOM_INT_MASK_PRC_B}, + {NULL, ICOM_INT_MASK_PRC_C}, + {NULL, ICOM_INT_MASK_PRC_D}, +}; + + +MODULE_DEVICE_TABLE(pci, icom_pci_table); + +static LIST_HEAD(icom_adapter_head); + +/* spinlock for adapter initialization and changing adapter operations */ +static DEFINE_SPINLOCK(icom_lock); + +#ifdef ICOM_TRACE +static inline void trace(struct icom_port *icom_port, char *trace_pt, + unsigned long trace_data) +{ + dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n", + icom_port->port, trace_pt, trace_data); +} +#else +static inline void trace(struct icom_port *icom_port, char *trace_pt, unsigned long trace_data) {}; +#endif +static void 
icom_kref_release(struct kref *kref); + +static void free_port_memory(struct icom_port *icom_port) +{ + struct pci_dev *dev = icom_port->adapter->pci_dev; + + trace(icom_port, "RET_PORT_MEM", 0); + if (icom_port->recv_buf) { + dma_free_coherent(&dev->dev, 4096, icom_port->recv_buf, + icom_port->recv_buf_pci); + icom_port->recv_buf = NULL; + } + if (icom_port->xmit_buf) { + dma_free_coherent(&dev->dev, 4096, icom_port->xmit_buf, + icom_port->xmit_buf_pci); + icom_port->xmit_buf = NULL; + } + if (icom_port->statStg) { + dma_free_coherent(&dev->dev, 4096, icom_port->statStg, + icom_port->statStg_pci); + icom_port->statStg = NULL; + } + + if (icom_port->xmitRestart) { + dma_free_coherent(&dev->dev, 4096, icom_port->xmitRestart, + icom_port->xmitRestart_pci); + icom_port->xmitRestart = NULL; + } +} + +static int get_port_memory(struct icom_port *icom_port) +{ + int index; + unsigned long stgAddr; + unsigned long startStgAddr; + unsigned long offset; + struct pci_dev *dev = icom_port->adapter->pci_dev; + + icom_port->xmit_buf = + dma_alloc_coherent(&dev->dev, 4096, &icom_port->xmit_buf_pci, + GFP_KERNEL); + if (!icom_port->xmit_buf) { + dev_err(&dev->dev, "Can not allocate Transmit buffer\n"); + return -ENOMEM; + } + + trace(icom_port, "GET_PORT_MEM", + (unsigned long) icom_port->xmit_buf); + + icom_port->recv_buf = + dma_alloc_coherent(&dev->dev, 4096, &icom_port->recv_buf_pci, + GFP_KERNEL); + if (!icom_port->recv_buf) { + dev_err(&dev->dev, "Can not allocate Receive buffer\n"); + free_port_memory(icom_port); + return -ENOMEM; + } + trace(icom_port, "GET_PORT_MEM", + (unsigned long) icom_port->recv_buf); + + icom_port->statStg = + dma_alloc_coherent(&dev->dev, 4096, &icom_port->statStg_pci, + GFP_KERNEL); + if (!icom_port->statStg) { + dev_err(&dev->dev, "Can not allocate Status buffer\n"); + free_port_memory(icom_port); + return -ENOMEM; + } + trace(icom_port, "GET_PORT_MEM", + (unsigned long) icom_port->statStg); + + icom_port->xmitRestart = + dma_alloc_coherent(&dev->dev, 4096, &icom_port->xmitRestart_pci, + GFP_KERNEL); + if (!icom_port->xmitRestart) { + dev_err(&dev->dev, + "Can not allocate xmit Restart buffer\n"); + free_port_memory(icom_port); + return -ENOMEM; + } + + /* FODs: Frame Out Descriptor Queue, this is a FIFO queue that + indicates that frames are to be transmitted + */ + + stgAddr = (unsigned long) icom_port->statStg; + for (index = 0; index < NUM_XBUFFS; index++) { + trace(icom_port, "FOD_ADDR", stgAddr); + stgAddr = stgAddr + sizeof(icom_port->statStg->xmit[0]); + if (index < (NUM_XBUFFS - 1)) { + memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area)); + icom_port->statStg->xmit[index].leLengthASD = + cpu_to_le16(XMIT_BUFF_SZ); + trace(icom_port, "FOD_ADDR", stgAddr); + trace(icom_port, "FOD_XBUFF", + (unsigned long) icom_port->xmit_buf); + icom_port->statStg->xmit[index].leBuffer = + cpu_to_le32(icom_port->xmit_buf_pci); + } else if (index == (NUM_XBUFFS - 1)) { + memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area)); + icom_port->statStg->xmit[index].leLengthASD = + cpu_to_le16(XMIT_BUFF_SZ); + trace(icom_port, "FOD_XBUFF", + (unsigned long) icom_port->xmit_buf); + icom_port->statStg->xmit[index].leBuffer = + cpu_to_le32(icom_port->xmit_buf_pci); + } else { + memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area)); + } + } + /* FIDs */ + startStgAddr = stgAddr; + + /* fill in every entry, even if no buffer */ + for (index = 0; index < NUM_RBUFFS; index++) { + trace(icom_port, "FID_ADDR", stgAddr); + 
stgAddr = stgAddr + sizeof(icom_port->statStg->rcv[0]); + icom_port->statStg->rcv[index].leLength = 0; + icom_port->statStg->rcv[index].WorkingLength = + cpu_to_le16(RCV_BUFF_SZ); + if (index < (NUM_RBUFFS - 1) ) { + offset = stgAddr - (unsigned long) icom_port->statStg; + icom_port->statStg->rcv[index].leNext = + cpu_to_le32(icom_port-> statStg_pci + offset); + trace(icom_port, "FID_RBUFF", + (unsigned long) icom_port->recv_buf); + icom_port->statStg->rcv[index].leBuffer = + cpu_to_le32(icom_port->recv_buf_pci); + } else if (index == (NUM_RBUFFS -1) ) { + offset = startStgAddr - (unsigned long) icom_port->statStg; + icom_port->statStg->rcv[index].leNext = + cpu_to_le32(icom_port-> statStg_pci + offset); + trace(icom_port, "FID_RBUFF", + (unsigned long) icom_port->recv_buf + 2048); + icom_port->statStg->rcv[index].leBuffer = + cpu_to_le32(icom_port->recv_buf_pci + 2048); + } else { + icom_port->statStg->rcv[index].leNext = 0; + icom_port->statStg->rcv[index].leBuffer = 0; + } + } + + return 0; +} + +static void stop_processor(struct icom_port *icom_port) +{ + unsigned long temp; + unsigned long flags; + int port; + + spin_lock_irqsave(&icom_lock, flags); + + port = icom_port->port; + if (port >= ARRAY_SIZE(stop_proc)) { + dev_err(&icom_port->adapter->pci_dev->dev, + "Invalid port assignment\n"); + goto unlock; + } + + if (port == 0 || port == 1) + stop_proc[port].global_control_reg = &icom_port->global_reg->control; + else + stop_proc[port].global_control_reg = &icom_port->global_reg->control_2; + + temp = readl(stop_proc[port].global_control_reg); + temp = (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id; + writel(temp, stop_proc[port].global_control_reg); + + /* write flush */ + readl(stop_proc[port].global_control_reg); + +unlock: + spin_unlock_irqrestore(&icom_lock, flags); +} + +static void start_processor(struct icom_port *icom_port) +{ + unsigned long temp; + unsigned long flags; + int port; + + spin_lock_irqsave(&icom_lock, flags); + + port = icom_port->port; + if (port >= ARRAY_SIZE(start_proc)) { + dev_err(&icom_port->adapter->pci_dev->dev, + "Invalid port assignment\n"); + goto unlock; + } + + if (port == 0 || port == 1) + start_proc[port].global_control_reg = &icom_port->global_reg->control; + else + start_proc[port].global_control_reg = &icom_port->global_reg->control_2; + + temp = readl(start_proc[port].global_control_reg); + temp = (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id; + writel(temp, start_proc[port].global_control_reg); + + /* write flush */ + readl(start_proc[port].global_control_reg); + +unlock: + spin_unlock_irqrestore(&icom_lock, flags); +} + +static void load_code(struct icom_port *icom_port) +{ + const struct firmware *fw; + char __iomem *iram_ptr; + int index; + int status = 0; + void __iomem *dram_ptr = icom_port->dram; + dma_addr_t temp_pci; + unsigned char *new_page = NULL; + unsigned char cable_id = NO_CABLE; + struct pci_dev *dev = icom_port->adapter->pci_dev; + + /* Clear out any pending interrupts */ + writew(0x3FFF, icom_port->int_reg); + + trace(icom_port, "CLEAR_INTERRUPTS", 0); + + /* Stop processor */ + stop_processor(icom_port); + + /* Zero out DRAM */ + memset_io(dram_ptr, 0, 512); + + /* Load Call Setup into Adapter */ + if (request_firmware(&fw, "icom_call_setup.bin", &dev->dev) < 0) { + dev_err(&dev->dev,"Unable to load icom_call_setup.bin firmware image\n"); + status = -1; + goto load_code_exit; + } + + if (fw->size > ICOM_DCE_IRAM_OFFSET) { + dev_err(&dev->dev, "Invalid firmware image for 
icom_call_setup.bin found.\n"); + release_firmware(fw); + status = -1; + goto load_code_exit; + } + + iram_ptr = (char __iomem *)icom_port->dram + ICOM_IRAM_OFFSET; + for (index = 0; index < fw->size; index++) + writeb(fw->data[index], &iram_ptr[index]); + + release_firmware(fw); + + /* Load Resident DCE portion of Adapter */ + if (request_firmware(&fw, "icom_res_dce.bin", &dev->dev) < 0) { + dev_err(&dev->dev,"Unable to load icom_res_dce.bin firmware image\n"); + status = -1; + goto load_code_exit; + } + + if (fw->size > ICOM_IRAM_SIZE) { + dev_err(&dev->dev, "Invalid firmware image for icom_res_dce.bin found.\n"); + release_firmware(fw); + status = -1; + goto load_code_exit; + } + + iram_ptr = (char __iomem *) icom_port->dram + ICOM_IRAM_OFFSET; + for (index = ICOM_DCE_IRAM_OFFSET; index < fw->size; index++) + writeb(fw->data[index], &iram_ptr[index]); + + release_firmware(fw); + + /* Set Hardware level */ + if (icom_port->adapter->version == ADAPTER_V2) + writeb(V2_HARDWARE, &(icom_port->dram->misc_flags)); + + /* Start the processor in Adapter */ + start_processor(icom_port); + + writeb((HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL), + &(icom_port->dram->HDLCConfigReg)); + writeb(0x04, &(icom_port->dram->FlagFillIdleTimer)); /* 0.5 seconds */ + writeb(0x00, &(icom_port->dram->CmdReg)); + writeb(0x10, &(icom_port->dram->async_config3)); + writeb((ICOM_ACFG_DRIVE1 | ICOM_ACFG_NO_PARITY | ICOM_ACFG_8BPC | + ICOM_ACFG_1STOP_BIT), &(icom_port->dram->async_config2)); + + /*Set up data in icom DRAM to indicate where personality + *code is located and its length. + */ + new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL); + + if (!new_page) { + dev_err(&dev->dev, "Can not allocate DMA buffer\n"); + status = -1; + goto load_code_exit; + } + + if (request_firmware(&fw, "icom_asc.bin", &dev->dev) < 0) { + dev_err(&dev->dev,"Unable to load icom_asc.bin firmware image\n"); + status = -1; + goto load_code_exit; + } + + if (fw->size > ICOM_DCE_IRAM_OFFSET) { + dev_err(&dev->dev, "Invalid firmware image for icom_asc.bin found.\n"); + release_firmware(fw); + status = -1; + goto load_code_exit; + } + + for (index = 0; index < fw->size; index++) + new_page[index] = fw->data[index]; + + writeb((char) ((fw->size + 16)/16), &icom_port->dram->mac_length); + writel(temp_pci, &icom_port->dram->mac_load_addr); + + release_firmware(fw); + + /*Setting the syncReg to 0x80 causes adapter to start downloading + the personality code into adapter instruction RAM. + Once code is loaded, it will begin executing and, based on + information provided above, will start DMAing data from + shared memory to adapter DRAM. 
+ */ + /* the wait loop below verifies this write operation has been done + and processed + */ + writeb(START_DOWNLOAD, &icom_port->dram->sync); + + /* Wait max 1 Sec for data download and processor to start */ + for (index = 0; index < 10; index++) { + msleep(100); + if (readb(&icom_port->dram->misc_flags) & ICOM_HDW_ACTIVE) + break; + } + + if (index == 10) + status = -1; + + /* + * check Cable ID + */ + cable_id = readb(&icom_port->dram->cable_id); + + if (cable_id & ICOM_CABLE_ID_VALID) { + /* Get cable ID into the lower 4 bits (standard form) */ + cable_id = (cable_id & ICOM_CABLE_ID_MASK) >> 4; + icom_port->cable_id = cable_id; + } else { + dev_err(&dev->dev,"Invalid or no cable attached\n"); + icom_port->cable_id = NO_CABLE; + } + + load_code_exit: + + if (status != 0) { + /* Clear out any pending interrupts */ + writew(0x3FFF, icom_port->int_reg); + + /* Turn off port */ + writeb(ICOM_DISABLE, &(icom_port->dram->disable)); + + /* Stop processor */ + stop_processor(icom_port); + + dev_err(&icom_port->adapter->pci_dev->dev,"Port not operational\n"); + } + + if (new_page != NULL) + dma_free_coherent(&dev->dev, 4096, new_page, temp_pci); +} + +static int startup(struct icom_port *icom_port) +{ + unsigned long temp; + unsigned char cable_id, raw_cable_id; + unsigned long flags; + int port; + + trace(icom_port, "STARTUP", 0); + + if (!icom_port->dram) { + /* should NEVER be NULL */ + dev_err(&icom_port->adapter->pci_dev->dev, + "Unusable Port, port configuration missing\n"); + return -ENODEV; + } + + /* + * check Cable ID + */ + raw_cable_id = readb(&icom_port->dram->cable_id); + trace(icom_port, "CABLE_ID", raw_cable_id); + + /* Get cable ID into the lower 4 bits (standard form) */ + cable_id = (raw_cable_id & ICOM_CABLE_ID_MASK) >> 4; + + /* Check for valid Cable ID */ + if (!(raw_cable_id & ICOM_CABLE_ID_VALID) || + (cable_id != icom_port->cable_id)) { + + /* reload adapter code, pick up any potential changes in cable id */ + load_code(icom_port); + + /* still no sign of cable, error out */ + raw_cable_id = readb(&icom_port->dram->cable_id); + cable_id = (raw_cable_id & ICOM_CABLE_ID_MASK) >> 4; + if (!(raw_cable_id & ICOM_CABLE_ID_VALID) || + (icom_port->cable_id == NO_CABLE)) + return -EIO; + } + + /* + * Finally, clear and enable interrupts + */ + spin_lock_irqsave(&icom_lock, flags); + port = icom_port->port; + if (port >= ARRAY_SIZE(int_mask_tbl)) { + dev_err(&icom_port->adapter->pci_dev->dev, + "Invalid port assignment\n"); + goto unlock; + } + + if (port == 0 || port == 1) + int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask; + else + int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask_2; + + if (port == 0 || port == 2) + writew(0x00FF, icom_port->int_reg); + else + writew(0x3F00, icom_port->int_reg); + + temp = readl(int_mask_tbl[port].global_int_mask); + writel(temp & ~int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask); + + /* write flush */ + readl(int_mask_tbl[port].global_int_mask); + +unlock: + spin_unlock_irqrestore(&icom_lock, flags); + return 0; +} + +static void shutdown(struct icom_port *icom_port) +{ + unsigned long temp; + unsigned char cmdReg; + unsigned long flags; + int port; + + spin_lock_irqsave(&icom_lock, flags); + trace(icom_port, "SHUTDOWN", 0); + + /* + * disable all interrupts + */ + port = icom_port->port; + if (port >= ARRAY_SIZE(int_mask_tbl)) { + dev_err(&icom_port->adapter->pci_dev->dev, + "Invalid port assignment\n"); + goto unlock; + } + if (port == 0 || port == 1) + 
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask;
+    else
+        int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask_2;
+
+    temp = readl(int_mask_tbl[port].global_int_mask);
+    writel(temp | int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
+
+    /* write flush */
+    readl(int_mask_tbl[port].global_int_mask);
+
+unlock:
+    spin_unlock_irqrestore(&icom_lock, flags);
+
+    /*
+     * disable break condition
+     */
+    cmdReg = readb(&icom_port->dram->CmdReg);
+    if (cmdReg & CMD_SND_BREAK) {
+        writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
+    }
+}
+
+static int icom_write(struct uart_port *port)
+{
+    struct icom_port *icom_port = to_icom_port(port);
+    unsigned long data_count;
+    unsigned char cmdReg;
+    unsigned long offset;
+    int temp_tail = port->state->xmit.tail;
+
+    trace(icom_port, "WRITE", 0);
+
+    if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
+        SA_FLAGS_READY_TO_XMIT) {
+        trace(icom_port, "WRITE_FULL", 0);
+        return 0;
+    }
+
+    data_count = 0;
+    while ((port->state->xmit.head != temp_tail) &&
+           (data_count <= XMIT_BUFF_SZ)) {
+
+        icom_port->xmit_buf[data_count++] =
+            port->state->xmit.buf[temp_tail];
+
+        temp_tail++;
+        temp_tail &= (UART_XMIT_SIZE - 1);
+    }
+
+    if (data_count) {
+        icom_port->statStg->xmit[0].flags =
+            cpu_to_le16(SA_FLAGS_READY_TO_XMIT);
+        icom_port->statStg->xmit[0].leLength =
+            cpu_to_le16(data_count);
+        offset =
+            (unsigned long) &icom_port->statStg->xmit[0] -
+            (unsigned long) icom_port->statStg;
+        *icom_port->xmitRestart =
+            cpu_to_le32(icom_port->statStg_pci + offset);
+        cmdReg = readb(&icom_port->dram->CmdReg);
+        writeb(cmdReg | CMD_XMIT_RCV_ENABLE,
+               &icom_port->dram->CmdReg);
+        writeb(START_XMIT, &icom_port->dram->StartXmitCmd);
+        trace(icom_port, "WRITE_START", data_count);
+        /* write flush */
+        readb(&icom_port->dram->StartXmitCmd);
+    }
+
+    return data_count;
+}
+
+static inline void check_modem_status(struct icom_port *icom_port)
+{
+    static char old_status = 0;
+    char delta_status;
+    unsigned char status;
+
+    spin_lock(&icom_port->uart_port.lock);
+
+    /*modem input register */
+    status = readb(&icom_port->dram->isr);
+    trace(icom_port, "CHECK_MODEM", status);
+    delta_status = status ^ old_status;
+    if (delta_status) {
+        if (delta_status & ICOM_RI)
+            icom_port->uart_port.icount.rng++;
+        if (delta_status & ICOM_DSR)
+            icom_port->uart_port.icount.dsr++;
+        if (delta_status & ICOM_DCD)
+            uart_handle_dcd_change(&icom_port->uart_port,
+                                   delta_status & ICOM_DCD);
+        if (delta_status & ICOM_CTS)
+            uart_handle_cts_change(&icom_port->uart_port,
+                                   delta_status & ICOM_CTS);
+
+        wake_up_interruptible(&icom_port->uart_port.state->
+                              port.delta_msr_wait);
+        old_status = status;
+    }
+    spin_unlock(&icom_port->uart_port.lock);
+}
+
+static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
+{
+    u16 count, i;
+
+    if (port_int_reg & (INT_XMIT_COMPLETED)) {
+        trace(icom_port, "XMIT_COMPLETE", 0);
+
+        /* clear buffer in use bit */
+        icom_port->statStg->xmit[0].flags &=
+            cpu_to_le16(~SA_FLAGS_READY_TO_XMIT);
+
+        count = le16_to_cpu(icom_port->statStg->xmit[0].leLength);
+        icom_port->uart_port.icount.tx += count;
+
+        for (i = 0; i < count &&
+             !uart_circ_empty(&icom_port->uart_port.state->xmit); i++) {
+
+            icom_port->uart_port.state->xmit.tail++;
+            icom_port->uart_port.state->xmit.tail &=
+                (UART_XMIT_SIZE - 1);
+        }
+
+        if (!icom_write(&icom_port->uart_port))
+            /* activate write queue */
+            uart_write_wakeup(&icom_port->uart_port);
+    } else
+        trace(icom_port, "XMIT_DISABLED", 0);
+}
+
+static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
+{ + short int count, rcv_buff; + struct tty_port *port = &icom_port->uart_port.state->port; + u16 status; + struct uart_icount *icount; + unsigned long offset; + unsigned char flag; + + trace(icom_port, "RCV_COMPLETE", 0); + rcv_buff = icom_port->next_rcv; + + status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags); + while (status & SA_FL_RCV_DONE) { + int first = -1; + + trace(icom_port, "FID_STATUS", status); + count = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].leLength); + + trace(icom_port, "RCV_COUNT", count); + + trace(icom_port, "REAL_COUNT", count); + + offset = le32_to_cpu(icom_port->statStg->rcv[rcv_buff].leBuffer) - + icom_port->recv_buf_pci; + + /* Block copy all but the last byte as this may have status */ + if (count > 0) { + first = icom_port->recv_buf[offset]; + tty_insert_flip_string(port, icom_port->recv_buf + offset, count - 1); + } + + icount = &icom_port->uart_port.icount; + icount->rx += count; + + /* Break detect logic */ + if ((status & SA_FLAGS_FRAME_ERROR) + && first == 0) { + status &= ~SA_FLAGS_FRAME_ERROR; + status |= SA_FLAGS_BREAK_DET; + trace(icom_port, "BREAK_DET", 0); + } + + flag = TTY_NORMAL; + + if (status & + (SA_FLAGS_BREAK_DET | SA_FLAGS_PARITY_ERROR | + SA_FLAGS_FRAME_ERROR | SA_FLAGS_OVERRUN)) { + + if (status & SA_FLAGS_BREAK_DET) + icount->brk++; + if (status & SA_FLAGS_PARITY_ERROR) + icount->parity++; + if (status & SA_FLAGS_FRAME_ERROR) + icount->frame++; + if (status & SA_FLAGS_OVERRUN) + icount->overrun++; + + /* + * Now check to see if character should be + * ignored, and mask off conditions which + * should be ignored. + */ + if (status & icom_port->ignore_status_mask) { + trace(icom_port, "IGNORE_CHAR", 0); + goto ignore_char; + } + + status &= icom_port->read_status_mask; + + if (status & SA_FLAGS_BREAK_DET) { + flag = TTY_BREAK; + } else if (status & SA_FLAGS_PARITY_ERROR) { + trace(icom_port, "PARITY_ERROR", 0); + flag = TTY_PARITY; + } else if (status & SA_FLAGS_FRAME_ERROR) + flag = TTY_FRAME; + + } + + tty_insert_flip_char(port, *(icom_port->recv_buf + offset + count - 1), flag); + + if (status & SA_FLAGS_OVERRUN) + /* + * Overrun is special, since it's + * reported immediately, and doesn't + * affect the current character + */ + tty_insert_flip_char(port, 0, TTY_OVERRUN); +ignore_char: + icom_port->statStg->rcv[rcv_buff].flags = 0; + icom_port->statStg->rcv[rcv_buff].leLength = 0; + icom_port->statStg->rcv[rcv_buff].WorkingLength = + cpu_to_le16(RCV_BUFF_SZ); + + rcv_buff++; + if (rcv_buff == NUM_RBUFFS) + rcv_buff = 0; + + status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags); + } + icom_port->next_rcv = rcv_buff; + + tty_flip_buffer_push(port); +} + +static void process_interrupt(u16 port_int_reg, + struct icom_port *icom_port) +{ + + spin_lock(&icom_port->uart_port.lock); + trace(icom_port, "INTERRUPT", port_int_reg); + + if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED)) + xmit_interrupt(port_int_reg, icom_port); + + if (port_int_reg & INT_RCV_COMPLETED) + recv_interrupt(port_int_reg, icom_port); + + spin_unlock(&icom_port->uart_port.lock); +} + +static irqreturn_t icom_interrupt(int irq, void *dev_id) +{ + void __iomem * int_reg; + u32 adapter_interrupts; + u16 port_int_reg; + struct icom_adapter *icom_adapter; + struct icom_port *icom_port; + + /* find icom_port for this interrupt */ + icom_adapter = (struct icom_adapter *) dev_id; + + if (icom_adapter->version == ADAPTER_V2) { + int_reg = icom_adapter->base_addr + 0x8024; + + adapter_interrupts = readl(int_reg); + + if (adapter_interrupts & 
0x00003FFF) { + /* port 2 interrupt, NOTE: for all ADAPTER_V2, port 2 will be active */ + icom_port = &icom_adapter->port_info[2]; + port_int_reg = (u16) adapter_interrupts; + process_interrupt(port_int_reg, icom_port); + check_modem_status(icom_port); + } + if (adapter_interrupts & 0x3FFF0000) { + /* port 3 interrupt */ + icom_port = &icom_adapter->port_info[3]; + if (icom_port->status == ICOM_PORT_ACTIVE) { + port_int_reg = + (u16) (adapter_interrupts >> 16); + process_interrupt(port_int_reg, icom_port); + check_modem_status(icom_port); + } + } + + /* Clear out any pending interrupts */ + writel(adapter_interrupts, int_reg); + + int_reg = icom_adapter->base_addr + 0x8004; + } else { + int_reg = icom_adapter->base_addr + 0x4004; + } + + adapter_interrupts = readl(int_reg); + + if (adapter_interrupts & 0x00003FFF) { + /* port 0 interrupt, NOTE: for all adapters, port 0 will be active */ + icom_port = &icom_adapter->port_info[0]; + port_int_reg = (u16) adapter_interrupts; + process_interrupt(port_int_reg, icom_port); + check_modem_status(icom_port); + } + if (adapter_interrupts & 0x3FFF0000) { + /* port 1 interrupt */ + icom_port = &icom_adapter->port_info[1]; + if (icom_port->status == ICOM_PORT_ACTIVE) { + port_int_reg = (u16) (adapter_interrupts >> 16); + process_interrupt(port_int_reg, icom_port); + check_modem_status(icom_port); + } + } + + /* Clear out any pending interrupts */ + writel(adapter_interrupts, int_reg); + + /* flush the write */ + adapter_interrupts = readl(int_reg); + + return IRQ_HANDLED; +} + +/* + * ------------------------------------------------------------------ + * Begin serial-core API + * ------------------------------------------------------------------ + */ +static unsigned int icom_tx_empty(struct uart_port *port) +{ + struct icom_port *icom_port = to_icom_port(port); + int ret; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + if (le16_to_cpu(icom_port->statStg->xmit[0].flags) & + SA_FLAGS_READY_TO_XMIT) + ret = TIOCSER_TEMT; + else + ret = 0; + + spin_unlock_irqrestore(&port->lock, flags); + return ret; +} + +static void icom_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char local_osr; + + trace(icom_port, "SET_MODEM", 0); + local_osr = readb(&icom_port->dram->osr); + + if (mctrl & TIOCM_RTS) { + trace(icom_port, "RAISE_RTS", 0); + local_osr |= ICOM_RTS; + } else { + trace(icom_port, "LOWER_RTS", 0); + local_osr &= ~ICOM_RTS; + } + + if (mctrl & TIOCM_DTR) { + trace(icom_port, "RAISE_DTR", 0); + local_osr |= ICOM_DTR; + } else { + trace(icom_port, "LOWER_DTR", 0); + local_osr &= ~ICOM_DTR; + } + + writeb(local_osr, &icom_port->dram->osr); +} + +static unsigned int icom_get_mctrl(struct uart_port *port) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char status; + unsigned int result; + + trace(icom_port, "GET_MODEM", 0); + + status = readb(&icom_port->dram->isr); + + result = ((status & ICOM_DCD) ? TIOCM_CAR : 0) + | ((status & ICOM_RI) ? TIOCM_RNG : 0) + | ((status & ICOM_DSR) ? TIOCM_DSR : 0) + | ((status & ICOM_CTS) ? 
TIOCM_CTS : 0); + return result; +} + +static void icom_stop_tx(struct uart_port *port) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char cmdReg; + + trace(icom_port, "STOP", 0); + cmdReg = readb(&icom_port->dram->CmdReg); + writeb(cmdReg | CMD_HOLD_XMIT, &icom_port->dram->CmdReg); +} + +static void icom_start_tx(struct uart_port *port) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char cmdReg; + + trace(icom_port, "START", 0); + cmdReg = readb(&icom_port->dram->CmdReg); + if ((cmdReg & CMD_HOLD_XMIT) == CMD_HOLD_XMIT) + writeb(cmdReg & ~CMD_HOLD_XMIT, + &icom_port->dram->CmdReg); + + icom_write(port); +} + +static void icom_send_xchar(struct uart_port *port, char ch) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char xdata; + int index; + unsigned long flags; + + trace(icom_port, "SEND_XCHAR", ch); + + /* wait .1 sec to send char */ + for (index = 0; index < 10; index++) { + spin_lock_irqsave(&port->lock, flags); + xdata = readb(&icom_port->dram->xchar); + if (xdata == 0x00) { + trace(icom_port, "QUICK_WRITE", 0); + writeb(ch, &icom_port->dram->xchar); + + /* flush write operation */ + xdata = readb(&icom_port->dram->xchar); + spin_unlock_irqrestore(&port->lock, flags); + break; + } + spin_unlock_irqrestore(&port->lock, flags); + msleep(10); + } +} + +static void icom_stop_rx(struct uart_port *port) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char cmdReg; + + cmdReg = readb(&icom_port->dram->CmdReg); + writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg); +} + +static void icom_break(struct uart_port *port, int break_state) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char cmdReg; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + trace(icom_port, "BREAK", 0); + cmdReg = readb(&icom_port->dram->CmdReg); + if (break_state == -1) { + writeb(cmdReg | CMD_SND_BREAK, &icom_port->dram->CmdReg); + } else { + writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg); + } + spin_unlock_irqrestore(&port->lock, flags); +} + +static int icom_open(struct uart_port *port) +{ + struct icom_port *icom_port = to_icom_port(port); + int retval; + + kref_get(&icom_port->adapter->kref); + retval = startup(icom_port); + + if (retval) { + kref_put(&icom_port->adapter->kref, icom_kref_release); + trace(icom_port, "STARTUP_ERROR", 0); + return retval; + } + + return 0; +} + +static void icom_close(struct uart_port *port) +{ + struct icom_port *icom_port = to_icom_port(port); + unsigned char cmdReg; + + trace(icom_port, "CLOSE", 0); + + /* stop receiver */ + cmdReg = readb(&icom_port->dram->CmdReg); + writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg); + + shutdown(icom_port); + + kref_put(&icom_port->adapter->kref, icom_kref_release); +} + +static void icom_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old_termios) +{ + struct icom_port *icom_port = to_icom_port(port); + int baud; + unsigned cflag, iflag; + char new_config2; + char new_config3 = 0; + char tmp_byte; + int index; + int rcv_buff, xmit_buff; + unsigned long offset; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + trace(icom_port, "CHANGE_SPEED", 0); + + cflag = termios->c_cflag; + iflag = termios->c_iflag; + + new_config2 = ICOM_ACFG_DRIVE1; + + /* byte size and parity */ + switch (cflag & CSIZE) { + case CS5: /* 5 bits/char */ + new_config2 |= ICOM_ACFG_5BPC; + break; + case CS6: /* 6 bits/char */ + new_config2 |= ICOM_ACFG_6BPC; + break; + case CS7: /* 7 
bits/char */ + new_config2 |= ICOM_ACFG_7BPC; + break; + case CS8: /* 8 bits/char */ + new_config2 |= ICOM_ACFG_8BPC; + break; + default: + break; + } + if (cflag & CSTOPB) { + /* 2 stop bits */ + new_config2 |= ICOM_ACFG_2STOP_BIT; + } + if (cflag & PARENB) { + /* parity bit enabled */ + new_config2 |= ICOM_ACFG_PARITY_ENAB; + trace(icom_port, "PARENB", 0); + } + if (cflag & PARODD) { + /* odd parity */ + new_config2 |= ICOM_ACFG_PARITY_ODD; + trace(icom_port, "PARODD", 0); + } + + /* Determine divisor based on baud rate */ + baud = uart_get_baud_rate(port, termios, old_termios, + icom_acfg_baud[0], + icom_acfg_baud[BAUD_TABLE_LIMIT]); + if (!baud) + baud = 9600; /* B0 transition handled in rs_set_termios */ + + for (index = 0; index < BAUD_TABLE_LIMIT; index++) { + if (icom_acfg_baud[index] == baud) { + new_config3 = index; + break; + } + } + + uart_update_timeout(port, cflag, baud); + + /* CTS flow control flag and modem status interrupts */ + tmp_byte = readb(&(icom_port->dram->HDLCConfigReg)); + if (cflag & CRTSCTS) + tmp_byte |= HDLC_HDW_FLOW; + else + tmp_byte &= ~HDLC_HDW_FLOW; + writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg)); + + /* + * Set up parity check flag + */ + icom_port->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE; + if (iflag & INPCK) + icom_port->read_status_mask |= + SA_FLAGS_FRAME_ERROR | SA_FLAGS_PARITY_ERROR; + + if ((iflag & BRKINT) || (iflag & PARMRK)) + icom_port->read_status_mask |= SA_FLAGS_BREAK_DET; + + /* + * Characters to ignore + */ + icom_port->ignore_status_mask = 0; + if (iflag & IGNPAR) + icom_port->ignore_status_mask |= + SA_FLAGS_PARITY_ERROR | SA_FLAGS_FRAME_ERROR; + if (iflag & IGNBRK) { + icom_port->ignore_status_mask |= SA_FLAGS_BREAK_DET; + /* + * If we're ignore parity and break indicators, ignore + * overruns too. (For real raw support). + */ + if (iflag & IGNPAR) + icom_port->ignore_status_mask |= SA_FLAGS_OVERRUN; + } + + /* + * !!! 
ignore all characters if CREAD is not set + */ + if ((cflag & CREAD) == 0) + icom_port->ignore_status_mask |= SA_FL_RCV_DONE; + + /* Turn off Receiver to prepare for reset */ + writeb(CMD_RCV_DISABLE, &icom_port->dram->CmdReg); + + for (index = 0; index < 10; index++) { + if (readb(&icom_port->dram->PrevCmdReg) == 0x00) { + break; + } + } + + /* clear all current buffers of data */ + for (rcv_buff = 0; rcv_buff < NUM_RBUFFS; rcv_buff++) { + icom_port->statStg->rcv[rcv_buff].flags = 0; + icom_port->statStg->rcv[rcv_buff].leLength = 0; + icom_port->statStg->rcv[rcv_buff].WorkingLength = + cpu_to_le16(RCV_BUFF_SZ); + } + + for (xmit_buff = 0; xmit_buff < NUM_XBUFFS; xmit_buff++) { + icom_port->statStg->xmit[xmit_buff].flags = 0; + } + + /* activate changes and start xmit and receiver here */ + /* Enable the receiver */ + writeb(new_config3, &(icom_port->dram->async_config3)); + writeb(new_config2, &(icom_port->dram->async_config2)); + tmp_byte = readb(&(icom_port->dram->HDLCConfigReg)); + tmp_byte |= HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL; + writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg)); + writeb(0x04, &(icom_port->dram->FlagFillIdleTimer)); /* 0.5 seconds */ + writeb(0xFF, &(icom_port->dram->ier)); /* enable modem signal interrupts */ + + /* reset processor */ + writeb(CMD_RESTART, &icom_port->dram->CmdReg); + + for (index = 0; index < 10; index++) { + if (readb(&icom_port->dram->CmdReg) == 0x00) { + break; + } + } + + /* Enable Transmitter and Receiver */ + offset = + (unsigned long) &icom_port->statStg->rcv[0] - + (unsigned long) icom_port->statStg; + writel(icom_port->statStg_pci + offset, + &icom_port->dram->RcvStatusAddr); + icom_port->next_rcv = 0; + *icom_port->xmitRestart = 0; + writel(icom_port->xmitRestart_pci, + &icom_port->dram->XmitStatusAddr); + trace(icom_port, "XR_ENAB", 0); + writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *icom_type(struct uart_port *port) +{ + return "icom"; +} + +static void icom_config_port(struct uart_port *port, int flags) +{ + port->type = PORT_ICOM; +} + +static const struct uart_ops icom_ops = { + .tx_empty = icom_tx_empty, + .set_mctrl = icom_set_mctrl, + .get_mctrl = icom_get_mctrl, + .stop_tx = icom_stop_tx, + .start_tx = icom_start_tx, + .send_xchar = icom_send_xchar, + .stop_rx = icom_stop_rx, + .break_ctl = icom_break, + .startup = icom_open, + .shutdown = icom_close, + .set_termios = icom_set_termios, + .type = icom_type, + .config_port = icom_config_port, +}; + +#define ICOM_CONSOLE NULL + +static struct uart_driver icom_uart_driver = { + .owner = THIS_MODULE, + .driver_name = ICOM_DRIVER_NAME, + .dev_name = "ttyA", + .major = ICOM_MAJOR, + .minor = ICOM_MINOR_START, + .nr = NR_PORTS, + .cons = ICOM_CONSOLE, +}; + +static int icom_init_ports(struct icom_adapter *icom_adapter) +{ + u32 subsystem_id = icom_adapter->subsystem_id; + int i; + struct icom_port *icom_port; + + if (icom_adapter->version == ADAPTER_V1) { + icom_adapter->numb_ports = 2; + + for (i = 0; i < 2; i++) { + icom_port = &icom_adapter->port_info[i]; + icom_port->port = i; + icom_port->status = ICOM_PORT_ACTIVE; + } + } else { + if (subsystem_id == PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL) { + icom_adapter->numb_ports = 4; + + for (i = 0; i < 4; i++) { + icom_port = &icom_adapter->port_info[i]; + + icom_port->port = i; + icom_port->status = ICOM_PORT_ACTIVE; + } + } else { + icom_adapter->numb_ports = 4; + + icom_adapter->port_info[0].port = 0; + icom_adapter->port_info[0].status = ICOM_PORT_ACTIVE; 
+ icom_adapter->port_info[1].status = ICOM_PORT_OFF; + icom_adapter->port_info[2].port = 2; + icom_adapter->port_info[2].status = ICOM_PORT_ACTIVE; + icom_adapter->port_info[3].status = ICOM_PORT_OFF; + } + } + + return 0; +} + +static void icom_port_active(struct icom_port *icom_port, struct icom_adapter *icom_adapter, int port_num) +{ + if (icom_adapter->version == ADAPTER_V1) { + icom_port->global_reg = icom_adapter->base_addr + 0x4000; + icom_port->int_reg = icom_adapter->base_addr + + 0x4004 + 2 - 2 * port_num; + } else { + icom_port->global_reg = icom_adapter->base_addr + 0x8000; + if (icom_port->port < 2) + icom_port->int_reg = icom_adapter->base_addr + + 0x8004 + 2 - 2 * icom_port->port; + else + icom_port->int_reg = icom_adapter->base_addr + + 0x8024 + 2 - 2 * (icom_port->port - 2); + } +} +static int icom_load_ports(struct icom_adapter *icom_adapter) +{ + struct icom_port *icom_port; + int port_num; + + for (port_num = 0; port_num < icom_adapter->numb_ports; port_num++) { + + icom_port = &icom_adapter->port_info[port_num]; + + if (icom_port->status == ICOM_PORT_ACTIVE) { + icom_port_active(icom_port, icom_adapter, port_num); + icom_port->dram = icom_adapter->base_addr + + 0x2000 * icom_port->port; + + icom_port->adapter = icom_adapter; + + /* get port memory */ + if (get_port_memory(icom_port) != 0) { + dev_err(&icom_port->adapter->pci_dev->dev, + "Memory allocation for port FAILED\n"); + } + } + } + return 0; +} + +static int icom_alloc_adapter(struct icom_adapter + **icom_adapter_ref) +{ + int adapter_count = 0; + struct icom_adapter *icom_adapter; + struct icom_adapter *cur_adapter_entry; + + icom_adapter = kzalloc(sizeof(struct icom_adapter), GFP_KERNEL); + + if (!icom_adapter) { + return -ENOMEM; + } + + list_for_each_entry(cur_adapter_entry, &icom_adapter_head, + icom_adapter_entry) { + if (cur_adapter_entry->index != adapter_count) { + break; + } + adapter_count++; + } + + icom_adapter->index = adapter_count; + list_add_tail(&icom_adapter->icom_adapter_entry, + &cur_adapter_entry->icom_adapter_entry); + + *icom_adapter_ref = icom_adapter; + return 0; +} + +static void icom_free_adapter(struct icom_adapter *icom_adapter) +{ + list_del(&icom_adapter->icom_adapter_entry); + kfree(icom_adapter); +} + +static void icom_kref_release(struct kref *kref) +{ + struct icom_adapter *icom_adapter = container_of(kref, + struct icom_adapter, kref); + struct icom_port *icom_port; + int index; + + for (index = 0; index < icom_adapter->numb_ports; index++) { + icom_port = &icom_adapter->port_info[index]; + + if (icom_port->status == ICOM_PORT_ACTIVE) { + dev_info(&icom_adapter->pci_dev->dev, + "Device removed\n"); + + uart_remove_one_port(&icom_uart_driver, + &icom_port->uart_port); + + /* be sure that DTR and RTS are dropped */ + writeb(0x00, &icom_port->dram->osr); + + /* Wait 0.1 Sec for simple Init to complete */ + msleep(100); + + /* Stop proccessor */ + stop_processor(icom_port); + + free_port_memory(icom_port); + } + } + + free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter); + iounmap(icom_adapter->base_addr); + pci_release_regions(icom_adapter->pci_dev); + icom_free_adapter(icom_adapter); +} + +static int icom_probe(struct pci_dev *dev, + const struct pci_device_id *ent) +{ + int index; + unsigned int command_reg; + int retval; + struct icom_adapter *icom_adapter; + struct icom_port *icom_port; + + retval = pci_enable_device(dev); + if (retval) { + dev_err(&dev->dev, "Device enable FAILED\n"); + return retval; + } + + retval = pci_request_regions(dev, "icom"); + if (retval) 
{ + dev_err(&dev->dev, "pci_request_regions FAILED\n"); + pci_disable_device(dev); + return retval; + } + + pci_set_master(dev); + + retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg); + if (retval) { + dev_err(&dev->dev, "PCI Config read FAILED\n"); + goto probe_exit0; + } + + pci_write_config_dword(dev, PCI_COMMAND, + command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER + | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + + if (ent->driver_data == ADAPTER_V1) { + pci_write_config_dword(dev, 0x44, 0x8300830A); + } else { + pci_write_config_dword(dev, 0x44, 0x42004200); + pci_write_config_dword(dev, 0x48, 0x42004200); + } + + + retval = icom_alloc_adapter(&icom_adapter); + if (retval) { + dev_err(&dev->dev, "icom_alloc_adapter FAILED\n"); + retval = -EIO; + goto probe_exit0; + } + + icom_adapter->base_addr_pci = pci_resource_start(dev, 0); + icom_adapter->pci_dev = dev; + icom_adapter->version = ent->driver_data; + icom_adapter->subsystem_id = ent->subdevice; + + + retval = icom_init_ports(icom_adapter); + if (retval) { + dev_err(&dev->dev, "Port configuration failed\n"); + goto probe_exit1; + } + + icom_adapter->base_addr = pci_ioremap_bar(dev, 0); + + if (!icom_adapter->base_addr) { + retval = -ENOMEM; + goto probe_exit1; + } + + /* save off irq and request irq line */ + retval = request_irq(dev->irq, icom_interrupt, IRQF_SHARED, ICOM_DRIVER_NAME, (void *)icom_adapter); + if (retval) { + goto probe_exit2; + } + + retval = icom_load_ports(icom_adapter); + + for (index = 0; index < icom_adapter->numb_ports; index++) { + icom_port = &icom_adapter->port_info[index]; + + if (icom_port->status == ICOM_PORT_ACTIVE) { + icom_port->uart_port.irq = icom_port->adapter->pci_dev->irq; + icom_port->uart_port.type = PORT_ICOM; + icom_port->uart_port.iotype = UPIO_MEM; + icom_port->uart_port.membase = + (unsigned char __iomem *)icom_adapter->base_addr_pci; + icom_port->uart_port.fifosize = 16; + icom_port->uart_port.ops = &icom_ops; + icom_port->uart_port.line = + icom_port->port + icom_adapter->index * 4; + if (uart_add_one_port (&icom_uart_driver, &icom_port->uart_port)) { + icom_port->status = ICOM_PORT_OFF; + dev_err(&dev->dev, "Device add failed\n"); + } else + dev_info(&dev->dev, "Device added\n"); + } + } + + kref_init(&icom_adapter->kref); + return 0; + +probe_exit2: + iounmap(icom_adapter->base_addr); +probe_exit1: + icom_free_adapter(icom_adapter); + +probe_exit0: + pci_release_regions(dev); + pci_disable_device(dev); + + return retval; +} + +static void icom_remove(struct pci_dev *dev) +{ + struct icom_adapter *icom_adapter; + + list_for_each_entry(icom_adapter, &icom_adapter_head, + icom_adapter_entry) { + if (icom_adapter->pci_dev == dev) { + kref_put(&icom_adapter->kref, icom_kref_release); + return; + } + } + + dev_err(&dev->dev, "Unable to find device to remove\n"); +} + +static struct pci_driver icom_pci_driver = { + .name = ICOM_DRIVER_NAME, + .id_table = icom_pci_table, + .probe = icom_probe, + .remove = icom_remove, +}; + +static int __init icom_init(void) +{ + int ret; + + ret = uart_register_driver(&icom_uart_driver); + if (ret) + return ret; + + ret = pci_register_driver(&icom_pci_driver); + + if (ret < 0) + uart_unregister_driver(&icom_uart_driver); + + return ret; +} + +static void __exit icom_exit(void) +{ + pci_unregister_driver(&icom_pci_driver); + uart_unregister_driver(&icom_uart_driver); +} + +module_init(icom_init); +module_exit(icom_exit); + +MODULE_AUTHOR("Michael Anderson "); +MODULE_DESCRIPTION("IBM iSeries Serial IOA driver"); +MODULE_LICENSE("GPL"); 
+MODULE_FIRMWARE("icom_call_setup.bin"); +MODULE_FIRMWARE("icom_res_dce.bin"); +MODULE_FIRMWARE("icom_asc.bin"); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c new file mode 100644 index 000000000..f8962a3d4 --- /dev/null +++ b/drivers/tty/serial/imx.c @@ -0,0 +1,2702 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Motorola/Freescale IMX serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Author: Sascha Hauer + * Copyright (C) 2004 Pengutronix + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "serial_mctrl_gpio.h" + +/* Register definitions */ +#define URXD0 0x0 /* Receiver Register */ +#define URTX0 0x40 /* Transmitter Register */ +#define UCR1 0x80 /* Control Register 1 */ +#define UCR2 0x84 /* Control Register 2 */ +#define UCR3 0x88 /* Control Register 3 */ +#define UCR4 0x8c /* Control Register 4 */ +#define UFCR 0x90 /* FIFO Control Register */ +#define USR1 0x94 /* Status Register 1 */ +#define USR2 0x98 /* Status Register 2 */ +#define UESC 0x9c /* Escape Character Register */ +#define UTIM 0xa0 /* Escape Timer Register */ +#define UBIR 0xa4 /* BRM Incremental Register */ +#define UBMR 0xa8 /* BRM Modulator Register */ +#define UBRC 0xac /* Baud Rate Count Register */ +#define IMX21_ONEMS 0xb0 /* One Millisecond register */ +#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */ +#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/ + +/* UART Control Register Bit Fields.*/ +#define URXD_DUMMY_READ (1<<16) +#define URXD_CHARRDY (1<<15) +#define URXD_ERR (1<<14) +#define URXD_OVRRUN (1<<13) +#define URXD_FRMERR (1<<12) +#define URXD_BRK (1<<11) +#define URXD_PRERR (1<<10) +#define URXD_RX_DATA (0xFF<<0) +#define UCR1_ADEN (1<<15) /* Auto detect interrupt */ +#define UCR1_ADBR (1<<14) /* Auto detect baud rate */ +#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */ +#define UCR1_IDEN (1<<12) /* Idle condition interrupt */ +#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */ +#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */ +#define UCR1_RXDMAEN (1<<8) /* Recv ready DMA enable */ +#define UCR1_IREN (1<<7) /* Infrared interface enable */ +#define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */ +#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ +#define UCR1_SNDBRK (1<<4) /* Send break */ +#define UCR1_TXDMAEN (1<<3) /* Transmitter ready DMA enable */ +#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */ +#define UCR1_ATDMAEN (1<<2) /* Aging DMA Timer Enable */ +#define UCR1_DOZE (1<<1) /* Doze */ +#define UCR1_UARTEN (1<<0) /* UART enabled */ +#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */ +#define UCR2_IRTS (1<<14) /* Ignore RTS pin */ +#define UCR2_CTSC (1<<13) /* CTS pin control */ +#define UCR2_CTS (1<<12) /* Clear to send */ +#define UCR2_ESCEN (1<<11) /* Escape enable */ +#define UCR2_PREN (1<<8) /* Parity enable */ +#define UCR2_PROE (1<<7) /* Parity odd/even */ +#define UCR2_STPB (1<<6) /* Stop */ +#define UCR2_WS (1<<5) /* Word size */ +#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */ +#define UCR2_ATEN (1<<3) /* Aging Timer Enable */ +#define UCR2_TXEN (1<<2) /* Transmitter enabled */ +#define UCR2_RXEN (1<<1) /* Receiver enabled */ +#define UCR2_SRST (1<<0) /* SW reset */ +#define 
UCR3_DTREN (1<<13) /* DTR interrupt enable */ +#define UCR3_PARERREN (1<<12) /* Parity enable */ +#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */ +#define UCR3_DSR (1<<10) /* Data set ready */ +#define UCR3_DCD (1<<9) /* Data carrier detect */ +#define UCR3_RI (1<<8) /* Ring indicator */ +#define UCR3_ADNIMP (1<<7) /* Autobaud Detection Not Improved */ +#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */ +#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */ +#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */ +#define UCR3_DTRDEN (1<<3) /* Data Terminal Ready Delta Enable. */ +#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */ +#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */ +#define UCR3_BPEN (1<<0) /* Preset registers enable */ +#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */ +#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */ +#define UCR4_INVR (1<<9) /* Inverted infrared reception */ +#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */ +#define UCR4_WKEN (1<<7) /* Wake interrupt enable */ +#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */ +#define UCR4_IDDMAEN (1<<6) /* DMA IDLE Condition Detected */ +#define UCR4_IRSC (1<<5) /* IR special case */ +#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */ +#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */ +#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */ +#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ +#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ +#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */ +#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ +#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7) +#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */ +#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */ +#define USR1_RTSS (1<<14) /* RTS pin status */ +#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */ +#define USR1_RTSD (1<<12) /* RTS delta */ +#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */ +#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */ +#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */ +#define USR1_AGTIM (1<<8) /* Ageing timer interrupt flag */ +#define USR1_DTRD (1<<7) /* DTR Delta */ +#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */ +#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */ +#define USR1_AWAKE (1<<4) /* Aysnc wake interrupt flag */ +#define USR2_ADET (1<<15) /* Auto baud rate detect complete */ +#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */ +#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */ +#define USR2_IDLE (1<<12) /* Idle condition */ +#define USR2_RIDELT (1<<10) /* Ring Interrupt Delta */ +#define USR2_RIIN (1<<9) /* Ring Indicator Input */ +#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */ +#define USR2_WAKE (1<<7) /* Wake */ +#define USR2_DCDIN (1<<5) /* Data Carrier Detect Input */ +#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */ +#define USR2_TXDC (1<<3) /* Transmitter complete */ +#define USR2_BRCD (1<<2) /* Break condition */ +#define USR2_ORE (1<<1) /* Overrun error */ +#define USR2_RDR (1<<0) /* Recv data ready */ +#define UTS_FRCPERR (1<<13) /* Force parity error */ +#define UTS_LOOP (1<<12) /* Loop tx and rx */ +#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */ +#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */ +#define UTS_TXFULL (1<<4) /* TxFIFO full */ +#define UTS_RXFULL (1<<3) 
/* RxFIFO full */ +#define UTS_SOFTRST (1<<0) /* Software reset */ + +/* We've been assigned a range on the "Low-density serial ports" major */ +#define SERIAL_IMX_MAJOR 207 +#define MINOR_START 16 +#define DEV_NAME "ttymxc" + +/* + * This determines how often we check the modem status signals + * for any change. They generally aren't connected to an IRQ + * so we have to poll them. We also check immediately before + * filling the TX fifo incase CTS has been dropped. + */ +#define MCTRL_TIMEOUT (250*HZ/1000) + +#define DRIVER_NAME "IMX-uart" + +#define UART_NR 8 + +/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */ +enum imx_uart_type { + IMX1_UART, + IMX21_UART, + IMX53_UART, + IMX6Q_UART, +}; + +/* device type dependent stuff */ +struct imx_uart_data { + unsigned uts_reg; + enum imx_uart_type devtype; +}; + +enum imx_tx_state { + OFF, + WAIT_AFTER_RTS, + SEND, + WAIT_AFTER_SEND, +}; + +struct imx_port { + struct uart_port port; + struct timer_list timer; + unsigned int old_status; + unsigned int have_rtscts:1; + unsigned int have_rtsgpio:1; + unsigned int dte_mode:1; + unsigned int inverted_tx:1; + unsigned int inverted_rx:1; + struct clk *clk_ipg; + struct clk *clk_per; + const struct imx_uart_data *devdata; + + struct mctrl_gpios *gpios; + + /* shadow registers */ + unsigned int ucr1; + unsigned int ucr2; + unsigned int ucr3; + unsigned int ucr4; + unsigned int ufcr; + + /* DMA fields */ + unsigned int dma_is_enabled:1; + unsigned int dma_is_rxing:1; + unsigned int dma_is_txing:1; + struct dma_chan *dma_chan_rx, *dma_chan_tx; + struct scatterlist rx_sgl, tx_sgl[2]; + void *rx_buf; + struct circ_buf rx_ring; + unsigned int rx_buf_size; + unsigned int rx_period_length; + unsigned int rx_periods; + dma_cookie_t rx_cookie; + unsigned int tx_bytes; + unsigned int dma_tx_nents; + unsigned int saved_reg[10]; + bool context_saved; + + enum imx_tx_state tx_state; + struct hrtimer trigger_start_tx; + struct hrtimer trigger_stop_tx; +}; + +struct imx_port_ucrs { + unsigned int ucr1; + unsigned int ucr2; + unsigned int ucr3; +}; + +static struct imx_uart_data imx_uart_devdata[] = { + [IMX1_UART] = { + .uts_reg = IMX1_UTS, + .devtype = IMX1_UART, + }, + [IMX21_UART] = { + .uts_reg = IMX21_UTS, + .devtype = IMX21_UART, + }, + [IMX53_UART] = { + .uts_reg = IMX21_UTS, + .devtype = IMX53_UART, + }, + [IMX6Q_UART] = { + .uts_reg = IMX21_UTS, + .devtype = IMX6Q_UART, + }, +}; + +static const struct of_device_id imx_uart_dt_ids[] = { + { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], }, + { .compatible = "fsl,imx53-uart", .data = &imx_uart_devdata[IMX53_UART], }, + { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], }, + { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx_uart_dt_ids); + +static void imx_uart_writel(struct imx_port *sport, u32 val, u32 offset) +{ + switch (offset) { + case UCR1: + sport->ucr1 = val; + break; + case UCR2: + sport->ucr2 = val; + break; + case UCR3: + sport->ucr3 = val; + break; + case UCR4: + sport->ucr4 = val; + break; + case UFCR: + sport->ufcr = val; + break; + default: + break; + } + writel(val, sport->port.membase + offset); +} + +static u32 imx_uart_readl(struct imx_port *sport, u32 offset) +{ + switch (offset) { + case UCR1: + return sport->ucr1; + break; + case UCR2: + /* + * UCR2_SRST is the only bit in the cached registers that might + * differ from the value that was last written. 
As it only + * automatically becomes one after being cleared, reread + * conditionally. + */ + if (!(sport->ucr2 & UCR2_SRST)) + sport->ucr2 = readl(sport->port.membase + offset); + return sport->ucr2; + break; + case UCR3: + return sport->ucr3; + break; + case UCR4: + return sport->ucr4; + break; + case UFCR: + return sport->ufcr; + break; + default: + return readl(sport->port.membase + offset); + } +} + +static inline unsigned imx_uart_uts_reg(struct imx_port *sport) +{ + return sport->devdata->uts_reg; +} + +static inline int imx_uart_is_imx1(struct imx_port *sport) +{ + return sport->devdata->devtype == IMX1_UART; +} + +static inline int imx_uart_is_imx21(struct imx_port *sport) +{ + return sport->devdata->devtype == IMX21_UART; +} + +static inline int imx_uart_is_imx53(struct imx_port *sport) +{ + return sport->devdata->devtype == IMX53_UART; +} + +static inline int imx_uart_is_imx6q(struct imx_port *sport) +{ + return sport->devdata->devtype == IMX6Q_UART; +} +/* + * Save and restore functions for UCR1, UCR2 and UCR3 registers + */ +#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE) +static void imx_uart_ucrs_save(struct imx_port *sport, + struct imx_port_ucrs *ucr) +{ + /* save control registers */ + ucr->ucr1 = imx_uart_readl(sport, UCR1); + ucr->ucr2 = imx_uart_readl(sport, UCR2); + ucr->ucr3 = imx_uart_readl(sport, UCR3); +} + +static void imx_uart_ucrs_restore(struct imx_port *sport, + struct imx_port_ucrs *ucr) +{ + /* restore control registers */ + imx_uart_writel(sport, ucr->ucr1, UCR1); + imx_uart_writel(sport, ucr->ucr2, UCR2); + imx_uart_writel(sport, ucr->ucr3, UCR3); +} +#endif + +/* called with port.lock taken and irqs caller dependent */ +static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2) +{ + *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); + + mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS); +} + +/* called with port.lock taken and irqs caller dependent */ +static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2) +{ + *ucr2 &= ~UCR2_CTSC; + *ucr2 |= UCR2_CTS; + + mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); +} + +static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec) +{ + hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL); +} + +static void imx_uart_disable_loopback_rs485(struct imx_port *sport) +{ + unsigned int uts; + + /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts &= ~UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_start_rx(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned int ucr1, ucr2; + + ucr1 = imx_uart_readl(sport, UCR1); + ucr2 = imx_uart_readl(sport, UCR2); + + ucr2 |= UCR2_RXEN; + + if (sport->dma_is_enabled) { + ucr1 |= UCR1_RXDMAEN | UCR1_ATDMAEN; + } else { + ucr1 |= UCR1_RRDYEN; + ucr2 |= UCR2_ATEN; + } + + /* Write UCR2 first as it includes RXEN */ + imx_uart_writel(sport, ucr2, UCR2); + imx_uart_writel(sport, ucr1, UCR1); + imx_uart_disable_loopback_rs485(sport); +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_stop_tx(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + u32 ucr1, ucr4, usr2; + + if (sport->tx_state == OFF) + return; + + /* + * We are maybe in the SMP context, so if the DMA TX thread is running + * on other cpu, we have to wait for it to finish. 
+ */ + if (sport->dma_is_txing) + return; + + ucr1 = imx_uart_readl(sport, UCR1); + imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1); + + ucr4 = imx_uart_readl(sport, UCR4); + usr2 = imx_uart_readl(sport, USR2); + if ((!(usr2 & USR2_TXDC)) && (ucr4 & UCR4_TCEN)) { + /* The shifter is still busy, so retry once TC triggers */ + return; + } + + ucr4 &= ~UCR4_TCEN; + imx_uart_writel(sport, ucr4, UCR4); + + /* in rs485 mode disable transmitter */ + if (port->rs485.flags & SER_RS485_ENABLED) { + if (sport->tx_state == SEND) { + sport->tx_state = WAIT_AFTER_SEND; + + if (port->rs485.delay_rts_after_send > 0) { + start_hrtimer_ms(&sport->trigger_stop_tx, + port->rs485.delay_rts_after_send); + return; + } + + /* continue without any delay */ + } + + if (sport->tx_state == WAIT_AFTER_RTS || + sport->tx_state == WAIT_AFTER_SEND) { + u32 ucr2; + + hrtimer_try_to_cancel(&sport->trigger_start_tx); + + ucr2 = imx_uart_readl(sport, UCR2); + if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + imx_uart_rts_active(sport, &ucr2); + else + imx_uart_rts_inactive(sport, &ucr2); + imx_uart_writel(sport, ucr2, UCR2); + + imx_uart_start_rx(port); + + sport->tx_state = OFF; + } + } else { + sport->tx_state = OFF; + } +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_stop_rx(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + u32 ucr1, ucr2, ucr4, uts; + + ucr1 = imx_uart_readl(sport, UCR1); + ucr2 = imx_uart_readl(sport, UCR2); + ucr4 = imx_uart_readl(sport, UCR4); + + if (sport->dma_is_enabled) { + ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN); + } else { + ucr1 &= ~UCR1_RRDYEN; + ucr2 &= ~UCR2_ATEN; + ucr4 &= ~UCR4_OREN; + } + imx_uart_writel(sport, ucr1, UCR1); + imx_uart_writel(sport, ucr4, UCR4); + + /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ + if (port->rs485.flags & SER_RS485_ENABLED && + port->rs485.flags & SER_RS485_RTS_ON_SEND && + sport->have_rtscts && !sport->have_rtsgpio) { + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts |= UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + ucr2 |= UCR2_RXEN; + } else { + ucr2 &= ~UCR2_RXEN; + } + + imx_uart_writel(sport, ucr2, UCR2); +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_enable_ms(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + + mod_timer(&sport->timer, jiffies); + + mctrl_gpio_enable_ms(sport->gpios); +} + +static void imx_uart_dma_tx(struct imx_port *sport); + +/* called with port.lock taken and irqs off */ +static inline void imx_uart_transmit_buffer(struct imx_port *sport) +{ + struct circ_buf *xmit = &sport->port.state->xmit; + + if (sport->port.x_char) { + /* Send next char */ + imx_uart_writel(sport, sport->port.x_char, URTX0); + sport->port.icount.tx++; + sport->port.x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { + imx_uart_stop_tx(&sport->port); + return; + } + + if (sport->dma_is_enabled) { + u32 ucr1; + /* + * We've just sent a X-char Ensure the TX DMA is enabled + * and the TX IRQ is disabled. 
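+ * (This branch runs with no x_char pending: either re-enable the DMA
+ * request (UCR1_TXDMAEN) for the transfer that is already in flight or,
+ * if none is running, start a new one via imx_uart_dma_tx().)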
+ **/ + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 &= ~UCR1_TRDYEN; + if (sport->dma_is_txing) { + ucr1 |= UCR1_TXDMAEN; + imx_uart_writel(sport, ucr1, UCR1); + } else { + imx_uart_writel(sport, ucr1, UCR1); + imx_uart_dma_tx(sport); + } + + return; + } + + while (!uart_circ_empty(xmit) && + !(imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)) { + /* send xmit->buf[xmit->tail] + * out the port here */ + imx_uart_writel(sport, xmit->buf[xmit->tail], URTX0); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + sport->port.icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&sport->port); + + if (uart_circ_empty(xmit)) + imx_uart_stop_tx(&sport->port); +} + +static void imx_uart_dma_tx_callback(void *data) +{ + struct imx_port *sport = data; + struct scatterlist *sgl = &sport->tx_sgl[0]; + struct circ_buf *xmit = &sport->port.state->xmit; + unsigned long flags; + u32 ucr1; + + spin_lock_irqsave(&sport->port.lock, flags); + + dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); + + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 &= ~UCR1_TXDMAEN; + imx_uart_writel(sport, ucr1, UCR1); + + /* update the stat */ + xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1); + sport->port.icount.tx += sport->tx_bytes; + + dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); + + sport->dma_is_txing = 0; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&sport->port); + + if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port)) + imx_uart_dma_tx(sport); + else if (sport->port.rs485.flags & SER_RS485_ENABLED) { + u32 ucr4 = imx_uart_readl(sport, UCR4); + ucr4 |= UCR4_TCEN; + imx_uart_writel(sport, ucr4, UCR4); + } + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_dma_tx(struct imx_port *sport) +{ + struct circ_buf *xmit = &sport->port.state->xmit; + struct scatterlist *sgl = sport->tx_sgl; + struct dma_async_tx_descriptor *desc; + struct dma_chan *chan = sport->dma_chan_tx; + struct device *dev = sport->port.dev; + u32 ucr1, ucr4; + int ret; + + if (sport->dma_is_txing) + return; + + ucr4 = imx_uart_readl(sport, UCR4); + ucr4 &= ~UCR4_TCEN; + imx_uart_writel(sport, ucr4, UCR4); + + sport->tx_bytes = uart_circ_chars_pending(xmit); + + if (xmit->tail < xmit->head || xmit->head == 0) { + sport->dma_tx_nents = 1; + sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes); + } else { + sport->dma_tx_nents = 2; + sg_init_table(sgl, 2); + sg_set_buf(sgl, xmit->buf + xmit->tail, + UART_XMIT_SIZE - xmit->tail); + sg_set_buf(sgl + 1, xmit->buf, xmit->head); + } + + ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); + if (ret == 0) { + dev_err(dev, "DMA mapping error for TX.\n"); + return; + } + desc = dmaengine_prep_slave_sg(chan, sgl, ret, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); + if (!desc) { + dma_unmap_sg(dev, sgl, sport->dma_tx_nents, + DMA_TO_DEVICE); + dev_err(dev, "We cannot prepare for the TX slave dma!\n"); + return; + } + desc->callback = imx_uart_dma_tx_callback; + desc->callback_param = sport; + + dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n", + uart_circ_chars_pending(xmit)); + + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 |= UCR1_TXDMAEN; + imx_uart_writel(sport, ucr1, UCR1); + + /* fire it */ + sport->dma_is_txing = 1; + dmaengine_submit(desc); + dma_async_issue_pending(chan); + return; +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_start_tx(struct uart_port *port) 
+{ + struct imx_port *sport = (struct imx_port *)port; + u32 ucr1; + + if (!sport->port.x_char && uart_circ_empty(&port->state->xmit)) + return; + + /* + * We cannot simply do nothing here if sport->tx_state == SEND already + * because UCR1_TXMPTYEN might already have been cleared in + * imx_uart_stop_tx(), but tx_state is still SEND. + */ + + if (port->rs485.flags & SER_RS485_ENABLED) { + if (sport->tx_state == OFF) { + u32 ucr2 = imx_uart_readl(sport, UCR2); + if (port->rs485.flags & SER_RS485_RTS_ON_SEND) + imx_uart_rts_active(sport, &ucr2); + else + imx_uart_rts_inactive(sport, &ucr2); + imx_uart_writel(sport, ucr2, UCR2); + + if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) + imx_uart_stop_rx(port); + + sport->tx_state = WAIT_AFTER_RTS; + + if (port->rs485.delay_rts_before_send > 0) { + start_hrtimer_ms(&sport->trigger_start_tx, + port->rs485.delay_rts_before_send); + return; + } + + /* continue without any delay */ + } + + if (sport->tx_state == WAIT_AFTER_SEND + || sport->tx_state == WAIT_AFTER_RTS) { + + hrtimer_try_to_cancel(&sport->trigger_stop_tx); + + /* + * Enable transmitter and shifter empty irq only if DMA + * is off. In the DMA case this is done in the + * tx-callback. + */ + if (!sport->dma_is_enabled) { + u32 ucr4 = imx_uart_readl(sport, UCR4); + ucr4 |= UCR4_TCEN; + imx_uart_writel(sport, ucr4, UCR4); + } + + sport->tx_state = SEND; + } + } else { + sport->tx_state = SEND; + } + + if (!sport->dma_is_enabled) { + ucr1 = imx_uart_readl(sport, UCR1); + imx_uart_writel(sport, ucr1 | UCR1_TRDYEN, UCR1); + } + + if (sport->dma_is_enabled) { + if (sport->port.x_char) { + /* We have X-char to send, so enable TX IRQ and + * disable TX DMA to let TX interrupt to send X-char */ + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 &= ~UCR1_TXDMAEN; + ucr1 |= UCR1_TRDYEN; + imx_uart_writel(sport, ucr1, UCR1); + return; + } + + if (!uart_circ_empty(&port->state->xmit) && + !uart_tx_stopped(port)) + imx_uart_dma_tx(sport); + return; + } +} + +static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + u32 usr1; + + imx_uart_writel(sport, USR1_RTSD, USR1); + usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS; + uart_handle_cts_change(&sport->port, !!usr1); + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); + + return IRQ_HANDLED; +} + +static irqreturn_t imx_uart_rtsint(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + irqreturn_t ret; + + spin_lock(&sport->port.lock); + + ret = __imx_uart_rtsint(irq, dev_id); + + spin_unlock(&sport->port.lock); + + return ret; +} + +static irqreturn_t imx_uart_txint(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + + spin_lock(&sport->port.lock); + imx_uart_transmit_buffer(sport); + spin_unlock(&sport->port.lock); + return IRQ_HANDLED; +} + +static irqreturn_t __imx_uart_rxint(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + unsigned int rx, flg, ignored = 0; + struct tty_port *port = &sport->port.state->port; + + while (imx_uart_readl(sport, USR2) & USR2_RDR) { + u32 usr2; + + flg = TTY_NORMAL; + sport->port.icount.rx++; + + rx = imx_uart_readl(sport, URXD0); + + usr2 = imx_uart_readl(sport, USR2); + if (usr2 & USR2_BRCD) { + imx_uart_writel(sport, USR2_BRCD, USR2); + if (uart_handle_break(&sport->port)) + continue; + } + + if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx)) + continue; + + if (unlikely(rx & URXD_ERR)) { + if (rx & URXD_BRK) + sport->port.icount.brk++; + else if (rx & URXD_PRERR) + sport->port.icount.parity++; + else if (rx & URXD_FRMERR) + 
sport->port.icount.frame++; + if (rx & URXD_OVRRUN) + sport->port.icount.overrun++; + + if (rx & sport->port.ignore_status_mask) { + if (++ignored > 100) + goto out; + continue; + } + + rx &= (sport->port.read_status_mask | 0xFF); + + if (rx & URXD_BRK) + flg = TTY_BREAK; + else if (rx & URXD_PRERR) + flg = TTY_PARITY; + else if (rx & URXD_FRMERR) + flg = TTY_FRAME; + if (rx & URXD_OVRRUN) + flg = TTY_OVERRUN; + + sport->port.sysrq = 0; + } + + if (sport->port.ignore_status_mask & URXD_DUMMY_READ) + goto out; + + if (tty_insert_flip_char(port, rx, flg) == 0) + sport->port.icount.buf_overrun++; + } + +out: + tty_flip_buffer_push(port); + + return IRQ_HANDLED; +} + +static irqreturn_t imx_uart_rxint(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + irqreturn_t ret; + + spin_lock(&sport->port.lock); + + ret = __imx_uart_rxint(irq, dev_id); + + spin_unlock(&sport->port.lock); + + return ret; +} + +static void imx_uart_clear_rx_errors(struct imx_port *sport); + +/* + * We have a modem side uart, so the meanings of RTS and CTS are inverted. + */ +static unsigned int imx_uart_get_hwmctrl(struct imx_port *sport) +{ + unsigned int tmp = TIOCM_DSR; + unsigned usr1 = imx_uart_readl(sport, USR1); + unsigned usr2 = imx_uart_readl(sport, USR2); + + if (usr1 & USR1_RTSS) + tmp |= TIOCM_CTS; + + /* in DCE mode DCDIN is always 0 */ + if (!(usr2 & USR2_DCDIN)) + tmp |= TIOCM_CAR; + + if (sport->dte_mode) + if (!(imx_uart_readl(sport, USR2) & USR2_RIIN)) + tmp |= TIOCM_RI; + + return tmp; +} + +/* + * Handle any change of modem status signal since we were last called. + */ +static void imx_uart_mctrl_check(struct imx_port *sport) +{ + unsigned int status, changed; + + status = imx_uart_get_hwmctrl(sport); + changed = status ^ sport->old_status; + + if (changed == 0) + return; + + sport->old_status = status; + + if (changed & TIOCM_RI && status & TIOCM_RI) + sport->port.icount.rng++; + if (changed & TIOCM_DSR) + sport->port.icount.dsr++; + if (changed & TIOCM_CAR) + uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); + if (changed & TIOCM_CTS) + uart_handle_cts_change(&sport->port, status & TIOCM_CTS); + + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); +} + +static irqreturn_t imx_uart_int(int irq, void *dev_id) +{ + struct imx_port *sport = dev_id; + unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4; + irqreturn_t ret = IRQ_NONE; + + spin_lock(&sport->port.lock); + + usr1 = imx_uart_readl(sport, USR1); + usr2 = imx_uart_readl(sport, USR2); + ucr1 = imx_uart_readl(sport, UCR1); + ucr2 = imx_uart_readl(sport, UCR2); + ucr3 = imx_uart_readl(sport, UCR3); + ucr4 = imx_uart_readl(sport, UCR4); + + /* + * Even if a condition is true that can trigger an irq only handle it if + * the respective irq source is enabled. This prevents some undesired + * actions, for example if a character that sits in the RX FIFO and that + * should be fetched via DMA is tried to be fetched using PIO. Or the + * receiver is currently off and so reading from URXD0 results in an + * exception. So just mask the (raw) status bits for disabled irqs. 
+ */ + if ((ucr1 & UCR1_RRDYEN) == 0) + usr1 &= ~USR1_RRDY; + if ((ucr2 & UCR2_ATEN) == 0) + usr1 &= ~USR1_AGTIM; + if ((ucr1 & UCR1_TRDYEN) == 0) + usr1 &= ~USR1_TRDY; + if ((ucr4 & UCR4_TCEN) == 0) + usr2 &= ~USR2_TXDC; + if ((ucr3 & UCR3_DTRDEN) == 0) + usr1 &= ~USR1_DTRD; + if ((ucr1 & UCR1_RTSDEN) == 0) + usr1 &= ~USR1_RTSD; + if ((ucr3 & UCR3_AWAKEN) == 0) + usr1 &= ~USR1_AWAKE; + if ((ucr4 & UCR4_OREN) == 0) + usr2 &= ~USR2_ORE; + + if (usr1 & (USR1_RRDY | USR1_AGTIM)) { + imx_uart_writel(sport, USR1_AGTIM, USR1); + + __imx_uart_rxint(irq, dev_id); + ret = IRQ_HANDLED; + } + + if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) { + imx_uart_transmit_buffer(sport); + ret = IRQ_HANDLED; + } + + if (usr1 & USR1_DTRD) { + imx_uart_writel(sport, USR1_DTRD, USR1); + + imx_uart_mctrl_check(sport); + + ret = IRQ_HANDLED; + } + + if (usr1 & USR1_RTSD) { + __imx_uart_rtsint(irq, dev_id); + ret = IRQ_HANDLED; + } + + if (usr1 & USR1_AWAKE) { + imx_uart_writel(sport, USR1_AWAKE, USR1); + ret = IRQ_HANDLED; + } + + if (usr2 & USR2_ORE) { + sport->port.icount.overrun++; + imx_uart_writel(sport, USR2_ORE, USR2); + ret = IRQ_HANDLED; + } + + spin_unlock(&sport->port.lock); + + return ret; +} + +/* + * Return TIOCSER_TEMT when transmitter is not busy. + */ +static unsigned int imx_uart_tx_empty(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned int ret; + + ret = (imx_uart_readl(sport, USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0; + + /* If the TX DMA is working, return 0. */ + if (sport->dma_is_txing) + ret = 0; + + return ret; +} + +/* called with port.lock taken and irqs off */ +static unsigned int imx_uart_get_mctrl(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned int ret = imx_uart_get_hwmctrl(sport); + + mctrl_gpio_get(sport->gpios, &ret); + + return ret; +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct imx_port *sport = (struct imx_port *)port; + u32 ucr3, uts; + + if (!(port->rs485.flags & SER_RS485_ENABLED)) { + u32 ucr2; + + /* + * Turn off autoRTS if RTS is lowered and restore autoRTS + * setting if RTS is raised. + */ + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 &= ~(UCR2_CTS | UCR2_CTSC); + if (mctrl & TIOCM_RTS) { + ucr2 |= UCR2_CTS; + /* + * UCR2_IRTS is unset if and only if the port is + * configured for CRTSCTS, so we use inverted UCR2_IRTS + * to get the state to restore to. + */ + if (!(ucr2 & UCR2_IRTS)) + ucr2 |= UCR2_CTSC; + } + imx_uart_writel(sport, ucr2, UCR2); + } + + ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_DSR; + if (!(mctrl & TIOCM_DTR)) + ucr3 |= UCR3_DSR; + imx_uart_writel(sport, ucr3, UCR3); + + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)) & ~UTS_LOOP; + if (mctrl & TIOCM_LOOP) + uts |= UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + + mctrl_gpio_set(sport->gpios, mctrl); +} + +/* + * Interrupts always disabled. + */ +static void imx_uart_break_ctl(struct uart_port *port, int break_state) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned long flags; + u32 ucr1; + + spin_lock_irqsave(&sport->port.lock, flags); + + ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK; + + if (break_state != 0) + ucr1 |= UCR1_SNDBRK; + + imx_uart_writel(sport, ucr1, UCR1); + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +/* + * This is our per-port timeout handler, for checking the + * modem status signals. 
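+ *
+ * It is armed from imx_uart_enable_ms() and re-arms itself every
+ * MCTRL_TIMEOUT (250 ms) until imx_uart_shutdown() deletes the timer.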
+ */ +static void imx_uart_timeout(struct timer_list *t) +{ + struct imx_port *sport = from_timer(sport, t, timer); + unsigned long flags; + + if (sport->port.state) { + spin_lock_irqsave(&sport->port.lock, flags); + imx_uart_mctrl_check(sport); + spin_unlock_irqrestore(&sport->port.lock, flags); + + mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); + } +} + +/* + * There are two kinds of RX DMA interrupts(such as in the MX6Q): + * [1] the RX DMA buffer is full. + * [2] the aging timer expires + * + * Condition [2] is triggered when a character has been sitting in the FIFO + * for at least 8 byte durations. + */ +static void imx_uart_dma_rx_callback(void *data) +{ + struct imx_port *sport = data; + struct dma_chan *chan = sport->dma_chan_rx; + struct scatterlist *sgl = &sport->rx_sgl; + struct tty_port *port = &sport->port.state->port; + struct dma_tx_state state; + struct circ_buf *rx_ring = &sport->rx_ring; + enum dma_status status; + unsigned int w_bytes = 0; + unsigned int r_bytes; + unsigned int bd_size; + + status = dmaengine_tx_status(chan, sport->rx_cookie, &state); + + if (status == DMA_ERROR) { + imx_uart_clear_rx_errors(sport); + return; + } + + if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) { + + /* + * The state-residue variable represents the empty space + * relative to the entire buffer. Taking this in consideration + * the head is always calculated base on the buffer total + * length - DMA transaction residue. The UART script from the + * SDMA firmware will jump to the next buffer descriptor, + * once a DMA transaction if finalized (IMX53 RM - A.4.1.2.4). + * Taking this in consideration the tail is always at the + * beginning of the buffer descriptor that contains the head. + */ + + /* Calculate the head */ + rx_ring->head = sg_dma_len(sgl) - state.residue; + + /* Calculate the tail. 
*/ + bd_size = sg_dma_len(sgl) / sport->rx_periods; + rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size; + + if (rx_ring->head <= sg_dma_len(sgl) && + rx_ring->head > rx_ring->tail) { + + /* Move data from tail to head */ + r_bytes = rx_ring->head - rx_ring->tail; + + /* CPU claims ownership of RX DMA buffer */ + dma_sync_sg_for_cpu(sport->port.dev, sgl, 1, + DMA_FROM_DEVICE); + + w_bytes = tty_insert_flip_string(port, + sport->rx_buf + rx_ring->tail, r_bytes); + + /* UART retrieves ownership of RX DMA buffer */ + dma_sync_sg_for_device(sport->port.dev, sgl, 1, + DMA_FROM_DEVICE); + + if (w_bytes != r_bytes) + sport->port.icount.buf_overrun++; + + sport->port.icount.rx += w_bytes; + } else { + WARN_ON(rx_ring->head > sg_dma_len(sgl)); + WARN_ON(rx_ring->head <= rx_ring->tail); + } + } + + if (w_bytes) { + tty_flip_buffer_push(port); + dev_dbg(sport->port.dev, "We get %d bytes.\n", w_bytes); + } +} + +static int imx_uart_start_rx_dma(struct imx_port *sport) +{ + struct scatterlist *sgl = &sport->rx_sgl; + struct dma_chan *chan = sport->dma_chan_rx; + struct device *dev = sport->port.dev; + struct dma_async_tx_descriptor *desc; + int ret; + + sport->rx_ring.head = 0; + sport->rx_ring.tail = 0; + + sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size); + ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE); + if (ret == 0) { + dev_err(dev, "DMA mapping error for RX.\n"); + return -EINVAL; + } + + desc = dmaengine_prep_dma_cyclic(chan, sg_dma_address(sgl), + sg_dma_len(sgl), sg_dma_len(sgl) / sport->rx_periods, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); + + if (!desc) { + dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE); + dev_err(dev, "We cannot prepare for the RX slave dma!\n"); + return -EINVAL; + } + desc->callback = imx_uart_dma_rx_callback; + desc->callback_param = sport; + + dev_dbg(dev, "RX: prepare for the DMA.\n"); + sport->dma_is_rxing = 1; + sport->rx_cookie = dmaengine_submit(desc); + dma_async_issue_pending(chan); + return 0; +} + +static void imx_uart_clear_rx_errors(struct imx_port *sport) +{ + struct tty_port *port = &sport->port.state->port; + u32 usr1, usr2; + + usr1 = imx_uart_readl(sport, USR1); + usr2 = imx_uart_readl(sport, USR2); + + if (usr2 & USR2_BRCD) { + sport->port.icount.brk++; + imx_uart_writel(sport, USR2_BRCD, USR2); + uart_handle_break(&sport->port); + if (tty_insert_flip_char(port, 0, TTY_BREAK) == 0) + sport->port.icount.buf_overrun++; + tty_flip_buffer_push(port); + } else { + if (usr1 & USR1_FRAMERR) { + sport->port.icount.frame++; + imx_uart_writel(sport, USR1_FRAMERR, USR1); + } else if (usr1 & USR1_PARITYERR) { + sport->port.icount.parity++; + imx_uart_writel(sport, USR1_PARITYERR, USR1); + } + } + + if (usr2 & USR2_ORE) { + sport->port.icount.overrun++; + imx_uart_writel(sport, USR2_ORE, USR2); + } + +} + +#define TXTL_DEFAULT 2 /* reset default */ +#define RXTL_DEFAULT 8 /* 8 characters or aging timer */ +#define TXTL_DMA 8 /* DMA burst setting */ +#define RXTL_DMA 9 /* DMA burst setting */ + +static void imx_uart_setup_ufcr(struct imx_port *sport, + unsigned char txwl, unsigned char rxwl) +{ + unsigned int val; + + /* set receiver / transmitter trigger level */ + val = imx_uart_readl(sport, UFCR) & (UFCR_RFDIV | UFCR_DCEDTE); + val |= txwl << UFCR_TXTL_SHF | rxwl; + imx_uart_writel(sport, val, UFCR); +} + +static void imx_uart_dma_exit(struct imx_port *sport) +{ + if (sport->dma_chan_rx) { + dmaengine_terminate_sync(sport->dma_chan_rx); + dma_release_channel(sport->dma_chan_rx); + sport->dma_chan_rx = NULL; + sport->rx_cookie = -EINVAL; + kfree(sport->rx_buf); 
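+ /* drop the stale pointer; imx_uart_dma_init() allocates a fresh buffer */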
+ sport->rx_buf = NULL; + } + + if (sport->dma_chan_tx) { + dmaengine_terminate_sync(sport->dma_chan_tx); + dma_release_channel(sport->dma_chan_tx); + sport->dma_chan_tx = NULL; + } +} + +static int imx_uart_dma_init(struct imx_port *sport) +{ + struct dma_slave_config slave_config = {}; + struct device *dev = sport->port.dev; + int ret; + + /* Prepare for RX : */ + sport->dma_chan_rx = dma_request_slave_channel(dev, "rx"); + if (!sport->dma_chan_rx) { + dev_dbg(dev, "cannot get the DMA channel.\n"); + ret = -EINVAL; + goto err; + } + + slave_config.direction = DMA_DEV_TO_MEM; + slave_config.src_addr = sport->port.mapbase + URXD0; + slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + /* one byte less than the watermark level to enable the aging timer */ + slave_config.src_maxburst = RXTL_DMA - 1; + ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config); + if (ret) { + dev_err(dev, "error in RX dma configuration.\n"); + goto err; + } + + sport->rx_buf_size = sport->rx_period_length * sport->rx_periods; + sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL); + if (!sport->rx_buf) { + ret = -ENOMEM; + goto err; + } + sport->rx_ring.buf = sport->rx_buf; + + /* Prepare for TX : */ + sport->dma_chan_tx = dma_request_slave_channel(dev, "tx"); + if (!sport->dma_chan_tx) { + dev_err(dev, "cannot get the TX DMA channel!\n"); + ret = -EINVAL; + goto err; + } + + slave_config.direction = DMA_MEM_TO_DEV; + slave_config.dst_addr = sport->port.mapbase + URTX0; + slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + slave_config.dst_maxburst = TXTL_DMA; + ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config); + if (ret) { + dev_err(dev, "error in TX dma configuration."); + goto err; + } + + return 0; +err: + imx_uart_dma_exit(sport); + return ret; +} + +static void imx_uart_enable_dma(struct imx_port *sport) +{ + u32 ucr1; + + imx_uart_setup_ufcr(sport, TXTL_DMA, RXTL_DMA); + + /* set UCR1 */ + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 |= UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN; + imx_uart_writel(sport, ucr1, UCR1); + + sport->dma_is_enabled = 1; +} + +static void imx_uart_disable_dma(struct imx_port *sport) +{ + u32 ucr1; + + /* clear UCR1 */ + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN); + imx_uart_writel(sport, ucr1, UCR1); + + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + + sport->dma_is_enabled = 0; +} + +/* half the RX buffer size */ +#define CTSTL 16 + +static int imx_uart_startup(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + int retval, i; + unsigned long flags; + int dma_is_inited = 0; + u32 ucr1, ucr2, ucr3, ucr4; + + retval = clk_prepare_enable(sport->clk_per); + if (retval) + return retval; + retval = clk_prepare_enable(sport->clk_ipg); + if (retval) { + clk_disable_unprepare(sport->clk_per); + return retval; + } + + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + + /* disable the DREN bit (Data Ready interrupt enable) before + * requesting IRQs + */ + ucr4 = imx_uart_readl(sport, UCR4); + + /* set the trigger level for CTS */ + ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF); + ucr4 |= CTSTL << UCR4_CTSTL_SHF; + + imx_uart_writel(sport, ucr4 & ~UCR4_DREN, UCR4); + + /* Can we enable the DMA support? 
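+ * DMA is only used for non-console ports, and only when both an RX and
+ * a TX channel can be set up in imx_uart_dma_init().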
*/ + if (!uart_console(port) && imx_uart_dma_init(sport) == 0) + dma_is_inited = 1; + + spin_lock_irqsave(&sport->port.lock, flags); + /* Reset fifo's and state machines */ + i = 100; + + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 &= ~UCR2_SRST; + imx_uart_writel(sport, ucr2, UCR2); + + while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0)) + udelay(1); + + /* + * Finally, clear and enable interrupts + */ + imx_uart_writel(sport, USR1_RTSD | USR1_DTRD, USR1); + imx_uart_writel(sport, USR2_ORE, USR2); + + ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_RRDYEN; + ucr1 |= UCR1_UARTEN; + if (sport->have_rtscts) + ucr1 |= UCR1_RTSDEN; + + imx_uart_writel(sport, ucr1, UCR1); + + ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR); + if (!dma_is_inited) + ucr4 |= UCR4_OREN; + if (sport->inverted_rx) + ucr4 |= UCR4_INVR; + imx_uart_writel(sport, ucr4, UCR4); + + ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_INVT; + /* + * configure tx polarity before enabling tx + */ + if (sport->inverted_tx) + ucr3 |= UCR3_INVT; + + if (!imx_uart_is_imx1(sport)) { + ucr3 |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD; + + if (sport->dte_mode) + /* disable broken interrupts */ + ucr3 &= ~(UCR3_RI | UCR3_DCD); + } + imx_uart_writel(sport, ucr3, UCR3); + + ucr2 = imx_uart_readl(sport, UCR2) & ~UCR2_ATEN; + ucr2 |= (UCR2_RXEN | UCR2_TXEN); + if (!sport->have_rtscts) + ucr2 |= UCR2_IRTS; + /* + * make sure the edge sensitive RTS-irq is disabled, + * we're using RTSD instead. + */ + if (!imx_uart_is_imx1(sport)) + ucr2 &= ~UCR2_RTSEN; + imx_uart_writel(sport, ucr2, UCR2); + + /* + * Enable modem status interrupts + */ + imx_uart_enable_ms(&sport->port); + + if (dma_is_inited) { + imx_uart_enable_dma(sport); + imx_uart_start_rx_dma(sport); + } else { + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 |= UCR1_RRDYEN; + imx_uart_writel(sport, ucr1, UCR1); + + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 |= UCR2_ATEN; + imx_uart_writel(sport, ucr2, UCR2); + } + + imx_uart_disable_loopback_rs485(sport); + + spin_unlock_irqrestore(&sport->port.lock, flags); + + return 0; +} + +static void imx_uart_shutdown(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned long flags; + u32 ucr1, ucr2, ucr4, uts; + + if (sport->dma_is_enabled) { + dmaengine_terminate_sync(sport->dma_chan_tx); + if (sport->dma_is_txing) { + dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0], + sport->dma_tx_nents, DMA_TO_DEVICE); + sport->dma_is_txing = 0; + } + dmaengine_terminate_sync(sport->dma_chan_rx); + if (sport->dma_is_rxing) { + dma_unmap_sg(sport->port.dev, &sport->rx_sgl, + 1, DMA_FROM_DEVICE); + sport->dma_is_rxing = 0; + } + + spin_lock_irqsave(&sport->port.lock, flags); + imx_uart_stop_tx(port); + imx_uart_stop_rx(port); + imx_uart_disable_dma(sport); + spin_unlock_irqrestore(&sport->port.lock, flags); + imx_uart_dma_exit(sport); + } + + mctrl_gpio_disable_ms(sport->gpios); + + spin_lock_irqsave(&sport->port.lock, flags); + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 &= ~(UCR2_TXEN | UCR2_ATEN); + imx_uart_writel(sport, ucr2, UCR2); + spin_unlock_irqrestore(&sport->port.lock, flags); + + /* + * Stop our timer. + */ + del_timer_sync(&sport->timer); + + /* + * Disable all interrupts, port and break condition. 
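+ * In the RS485 case without an RTS GPIO the UART is kept enabled with
+ * the test-register loopback active instead of being switched off, so
+ * that the RTS (CTS_B) output keeps its programmed level (see the
+ * SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe()).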
+ */ + + spin_lock_irqsave(&sport->port.lock, flags); + + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN | UCR1_ATDMAEN); + /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ + if (port->rs485.flags & SER_RS485_ENABLED && + port->rs485.flags & SER_RS485_RTS_ON_SEND && + sport->have_rtscts && !sport->have_rtsgpio) { + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts |= UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + ucr1 |= UCR1_UARTEN; + } else { + ucr1 &= ~UCR1_UARTEN; + } + imx_uart_writel(sport, ucr1, UCR1); + + ucr4 = imx_uart_readl(sport, UCR4); + ucr4 &= ~UCR4_TCEN; + imx_uart_writel(sport, ucr4, UCR4); + + spin_unlock_irqrestore(&sport->port.lock, flags); + + clk_disable_unprepare(sport->clk_per); + clk_disable_unprepare(sport->clk_ipg); +} + +/* called with port.lock taken and irqs off */ +static void imx_uart_flush_buffer(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + struct scatterlist *sgl = &sport->tx_sgl[0]; + u32 ucr2; + int i = 100, ubir, ubmr, uts; + + if (!sport->dma_chan_tx) + return; + + sport->tx_bytes = 0; + dmaengine_terminate_all(sport->dma_chan_tx); + if (sport->dma_is_txing) { + u32 ucr1; + + dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, + DMA_TO_DEVICE); + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 &= ~UCR1_TXDMAEN; + imx_uart_writel(sport, ucr1, UCR1); + sport->dma_is_txing = 0; + } + + /* + * According to the Reference Manual description of the UART SRST bit: + * + * "Reset the transmit and receive state machines, + * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD + * and UTS[6-3]". + * + * We don't need to restore the old values from USR1, USR2, URXD and + * UTXD. UBRC is read only, so only save/restore the other three + * registers. + */ + ubir = imx_uart_readl(sport, UBIR); + ubmr = imx_uart_readl(sport, UBMR); + uts = imx_uart_readl(sport, IMX21_UTS); + + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 &= ~UCR2_SRST; + imx_uart_writel(sport, ucr2, UCR2); + + while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0)) + udelay(1); + + /* Restore the registers */ + imx_uart_writel(sport, ubir, UBIR); + imx_uart_writel(sport, ubmr, UBMR); + imx_uart_writel(sport, uts, IMX21_UTS); +} + +static void +imx_uart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned long flags; + u32 ucr2, old_ucr2, ufcr; + unsigned int baud, quot; + unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; + unsigned long div; + unsigned long num, denom, old_ubir, old_ubmr; + uint64_t tdiv64; + + /* + * We only support CS7 and CS8. + */ + while ((termios->c_cflag & CSIZE) != CS7 && + (termios->c_cflag & CSIZE) != CS8) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= old_csize; + old_csize = CS8; + } + + del_timer_sync(&sport->timer); + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); + quot = uart_get_divisor(port, baud); + + spin_lock_irqsave(&sport->port.lock, flags); + + /* + * Read current UCR2 and save it for future use, then clear all the bits + * except those we will or may need to preserve. 
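+ * Only UCR2_TXEN, UCR2_RXEN, UCR2_ATEN and UCR2_CTS survive; everything
+ * else is rebuilt from the new termios settings below.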
+ */ + old_ucr2 = imx_uart_readl(sport, UCR2); + ucr2 = old_ucr2 & (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN | UCR2_CTS); + + ucr2 |= UCR2_SRST | UCR2_IRTS; + if ((termios->c_cflag & CSIZE) == CS8) + ucr2 |= UCR2_WS; + + if (!sport->have_rtscts) + termios->c_cflag &= ~CRTSCTS; + + if (port->rs485.flags & SER_RS485_ENABLED) { + /* + * RTS is mandatory for rs485 operation, so keep + * it under manual control and keep transmitter + * disabled. + */ + if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + imx_uart_rts_active(sport, &ucr2); + else + imx_uart_rts_inactive(sport, &ucr2); + + } else if (termios->c_cflag & CRTSCTS) { + /* + * Only let receiver control RTS output if we were not requested + * to have RTS inactive (which then should take precedence). + */ + if (ucr2 & UCR2_CTS) + ucr2 |= UCR2_CTSC; + } + + if (termios->c_cflag & CRTSCTS) + ucr2 &= ~UCR2_IRTS; + if (termios->c_cflag & CSTOPB) + ucr2 |= UCR2_STPB; + if (termios->c_cflag & PARENB) { + ucr2 |= UCR2_PREN; + if (termios->c_cflag & PARODD) + ucr2 |= UCR2_PROE; + } + + sport->port.read_status_mask = 0; + if (termios->c_iflag & INPCK) + sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR); + if (termios->c_iflag & (BRKINT | PARMRK)) + sport->port.read_status_mask |= URXD_BRK; + + /* + * Characters to ignore + */ + sport->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR; + if (termios->c_iflag & IGNBRK) { + sport->port.ignore_status_mask |= URXD_BRK; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= URXD_OVRRUN; + } + + if ((termios->c_cflag & CREAD) == 0) + sport->port.ignore_status_mask |= URXD_DUMMY_READ; + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* custom-baudrate handling */ + div = sport->port.uartclk / (baud * 16); + if (baud == 38400 && quot != div) + baud = sport->port.uartclk / (quot * 16); + + div = sport->port.uartclk / (baud * 16); + if (div > 7) + div = 7; + if (!div) + div = 1; + + rational_best_approximation(16 * div * baud, sport->port.uartclk, + 1 << 16, 1 << 16, &num, &denom); + + tdiv64 = sport->port.uartclk; + tdiv64 *= num; + do_div(tdiv64, denom * 16 * div); + tty_termios_encode_baud_rate(termios, + (speed_t)tdiv64, (speed_t)tdiv64); + + num -= 1; + denom -= 1; + + ufcr = imx_uart_readl(sport, UFCR); + ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div); + imx_uart_writel(sport, ufcr, UFCR); + + /* + * Two registers below should always be written both and in this + * particular order. One consequence is that we need to check if any of + * them changes and then update both. We do need the check for change + * as even writing the same values seem to "restart" + * transmission/receiving logic in the hardware, that leads to data + * breakage even when rate doesn't in fact change. E.g., user switches + * RTS/CTS handshake and suddenly gets broken bytes. 
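+ *
+ * As a rough worked example of the values computed above (assuming an
+ * 80 MHz uartclk and 115200 Bd, neither taken from a particular board):
+ * div clamps to 7, rational_best_approximation() returns num/denom =
+ * 504/3125, so UBIR = 503 and UBMR = 3124 get written below, which
+ * reproduces exactly 115200 Bd from the 80 MHz / 7 reference clock.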
+ */ + old_ubir = imx_uart_readl(sport, UBIR); + old_ubmr = imx_uart_readl(sport, UBMR); + if (old_ubir != num || old_ubmr != denom) { + imx_uart_writel(sport, num, UBIR); + imx_uart_writel(sport, denom, UBMR); + } + + if (!imx_uart_is_imx1(sport)) + imx_uart_writel(sport, sport->port.uartclk / div / 1000, + IMX21_ONEMS); + + imx_uart_writel(sport, ucr2, UCR2); + + if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) + imx_uart_enable_ms(&sport->port); + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static const char *imx_uart_type(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + + return sport->port.type == PORT_IMX ? "IMX" : NULL; +} + +/* + * Configure/autoconfigure the port. + */ +static void imx_uart_config_port(struct uart_port *port, int flags) +{ + struct imx_port *sport = (struct imx_port *)port; + + if (flags & UART_CONFIG_TYPE) + sport->port.type = PORT_IMX; +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). + * The only change we allow are to the flags and type, and + * even then only between PORT_IMX and PORT_UNKNOWN + */ +static int +imx_uart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + struct imx_port *sport = (struct imx_port *)port; + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX) + ret = -EINVAL; + if (sport->port.irq != ser->irq) + ret = -EINVAL; + if (ser->io_type != UPIO_MEM) + ret = -EINVAL; + if (sport->port.uartclk / 16 != ser->baud_base) + ret = -EINVAL; + if (sport->port.mapbase != (unsigned long)ser->iomem_base) + ret = -EINVAL; + if (sport->port.iobase != ser->port) + ret = -EINVAL; + if (ser->hub6 != 0) + ret = -EINVAL; + return ret; +} + +#if defined(CONFIG_CONSOLE_POLL) + +static int imx_uart_poll_init(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned long flags; + u32 ucr1, ucr2; + int retval; + + retval = clk_prepare_enable(sport->clk_ipg); + if (retval) + return retval; + retval = clk_prepare_enable(sport->clk_per); + if (retval) + clk_disable_unprepare(sport->clk_ipg); + + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + + spin_lock_irqsave(&sport->port.lock, flags); + + /* + * Be careful about the order of enabling bits here. First enable the + * receiver (UARTEN + RXEN) and only then the corresponding irqs. + * This prevents that a character that already sits in the RX fifo is + * triggering an irq but the try to fetch it from there results in an + * exception because UARTEN or RXEN is still off. 
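+ * That is why UCR1/UCR2 are written twice below: first with the irq
+ * enable bits masked out, then again with UCR1_RRDYEN and UCR2_ATEN set.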
+ */ + ucr1 = imx_uart_readl(sport, UCR1); + ucr2 = imx_uart_readl(sport, UCR2); + + if (imx_uart_is_imx1(sport)) + ucr1 |= IMX1_UCR1_UARTCLKEN; + + ucr1 |= UCR1_UARTEN; + ucr1 &= ~(UCR1_TRDYEN | UCR1_RTSDEN | UCR1_RRDYEN); + + ucr2 |= UCR2_RXEN | UCR2_TXEN; + ucr2 &= ~UCR2_ATEN; + + imx_uart_writel(sport, ucr1, UCR1); + imx_uart_writel(sport, ucr2, UCR2); + + /* now enable irqs */ + imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1); + imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2); + + spin_unlock_irqrestore(&sport->port.lock, flags); + + return 0; +} + +static int imx_uart_poll_get_char(struct uart_port *port) +{ + struct imx_port *sport = (struct imx_port *)port; + if (!(imx_uart_readl(sport, USR2) & USR2_RDR)) + return NO_POLL_CHAR; + + return imx_uart_readl(sport, URXD0) & URXD_RX_DATA; +} + +static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c) +{ + struct imx_port *sport = (struct imx_port *)port; + unsigned int status; + + /* drain */ + do { + status = imx_uart_readl(sport, USR1); + } while (~status & USR1_TRDY); + + /* write */ + imx_uart_writel(sport, c, URTX0); + + /* flush */ + do { + status = imx_uart_readl(sport, USR2); + } while (~status & USR2_TXDC); +} +#endif + +/* called with port.lock taken and irqs off or from .probe without locking */ +static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485conf) +{ + struct imx_port *sport = (struct imx_port *)port; + u32 ucr2; + + if (rs485conf->flags & SER_RS485_ENABLED) { + /* Enable receiver if low-active RTS signal is requested */ + if (sport->have_rtscts && !sport->have_rtsgpio && + !(rs485conf->flags & SER_RS485_RTS_ON_SEND)) + rs485conf->flags |= SER_RS485_RX_DURING_TX; + + /* disable transmitter */ + ucr2 = imx_uart_readl(sport, UCR2); + if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) + imx_uart_rts_active(sport, &ucr2); + else + imx_uart_rts_inactive(sport, &ucr2); + imx_uart_writel(sport, ucr2, UCR2); + } + + /* Make sure Rx is enabled in case Tx is active with Rx disabled */ + if (!(rs485conf->flags & SER_RS485_ENABLED) || + rs485conf->flags & SER_RS485_RX_DURING_TX) + imx_uart_start_rx(port); + + return 0; +} + +static const struct uart_ops imx_uart_pops = { + .tx_empty = imx_uart_tx_empty, + .set_mctrl = imx_uart_set_mctrl, + .get_mctrl = imx_uart_get_mctrl, + .stop_tx = imx_uart_stop_tx, + .start_tx = imx_uart_start_tx, + .stop_rx = imx_uart_stop_rx, + .enable_ms = imx_uart_enable_ms, + .break_ctl = imx_uart_break_ctl, + .startup = imx_uart_startup, + .shutdown = imx_uart_shutdown, + .flush_buffer = imx_uart_flush_buffer, + .set_termios = imx_uart_set_termios, + .type = imx_uart_type, + .config_port = imx_uart_config_port, + .verify_port = imx_uart_verify_port, +#if defined(CONFIG_CONSOLE_POLL) + .poll_init = imx_uart_poll_init, + .poll_get_char = imx_uart_poll_get_char, + .poll_put_char = imx_uart_poll_put_char, +#endif +}; + +static struct imx_port *imx_uart_ports[UART_NR]; + +#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE) +static void imx_uart_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct imx_port *sport = (struct imx_port *)port; + + while (imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL) + barrier(); + + imx_uart_writel(sport, ch, URTX0); +} + +/* + * Interrupts are disabled on entering + */ +static void +imx_uart_console_write(struct console *co, const char *s, unsigned int count) +{ + struct imx_port *sport = imx_uart_ports[co->index]; + struct imx_port_ucrs old_ucr; + unsigned long flags; + unsigned int 
ucr1; + int locked = 1; + + if (sport->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&sport->port.lock, flags); + else + spin_lock_irqsave(&sport->port.lock, flags); + + /* + * First, save UCR1/2/3 and then disable interrupts + */ + imx_uart_ucrs_save(sport, &old_ucr); + ucr1 = old_ucr.ucr1; + + if (imx_uart_is_imx1(sport)) + ucr1 |= IMX1_UCR1_UARTCLKEN; + ucr1 |= UCR1_UARTEN; + ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN); + + imx_uart_writel(sport, ucr1, UCR1); + + imx_uart_writel(sport, old_ucr.ucr2 | UCR2_TXEN, UCR2); + + uart_console_write(&sport->port, s, count, imx_uart_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore UCR1/2/3 + */ + while (!(imx_uart_readl(sport, USR2) & USR2_TXDC)); + + imx_uart_ucrs_restore(sport, &old_ucr); + + if (locked) + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +/* + * If the port was already initialised (eg, by a boot loader), + * try to determine the current setup. + */ +static void +imx_uart_console_get_options(struct imx_port *sport, int *baud, + int *parity, int *bits) +{ + + if (imx_uart_readl(sport, UCR1) & UCR1_UARTEN) { + /* ok, the port was enabled */ + unsigned int ucr2, ubir, ubmr, uartclk; + unsigned int baud_raw; + unsigned int ucfr_rfdiv; + + ucr2 = imx_uart_readl(sport, UCR2); + + *parity = 'n'; + if (ucr2 & UCR2_PREN) { + if (ucr2 & UCR2_PROE) + *parity = 'o'; + else + *parity = 'e'; + } + + if (ucr2 & UCR2_WS) + *bits = 8; + else + *bits = 7; + + ubir = imx_uart_readl(sport, UBIR) & 0xffff; + ubmr = imx_uart_readl(sport, UBMR) & 0xffff; + + ucfr_rfdiv = (imx_uart_readl(sport, UFCR) & UFCR_RFDIV) >> 7; + if (ucfr_rfdiv == 6) + ucfr_rfdiv = 7; + else + ucfr_rfdiv = 6 - ucfr_rfdiv; + + uartclk = clk_get_rate(sport->clk_per); + uartclk /= ucfr_rfdiv; + + { /* + * The next code provides exact computation of + * baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1)) + * without need of float support or long long division, + * which would be required to prevent 32bit arithmetic overflow + */ + unsigned int mul = ubir + 1; + unsigned int div = 16 * (ubmr + 1); + unsigned int rem = uartclk % div; + + baud_raw = (uartclk / div) * mul; + baud_raw += (rem * mul + div / 2) / div; + *baud = (baud_raw + 50) / 100 * 100; + } + + if (*baud != baud_raw) + dev_info(sport->port.dev, "Console IMX rounded baud rate from %d to %d\n", + baud_raw, *baud); + } +} + +static int +imx_uart_console_setup(struct console *co, char *options) +{ + struct imx_port *sport; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int retval; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index == -1 || co->index >= ARRAY_SIZE(imx_uart_ports)) + co->index = 0; + sport = imx_uart_ports[co->index]; + if (sport == NULL) + return -ENODEV; + + /* For setting the registers, we only need to enable the ipg clock. 
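+ * The per clock (the baud reference used for port->uartclk) is only
+ * enabled at the end of this function, after uart_set_options() has
+ * succeeded.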
*/ + retval = clk_prepare_enable(sport->clk_ipg); + if (retval) + goto error_console; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + imx_uart_console_get_options(sport, &baud, &parity, &bits); + + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + + retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); + + if (retval) { + clk_disable_unprepare(sport->clk_ipg); + goto error_console; + } + + retval = clk_prepare_enable(sport->clk_per); + if (retval) + clk_disable_unprepare(sport->clk_ipg); + +error_console: + return retval; +} + +static int +imx_uart_console_exit(struct console *co) +{ + struct imx_port *sport = imx_uart_ports[co->index]; + + clk_disable_unprepare(sport->clk_per); + clk_disable_unprepare(sport->clk_ipg); + + return 0; +} + +static struct uart_driver imx_uart_uart_driver; +static struct console imx_uart_console = { + .name = DEV_NAME, + .write = imx_uart_console_write, + .device = uart_console_device, + .setup = imx_uart_console_setup, + .exit = imx_uart_console_exit, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &imx_uart_uart_driver, +}; + +#define IMX_CONSOLE &imx_uart_console + +#else +#define IMX_CONSOLE NULL +#endif + +static struct uart_driver imx_uart_uart_driver = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = DEV_NAME, + .major = SERIAL_IMX_MAJOR, + .minor = MINOR_START, + .nr = ARRAY_SIZE(imx_uart_ports), + .cons = IMX_CONSOLE, +}; + +static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t) +{ + struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx); + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); + if (sport->tx_state == WAIT_AFTER_RTS) + imx_uart_start_tx(&sport->port); + spin_unlock_irqrestore(&sport->port.lock, flags); + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t) +{ + struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx); + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); + if (sport->tx_state == WAIT_AFTER_SEND) + imx_uart_stop_tx(&sport->port); + spin_unlock_irqrestore(&sport->port.lock, flags); + + return HRTIMER_NORESTART; +} + +static const struct serial_rs485 imx_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | + SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + +/* Default RX DMA buffer configuration */ +#define RX_DMA_PERIODS 16 +#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4) + +static int imx_uart_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct imx_port *sport; + void __iomem *base; + u32 dma_buf_conf[2]; + int ret = 0; + u32 ucr1, ucr2, uts; + struct resource *res; + int txirq, rxirq, rtsirq; + + sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); + if (!sport) + return -ENOMEM; + + sport->devdata = of_device_get_match_data(&pdev->dev); + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); + return ret; + } + sport->port.line = ret; + + if (of_get_property(np, "uart-has-rtscts", NULL) || + of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */) + sport->have_rtscts = 1; + + if (of_get_property(np, "fsl,dte-mode", NULL)) + sport->dte_mode = 1; + + if (of_get_property(np, "rts-gpios", NULL)) + sport->have_rtsgpio = 1; + + if (of_get_property(np, "fsl,inverted-tx", NULL)) + 
sport->inverted_tx = 1; + + if (of_get_property(np, "fsl,inverted-rx", NULL)) + sport->inverted_rx = 1; + + if (!of_property_read_u32_array(np, "fsl,dma-info", dma_buf_conf, 2)) { + sport->rx_period_length = dma_buf_conf[0]; + sport->rx_periods = dma_buf_conf[1]; + } else { + sport->rx_period_length = RX_DMA_PERIOD_LEN; + sport->rx_periods = RX_DMA_PERIODS; + } + + if (sport->port.line >= ARRAY_SIZE(imx_uart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", + sport->port.line); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + rxirq = platform_get_irq(pdev, 0); + if (rxirq < 0) + return rxirq; + txirq = platform_get_irq_optional(pdev, 1); + rtsirq = platform_get_irq_optional(pdev, 2); + + sport->port.dev = &pdev->dev; + sport->port.mapbase = res->start; + sport->port.membase = base; + sport->port.type = PORT_IMX; + sport->port.iotype = UPIO_MEM; + sport->port.irq = rxirq; + sport->port.fifosize = 32; + sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE); + sport->port.ops = &imx_uart_pops; + sport->port.rs485_config = imx_uart_rs485_config; + /* RTS is required to control the RS485 transmitter */ + if (sport->have_rtscts || sport->have_rtsgpio) + sport->port.rs485_supported = imx_rs485_supported; + sport->port.flags = UPF_BOOT_AUTOCONF; + timer_setup(&sport->timer, imx_uart_timeout, 0); + + sport->gpios = mctrl_gpio_init(&sport->port, 0); + if (IS_ERR(sport->gpios)) + return PTR_ERR(sport->gpios); + + sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(sport->clk_ipg)) { + ret = PTR_ERR(sport->clk_ipg); + dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); + return ret; + } + + sport->clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(sport->clk_per)) { + ret = PTR_ERR(sport->clk_per); + dev_err(&pdev->dev, "failed to get per clk: %d\n", ret); + return ret; + } + + sport->port.uartclk = clk_get_rate(sport->clk_per); + + /* For register access, we only need to enable the ipg clock. */ + ret = clk_prepare_enable(sport->clk_ipg); + if (ret) { + dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); + return ret; + } + + /* initialize shadow register values */ + sport->ucr1 = readl(sport->port.membase + UCR1); + sport->ucr2 = readl(sport->port.membase + UCR2); + sport->ucr3 = readl(sport->port.membase + UCR3); + sport->ucr4 = readl(sport->port.membase + UCR4); + sport->ufcr = readl(sport->port.membase + UFCR); + + ret = uart_get_rs485_mode(&sport->port); + if (ret) + goto err_clk; + + /* + * If using the i.MX UART RTS/CTS control then the RTS (CTS_B) + * signal cannot be set low during transmission in case the + * receiver is off (limitation of the i.MX UART IP). + */ + if (sport->port.rs485.flags & SER_RS485_ENABLED && + sport->have_rtscts && !sport->have_rtsgpio && + (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) && + !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX))) + dev_err(&pdev->dev, + "low-active RTS not possible when receiver is off, enabling receiver\n"); + + /* Disable interrupts before requesting them */ + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN); + imx_uart_writel(sport, ucr1, UCR1); + + /* Disable Ageing Timer interrupt */ + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 &= ~UCR2_ATEN; + imx_uart_writel(sport, ucr2, UCR2); + + /* + * In case RS485 is enabled without GPIO RTS control, the UART IP + * is used to control CTS signal. 
Keep both the UART and Receiver + * enabled, otherwise the UART IP pulls CTS signal always HIGH no + * matter how the UCR2 CTSC and CTS bits are set. To prevent any + * data from being fed into the RX FIFO, enable loopback mode in + * UTS register, which disconnects the RX path from external RXD + * pin and connects it to the Transceiver, which is disabled, so + * no data can be fed to the RX FIFO that way. + */ + if (sport->port.rs485.flags & SER_RS485_ENABLED && + sport->have_rtscts && !sport->have_rtsgpio) { + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts |= UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 |= UCR1_UARTEN; + imx_uart_writel(sport, ucr1, UCR1); + + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 |= UCR2_RXEN; + imx_uart_writel(sport, ucr2, UCR2); + } + + if (!imx_uart_is_imx1(sport) && sport->dte_mode) { + /* + * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI + * and influences if UCR3_RI and UCR3_DCD changes the level of RI + * and DCD (when they are outputs) or enables the respective + * irqs. So set this bit early, i.e. before requesting irqs. + */ + u32 ufcr = imx_uart_readl(sport, UFCR); + if (!(ufcr & UFCR_DCEDTE)) + imx_uart_writel(sport, ufcr | UFCR_DCEDTE, UFCR); + + /* + * Disable UCR3_RI and UCR3_DCD irqs. They are also not + * enabled later because they cannot be cleared + * (confirmed on i.MX25) which makes them unusable. + */ + imx_uart_writel(sport, + IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR, + UCR3); + + } else { + u32 ucr3 = UCR3_DSR; + u32 ufcr = imx_uart_readl(sport, UFCR); + if (ufcr & UFCR_DCEDTE) + imx_uart_writel(sport, ufcr & ~UFCR_DCEDTE, UFCR); + + if (!imx_uart_is_imx1(sport)) + ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP; + imx_uart_writel(sport, ucr3, UCR3); + } + + hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + sport->trigger_start_tx.function = imx_trigger_start_tx; + sport->trigger_stop_tx.function = imx_trigger_stop_tx; + + /* + * Allocate the IRQ(s) i.MX1 has three interrupts whereas later + * chips only have one interrupt. 
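+ * Whether a second ("tx") interrupt was provided decides which case we
+ * are in: with separate rx/tx/rts lines each gets its own slim handler,
+ * otherwise everything is funnelled through imx_uart_int().
+ *
+ * A minimal sketch of a matching device tree node; the addresses, the
+ * interrupt specifier and the clock phandles are purely illustrative,
+ * only the compatible string, the clock names and the optional
+ * uart-has-rtscts property come from this driver:
+ *
+ *	uart1: serial@2020000 {
+ *		compatible = "fsl,imx6q-uart";
+ *		reg = <0x02020000 0x4000>;
+ *		interrupts = <0 26 4>;
+ *		clocks = <&clks 160>, <&clks 161>;
+ *		clock-names = "ipg", "per";
+ *		uart-has-rtscts;
+ *	};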
+ */ + if (txirq > 0) { + ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_rxint, 0, + dev_name(&pdev->dev), sport); + if (ret) { + dev_err(&pdev->dev, "failed to request rx irq: %d\n", + ret); + goto err_clk; + } + + ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0, + dev_name(&pdev->dev), sport); + if (ret) { + dev_err(&pdev->dev, "failed to request tx irq: %d\n", + ret); + goto err_clk; + } + + ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0, + dev_name(&pdev->dev), sport); + if (ret) { + dev_err(&pdev->dev, "failed to request rts irq: %d\n", + ret); + goto err_clk; + } + } else { + ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, + dev_name(&pdev->dev), sport); + if (ret) { + dev_err(&pdev->dev, "failed to request irq: %d\n", ret); + goto err_clk; + } + } + + imx_uart_ports[sport->port.line] = sport; + + platform_set_drvdata(pdev, sport); + + ret = uart_add_one_port(&imx_uart_uart_driver, &sport->port); + +err_clk: + clk_disable_unprepare(sport->clk_ipg); + + return ret; +} + +static int imx_uart_remove(struct platform_device *pdev) +{ + struct imx_port *sport = platform_get_drvdata(pdev); + + return uart_remove_one_port(&imx_uart_uart_driver, &sport->port); +} + +static void imx_uart_restore_context(struct imx_port *sport) +{ + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); + if (!sport->context_saved) { + spin_unlock_irqrestore(&sport->port.lock, flags); + return; + } + + imx_uart_writel(sport, sport->saved_reg[4], UFCR); + imx_uart_writel(sport, sport->saved_reg[5], UESC); + imx_uart_writel(sport, sport->saved_reg[6], UTIM); + imx_uart_writel(sport, sport->saved_reg[7], UBIR); + imx_uart_writel(sport, sport->saved_reg[8], UBMR); + imx_uart_writel(sport, sport->saved_reg[9], IMX21_UTS); + imx_uart_writel(sport, sport->saved_reg[0], UCR1); + imx_uart_writel(sport, sport->saved_reg[1] | UCR2_SRST, UCR2); + imx_uart_writel(sport, sport->saved_reg[2], UCR3); + imx_uart_writel(sport, sport->saved_reg[3], UCR4); + sport->context_saved = false; + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static void imx_uart_save_context(struct imx_port *sport) +{ + unsigned long flags; + + /* Save necessary regs */ + spin_lock_irqsave(&sport->port.lock, flags); + sport->saved_reg[0] = imx_uart_readl(sport, UCR1); + sport->saved_reg[1] = imx_uart_readl(sport, UCR2); + sport->saved_reg[2] = imx_uart_readl(sport, UCR3); + sport->saved_reg[3] = imx_uart_readl(sport, UCR4); + sport->saved_reg[4] = imx_uart_readl(sport, UFCR); + sport->saved_reg[5] = imx_uart_readl(sport, UESC); + sport->saved_reg[6] = imx_uart_readl(sport, UTIM); + sport->saved_reg[7] = imx_uart_readl(sport, UBIR); + sport->saved_reg[8] = imx_uart_readl(sport, UBMR); + sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS); + sport->context_saved = true; + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static void imx_uart_enable_wakeup(struct imx_port *sport, bool on) +{ + u32 ucr3; + + ucr3 = imx_uart_readl(sport, UCR3); + if (on) { + imx_uart_writel(sport, USR1_AWAKE, USR1); + ucr3 |= UCR3_AWAKEN; + } else { + ucr3 &= ~UCR3_AWAKEN; + } + imx_uart_writel(sport, ucr3, UCR3); + + if (sport->have_rtscts) { + u32 ucr1 = imx_uart_readl(sport, UCR1); + if (on) { + imx_uart_writel(sport, USR1_RTSD, USR1); + ucr1 |= UCR1_RTSDEN; + } else { + ucr1 &= ~UCR1_RTSDEN; + } + imx_uart_writel(sport, ucr1, UCR1); + } +} + +static int imx_uart_suspend_noirq(struct device *dev) +{ + struct imx_port *sport = dev_get_drvdata(dev); + + imx_uart_save_context(sport); + + 
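+	/*
+	 * Only the atomic clk_disable() is used here; the ipg clock was
+	 * prepared and enabled in imx_uart_suspend() and stays prepared
+	 * until imx_uart_resume() finally calls clk_disable_unprepare().
+	 */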
clk_disable(sport->clk_ipg); + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int imx_uart_resume_noirq(struct device *dev) +{ + struct imx_port *sport = dev_get_drvdata(dev); + int ret; + + pinctrl_pm_select_default_state(dev); + + ret = clk_enable(sport->clk_ipg); + if (ret) + return ret; + + imx_uart_restore_context(sport); + + return 0; +} + +static int imx_uart_suspend(struct device *dev) +{ + struct imx_port *sport = dev_get_drvdata(dev); + int ret; + + uart_suspend_port(&imx_uart_uart_driver, &sport->port); + disable_irq(sport->port.irq); + + ret = clk_prepare_enable(sport->clk_ipg); + if (ret) + return ret; + + /* enable wakeup from i.MX UART */ + imx_uart_enable_wakeup(sport, true); + + return 0; +} + +static int imx_uart_resume(struct device *dev) +{ + struct imx_port *sport = dev_get_drvdata(dev); + + /* disable wakeup from i.MX UART */ + imx_uart_enable_wakeup(sport, false); + + uart_resume_port(&imx_uart_uart_driver, &sport->port); + enable_irq(sport->port.irq); + + clk_disable_unprepare(sport->clk_ipg); + + return 0; +} + +static int imx_uart_freeze(struct device *dev) +{ + struct imx_port *sport = dev_get_drvdata(dev); + + uart_suspend_port(&imx_uart_uart_driver, &sport->port); + + return clk_prepare_enable(sport->clk_ipg); +} + +static int imx_uart_thaw(struct device *dev) +{ + struct imx_port *sport = dev_get_drvdata(dev); + + uart_resume_port(&imx_uart_uart_driver, &sport->port); + + clk_disable_unprepare(sport->clk_ipg); + + return 0; +} + +static const struct dev_pm_ops imx_uart_pm_ops = { + .suspend_noirq = imx_uart_suspend_noirq, + .resume_noirq = imx_uart_resume_noirq, + .freeze_noirq = imx_uart_suspend_noirq, + .thaw_noirq = imx_uart_resume_noirq, + .restore_noirq = imx_uart_resume_noirq, + .suspend = imx_uart_suspend, + .resume = imx_uart_resume, + .freeze = imx_uart_freeze, + .thaw = imx_uart_thaw, + .restore = imx_uart_thaw, +}; + +static struct platform_driver imx_uart_platform_driver = { + .probe = imx_uart_probe, + .remove = imx_uart_remove, + + .driver = { + .name = "imx-uart", + .of_match_table = imx_uart_dt_ids, + .pm = &imx_uart_pm_ops, + }, +}; + +static int __init imx_uart_init(void) +{ + int ret = uart_register_driver(&imx_uart_uart_driver); + + if (ret) + return ret; + + ret = platform_driver_register(&imx_uart_platform_driver); + if (ret != 0) + uart_unregister_driver(&imx_uart_uart_driver); + + return ret; +} + +static void __exit imx_uart_exit(void) +{ + platform_driver_unregister(&imx_uart_platform_driver); + uart_unregister_driver(&imx_uart_uart_driver); +} + +module_init(imx_uart_init); +module_exit(imx_uart_exit); + +MODULE_AUTHOR("Sascha Hauer"); +MODULE_DESCRIPTION("IMX generic serial port driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:imx-uart"); diff --git a/drivers/tty/serial/imx_earlycon.c b/drivers/tty/serial/imx_earlycon.c new file mode 100644 index 000000000..7aab38b2b --- /dev/null +++ b/drivers/tty/serial/imx_earlycon.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2020 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define URTX0 0x40 /* Transmitter Register */ +#define UTS_TXFULL (1<<4) /* TxFIFO full */ +#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/ + +static void imx_uart_console_early_putchar(struct uart_port *port, unsigned char ch) +{ + while (readl_relaxed(port->membase + IMX21_UTS) & UTS_TXFULL) + cpu_relax(); + + writel_relaxed(ch, port->membase + URTX0); +} + +static void 
imx_uart_console_early_write(struct console *con, const char *s, + unsigned count) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, count, imx_uart_console_early_putchar); +} + +static int __init +imx_console_early_setup(struct earlycon_device *dev, const char *opt) +{ + if (!dev->port.membase) + return -ENODEV; + + dev->con->write = imx_uart_console_early_write; + + return 0; +} +OF_EARLYCON_DECLARE(ec_imx6q, "fsl,imx6q-uart", imx_console_early_setup); +OF_EARLYCON_DECLARE(ec_imx21, "fsl,imx21-uart", imx_console_early_setup); + +MODULE_AUTHOR("NXP"); +MODULE_DESCRIPTION("IMX earlycon driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c new file mode 100644 index 000000000..dd0a8915c --- /dev/null +++ b/drivers/tty/serial/ip22zilog.c @@ -0,0 +1,1223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Zilog serial chips found on SGI workstations and + * servers. This driver could actually be made more generic. + * + * This is based on the drivers/serial/sunzilog.c code as of 2.6.0-test7 and the + * old drivers/sgi/char/sgiserial.c code which itself is based of the original + * drivers/sbus/char/zs.c code. A lot of code has been simply moved over + * directly from there but much has been rewritten. Credits therefore go out + * to David S. Miller, Eddie C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell + * for their work there. + * + * Copyright (C) 2002 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "ip22zilog.h" + +/* + * On IP22 we need to delay after register accesses but we do not need to + * flush writes. + */ +#define ZSDELAY() udelay(5) +#define ZSDELAY_LONG() udelay(20) +#define ZS_WSYNC(channel) do { } while (0) + +#define NUM_IP22ZILOG 1 +#define NUM_CHANNELS (NUM_IP22ZILOG * 2) + +#define ZS_CLOCK 3672000 /* Zilog input clock rate. */ +#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */ + +/* + * We wrap our port structure around the generic uart_port. + */ +struct uart_ip22zilog_port { + struct uart_port port; + + /* IRQ servicing chain. */ + struct uart_ip22zilog_port *next; + + /* Current values of Zilog write registers. 
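+	 * Most Z8530 write registers cannot be read back, so the driver
+	 * keeps this software copy and always does read-modify-write on
+	 * it, for example:
+	 *
+	 *	up->curregs[R5] |= RTS;
+	 *	write_zsreg(channel, R5, up->curregs[R5]);
+	 *
+	 * (or defers the hardware write via ip22zilog_maybe_update_regs()
+	 * when the transmitter may still be active).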
*/ + unsigned char curregs[NUM_ZSREGS]; + + unsigned int flags; +#define IP22ZILOG_FLAG_IS_CONS 0x00000004 +#define IP22ZILOG_FLAG_IS_KGDB 0x00000008 +#define IP22ZILOG_FLAG_MODEM_STATUS 0x00000010 +#define IP22ZILOG_FLAG_IS_CHANNEL_A 0x00000020 +#define IP22ZILOG_FLAG_REGS_HELD 0x00000040 +#define IP22ZILOG_FLAG_TX_STOPPED 0x00000080 +#define IP22ZILOG_FLAG_TX_ACTIVE 0x00000100 +#define IP22ZILOG_FLAG_RESET_DONE 0x00000200 + + unsigned int tty_break; + + unsigned char parity_mask; + unsigned char prev_status; +}; + +#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel *)((PORT)->membase)) +#define UART_ZILOG(PORT) ((struct uart_ip22zilog_port *)(PORT)) +#define IP22ZILOG_GET_CURR_REG(PORT, REGNUM) \ + (UART_ZILOG(PORT)->curregs[REGNUM]) +#define IP22ZILOG_SET_CURR_REG(PORT, REGNUM, REGVAL) \ + ((UART_ZILOG(PORT)->curregs[REGNUM]) = (REGVAL)) +#define ZS_IS_CONS(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_CONS) +#define ZS_IS_KGDB(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_KGDB) +#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & IP22ZILOG_FLAG_MODEM_STATUS) +#define ZS_IS_CHANNEL_A(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_CHANNEL_A) +#define ZS_REGS_HELD(UP) ((UP)->flags & IP22ZILOG_FLAG_REGS_HELD) +#define ZS_TX_STOPPED(UP) ((UP)->flags & IP22ZILOG_FLAG_TX_STOPPED) +#define ZS_TX_ACTIVE(UP) ((UP)->flags & IP22ZILOG_FLAG_TX_ACTIVE) + +/* Reading and writing Zilog8530 registers. The delays are to make this + * driver work on the IP22 which needs a settling delay after each chip + * register access, other machines handle this in hardware via auxiliary + * flip-flops which implement the settle time we do in software. + * + * The port lock must be held and local IRQs must be disabled + * when {read,write}_zsreg is invoked. + */ +static unsigned char read_zsreg(struct zilog_channel *channel, + unsigned char reg) +{ + unsigned char retval; + + writeb(reg, &channel->control); + ZSDELAY(); + retval = readb(&channel->control); + ZSDELAY(); + + return retval; +} + +static void write_zsreg(struct zilog_channel *channel, + unsigned char reg, unsigned char value) +{ + writeb(reg, &channel->control); + ZSDELAY(); + writeb(value, &channel->control); + ZSDELAY(); +} + +static void ip22zilog_clear_fifo(struct zilog_channel *channel) +{ + int i; + + for (i = 0; i < 32; i++) { + unsigned char regval; + + regval = readb(&channel->control); + ZSDELAY(); + if (regval & Rx_CH_AV) + break; + + regval = read_zsreg(channel, R1); + readb(&channel->data); + ZSDELAY(); + + if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) { + writeb(ERR_RES, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + } + } +} + +/* This function must only be called when the TX is not busy. The UART + * port lock must be held and local interrupts disabled. + */ +static void __load_zsregs(struct zilog_channel *channel, unsigned char *regs) +{ + int i; + + /* Let pending transmits finish. */ + for (i = 0; i < 1000; i++) { + unsigned char stat = read_zsreg(channel, R1); + if (stat & ALL_SNT) + break; + udelay(100); + } + + writeb(ERR_RES, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + ip22zilog_clear_fifo(channel); + + /* Disable all interrupts. */ + write_zsreg(channel, R1, + regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB)); + + /* Set parity, sync config, stop bits, and clock divisor. */ + write_zsreg(channel, R4, regs[R4]); + + /* Set misc. TX/RX control bits. */ + write_zsreg(channel, R10, regs[R10]); + + /* Set TX/RX controls sans the enable bits. 
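+	 * The receiver and transmitter stay disabled while the remaining
+	 * registers are programmed; R3/R5 are rewritten with RxENAB and
+	 * TxENAB near the end of this function.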
*/ + write_zsreg(channel, R3, regs[R3] & ~RxENAB); + write_zsreg(channel, R5, regs[R5] & ~TxENAB); + + /* Synchronous mode config. */ + write_zsreg(channel, R6, regs[R6]); + write_zsreg(channel, R7, regs[R7]); + + /* Don't mess with the interrupt vector (R2, unused by us) and + * master interrupt control (R9). We make sure this is setup + * properly at probe time then never touch it again. + */ + + /* Disable baud generator. */ + write_zsreg(channel, R14, regs[R14] & ~BRENAB); + + /* Clock mode control. */ + write_zsreg(channel, R11, regs[R11]); + + /* Lower and upper byte of baud rate generator divisor. */ + write_zsreg(channel, R12, regs[R12]); + write_zsreg(channel, R13, regs[R13]); + + /* Now rewrite R14, with BRENAB (if set). */ + write_zsreg(channel, R14, regs[R14]); + + /* External status interrupt control. */ + write_zsreg(channel, R15, regs[R15]); + + /* Reset external status interrupts. */ + write_zsreg(channel, R0, RES_EXT_INT); + write_zsreg(channel, R0, RES_EXT_INT); + + /* Rewrite R3/R5, this time without enables masked. */ + write_zsreg(channel, R3, regs[R3]); + write_zsreg(channel, R5, regs[R5]); + + /* Rewrite R1, this time without IRQ enabled masked. */ + write_zsreg(channel, R1, regs[R1]); +} + +/* Reprogram the Zilog channel HW registers with the copies found in the + * software state struct. If the transmitter is busy, we defer this update + * until the next TX complete interrupt. Else, we do it right now. + * + * The UART port lock must be held and local interrupts disabled. + */ +static void ip22zilog_maybe_update_regs(struct uart_ip22zilog_port *up, + struct zilog_channel *channel) +{ + if (!ZS_REGS_HELD(up)) { + if (ZS_TX_ACTIVE(up)) { + up->flags |= IP22ZILOG_FLAG_REGS_HELD; + } else { + __load_zsregs(channel, up->curregs); + } + } +} + +#define Rx_BRK 0x0100 /* BREAK event software flag. */ +#define Rx_SYS 0x0200 /* SysRq event software flag. */ + +static bool ip22zilog_receive_chars(struct uart_ip22zilog_port *up, + struct zilog_channel *channel) +{ + unsigned char ch, flag; + unsigned int r1; + bool push = up->port.state != NULL; + + for (;;) { + ch = readb(&channel->control); + ZSDELAY(); + if (!(ch & Rx_CH_AV)) + break; + + r1 = read_zsreg(channel, R1); + if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) { + writeb(ERR_RES, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + } + + ch = readb(&channel->data); + ZSDELAY(); + + ch &= up->parity_mask; + + /* Handle the null char got when BREAK is removed. */ + if (!ch) + r1 |= up->tty_break; + + /* A real serial line, record the character and status. 
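+		 * Errors are always accounted in port->icount, but only the
+		 * bits that survive read_status_mask (set up from the termios
+		 * iflags in ip22zilog_convert_to_zs()) select the TTY_* flag
+		 * handed to the tty layer, and sysrq (Rx_SYS) characters are
+		 * consumed here rather than queued.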
*/ + flag = TTY_NORMAL; + up->port.icount.rx++; + if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR | Rx_SYS | Rx_BRK)) { + up->tty_break = 0; + + if (r1 & (Rx_SYS | Rx_BRK)) { + up->port.icount.brk++; + if (r1 & Rx_SYS) + continue; + r1 &= ~(PAR_ERR | CRC_ERR); + } + else if (r1 & PAR_ERR) + up->port.icount.parity++; + else if (r1 & CRC_ERR) + up->port.icount.frame++; + if (r1 & Rx_OVR) + up->port.icount.overrun++; + r1 &= up->port.read_status_mask; + if (r1 & Rx_BRK) + flag = TTY_BREAK; + else if (r1 & PAR_ERR) + flag = TTY_PARITY; + else if (r1 & CRC_ERR) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&up->port, ch)) + continue; + + if (push) + uart_insert_char(&up->port, r1, Rx_OVR, ch, flag); + } + return push; +} + +static void ip22zilog_status_handle(struct uart_ip22zilog_port *up, + struct zilog_channel *channel) +{ + unsigned char status; + + status = readb(&channel->control); + ZSDELAY(); + + writeb(RES_EXT_INT, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + if (up->curregs[R15] & BRKIE) { + if ((status & BRK_ABRT) && !(up->prev_status & BRK_ABRT)) { + if (uart_handle_break(&up->port)) + up->tty_break = Rx_SYS; + else + up->tty_break = Rx_BRK; + } + } + + if (ZS_WANTS_MODEM_STATUS(up)) { + if (status & SYNC) + up->port.icount.dsr++; + + /* The Zilog just gives us an interrupt when DCD/CTS/etc. change. + * But it does not tell us which bit has changed, we have to keep + * track of this ourselves. + */ + if ((status ^ up->prev_status) ^ DCD) + uart_handle_dcd_change(&up->port, + (status & DCD)); + if ((status ^ up->prev_status) ^ CTS) + uart_handle_cts_change(&up->port, + (status & CTS)); + + wake_up_interruptible(&up->port.state->port.delta_msr_wait); + } + + up->prev_status = status; +} + +static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up, + struct zilog_channel *channel) +{ + struct circ_buf *xmit; + + if (ZS_IS_CONS(up)) { + unsigned char status = readb(&channel->control); + ZSDELAY(); + + /* TX still busy? Just wait for the next TX done interrupt. + * + * It can occur because of how we do serial console writes. It would + * be nice to transmit console writes just like we normally would for + * a TTY line. (ie. buffered and TX interrupt driven). That is not + * easy because console writes cannot sleep. One solution might be + * to poll on enough port->xmit space becoming free. 
-DaveM + */ + if (!(status & Tx_BUF_EMP)) + return; + } + + up->flags &= ~IP22ZILOG_FLAG_TX_ACTIVE; + + if (ZS_REGS_HELD(up)) { + __load_zsregs(channel, up->curregs); + up->flags &= ~IP22ZILOG_FLAG_REGS_HELD; + } + + if (ZS_TX_STOPPED(up)) { + up->flags &= ~IP22ZILOG_FLAG_TX_STOPPED; + goto ack_tx_int; + } + + if (up->port.x_char) { + up->flags |= IP22ZILOG_FLAG_TX_ACTIVE; + writeb(up->port.x_char, &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + up->port.icount.tx++; + up->port.x_char = 0; + return; + } + + if (up->port.state == NULL) + goto ack_tx_int; + xmit = &up->port.state->xmit; + if (uart_circ_empty(xmit)) + goto ack_tx_int; + if (uart_tx_stopped(&up->port)) + goto ack_tx_int; + + up->flags |= IP22ZILOG_FLAG_TX_ACTIVE; + writeb(xmit->buf[xmit->tail], &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + return; + +ack_tx_int: + writeb(RES_Tx_P, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); +} + +static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id) +{ + struct uart_ip22zilog_port *up = dev_id; + + while (up) { + struct zilog_channel *channel + = ZILOG_CHANNEL_FROM_PORT(&up->port); + unsigned char r3; + bool push = false; + + spin_lock(&up->port.lock); + r3 = read_zsreg(channel, R3); + + /* Channel A */ + if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { + writeb(RES_H_IUS, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + if (r3 & CHARxIP) + push = ip22zilog_receive_chars(up, channel); + if (r3 & CHAEXT) + ip22zilog_status_handle(up, channel); + if (r3 & CHATxIP) + ip22zilog_transmit_chars(up, channel); + } + spin_unlock(&up->port.lock); + + if (push) + tty_flip_buffer_push(&up->port.state->port); + + /* Channel B */ + up = up->next; + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + push = false; + + spin_lock(&up->port.lock); + if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { + writeb(RES_H_IUS, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + if (r3 & CHBRxIP) + push = ip22zilog_receive_chars(up, channel); + if (r3 & CHBEXT) + ip22zilog_status_handle(up, channel); + if (r3 & CHBTxIP) + ip22zilog_transmit_chars(up, channel); + } + spin_unlock(&up->port.lock); + + if (push) + tty_flip_buffer_push(&up->port.state->port); + + up = up->next; + } + + return IRQ_HANDLED; +} + +/* A convenient way to quickly get R0 status. The caller must _not_ hold the + * port lock, it is acquired here. + */ +static __inline__ unsigned char ip22zilog_read_channel_status(struct uart_port *port) +{ + struct zilog_channel *channel; + unsigned char status; + + channel = ZILOG_CHANNEL_FROM_PORT(port); + status = readb(&channel->control); + ZSDELAY(); + + return status; +} + +/* The port lock is not held. */ +static unsigned int ip22zilog_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned char status; + unsigned int ret; + + spin_lock_irqsave(&port->lock, flags); + + status = ip22zilog_read_channel_status(port); + + spin_unlock_irqrestore(&port->lock, flags); + + if (status & Tx_BUF_EMP) + ret = TIOCSER_TEMT; + else + ret = 0; + + return ret; +} + +/* The port lock is held and interrupts are disabled. 
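+ * Note that the Zilog SYNC status bit is reported as DSR below,
+ * matching the icount.dsr accounting in ip22zilog_status_handle().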
*/ +static unsigned int ip22zilog_get_mctrl(struct uart_port *port) +{ + unsigned char status; + unsigned int ret; + + status = ip22zilog_read_channel_status(port); + + ret = 0; + if (status & DCD) + ret |= TIOCM_CAR; + if (status & SYNC) + ret |= TIOCM_DSR; + if (status & CTS) + ret |= TIOCM_CTS; + + return ret; +} + +/* The port lock is held and interrupts are disabled. */ +static void ip22zilog_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_ip22zilog_port *up = + container_of(port, struct uart_ip22zilog_port, port); + struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char set_bits, clear_bits; + + set_bits = clear_bits = 0; + + if (mctrl & TIOCM_RTS) + set_bits |= RTS; + else + clear_bits |= RTS; + if (mctrl & TIOCM_DTR) + set_bits |= DTR; + else + clear_bits |= DTR; + + /* NOTE: Not subject to 'transmitter active' rule. */ + up->curregs[R5] |= set_bits; + up->curregs[R5] &= ~clear_bits; + write_zsreg(channel, R5, up->curregs[R5]); +} + +/* The port lock is held and interrupts are disabled. */ +static void ip22zilog_stop_tx(struct uart_port *port) +{ + struct uart_ip22zilog_port *up = + container_of(port, struct uart_ip22zilog_port, port); + + up->flags |= IP22ZILOG_FLAG_TX_STOPPED; +} + +/* The port lock is held and interrupts are disabled. */ +static void ip22zilog_start_tx(struct uart_port *port) +{ + struct uart_ip22zilog_port *up = + container_of(port, struct uart_ip22zilog_port, port); + struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char status; + + up->flags |= IP22ZILOG_FLAG_TX_ACTIVE; + up->flags &= ~IP22ZILOG_FLAG_TX_STOPPED; + + status = readb(&channel->control); + ZSDELAY(); + + /* TX busy? Just wait for the TX done interrupt. */ + if (!(status & Tx_BUF_EMP)) + return; + + /* Send the first character to jump-start the TX done + * IRQ sending engine. + */ + if (port->x_char) { + writeb(port->x_char, &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + port->icount.tx++; + port->x_char = 0; + } else { + struct circ_buf *xmit = &port->state->xmit; + + if (uart_circ_empty(xmit)) + return; + writeb(xmit->buf[xmit->tail], &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + } +} + +/* The port lock is held and interrupts are disabled. */ +static void ip22zilog_stop_rx(struct uart_port *port) +{ + struct uart_ip22zilog_port *up = UART_ZILOG(port); + struct zilog_channel *channel; + + if (ZS_IS_CONS(up)) + return; + + channel = ZILOG_CHANNEL_FROM_PORT(port); + + /* Disable all RX interrupts. */ + up->curregs[R1] &= ~RxINT_MASK; + ip22zilog_maybe_update_regs(up, channel); +} + +/* The port lock is held. */ +static void ip22zilog_enable_ms(struct uart_port *port) +{ + struct uart_ip22zilog_port *up = + container_of(port, struct uart_ip22zilog_port, port); + struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char new_reg; + + new_reg = up->curregs[R15] | (DCDIE | SYNCIE | CTSIE); + if (new_reg != up->curregs[R15]) { + up->curregs[R15] = new_reg; + + /* NOTE: Not subject to 'transmitter active' rule. */ + write_zsreg(channel, R15, up->curregs[R15]); + } +} + +/* The port lock is not held. 
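+ * ip22zilog_break_ctl() therefore takes the lock itself and, like
+ * set_mctrl(), writes R5 straight to the chip (see the "transmitter
+ * active" note below) instead of deferring through
+ * ip22zilog_maybe_update_regs().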
*/ +static void ip22zilog_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_ip22zilog_port *up = + container_of(port, struct uart_ip22zilog_port, port); + struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char set_bits, clear_bits, new_reg; + unsigned long flags; + + set_bits = clear_bits = 0; + + if (break_state) + set_bits |= SND_BRK; + else + clear_bits |= SND_BRK; + + spin_lock_irqsave(&port->lock, flags); + + new_reg = (up->curregs[R5] | set_bits) & ~clear_bits; + if (new_reg != up->curregs[R5]) { + up->curregs[R5] = new_reg; + + /* NOTE: Not subject to 'transmitter active' rule. */ + write_zsreg(channel, R5, up->curregs[R5]); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void __ip22zilog_reset(struct uart_ip22zilog_port *up) +{ + struct zilog_channel *channel; + int i; + + if (up->flags & IP22ZILOG_FLAG_RESET_DONE) + return; + + /* Let pending transmits finish. */ + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + for (i = 0; i < 1000; i++) { + unsigned char stat = read_zsreg(channel, R1); + if (stat & ALL_SNT) + break; + udelay(100); + } + + if (!ZS_IS_CHANNEL_A(up)) { + up++; + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + } + write_zsreg(channel, R9, FHWRES); + ZSDELAY_LONG(); + (void) read_zsreg(channel, R0); + + up->flags |= IP22ZILOG_FLAG_RESET_DONE; + up->next->flags |= IP22ZILOG_FLAG_RESET_DONE; +} + +static void __ip22zilog_startup(struct uart_ip22zilog_port *up) +{ + struct zilog_channel *channel; + + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + + __ip22zilog_reset(up); + + __load_zsregs(channel, up->curregs); + /* set master interrupt enable */ + write_zsreg(channel, R9, up->curregs[R9]); + up->prev_status = readb(&channel->control); + + /* Enable receiver and transmitter. */ + up->curregs[R3] |= RxENAB; + up->curregs[R5] |= TxENAB; + + up->curregs[R1] |= EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB; + ip22zilog_maybe_update_regs(up, channel); +} + +static int ip22zilog_startup(struct uart_port *port) +{ + struct uart_ip22zilog_port *up = UART_ZILOG(port); + unsigned long flags; + + if (ZS_IS_CONS(up)) + return 0; + + spin_lock_irqsave(&port->lock, flags); + __ip22zilog_startup(up); + spin_unlock_irqrestore(&port->lock, flags); + return 0; +} + +/* + * The test for ZS_IS_CONS is explained by the following e-mail: + ***** + * From: Russell King + * Date: Sun, 8 Dec 2002 10:18:38 +0000 + * + * On Sun, Dec 08, 2002 at 02:43:36AM -0500, Pete Zaitcev wrote: + * > I boot my 2.5 boxes using "console=ttyS0,9600" argument, + * > and I noticed that something is not right with reference + * > counting in this case. It seems that when the console + * > is open by kernel initially, this is not accounted + * > as an open, and uart_startup is not called. + * + * That is correct. We are unable to call uart_startup when the serial + * console is initialised because it may need to allocate memory (as + * request_irq does) and the memory allocators may not have been + * initialised. + * + * 1. initialise the port into a state where it can send characters in the + * console write method. + * + * 2. 
don't do the actual hardware shutdown in your shutdown() method (but + * do the normal software shutdown - ie, free irqs etc) + ***** + */ +static void ip22zilog_shutdown(struct uart_port *port) +{ + struct uart_ip22zilog_port *up = UART_ZILOG(port); + struct zilog_channel *channel; + unsigned long flags; + + if (ZS_IS_CONS(up)) + return; + + spin_lock_irqsave(&port->lock, flags); + + channel = ZILOG_CHANNEL_FROM_PORT(port); + + /* Disable receiver and transmitter. */ + up->curregs[R3] &= ~RxENAB; + up->curregs[R5] &= ~TxENAB; + + /* Disable all interrupts and BRK assertion. */ + up->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); + up->curregs[R5] &= ~SND_BRK; + ip22zilog_maybe_update_regs(up, channel); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* Shared by TTY driver and serial console setup. The port lock is held + * and local interrupts are disabled. + */ +static void +ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag, + unsigned int iflag, int brg) +{ + + up->curregs[R10] = NRZ; + up->curregs[R11] = TCBR | RCBR; + + /* Program BAUD and clock source. */ + up->curregs[R4] &= ~XCLK_MASK; + up->curregs[R4] |= X16CLK; + up->curregs[R12] = brg & 0xff; + up->curregs[R13] = (brg >> 8) & 0xff; + up->curregs[R14] = BRENAB; + + /* Character size, stop bits, and parity. */ + up->curregs[3] &= ~RxN_MASK; + up->curregs[5] &= ~TxN_MASK; + switch (cflag & CSIZE) { + case CS5: + up->curregs[3] |= Rx5; + up->curregs[5] |= Tx5; + up->parity_mask = 0x1f; + break; + case CS6: + up->curregs[3] |= Rx6; + up->curregs[5] |= Tx6; + up->parity_mask = 0x3f; + break; + case CS7: + up->curregs[3] |= Rx7; + up->curregs[5] |= Tx7; + up->parity_mask = 0x7f; + break; + case CS8: + default: + up->curregs[3] |= Rx8; + up->curregs[5] |= Tx8; + up->parity_mask = 0xff; + break; + } + up->curregs[4] &= ~0x0c; + if (cflag & CSTOPB) + up->curregs[4] |= SB2; + else + up->curregs[4] |= SB1; + if (cflag & PARENB) + up->curregs[4] |= PAR_ENAB; + else + up->curregs[4] &= ~PAR_ENAB; + if (!(cflag & PARODD)) + up->curregs[4] |= PAR_EVEN; + else + up->curregs[4] &= ~PAR_EVEN; + + up->port.read_status_mask = Rx_OVR; + if (iflag & INPCK) + up->port.read_status_mask |= CRC_ERR | PAR_ERR; + if (iflag & (IGNBRK | BRKINT | PARMRK)) + up->port.read_status_mask |= BRK_ABRT; + + up->port.ignore_status_mask = 0; + if (iflag & IGNPAR) + up->port.ignore_status_mask |= CRC_ERR | PAR_ERR; + if (iflag & IGNBRK) { + up->port.ignore_status_mask |= BRK_ABRT; + if (iflag & IGNPAR) + up->port.ignore_status_mask |= Rx_OVR; + } + + if ((cflag & CREAD) == 0) + up->port.ignore_status_mask = 0xff; +} + +/* The port lock is not held. 
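+ *
+ * For reference, with the fixed ZS_CLOCK of 3.672 MHz and the x16
+ * clock mode programmed by ip22zilog_convert_to_zs(), a request for
+ * 9600 baud works out (using the macros from ip22zilog.h) as
+ *
+ *	BPS_TO_BRG(9600, 3672000 / 16)
+ *		= ((229500 + 9600) / (2 * 9600)) - 2 = 10
+ *
+ * and BRG_TO_BPS(10, 229500) = 229500 / 2 / 12 = 9562, i.e. within
+ * about 0.4% of the requested rate.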
*/ +static void +ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_ip22zilog_port *up = + container_of(port, struct uart_ip22zilog_port, port); + unsigned long flags; + int baud, brg; + + baud = uart_get_baud_rate(port, termios, old, 1200, 76800); + + spin_lock_irqsave(&up->port.lock, flags); + + brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); + + ip22zilog_convert_to_zs(up, termios->c_cflag, termios->c_iflag, brg); + + if (UART_ENABLE_MS(&up->port, termios->c_cflag)) + up->flags |= IP22ZILOG_FLAG_MODEM_STATUS; + else + up->flags &= ~IP22ZILOG_FLAG_MODEM_STATUS; + + ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port)); + uart_update_timeout(port, termios->c_cflag, baud); + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static const char *ip22zilog_type(struct uart_port *port) +{ + return "IP22-Zilog"; +} + +/* We do not request/release mappings of the registers here, this + * happens at early serial probe time. + */ +static void ip22zilog_release_port(struct uart_port *port) +{ +} + +static int ip22zilog_request_port(struct uart_port *port) +{ + return 0; +} + +/* These do not need to do anything interesting either. */ +static void ip22zilog_config_port(struct uart_port *port, int flags) +{ +} + +/* We do not support letting the user mess with the divisor, IRQ, etc. */ +static int ip22zilog_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return -EINVAL; +} + +static const struct uart_ops ip22zilog_pops = { + .tx_empty = ip22zilog_tx_empty, + .set_mctrl = ip22zilog_set_mctrl, + .get_mctrl = ip22zilog_get_mctrl, + .stop_tx = ip22zilog_stop_tx, + .start_tx = ip22zilog_start_tx, + .stop_rx = ip22zilog_stop_rx, + .enable_ms = ip22zilog_enable_ms, + .break_ctl = ip22zilog_break_ctl, + .startup = ip22zilog_startup, + .shutdown = ip22zilog_shutdown, + .set_termios = ip22zilog_set_termios, + .type = ip22zilog_type, + .release_port = ip22zilog_release_port, + .request_port = ip22zilog_request_port, + .config_port = ip22zilog_config_port, + .verify_port = ip22zilog_verify_port, +}; + +static struct uart_ip22zilog_port *ip22zilog_port_table; +static struct zilog_layout **ip22zilog_chip_regs; + +static struct uart_ip22zilog_port *ip22zilog_irq_chain; +static int zilog_irq = -1; + +static void * __init alloc_one_table(unsigned long size) +{ + return kzalloc(size, GFP_KERNEL); +} + +static void __init ip22zilog_alloc_tables(void) +{ + ip22zilog_port_table = (struct uart_ip22zilog_port *) + alloc_one_table(NUM_CHANNELS * sizeof(struct uart_ip22zilog_port)); + ip22zilog_chip_regs = (struct zilog_layout **) + alloc_one_table(NUM_IP22ZILOG * sizeof(struct zilog_layout *)); + + if (ip22zilog_port_table == NULL || ip22zilog_chip_regs == NULL) { + panic("IP22-Zilog: Cannot allocate IP22-Zilog tables."); + } +} + +/* Get the address of the registers for IP22-Zilog instance CHIP. */ +static struct zilog_layout * __init get_zs(int chip) +{ + unsigned long base; + + if (chip < 0 || chip >= NUM_IP22ZILOG) { + panic("IP22-Zilog: Illegal chip number %d in get_zs.", chip); + } + + /* Not probe-able, hard code it. 
*/ + base = (unsigned long) &sgioc->uart; + + zilog_irq = SGI_SERIAL_IRQ; + request_mem_region(base, 8, "IP22-Zilog"); + + return (struct zilog_layout *) base; +} + +#define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */ + +#ifdef CONFIG_SERIAL_IP22_ZILOG_CONSOLE +static void ip22zilog_put_char(struct uart_port *port, unsigned char ch) +{ + struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port); + int loops = ZS_PUT_CHAR_MAX_DELAY; + + /* This is a timed polling loop so do not switch the explicit + * udelay with ZSDELAY as that is a NOP on some platforms. -DaveM + */ + do { + unsigned char val = readb(&channel->control); + if (val & Tx_BUF_EMP) { + ZSDELAY(); + break; + } + udelay(5); + } while (--loops); + + writeb(ch, &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); +} + +static void +ip22zilog_console_write(struct console *con, const char *s, unsigned int count) +{ + struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index]; + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + uart_console_write(&up->port, s, count, ip22zilog_put_char); + udelay(2); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static int __init ip22zilog_console_setup(struct console *con, char *options) +{ + struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index]; + unsigned long flags; + int baud = 9600, bits = 8; + int parity = 'n'; + int flow = 'n'; + + up->flags |= IP22ZILOG_FLAG_IS_CONS; + + printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index); + + spin_lock_irqsave(&up->port.lock, flags); + + up->curregs[R15] |= BRKIE; + + __ip22zilog_startup(up); + + spin_unlock_irqrestore(&up->port.lock, flags); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + return uart_set_options(&up->port, con, baud, parity, bits, flow); +} + +static struct uart_driver ip22zilog_reg; + +static struct console ip22zilog_console = { + .name = "ttyS", + .write = ip22zilog_console_write, + .device = uart_console_device, + .setup = ip22zilog_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &ip22zilog_reg, +}; +#endif /* CONFIG_SERIAL_IP22_ZILOG_CONSOLE */ + +static struct uart_driver ip22zilog_reg = { + .owner = THIS_MODULE, + .driver_name = "serial", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = NUM_CHANNELS, +#ifdef CONFIG_SERIAL_IP22_ZILOG_CONSOLE + .cons = &ip22zilog_console, +#endif +}; + +static void __init ip22zilog_prepare(void) +{ + unsigned char sysrq_on = IS_ENABLED(CONFIG_SERIAL_IP22_ZILOG_CONSOLE); + struct uart_ip22zilog_port *up; + struct zilog_layout *rp; + int channel, chip; + + /* + * Temporary fix. + */ + for (channel = 0; channel < NUM_CHANNELS; channel++) + spin_lock_init(&ip22zilog_port_table[channel].port.lock); + + ip22zilog_irq_chain = &ip22zilog_port_table[NUM_CHANNELS - 1]; + up = &ip22zilog_port_table[0]; + for (channel = NUM_CHANNELS - 1 ; channel > 0; channel--) + up[channel].next = &up[channel - 1]; + up[channel].next = NULL; + + for (chip = 0; chip < NUM_IP22ZILOG; chip++) { + if (!ip22zilog_chip_regs[chip]) { + ip22zilog_chip_regs[chip] = rp = get_zs(chip); + + up[(chip * 2) + 0].port.membase = (char *) &rp->channelB; + up[(chip * 2) + 1].port.membase = (char *) &rp->channelA; + + /* In theory mapbase is the physical address ... 
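+			 * but in practice the ioremap() cookie is stored
+			 * there, and ip22zilog_exit() simply iounmap()s
+			 * whatever is left in port.mapbase; the register
+			 * accessors only ever use membase, set just above.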
*/ + up[(chip * 2) + 0].port.mapbase = + (unsigned long) ioremap((unsigned long) &rp->channelB, 8); + up[(chip * 2) + 1].port.mapbase = + (unsigned long) ioremap((unsigned long) &rp->channelA, 8); + } + + /* Channel A */ + up[(chip * 2) + 0].port.iotype = UPIO_MEM; + up[(chip * 2) + 0].port.irq = zilog_irq; + up[(chip * 2) + 0].port.uartclk = ZS_CLOCK; + up[(chip * 2) + 0].port.fifosize = 1; + up[(chip * 2) + 0].port.has_sysrq = sysrq_on; + up[(chip * 2) + 0].port.ops = &ip22zilog_pops; + up[(chip * 2) + 0].port.type = PORT_IP22ZILOG; + up[(chip * 2) + 0].port.flags = 0; + up[(chip * 2) + 0].port.line = (chip * 2) + 0; + up[(chip * 2) + 0].flags = 0; + + /* Channel B */ + up[(chip * 2) + 1].port.iotype = UPIO_MEM; + up[(chip * 2) + 1].port.irq = zilog_irq; + up[(chip * 2) + 1].port.uartclk = ZS_CLOCK; + up[(chip * 2) + 1].port.fifosize = 1; + up[(chip * 2) + 1].port.has_sysrq = sysrq_on; + up[(chip * 2) + 1].port.ops = &ip22zilog_pops; + up[(chip * 2) + 1].port.type = PORT_IP22ZILOG; + up[(chip * 2) + 1].port.line = (chip * 2) + 1; + up[(chip * 2) + 1].flags |= IP22ZILOG_FLAG_IS_CHANNEL_A; + } + + for (channel = 0; channel < NUM_CHANNELS; channel++) { + struct uart_ip22zilog_port *up = &ip22zilog_port_table[channel]; + int brg; + + /* Normal serial TTY. */ + up->parity_mask = 0xff; + up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB; + up->curregs[R4] = PAR_EVEN | X16CLK | SB1; + up->curregs[R3] = RxENAB | Rx8; + up->curregs[R5] = TxENAB | Tx8; + up->curregs[R9] = NV | MIE; + up->curregs[R10] = NRZ; + up->curregs[R11] = TCBR | RCBR; + brg = BPS_TO_BRG(9600, ZS_CLOCK / ZS_CLOCK_DIVISOR); + up->curregs[R12] = (brg & 0xff); + up->curregs[R13] = (brg >> 8) & 0xff; + up->curregs[R14] = BRENAB; + } +} + +static int __init ip22zilog_ports_init(void) +{ + int ret; + + printk(KERN_INFO "Serial: IP22 Zilog driver (%d chips).\n", NUM_IP22ZILOG); + + ip22zilog_prepare(); + + if (request_irq(zilog_irq, ip22zilog_interrupt, 0, + "IP22-Zilog", ip22zilog_irq_chain)) { + panic("IP22-Zilog: Unable to register zs interrupt handler.\n"); + } + + ret = uart_register_driver(&ip22zilog_reg); + if (ret == 0) { + int i; + + for (i = 0; i < NUM_CHANNELS; i++) { + struct uart_ip22zilog_port *up = &ip22zilog_port_table[i]; + + uart_add_one_port(&ip22zilog_reg, &up->port); + } + } + + return ret; +} + +static int __init ip22zilog_init(void) +{ + /* IP22 Zilog setup is hard coded, no probing to do. */ + ip22zilog_alloc_tables(); + ip22zilog_ports_init(); + + return 0; +} + +static void __exit ip22zilog_exit(void) +{ + int i; + struct uart_ip22zilog_port *up; + + for (i = 0; i < NUM_CHANNELS; i++) { + up = &ip22zilog_port_table[i]; + + uart_remove_one_port(&ip22zilog_reg, &up->port); + } + + /* Free IO mem */ + up = &ip22zilog_port_table[0]; + for (i = 0; i < NUM_IP22ZILOG; i++) { + if (up[(i * 2) + 0].port.mapbase) { + iounmap((void*)up[(i * 2) + 0].port.mapbase); + up[(i * 2) + 0].port.mapbase = 0; + } + if (up[(i * 2) + 1].port.mapbase) { + iounmap((void*)up[(i * 2) + 1].port.mapbase); + up[(i * 2) + 1].port.mapbase = 0; + } + } + + uart_unregister_driver(&ip22zilog_reg); +} + +module_init(ip22zilog_init); +module_exit(ip22zilog_exit); + +/* David wrote it but I'm to blame for the bugs ... 
*/ +MODULE_AUTHOR("Ralf Baechle "); +MODULE_DESCRIPTION("SGI Zilog serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h new file mode 100644 index 000000000..b52801fe2 --- /dev/null +++ b/drivers/tty/serial/ip22zilog.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IP22_ZILOG_H +#define _IP22_ZILOG_H + +#include + +struct zilog_channel { +#ifdef __BIG_ENDIAN + volatile unsigned char unused0[3]; + volatile unsigned char control; + volatile unsigned char unused1[3]; + volatile unsigned char data; +#else /* __LITTLE_ENDIAN */ + volatile unsigned char control; + volatile unsigned char unused0[3]; + volatile unsigned char data; + volatile unsigned char unused1[3]; +#endif +}; + +struct zilog_layout { + struct zilog_channel channelB; + struct zilog_channel channelA; +}; + +#define NUM_ZSREGS 16 + +/* Conversion routines to/from brg time constants from/to bits + * per second. + */ +#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) +#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) + +/* The Zilog register set */ + +#define FLAG 0x7e + +/* Write Register 0 */ +#define R0 0 /* Register selects */ +#define R1 1 +#define R2 2 +#define R3 3 +#define R4 4 +#define R5 5 +#define R6 6 +#define R7 7 +#define R8 8 +#define R9 9 +#define R10 10 +#define R11 11 +#define R12 12 +#define R13 13 +#define R14 14 +#define R15 15 + +#define NULLCODE 0 /* Null Code */ +#define POINT_HIGH 0x8 /* Select upper half of registers */ +#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */ +#define SEND_ABORT 0x18 /* HDLC Abort */ +#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ +#define RES_Tx_P 0x28 /* Reset TxINT Pending */ +#define ERR_RES 0x30 /* Error Reset */ +#define RES_H_IUS 0x38 /* Reset highest IUS */ + +#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ +#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ +#define RES_EOM_L 0xC0 /* Reset EOM latch */ + +/* Write Register 1 */ + +#define EXT_INT_ENAB 0x1 /* Ext Int Enable */ +#define TxINT_ENAB 0x2 /* Tx Int Enable */ +#define PAR_SPEC 0x4 /* Parity is special condition */ + +#define RxINT_DISAB 0 /* Rx Int Disable */ +#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ +#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */ +#define INT_ERR_Rx 0x18 /* Int on error only */ +#define RxINT_MASK 0x18 + +#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */ +#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */ +#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */ + +/* Write Register #2 (Interrupt Vector) */ + +/* Write Register 3 */ + +#define RxENAB 0x1 /* Rx Enable */ +#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ +#define ADD_SM 0x4 /* Address Search Mode (SDLC) */ +#define RxCRC_ENAB 0x8 /* Rx CRC Enable */ +#define ENT_HM 0x10 /* Enter Hunt Mode */ +#define AUTO_ENAB 0x20 /* Auto Enables */ +#define Rx5 0x0 /* Rx 5 Bits/Character */ +#define Rx7 0x40 /* Rx 7 Bits/Character */ +#define Rx6 0x80 /* Rx 6 Bits/Character */ +#define Rx8 0xc0 /* Rx 8 Bits/Character */ +#define RxN_MASK 0xc0 + +/* Write Register 4 */ + +#define PAR_ENAB 0x1 /* Parity Enable */ +#define PAR_EVEN 0x2 /* Parity Even/Odd* */ + +#define SYNC_ENAB 0 /* Sync Modes Enable */ +#define SB1 0x4 /* 1 stop bit/char */ +#define SB15 0x8 /* 1.5 stop bits/char */ +#define SB2 0xc /* 2 stop bits/char */ + +#define MONSYNC 0 /* 8 Bit Sync character */ +#define BISYNC 0x10 /* 16 bit sync character */ +#define SDLC 0x20 /* SDLC Mode (01111110 
Sync Flag) */ +#define EXTSYNC 0x30 /* External Sync Mode */ + +#define X1CLK 0x0 /* x1 clock mode */ +#define X16CLK 0x40 /* x16 clock mode */ +#define X32CLK 0x80 /* x32 clock mode */ +#define X64CLK 0xC0 /* x64 clock mode */ +#define XCLK_MASK 0xC0 + +/* Write Register 5 */ + +#define TxCRC_ENAB 0x1 /* Tx CRC Enable */ +#define RTS 0x2 /* RTS */ +#define SDLC_CRC 0x4 /* SDLC/CRC-16 */ +#define TxENAB 0x8 /* Tx Enable */ +#define SND_BRK 0x10 /* Send Break */ +#define Tx5 0x0 /* Tx 5 bits (or less)/character */ +#define Tx7 0x20 /* Tx 7 bits/character */ +#define Tx6 0x40 /* Tx 6 bits/character */ +#define Tx8 0x60 /* Tx 8 bits/character */ +#define TxN_MASK 0x60 +#define DTR 0x80 /* DTR */ + +/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ + +/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ + +/* Write Register 8 (transmit buffer) */ + +/* Write Register 9 (Master interrupt control) */ +#define VIS 1 /* Vector Includes Status */ +#define NV 2 /* No Vector */ +#define DLC 4 /* Disable Lower Chain */ +#define MIE 8 /* Master Interrupt Enable */ +#define STATHI 0x10 /* Status high */ +#define NORESET 0 /* No reset on write to R9 */ +#define CHRB 0x40 /* Reset channel B */ +#define CHRA 0x80 /* Reset channel A */ +#define FHWRES 0xc0 /* Force hardware reset */ + +/* Write Register 10 (misc control bits) */ +#define BIT6 1 /* 6 bit/8bit sync */ +#define LOOPMODE 2 /* SDLC Loop mode */ +#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ +#define MARKIDLE 8 /* Mark/flag on idle */ +#define GAOP 0x10 /* Go active on poll */ +#define NRZ 0 /* NRZ mode */ +#define NRZI 0x20 /* NRZI mode */ +#define FM1 0x40 /* FM1 (transition = 1) */ +#define FM0 0x60 /* FM0 (transition = 0) */ +#define CRCPS 0x80 /* CRC Preset I/O */ + +/* Write Register 11 (Clock Mode control) */ +#define TRxCXT 0 /* TRxC = Xtal output */ +#define TRxCTC 1 /* TRxC = Transmit clock */ +#define TRxCBR 2 /* TRxC = BR Generator Output */ +#define TRxCDP 3 /* TRxC = DPLL output */ +#define TRxCOI 4 /* TRxC O/I */ +#define TCRTxCP 0 /* Transmit clock = RTxC pin */ +#define TCTRxCP 8 /* Transmit clock = TRxC pin */ +#define TCBR 0x10 /* Transmit clock = BR Generator output */ +#define TCDPLL 0x18 /* Transmit clock = DPLL output */ +#define RCRTxCP 0 /* Receive clock = RTxC pin */ +#define RCTRxCP 0x20 /* Receive clock = TRxC pin */ +#define RCBR 0x40 /* Receive clock = BR Generator output */ +#define RCDPLL 0x60 /* Receive clock = DPLL output */ +#define RTxCX 0x80 /* RTxC Xtal/No Xtal */ + +/* Write Register 12 (lower byte of baud rate generator time constant) */ + +/* Write Register 13 (upper byte of baud rate generator time constant) */ + +/* Write Register 14 (Misc control bits) */ +#define BRENAB 1 /* Baud rate generator enable */ +#define BRSRC 2 /* Baud rate generator source */ +#define DTRREQ 4 /* DTR/Request function */ +#define AUTOECHO 8 /* Auto Echo */ +#define LOOPBAK 0x10 /* Local loopback */ +#define SEARCH 0x20 /* Enter search mode */ +#define RMC 0x40 /* Reset missing clock */ +#define DISDPLL 0x60 /* Disable DPLL */ +#define SSBR 0x80 /* Set DPLL source = BR generator */ +#define SSRTxC 0xa0 /* Set DPLL source = RTxC */ +#define SFMM 0xc0 /* Set FM mode */ +#define SNRZI 0xe0 /* Set NRZI mode */ + +/* Write Register 15 (external/status interrupt control) */ +#define ZCIE 2 /* Zero count IE */ +#define DCDIE 8 /* DCD IE */ +#define SYNCIE 0x10 /* Sync/hunt IE */ +#define CTSIE 0x20 /* CTS IE */ +#define TxUIE 0x40 /* Tx Underrun/EOM IE */ +#define BRKIE 0x80 /* Break/Abort IE */ + + +/* Read 
Register 0 */ +#define Rx_CH_AV 0x1 /* Rx Character Available */ +#define ZCOUNT 0x2 /* Zero count */ +#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ +#define DCD 0x8 /* DCD */ +#define SYNC 0x10 /* Sync/hunt */ +#define CTS 0x20 /* CTS */ +#define TxEOM 0x40 /* Tx underrun */ +#define BRK_ABRT 0x80 /* Break/Abort */ + +/* Read Register 1 */ +#define ALL_SNT 0x1 /* All sent */ +/* Residue Data for 8 Rx bits/char programmed */ +#define RES3 0x8 /* 0/3 */ +#define RES4 0x4 /* 0/4 */ +#define RES5 0xc /* 0/5 */ +#define RES6 0x2 /* 0/6 */ +#define RES7 0xa /* 0/7 */ +#define RES8 0x6 /* 0/8 */ +#define RES18 0xe /* 1/8 */ +#define RES28 0x0 /* 2/8 */ +/* Special Rx Condition Interrupts */ +#define PAR_ERR 0x10 /* Parity error */ +#define Rx_OVR 0x20 /* Rx Overrun Error */ +#define CRC_ERR 0x40 /* CRC/Framing Error */ +#define END_FR 0x80 /* End of Frame (SDLC) */ + +/* Read Register 2 (channel b only) - Interrupt vector */ +#define CHB_Tx_EMPTY 0x00 +#define CHB_EXT_STAT 0x02 +#define CHB_Rx_AVAIL 0x04 +#define CHB_SPECIAL 0x06 +#define CHA_Tx_EMPTY 0x08 +#define CHA_EXT_STAT 0x0a +#define CHA_Rx_AVAIL 0x0c +#define CHA_SPECIAL 0x0e +#define STATUS_MASK 0x0e + +/* Read Register 3 (interrupt pending register) ch a only */ +#define CHBEXT 0x1 /* Channel B Ext/Stat IP */ +#define CHBTxIP 0x2 /* Channel B Tx IP */ +#define CHBRxIP 0x4 /* Channel B Rx IP */ +#define CHAEXT 0x8 /* Channel A Ext/Stat IP */ +#define CHATxIP 0x10 /* Channel A Tx IP */ +#define CHARxIP 0x20 /* Channel A Rx IP */ + +/* Read Register 8 (receive data register) */ + +/* Read Register 10 (misc status bits) */ +#define ONLOOP 2 /* On loop */ +#define LOOPSEND 0x10 /* Loop sending */ +#define CLK2MIS 0x40 /* Two clocks missing */ +#define CLK1MIS 0x80 /* One clock missing */ + +/* Read Register 12 (lower byte of baud rate generator constant) */ + +/* Read Register 13 (upper byte of baud rate generator constant) */ + +/* Read Register 15 (value of WR 15) */ + +/* Misc macros */ +#define ZS_CLEARERR(channel) do { writeb(ERR_RES, &channel->control); \ + udelay(5); } while(0) + +#define ZS_CLEARSTAT(channel) do { writeb(RES_EXT_INT, &channel->control); \ + udelay(5); } while(0) + +#define ZS_CLEARFIFO(channel) do { readb(&channel->data); \ + udelay(2); \ + readb(&channel->data); \ + udelay(2); \ + readb(&channel->data); \ + udelay(2); } while(0) + +#endif /* _IP22_ZILOG_H */ diff --git a/drivers/tty/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile new file mode 100644 index 000000000..4f2dbada7 --- /dev/null +++ b/drivers/tty/serial/jsm/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Jasmine adapter +# + +obj-$(CONFIG_SERIAL_JSM) += jsm.o + +jsm-objs := jsm_driver.o jsm_neo.o jsm_tty.o jsm_cls.o + diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h new file mode 100644 index 000000000..8489c07f4 --- /dev/null +++ b/drivers/tty/serial/jsm/jsm.h @@ -0,0 +1,438 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/************************************************************************ + * Copyright 2003 Digi International (www.digi.com) + * + * Copyright (C) 2004 IBM Corporation. All rights reserved. 
+ * + * Contact Information: + * Scott H Kilau + * Wendy Xiong + * + ***********************************************************************/ + +#ifndef __JSM_DRIVER_H +#define __JSM_DRIVER_H + +#include +#include /* To pick up the varions Linux types */ +#include +#include +#include + +/* + * Debugging levels can be set using debug insmod variable + * They can also be compiled out completely. + */ +enum { + DBG_INIT = 0x01, + DBG_BASIC = 0x02, + DBG_CORE = 0x04, + DBG_OPEN = 0x08, + DBG_CLOSE = 0x10, + DBG_READ = 0x20, + DBG_WRITE = 0x40, + DBG_IOCTL = 0x80, + DBG_PROC = 0x100, + DBG_PARAM = 0x200, + DBG_PSCAN = 0x400, + DBG_EVENT = 0x800, + DBG_DRAIN = 0x1000, + DBG_MSIGS = 0x2000, + DBG_MGMT = 0x4000, + DBG_INTR = 0x8000, + DBG_CARR = 0x10000, +}; + +#define jsm_dbg(nlevel, pdev, fmt, ...) \ +do { \ + if (DBG_##nlevel & jsm_debug) \ + dev_dbg(pdev->dev, fmt, ##__VA_ARGS__); \ +} while (0) + +#define MAXLINES 256 +#define MAXPORTS 8 +#define MAX_STOPS_SENT 5 + +/* Board ids */ +#define PCI_DEVICE_ID_CLASSIC_4 0x0028 +#define PCI_DEVICE_ID_CLASSIC_8 0x0029 +#define PCI_DEVICE_ID_CLASSIC_4_422 0x00D0 +#define PCI_DEVICE_ID_CLASSIC_8_422 0x00D1 +#define PCI_DEVICE_ID_NEO_4 0x00B0 +#define PCI_DEVICE_ID_NEO_1_422 0x00CC +#define PCI_DEVICE_ID_NEO_1_422_485 0x00CD +#define PCI_DEVICE_ID_NEO_2_422_485 0x00CE +#define PCIE_DEVICE_ID_NEO_8 0x00F0 +#define PCIE_DEVICE_ID_NEO_4 0x00F1 +#define PCIE_DEVICE_ID_NEO_4RJ45 0x00F2 +#define PCIE_DEVICE_ID_NEO_8RJ45 0x00F3 + +/* Board type definitions */ + +#define T_NEO 0000 +#define T_CLASSIC 0001 +#define T_PCIBUS 0400 + +/* Board State Definitions */ + +#define BD_RUNNING 0x0 +#define BD_REASON 0x7f +#define BD_NOTFOUND 0x1 +#define BD_NOIOPORT 0x2 +#define BD_NOMEM 0x3 +#define BD_NOBIOS 0x4 +#define BD_NOFEP 0x5 +#define BD_FAILED 0x6 +#define BD_ALLOCATED 0x7 +#define BD_TRIBOOT 0x8 +#define BD_BADKME 0x80 + + +/* 4 extra for alignment play space */ +#define WRITEBUFLEN ((4096) + 4) + +#define JSM_VERSION "jsm: 1.2-1-INKERNEL" +#define JSM_PARTNUM "40002438_A-INKERNEL" + +struct jsm_board; +struct jsm_channel; + +/************************************************************************ + * Per board operations structure * + ************************************************************************/ +struct board_ops { + irq_handler_t intr; + void (*uart_init)(struct jsm_channel *ch); + void (*uart_off)(struct jsm_channel *ch); + void (*param)(struct jsm_channel *ch); + void (*assert_modem_signals)(struct jsm_channel *ch); + void (*flush_uart_write)(struct jsm_channel *ch); + void (*flush_uart_read)(struct jsm_channel *ch); + void (*disable_receiver)(struct jsm_channel *ch); + void (*enable_receiver)(struct jsm_channel *ch); + void (*send_break)(struct jsm_channel *ch); + void (*clear_break)(struct jsm_channel *ch); + void (*send_start_character)(struct jsm_channel *ch); + void (*send_stop_character)(struct jsm_channel *ch); + void (*copy_data_from_queue_to_uart)(struct jsm_channel *ch); + u32 (*get_uart_bytes_left)(struct jsm_channel *ch); + void (*send_immediate_char)(struct jsm_channel *ch, unsigned char); +}; + + +/* + * Per-board information + */ +struct jsm_board +{ + int boardnum; /* Board number: 0-32 */ + + int type; /* Type of board */ + u8 rev; /* PCI revision ID */ + struct pci_dev *pci_dev; + u32 maxports; /* MAX ports this board can handle */ + + spinlock_t bd_intr_lock; /* Used to protect the poller tasklet and + * the interrupt routine from each other. 
+ */ + + u32 nasync; /* Number of ports on card */ + + u32 irq; /* Interrupt request number */ + + u64 membase; /* Start of base memory of the card */ + u64 membase_end; /* End of base memory of the card */ + + u8 __iomem *re_map_membase;/* Remapped memory of the card */ + + u64 iobase; /* Start of io base of the card */ + u64 iobase_end; /* End of io base of the card */ + + u32 bd_uart_offset; /* Space between each UART */ + + struct jsm_channel *channels[MAXPORTS]; /* array of pointers to our channels. */ + + u32 bd_dividend; /* Board/UARTs specific dividend */ + + struct board_ops *bd_ops; + + struct list_head jsm_board_entry; +}; + +/************************************************************************ + * Device flag definitions for ch_flags. + ************************************************************************/ +#define CH_PRON 0x0001 /* Printer on string */ +#define CH_STOP 0x0002 /* Output is stopped */ +#define CH_STOPI 0x0004 /* Input is stopped */ +#define CH_CD 0x0008 /* Carrier is present */ +#define CH_FCAR 0x0010 /* Carrier forced on */ +#define CH_HANGUP 0x0020 /* Hangup received */ + +#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */ +#define CH_OPENING 0x0080 /* Port in fragile open state */ +#define CH_CLOSING 0x0100 /* Port in fragile close state */ +#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */ +#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */ +#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */ +#define CH_BREAK_SENDING 0x1000 /* Break is being sent */ +#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */ +#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */ + +/* Our Read/Error queue sizes */ +#define RQUEUEMASK 0x1FFF /* 8 K - 1 */ +#define EQUEUEMASK 0x1FFF /* 8 K - 1 */ +#define RQUEUESIZE (RQUEUEMASK + 1) +#define EQUEUESIZE RQUEUESIZE + + +/************************************************************************ + * Channel information structure. + ************************************************************************/ +struct jsm_channel { + struct uart_port uart_port; + struct jsm_board *ch_bd; /* Board structure pointer */ + + spinlock_t ch_lock; /* provide for serialization */ + wait_queue_head_t ch_flags_wait; + + u32 ch_portnum; /* Port number, 0 offset. 
*/ + u32 ch_open_count; /* open count */ + u32 ch_flags; /* Channel flags */ + + u64 ch_close_delay; /* How long we should drop RTS/DTR for */ + + tcflag_t ch_c_iflag; /* channel iflags */ + tcflag_t ch_c_cflag; /* channel cflags */ + tcflag_t ch_c_oflag; /* channel oflags */ + tcflag_t ch_c_lflag; /* channel lflags */ + u8 ch_stopc; /* Stop character */ + u8 ch_startc; /* Start character */ + + u8 ch_mostat; /* FEP output modem status */ + u8 ch_mistat; /* FEP input modem status */ + + /* Pointers to the "mapped" UART structs */ + struct neo_uart_struct __iomem *ch_neo_uart; /* NEO card */ + struct cls_uart_struct __iomem *ch_cls_uart; /* Classic card */ + + u8 ch_cached_lsr; /* Cached value of the LSR register */ + + u8 *ch_rqueue; /* Our read queue buffer - malloc'ed */ + u16 ch_r_head; /* Head location of the read queue */ + u16 ch_r_tail; /* Tail location of the read queue */ + + u8 *ch_equeue; /* Our error queue buffer - malloc'ed */ + u16 ch_e_head; /* Head location of the error queue */ + u16 ch_e_tail; /* Tail location of the error queue */ + + u64 ch_rxcount; /* total of data received so far */ + u64 ch_txcount; /* total of data transmitted so far */ + + u8 ch_r_tlevel; /* Receive Trigger level */ + u8 ch_t_tlevel; /* Transmit Trigger level */ + + u8 ch_r_watermark; /* Receive Watermark */ + + + u32 ch_stops_sent; /* How many times I have sent a stop character + * to try to stop the other guy sending. + */ + u64 ch_err_parity; /* Count of parity errors on channel */ + u64 ch_err_frame; /* Count of framing errors on channel */ + u64 ch_err_break; /* Count of breaks on channel */ + u64 ch_err_overrun; /* Count of overruns on channel */ + + u64 ch_xon_sends; /* Count of xons transmitted */ + u64 ch_xoff_sends; /* Count of xoffs transmitted */ +}; + +/************************************************************************ + * Per channel/port Classic UART structures * + ************************************************************************ + * Base Structure Entries Usage Meanings to Host * + * * + * W = read write R = read only * + * U = Unused. * + ************************************************************************/ + +struct cls_uart_struct { + u8 txrx; /* WR RHR/THR - Holding Reg */ + u8 ier; /* WR IER - Interrupt Enable Reg */ + u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg*/ + u8 lcr; /* WR LCR - Line Control Reg */ + u8 mcr; /* WR MCR - Modem Control Reg */ + u8 lsr; /* WR LSR - Line Status Reg */ + u8 msr; /* WR MSR - Modem Status Reg */ + u8 spr; /* WR SPR - Scratch Pad Reg */ +}; + +/* Where to read the interrupt register (8bits) */ +#define UART_CLASSIC_POLL_ADDR_OFFSET 0x40 + +#define UART_EXAR654_ENHANCED_REGISTER_SET 0xBF + +#define UART_16654_FCR_TXTRIGGER_8 0x0 +#define UART_16654_FCR_TXTRIGGER_16 0x10 +#define UART_16654_FCR_TXTRIGGER_32 0x20 +#define UART_16654_FCR_TXTRIGGER_56 0x30 + +#define UART_16654_FCR_RXTRIGGER_8 0x0 +#define UART_16654_FCR_RXTRIGGER_16 0x40 +#define UART_16654_FCR_RXTRIGGER_56 0x80 +#define UART_16654_FCR_RXTRIGGER_60 0xC0 + +#define UART_IIR_CTSRTS 0x20 /* Received CTS/RTS change of state */ +#define UART_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */ + +/* + * These are the EXTENDED definitions for the Exar 654's Interrupt + * Enable Register. 
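+ * (strictly, the UART_EXAR654_EFR_* values below are Enhanced
+ * Feature Register bits; the extended IER additions proper are the
+ * UART_EXAR654_IER_* values further down).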
+ */ +#define UART_EXAR654_EFR_ECB 0x10 /* Enhanced control bit */ +#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */ +#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */ +#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */ +#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */ + +#define UART_EXAR654_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */ +#define UART_EXAR654_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */ + +#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */ +#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */ +#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */ + +/************************************************************************ + * Per channel/port NEO UART structure * + ************************************************************************ + * Base Structure Entries Usage Meanings to Host * + * * + * W = read write R = read only * + * U = Unused. * + ************************************************************************/ + +struct neo_uart_struct { + u8 txrx; /* WR RHR/THR - Holding Reg */ + u8 ier; /* WR IER - Interrupt Enable Reg */ + u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */ + u8 lcr; /* WR LCR - Line Control Reg */ + u8 mcr; /* WR MCR - Modem Control Reg */ + u8 lsr; /* WR LSR - Line Status Reg */ + u8 msr; /* WR MSR - Modem Status Reg */ + u8 spr; /* WR SPR - Scratch Pad Reg */ + u8 fctr; /* WR FCTR - Feature Control Reg */ + u8 efr; /* WR EFR - Enhanced Function Reg */ + u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */ + u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */ + u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */ + u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */ + u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */ + u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */ + + u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */ + u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */ + u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */ + u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */ +}; + +/* Where to read the extended interrupt register (32bits instead of 8bits) */ +#define UART_17158_POLL_ADDR_OFFSET 0x80 + +/* + * These are the redefinitions for the FCTR on the XR17C158, since + * Exar made them different than their earlier design. (XR16C854) + */ + +/* These are only applicable when table D is selected */ +#define UART_17158_FCTR_RTS_NODELAY 0x00 +#define UART_17158_FCTR_RTS_4DELAY 0x01 +#define UART_17158_FCTR_RTS_6DELAY 0x02 +#define UART_17158_FCTR_RTS_8DELAY 0x03 +#define UART_17158_FCTR_RTS_12DELAY 0x12 +#define UART_17158_FCTR_RTS_16DELAY 0x05 +#define UART_17158_FCTR_RTS_20DELAY 0x13 +#define UART_17158_FCTR_RTS_24DELAY 0x06 +#define UART_17158_FCTR_RTS_28DELAY 0x14 +#define UART_17158_FCTR_RTS_32DELAY 0x07 +#define UART_17158_FCTR_RTS_36DELAY 0x16 +#define UART_17158_FCTR_RTS_40DELAY 0x08 +#define UART_17158_FCTR_RTS_44DELAY 0x09 +#define UART_17158_FCTR_RTS_48DELAY 0x10 +#define UART_17158_FCTR_RTS_52DELAY 0x11 + +#define UART_17158_FCTR_RTS_IRDA 0x10 +#define UART_17158_FCTR_RS485 0x20 +#define UART_17158_FCTR_TRGA 0x00 +#define UART_17158_FCTR_TRGB 0x40 +#define UART_17158_FCTR_TRGC 0x80 +#define UART_17158_FCTR_TRGD 0xC0 + +/* 17158 trigger table selects.. 
*/ +#define UART_17158_FCTR_BIT6 0x40 +#define UART_17158_FCTR_BIT7 0x80 + +/* 17158 TX/RX memmapped buffer offsets */ +#define UART_17158_RX_FIFOSIZE 64 +#define UART_17158_TX_FIFOSIZE 64 + +/* 17158 Extended IIR's */ +#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */ +#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */ +#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */ +#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */ + +/* + * These are the extended interrupts that get sent + * back to us from the UART's 32bit interrupt register + */ +#define UART_17158_RX_LINE_STATUS 0x1 /* RX Ready */ +#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */ +#define UART_17158_TXRDY 0x3 /* TX Ready */ +#define UART_17158_MSR 0x4 /* Modem State Change */ +#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */ +#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */ + +/* + * These are the EXTENDED definitions for the 17C158's Interrupt + * Enable Register. + */ +#define UART_17158_EFR_ECB 0x10 /* Enhanced control bit */ +#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */ +#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */ +#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */ +#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */ + +#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */ +#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */ + +#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */ +#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */ +#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */ +#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */ + +#define PCI_DEVICE_NEO_2DB9_PCI_NAME "Neo 2 - DB9 Universal PCI" +#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator" +#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI" +#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator" +#define PCIE_DEVICE_NEO_IBM_PCI_NAME "Neo 4 - PCI Express - IBM" + +/* + * Our Global Variables. + */ +extern struct uart_driver jsm_uart_driver; +extern struct board_ops jsm_neo_ops; +extern struct board_ops jsm_cls_ops; +extern int jsm_debug; + +/************************************************************************* + * + * Prototypes for non-static functions used in more than one module + * + *************************************************************************/ +int jsm_tty_init(struct jsm_board *); +int jsm_uart_port_init(struct jsm_board *); +int jsm_remove_uart_port(struct jsm_board *); +void jsm_input(struct jsm_channel *ch); +void jsm_check_queue_flow_control(struct jsm_channel *ch); + +#endif diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c new file mode 100644 index 000000000..3fd57ac3a --- /dev/null +++ b/drivers/tty/serial/jsm/jsm_cls.c @@ -0,0 +1,949 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2003 Digi International (www.digi.com) + * Scott H Kilau + * + * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE! + * + * This is shared code between Digi's CVS archive and the + * Linux Kernel sources. + * Changing the source just for reformatting needlessly breaks + * our CVS diff history. + * + * Send any bug fixes/changes to: Eng.Linux at digi dot com. 
+ * Thank you. + * + */ + +#include /* For udelay */ +#include /* For read[bwl]/write[bwl] */ +#include /* For struct async_serial */ +#include /* For the various UART offsets */ +#include +#include + +#include "jsm.h" /* Driver main header file */ + +static struct { + unsigned int rate; + unsigned int cflag; +} baud_rates[] = { + { 921600, B921600 }, + { 460800, B460800 }, + { 230400, B230400 }, + { 115200, B115200 }, + { 57600, B57600 }, + { 38400, B38400 }, + { 19200, B19200 }, + { 9600, B9600 }, + { 4800, B4800 }, + { 2400, B2400 }, + { 1200, B1200 }, + { 600, B600 }, + { 300, B300 }, + { 200, B200 }, + { 150, B150 }, + { 134, B134 }, + { 110, B110 }, + { 75, B75 }, + { 50, B50 }, +}; + +static void cls_set_cts_flow_control(struct jsm_channel *ch) +{ + u8 lcrb = readb(&ch->ch_cls_uart->lcr); + u8 ier = readb(&ch->ch_cls_uart->ier); + u8 isr_fcr = 0; + + /* + * The Enhanced Register Set may only be accessed when + * the Line Control Register is set to 0xBFh. + */ + writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); + + isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Turn on CTS flow control, turn off IXON flow control */ + isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_CTSDSR); + isr_fcr &= ~(UART_EXAR654_EFR_IXON); + + writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); + + /* Write old LCR value back out, which turns enhanced access off */ + writeb(lcrb, &ch->ch_cls_uart->lcr); + + /* + * Enable interrupts for CTS flow, turn off interrupts for + * received XOFF chars + */ + ier |= (UART_EXAR654_IER_CTSDSR); + ier &= ~(UART_EXAR654_IER_XOFF); + writeb(ier, &ch->ch_cls_uart->ier); + + /* Set the usual FIFO values */ + writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); + + writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 | + UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), + &ch->ch_cls_uart->isr_fcr); + + ch->ch_t_tlevel = 16; +} + +static void cls_set_ixon_flow_control(struct jsm_channel *ch) +{ + u8 lcrb = readb(&ch->ch_cls_uart->lcr); + u8 ier = readb(&ch->ch_cls_uart->ier); + u8 isr_fcr = 0; + + /* + * The Enhanced Register Set may only be accessed when + * the Line Control Register is set to 0xBFh. 
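/*
 * Condensed sketch of the bracketing pattern the flow-control helpers in
 * this file all follow (the helper name and its set/clear interface are
 * illustrative, not part of the driver): the Exar 654 only exposes its
 * enhanced registers while LCR holds 0xBF, and in that mode the ISR/FCR
 * offset addresses the EFR, so every EFR update saves the real LCR,
 * unlocks, modifies, and restores.
 */
static void cls_update_efr(struct jsm_channel *ch, u8 set, u8 clear)
{
	u8 lcr = readb(&ch->ch_cls_uart->lcr);
	u8 efr;

	writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);

	efr = readb(&ch->ch_cls_uart->isr_fcr);	/* EFR while unlocked */
	efr |= set;
	efr &= ~clear;
	writeb(efr, &ch->ch_cls_uart->isr_fcr);

	writeb(lcr, &ch->ch_cls_uart->lcr);	/* back to the normal map */
}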
+ */ + writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); + + isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Turn on IXON flow control, turn off CTS flow control */ + isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXON); + isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR); + + writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); + + /* Now set our current start/stop chars while in enhanced mode */ + writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); + writeb(0, &ch->ch_cls_uart->lsr); + writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); + writeb(0, &ch->ch_cls_uart->spr); + + /* Write old LCR value back out, which turns enhanced access off */ + writeb(lcrb, &ch->ch_cls_uart->lcr); + + /* + * Disable interrupts for CTS flow, turn on interrupts for + * received XOFF chars + */ + ier &= ~(UART_EXAR654_IER_CTSDSR); + ier |= (UART_EXAR654_IER_XOFF); + writeb(ier, &ch->ch_cls_uart->ier); + + /* Set the usual FIFO values */ + writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); + + writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | + UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), + &ch->ch_cls_uart->isr_fcr); +} + +static void cls_set_no_output_flow_control(struct jsm_channel *ch) +{ + u8 lcrb = readb(&ch->ch_cls_uart->lcr); + u8 ier = readb(&ch->ch_cls_uart->ier); + u8 isr_fcr = 0; + + /* + * The Enhanced Register Set may only be accessed when + * the Line Control Register is set to 0xBFh. + */ + writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); + + isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Turn off IXON flow control, turn off CTS flow control */ + isr_fcr |= (UART_EXAR654_EFR_ECB); + isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR | UART_EXAR654_EFR_IXON); + + writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); + + /* Write old LCR value back out, which turns enhanced access off */ + writeb(lcrb, &ch->ch_cls_uart->lcr); + + /* + * Disable interrupts for CTS flow, turn off interrupts for + * received XOFF chars + */ + ier &= ~(UART_EXAR654_IER_CTSDSR); + ier &= ~(UART_EXAR654_IER_XOFF); + writeb(ier, &ch->ch_cls_uart->ier); + + /* Set the usual FIFO values */ + writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); + + writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | + UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), + &ch->ch_cls_uart->isr_fcr); + + ch->ch_r_watermark = 0; + ch->ch_t_tlevel = 16; + ch->ch_r_tlevel = 16; +} + +static void cls_set_rts_flow_control(struct jsm_channel *ch) +{ + u8 lcrb = readb(&ch->ch_cls_uart->lcr); + u8 ier = readb(&ch->ch_cls_uart->ier); + u8 isr_fcr = 0; + + /* + * The Enhanced Register Set may only be accessed when + * the Line Control Register is set to 0xBFh. 
+ */ + writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); + + isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Turn on RTS flow control, turn off IXOFF flow control */ + isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_RTSDTR); + isr_fcr &= ~(UART_EXAR654_EFR_IXOFF); + + writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); + + /* Write old LCR value back out, which turns enhanced access off */ + writeb(lcrb, &ch->ch_cls_uart->lcr); + + /* Enable interrupts for RTS flow */ + ier |= (UART_EXAR654_IER_RTSDTR); + writeb(ier, &ch->ch_cls_uart->ier); + + /* Set the usual FIFO values */ + writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); + + writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 | + UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), + &ch->ch_cls_uart->isr_fcr); + + ch->ch_r_watermark = 4; + ch->ch_r_tlevel = 8; +} + +static void cls_set_ixoff_flow_control(struct jsm_channel *ch) +{ + u8 lcrb = readb(&ch->ch_cls_uart->lcr); + u8 ier = readb(&ch->ch_cls_uart->ier); + u8 isr_fcr = 0; + + /* + * The Enhanced Register Set may only be accessed when + * the Line Control Register is set to 0xBFh. + */ + writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); + + isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Turn on IXOFF flow control, turn off RTS flow control */ + isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXOFF); + isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR); + + writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); + + /* Now set our current start/stop chars while in enhanced mode */ + writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); + writeb(0, &ch->ch_cls_uart->lsr); + writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); + writeb(0, &ch->ch_cls_uart->spr); + + /* Write old LCR value back out, which turns enhanced access off */ + writeb(lcrb, &ch->ch_cls_uart->lcr); + + /* Disable interrupts for RTS flow */ + ier &= ~(UART_EXAR654_IER_RTSDTR); + writeb(ier, &ch->ch_cls_uart->ier); + + /* Set the usual FIFO values */ + writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); + + writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | + UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), + &ch->ch_cls_uart->isr_fcr); +} + +static void cls_set_no_input_flow_control(struct jsm_channel *ch) +{ + u8 lcrb = readb(&ch->ch_cls_uart->lcr); + u8 ier = readb(&ch->ch_cls_uart->ier); + u8 isr_fcr = 0; + + /* + * The Enhanced Register Set may only be accessed when + * the Line Control Register is set to 0xBFh. + */ + writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); + + isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Turn off IXOFF flow control, turn off RTS flow control */ + isr_fcr |= (UART_EXAR654_EFR_ECB); + isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR | UART_EXAR654_EFR_IXOFF); + + writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); + + /* Write old LCR value back out, which turns enhanced access off */ + writeb(lcrb, &ch->ch_cls_uart->lcr); + + /* Disable interrupts for RTS flow */ + ier &= ~(UART_EXAR654_IER_RTSDTR); + writeb(ier, &ch->ch_cls_uart->ier); + + /* Set the usual FIFO values */ + writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); + + writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | + UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), + &ch->ch_cls_uart->isr_fcr); + + ch->ch_t_tlevel = 16; + ch->ch_r_tlevel = 16; +} + +/* + * cls_clear_break. + * Determines whether its time to shut off break condition. + * + * No locks are assumed to be held when calling this function. + * channel lock is held and released in this function. 
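/*
 * Clarifying sketch of what the IXON/IXOFF helpers above are doing when
 * they write ch_startc/ch_stopc through the mcr/lsr/msr/spr fields: while
 * LCR holds 0xBF those offsets address the XON1/XON2/XOFF1/XOFF2
 * characters instead of the usual registers (an editorial note on the 654
 * register map; the helper name below is illustrative).
 */
static void cls_program_xon_xoff(struct jsm_channel *ch)
{
	u8 lcr = readb(&ch->ch_cls_uart->lcr);

	writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
	writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);	/* XON1  */
	writeb(0, &ch->ch_cls_uart->lsr);		/* XON2  */
	writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);	/* XOFF1 */
	writeb(0, &ch->ch_cls_uart->spr);		/* XOFF2 */
	writeb(lcr, &ch->ch_cls_uart->lcr);		/* leave enhanced mode */
}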
+ */ +static void cls_clear_break(struct jsm_channel *ch) +{ + unsigned long lock_flags; + + spin_lock_irqsave(&ch->ch_lock, lock_flags); + + /* Turn break off, and unset some variables */ + if (ch->ch_flags & CH_BREAK_SENDING) { + u8 temp = readb(&ch->ch_cls_uart->lcr); + + writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); + + ch->ch_flags &= ~(CH_BREAK_SENDING); + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, + "clear break Finishing UART_LCR_SBC! finished: %lx\n", + jiffies); + } + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); +} + +static void cls_disable_receiver(struct jsm_channel *ch) +{ + u8 tmp = readb(&ch->ch_cls_uart->ier); + + tmp &= ~(UART_IER_RDI); + writeb(tmp, &ch->ch_cls_uart->ier); +} + +static void cls_enable_receiver(struct jsm_channel *ch) +{ + u8 tmp = readb(&ch->ch_cls_uart->ier); + + tmp |= (UART_IER_RDI); + writeb(tmp, &ch->ch_cls_uart->ier); +} + +/* Make the UART raise any of the output signals we want up */ +static void cls_assert_modem_signals(struct jsm_channel *ch) +{ + if (!ch) + return; + + writeb(ch->ch_mostat, &ch->ch_cls_uart->mcr); +} + +static void cls_copy_data_from_uart_to_queue(struct jsm_channel *ch) +{ + int qleft = 0; + u8 linestatus; + u8 error_mask = 0; + u16 head; + u16 tail; + unsigned long flags; + + if (!ch) + return; + + spin_lock_irqsave(&ch->ch_lock, flags); + + /* cache head and tail of queue */ + head = ch->ch_r_head & RQUEUEMASK; + tail = ch->ch_r_tail & RQUEUEMASK; + + ch->ch_cached_lsr = 0; + + /* Store how much space we have left in the queue */ + qleft = tail - head - 1; + if (qleft < 0) + qleft += RQUEUEMASK + 1; + + /* + * Create a mask to determine whether we should + * insert the character (if any) into our queue. + */ + if (ch->ch_c_iflag & IGNBRK) + error_mask |= UART_LSR_BI; + + while (1) { + /* + * Grab the linestatus register, we need to + * check to see if there is any data to read + */ + linestatus = readb(&ch->ch_cls_uart->lsr); + + /* Break out if there is no data to fetch */ + if (!(linestatus & UART_LSR_DR)) + break; + + /* + * Discard character if we are ignoring the error mask + * which in this case is the break signal. + */ + if (linestatus & error_mask) { + linestatus = 0; + readb(&ch->ch_cls_uart->txrx); + continue; + } + + /* + * If our queue is full, we have no choice but to drop some + * data. The assumption is that HWFLOW or SWFLOW should have + * stopped things way way before we got to this point. + * + * I decided that I wanted to ditch the oldest data first, + * I hope thats okay with everyone? Yes? Good. + */ + while (qleft < 1) { + tail = (tail + 1) & RQUEUEMASK; + ch->ch_r_tail = tail; + ch->ch_err_overrun++; + qleft++; + } + + ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE + | UART_LSR_FE); + ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx); + + qleft--; + + if (ch->ch_equeue[head] & UART_LSR_PE) + ch->ch_err_parity++; + if (ch->ch_equeue[head] & UART_LSR_BI) + ch->ch_err_break++; + if (ch->ch_equeue[head] & UART_LSR_FE) + ch->ch_err_frame++; + + /* Add to, and flip head if needed */ + head = (head + 1) & RQUEUEMASK; + ch->ch_rxcount++; + } + + /* + * Write new final heads to channel structure. 
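/*
 * Standalone restatement (plain C, helper name illustrative) of the queue
 * arithmetic used above: ch_rqueue is a power-of-two ring of RQUEUESIZE
 * bytes, indices wrap by masking with RQUEUEMASK, and one slot is kept
 * unused so a full queue never looks like an empty one.
 */
static int rqueue_free_space(u16 head, u16 tail)
{
	int qleft = (tail & RQUEUEMASK) - (head & RQUEUEMASK) - 1;

	if (qleft < 0)
		qleft += RQUEUEMASK + 1;
	return qleft;
}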
+ */ + ch->ch_r_head = head & RQUEUEMASK; + ch->ch_e_head = head & EQUEUEMASK; + + spin_unlock_irqrestore(&ch->ch_lock, flags); +} + +static void cls_copy_data_from_queue_to_uart(struct jsm_channel *ch) +{ + u16 tail; + int n; + int qlen; + u32 len_written = 0; + struct circ_buf *circ; + + if (!ch) + return; + + circ = &ch->uart_port.state->xmit; + + /* No data to write to the UART */ + if (uart_circ_empty(circ)) + return; + + /* If port is "stopped", don't send any data to the UART */ + if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) + return; + + /* We have to do it this way, because of the EXAR TXFIFO count bug. */ + if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) + return; + + n = 32; + + /* cache tail of queue */ + tail = circ->tail & (UART_XMIT_SIZE - 1); + qlen = uart_circ_chars_pending(circ); + + /* Find minimum of the FIFO space, versus queue length */ + n = min(n, qlen); + + while (n > 0) { + writeb(circ->buf[tail], &ch->ch_cls_uart->txrx); + tail = (tail + 1) & (UART_XMIT_SIZE - 1); + n--; + ch->ch_txcount++; + len_written++; + } + + /* Update the final tail */ + circ->tail = tail & (UART_XMIT_SIZE - 1); + + if (len_written > ch->ch_t_tlevel) + ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + + if (uart_circ_empty(circ)) + uart_write_wakeup(&ch->uart_port); +} + +static void cls_parse_modem(struct jsm_channel *ch, u8 signals) +{ + u8 msignals = signals; + + jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, + "neo_parse_modem: port: %d msignals: %x\n", + ch->ch_portnum, msignals); + + /* + * Scrub off lower bits. + * They signify delta's, which I don't care about + * Keep DDCD and DDSR though + */ + msignals &= 0xf8; + + if (msignals & UART_MSR_DDCD) + uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD); + if (msignals & UART_MSR_DDSR) + uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_CTS); + + if (msignals & UART_MSR_DCD) + ch->ch_mistat |= UART_MSR_DCD; + else + ch->ch_mistat &= ~UART_MSR_DCD; + + if (msignals & UART_MSR_DSR) + ch->ch_mistat |= UART_MSR_DSR; + else + ch->ch_mistat &= ~UART_MSR_DSR; + + if (msignals & UART_MSR_RI) + ch->ch_mistat |= UART_MSR_RI; + else + ch->ch_mistat &= ~UART_MSR_RI; + + if (msignals & UART_MSR_CTS) + ch->ch_mistat |= UART_MSR_CTS; + else + ch->ch_mistat &= ~UART_MSR_CTS; + + jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, + "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n", + ch->ch_portnum, + !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)); +} + +/* Parse the ISR register for the specific port */ +static inline void cls_parse_isr(struct jsm_board *brd, uint port) +{ + struct jsm_channel *ch; + u8 isr = 0; + unsigned long flags; + + /* + * No need to verify board pointer, it was already + * verified in the interrupt routine. 
+ */ + + if (port >= brd->nasync) + return; + + ch = brd->channels[port]; + if (!ch) + return; + + /* Here we try to figure out what caused the interrupt to happen */ + while (1) { + isr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Bail if no pending interrupt on port */ + if (isr & UART_IIR_NO_INT) + break; + + /* Receive Interrupt pending */ + if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) { + /* Read data from uart -> queue */ + cls_copy_data_from_uart_to_queue(ch); + jsm_check_queue_flow_control(ch); + } + + /* Transmit Hold register empty pending */ + if (isr & UART_IIR_THRI) { + /* Transfer data (if any) from Write Queue -> UART. */ + spin_lock_irqsave(&ch->ch_lock, flags); + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + spin_unlock_irqrestore(&ch->ch_lock, flags); + cls_copy_data_from_queue_to_uart(ch); + } + + /* + * CTS/RTS change of state: + * Don't need to do anything, the cls_parse_modem + * below will grab the updated modem signals. + */ + + /* Parse any modem signal changes */ + cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); + } +} + +/* Channel lock MUST be held before calling this function! */ +static void cls_flush_uart_write(struct jsm_channel *ch) +{ + u8 tmp = 0; + u8 i = 0; + + if (!ch) + return; + + writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), + &ch->ch_cls_uart->isr_fcr); + + for (i = 0; i < 10; i++) { + /* Check to see if the UART feels it completely flushed FIFO */ + tmp = readb(&ch->ch_cls_uart->isr_fcr); + if (tmp & UART_FCR_CLEAR_XMIT) { + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, + "Still flushing TX UART... i: %d\n", i); + udelay(10); + } else + break; + } + + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); +} + +/* Channel lock MUST be held before calling this function! */ +static void cls_flush_uart_read(struct jsm_channel *ch) +{ + if (!ch) + return; + + /* + * For complete POSIX compatibility, we should be purging the + * read FIFO in the UART here. + * + * However, clearing the read FIFO (UART_FCR_CLEAR_RCVR) also + * incorrectly flushes write data as well as just basically trashing the + * FIFO. + * + * Presumably, this is a bug in this UART. + */ + + udelay(10); +} + +static void cls_send_start_character(struct jsm_channel *ch) +{ + if (!ch) + return; + + if (ch->ch_startc != __DISABLED_CHAR) { + ch->ch_xon_sends++; + writeb(ch->ch_startc, &ch->ch_cls_uart->txrx); + } +} + +static void cls_send_stop_character(struct jsm_channel *ch) +{ + if (!ch) + return; + + if (ch->ch_stopc != __DISABLED_CHAR) { + ch->ch_xoff_sends++; + writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx); + } +} + +/* + * cls_param() + * Send any/all changes to the line to the UART. + */ +static void cls_param(struct jsm_channel *ch) +{ + u8 lcr = 0; + u8 uart_lcr = 0; + u8 ier = 0; + u32 baud = 9600; + int quot = 0; + struct jsm_board *bd; + int i; + unsigned int cflag; + + bd = ch->ch_bd; + if (!bd) + return; + + /* + * If baud rate is zero, flush queues, and set mval to drop DTR. + */ + if ((ch->ch_c_cflag & CBAUD) == B0) { + ch->ch_r_head = 0; + ch->ch_r_tail = 0; + ch->ch_e_head = 0; + ch->ch_e_tail = 0; + + cls_flush_uart_write(ch); + cls_flush_uart_read(ch); + + /* The baudrate is B0 so all modem lines are to be dropped. 
*/ + ch->ch_flags |= (CH_BAUD0); + ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); + cls_assert_modem_signals(ch); + return; + } + + cflag = C_BAUD(ch->uart_port.state->port.tty); + baud = 9600; + for (i = 0; i < ARRAY_SIZE(baud_rates); i++) { + if (baud_rates[i].cflag == cflag) { + baud = baud_rates[i].rate; + break; + } + } + + if (ch->ch_flags & CH_BAUD0) + ch->ch_flags &= ~(CH_BAUD0); + + if (ch->ch_c_cflag & PARENB) + lcr |= UART_LCR_PARITY; + + if (!(ch->ch_c_cflag & PARODD)) + lcr |= UART_LCR_EPAR; + + if (ch->ch_c_cflag & CMSPAR) + lcr |= UART_LCR_SPAR; + + if (ch->ch_c_cflag & CSTOPB) + lcr |= UART_LCR_STOP; + + lcr |= UART_LCR_WLEN(tty_get_char_size(ch->ch_c_cflag)); + + ier = readb(&ch->ch_cls_uart->ier); + uart_lcr = readb(&ch->ch_cls_uart->lcr); + + quot = ch->ch_bd->bd_dividend / baud; + + if (quot != 0) { + writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr); + writeb((quot & 0xff), &ch->ch_cls_uart->txrx); + writeb((quot >> 8), &ch->ch_cls_uart->ier); + writeb(lcr, &ch->ch_cls_uart->lcr); + } + + if (uart_lcr != lcr) + writeb(lcr, &ch->ch_cls_uart->lcr); + + if (ch->ch_c_cflag & CREAD) + ier |= (UART_IER_RDI | UART_IER_RLSI); + + ier |= (UART_IER_THRI | UART_IER_MSI); + + writeb(ier, &ch->ch_cls_uart->ier); + + if (ch->ch_c_cflag & CRTSCTS) + cls_set_cts_flow_control(ch); + else if (ch->ch_c_iflag & IXON) { + /* + * If start/stop is set to disable, + * then we should disable flow control. + */ + if ((ch->ch_startc == __DISABLED_CHAR) || + (ch->ch_stopc == __DISABLED_CHAR)) + cls_set_no_output_flow_control(ch); + else + cls_set_ixon_flow_control(ch); + } else + cls_set_no_output_flow_control(ch); + + if (ch->ch_c_cflag & CRTSCTS) + cls_set_rts_flow_control(ch); + else if (ch->ch_c_iflag & IXOFF) { + /* + * If start/stop is set to disable, + * then we should disable flow control. + */ + if ((ch->ch_startc == __DISABLED_CHAR) || + (ch->ch_stopc == __DISABLED_CHAR)) + cls_set_no_input_flow_control(ch); + else + cls_set_ixoff_flow_control(ch); + } else + cls_set_no_input_flow_control(ch); + + cls_assert_modem_signals(ch); + + /* get current status of the modem signals now */ + cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); +} + +/* + * cls_intr() + * + * Classic specific interrupt handler. + */ +static irqreturn_t cls_intr(int irq, void *voidbrd) +{ + struct jsm_board *brd = voidbrd; + unsigned long lock_flags; + unsigned char uart_poll; + uint i = 0; + + /* Lock out the slow poller from running on this board. */ + spin_lock_irqsave(&brd->bd_intr_lock, lock_flags); + + /* + * Check the board's global interrupt offset to see if we + * acctually do have an interrupt pending on us. + */ + uart_poll = readb(brd->re_map_membase + UART_CLASSIC_POLL_ADDR_OFFSET); + + jsm_dbg(INTR, &brd->pci_dev, "%s:%d uart_poll: %x\n", + __FILE__, __LINE__, uart_poll); + + if (!uart_poll) { + jsm_dbg(INTR, &brd->pci_dev, + "Kernel interrupted to me, but no pending interrupts...\n"); + spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); + return IRQ_NONE; + } + + /* At this point, we have at least SOMETHING to service, dig further. 
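/*
 * Worked example of the divisor programming in cls_param() above (the
 * helper name is illustrative): bd_dividend is 921600 for these boards,
 * as set in jsm_driver.c later in this patch, and while UART_LCR_DLAB is
 * set the txrx and ier offsets act as the DLL/DLM divisor latch, which is
 * why quot is written out there a byte at a time.
 */
static u16 cls_baud_divisor(u32 bd_dividend, u32 baud)
{
	return bd_dividend / baud;	/* e.g. 921600 / 115200 == 8 */
}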
*/ + + /* Parse each port to find out what caused the interrupt */ + for (i = 0; i < brd->nasync; i++) + cls_parse_isr(brd, i); + + spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); + + return IRQ_HANDLED; +} + +/* Inits UART */ +static void cls_uart_init(struct jsm_channel *ch) +{ + unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); + unsigned char isr_fcr = 0; + + writeb(0, &ch->ch_cls_uart->ier); + + /* + * The Enhanced Register Set may only be accessed when + * the Line Control Register is set to 0xBFh. + */ + writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); + + isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); + + /* Turn on Enhanced/Extended controls */ + isr_fcr |= (UART_EXAR654_EFR_ECB); + + writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); + + /* Write old LCR value back out, which turns enhanced access off */ + writeb(lcrb, &ch->ch_cls_uart->lcr); + + /* Clear out UART and FIFO */ + readb(&ch->ch_cls_uart->txrx); + + writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), + &ch->ch_cls_uart->isr_fcr); + udelay(10); + + ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + + readb(&ch->ch_cls_uart->lsr); + readb(&ch->ch_cls_uart->msr); +} + +/* + * Turns off UART. + */ +static void cls_uart_off(struct jsm_channel *ch) +{ + /* Stop all interrupts from accurring. */ + writeb(0, &ch->ch_cls_uart->ier); +} + +/* + * cls_get_uarts_bytes_left. + * Returns 0 is nothing left in the FIFO, returns 1 otherwise. + * + * The channel lock MUST be held by the calling function. + */ +static u32 cls_get_uart_bytes_left(struct jsm_channel *ch) +{ + u8 left = 0; + u8 lsr = readb(&ch->ch_cls_uart->lsr); + + /* Determine whether the Transmitter is empty or not */ + if (!(lsr & UART_LSR_TEMT)) + left = 1; + else { + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + left = 0; + } + + return left; +} + +/* + * cls_send_break. + * Starts sending a break thru the UART. + * + * The channel lock MUST be held by the calling function. + */ +static void cls_send_break(struct jsm_channel *ch) +{ + /* Tell the UART to start sending the break */ + if (!(ch->ch_flags & CH_BREAK_SENDING)) { + u8 temp = readb(&ch->ch_cls_uart->lcr); + + writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr); + ch->ch_flags |= (CH_BREAK_SENDING); + } +} + +/* + * cls_send_immediate_char. + * Sends a specific character as soon as possible to the UART, + * jumping over any bytes that might be in the write queue. + * + * The channel lock MUST be held by the calling function. 
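/*
 * Illustrative caller (not part of this patch) showing how the
 * jsm_cls_ops table that follows is consumed: the core never calls the
 * cls_* routines directly, it dispatches through the per-board ops
 * pointer, so the same call reaches neo_param() on Neo hardware.  The
 * function name here is an assumption for illustration.
 */
static void example_apply_termios(struct jsm_channel *ch)
{
	ch->ch_bd->bd_ops->param(ch);	/* resolves to cls_param() here */
}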
+ */ +static void cls_send_immediate_char(struct jsm_channel *ch, unsigned char c) +{ + writeb(c, &ch->ch_cls_uart->txrx); +} + +struct board_ops jsm_cls_ops = { + .intr = cls_intr, + .uart_init = cls_uart_init, + .uart_off = cls_uart_off, + .param = cls_param, + .assert_modem_signals = cls_assert_modem_signals, + .flush_uart_write = cls_flush_uart_write, + .flush_uart_read = cls_flush_uart_read, + .disable_receiver = cls_disable_receiver, + .enable_receiver = cls_enable_receiver, + .send_break = cls_send_break, + .clear_break = cls_clear_break, + .send_start_character = cls_send_start_character, + .send_stop_character = cls_send_stop_character, + .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart, + .get_uart_bytes_left = cls_get_uart_bytes_left, + .send_immediate_char = cls_send_immediate_char +}; + diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c new file mode 100644 index 000000000..417a5b6bf --- /dev/null +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0+ +/************************************************************************ + * Copyright 2003 Digi International (www.digi.com) + * + * Copyright (C) 2004 IBM Corporation. All rights reserved. + * + * Contact Information: + * Scott H Kilau + * Wendy Xiong + * + * + ***********************************************************************/ +#include +#include +#include + +#include "jsm.h" + +MODULE_AUTHOR("Digi International, https://www.digi.com"); +MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line"); +MODULE_LICENSE("GPL"); + +#define JSM_DRIVER_NAME "jsm" +#define NR_PORTS 32 +#define JSM_MINOR_START 0 + +struct uart_driver jsm_uart_driver = { + .owner = THIS_MODULE, + .driver_name = JSM_DRIVER_NAME, + .dev_name = "ttyn", + .major = 0, + .minor = JSM_MINOR_START, + .nr = NR_PORTS, +}; + +static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev); +static void jsm_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers jsm_err_handler = { + .error_detected = jsm_io_error_detected, + .slot_reset = jsm_io_slot_reset, + .resume = jsm_io_resume, +}; + +int jsm_debug; +module_param(jsm_debug, int, 0); +MODULE_PARM_DESC(jsm_debug, "Driver debugging level"); + +static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int rc = 0; + struct jsm_board *brd; + static int adapter_count; + + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "Device enable FAILED\n"); + goto out; + } + + rc = pci_request_regions(pdev, JSM_DRIVER_NAME); + if (rc) { + dev_err(&pdev->dev, "pci_request_region FAILED\n"); + goto out_disable_device; + } + + brd = kzalloc(sizeof(*brd), GFP_KERNEL); + if (!brd) { + rc = -ENOMEM; + goto out_release_regions; + } + + /* store the info for the board we've found */ + brd->boardnum = adapter_count++; + brd->pci_dev = pdev; + + switch (pdev->device) { + case PCI_DEVICE_ID_NEO_2DB9: + case PCI_DEVICE_ID_NEO_2DB9PRI: + case PCI_DEVICE_ID_NEO_2RJ45: + case PCI_DEVICE_ID_NEO_2RJ45PRI: + case PCI_DEVICE_ID_NEO_2_422_485: + brd->maxports = 2; + break; + + case PCI_DEVICE_ID_CLASSIC_4: + case PCI_DEVICE_ID_CLASSIC_4_422: + case PCI_DEVICE_ID_NEO_4: + case PCIE_DEVICE_ID_NEO_4: + case PCIE_DEVICE_ID_NEO_4RJ45: + case PCIE_DEVICE_ID_NEO_4_IBM: + brd->maxports = 4; + break; + + case PCI_DEVICE_ID_CLASSIC_8: + case 
PCI_DEVICE_ID_CLASSIC_8_422: + case PCI_DEVICE_ID_DIGI_NEO_8: + case PCIE_DEVICE_ID_NEO_8: + case PCIE_DEVICE_ID_NEO_8RJ45: + brd->maxports = 8; + break; + + default: + brd->maxports = 1; + break; + } + + spin_lock_init(&brd->bd_intr_lock); + + /* store which revision we have */ + brd->rev = pdev->revision; + + brd->irq = pdev->irq; + + switch (pdev->device) { + case PCI_DEVICE_ID_CLASSIC_4: + case PCI_DEVICE_ID_CLASSIC_4_422: + case PCI_DEVICE_ID_CLASSIC_8: + case PCI_DEVICE_ID_CLASSIC_8_422: + + jsm_dbg(INIT, &brd->pci_dev, + "jsm_found_board - Classic adapter\n"); + + /* + * For PCI ClassicBoards + * PCI Local Address (.i.e. "resource" number) space + * 0 PLX Memory Mapped Config + * 1 PLX I/O Mapped Config + * 2 I/O Mapped UARTs and Status + * 3 Memory Mapped VPD + * 4 Memory Mapped UARTs and Status + */ + + /* Get the PCI Base Address Registers */ + brd->membase = pci_resource_start(pdev, 4); + brd->membase_end = pci_resource_end(pdev, 4); + + if (brd->membase & 0x1) + brd->membase &= ~0x3; + else + brd->membase &= ~0xF; + + brd->iobase = pci_resource_start(pdev, 1); + brd->iobase_end = pci_resource_end(pdev, 1); + brd->iobase = ((unsigned int)(brd->iobase)) & 0xFFFE; + + /* Assign the board_ops struct */ + brd->bd_ops = &jsm_cls_ops; + + brd->bd_uart_offset = 0x8; + brd->bd_dividend = 921600; + + brd->re_map_membase = ioremap(brd->membase, + pci_resource_len(pdev, 4)); + if (!brd->re_map_membase) { + dev_err(&pdev->dev, + "Card has no PCI Memory resources, failing board.\n"); + rc = -ENOMEM; + goto out_kfree_brd; + } + + /* + * Enable Local Interrupt 1 (0x1), + * Local Interrupt 1 Polarity Active high (0x2), + * Enable PCI interrupt (0x43) + */ + outb(0x43, brd->iobase + 0x4c); + + break; + + case PCI_DEVICE_ID_NEO_2DB9: + case PCI_DEVICE_ID_NEO_2DB9PRI: + case PCI_DEVICE_ID_NEO_2RJ45: + case PCI_DEVICE_ID_NEO_2RJ45PRI: + case PCI_DEVICE_ID_NEO_2_422_485: + case PCI_DEVICE_ID_NEO_4: + case PCIE_DEVICE_ID_NEO_4: + case PCIE_DEVICE_ID_NEO_4RJ45: + case PCIE_DEVICE_ID_NEO_4_IBM: + case PCI_DEVICE_ID_DIGI_NEO_8: + case PCIE_DEVICE_ID_NEO_8: + case PCIE_DEVICE_ID_NEO_8RJ45: + + jsm_dbg(INIT, &brd->pci_dev, "jsm_found_board - NEO adapter\n"); + + /* get the PCI Base Address Registers */ + brd->membase = pci_resource_start(pdev, 0); + brd->membase_end = pci_resource_end(pdev, 0); + + if (brd->membase & 1) + brd->membase &= ~0x3; + else + brd->membase &= ~0xF; + + /* Assign the board_ops struct */ + brd->bd_ops = &jsm_neo_ops; + + brd->bd_uart_offset = 0x200; + brd->bd_dividend = 921600; + + brd->re_map_membase = ioremap(brd->membase, + pci_resource_len(pdev, 0)); + if (!brd->re_map_membase) { + dev_err(&pdev->dev, + "Card has no PCI Memory resources, failing board.\n"); + rc = -ENOMEM; + goto out_kfree_brd; + } + + break; + default: + rc = -ENXIO; + goto out_kfree_brd; + } + + rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd); + if (rc) { + dev_warn(&pdev->dev, "Failed to hook IRQ %d\n", brd->irq); + goto out_iounmap; + } + + rc = jsm_tty_init(brd); + if (rc < 0) { + dev_err(&pdev->dev, "Can't init tty devices (%d)\n", rc); + rc = -ENXIO; + goto out_free_irq; + } + + rc = jsm_uart_port_init(brd); + if (rc < 0) { + /* XXX: leaking all resources from jsm_tty_init here! 
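/*
 * Aside (illustrative helper, not part of the driver): the membase
 * masking above mirrors the PCI BAR encoding.  Bit 0 set marks an I/O
 * BAR whose address starts above bit 1; bit 0 clear marks a memory BAR
 * whose type/prefetch bits occupy bits 1-3; hence the low 2 or 4 bits
 * are stripped before ioremap().
 */
static u64 pci_bar_to_address(u64 bar)
{
	return (bar & 0x1) ? (bar & ~0x3ULL) : (bar & ~0xFULL);
}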
*/ + dev_err(&pdev->dev, "Can't init uart port (%d)\n", rc); + rc = -ENXIO; + goto out_free_irq; + } + + /* Log the information about the board */ + dev_info(&pdev->dev, "board %d: Digi Classic/Neo (rev %d), irq %d\n", + adapter_count, brd->rev, brd->irq); + + pci_set_drvdata(pdev, brd); + pci_save_state(pdev); + + return 0; + out_free_irq: + jsm_remove_uart_port(brd); + free_irq(brd->irq, brd); + out_iounmap: + iounmap(brd->re_map_membase); + out_kfree_brd: + kfree(brd); + out_release_regions: + pci_release_regions(pdev); + out_disable_device: + pci_disable_device(pdev); + out: + return rc; +} + +static void jsm_remove_one(struct pci_dev *pdev) +{ + struct jsm_board *brd = pci_get_drvdata(pdev); + int i = 0; + + switch (pdev->device) { + case PCI_DEVICE_ID_CLASSIC_4: + case PCI_DEVICE_ID_CLASSIC_4_422: + case PCI_DEVICE_ID_CLASSIC_8: + case PCI_DEVICE_ID_CLASSIC_8_422: + /* Tell card not to interrupt anymore. */ + outb(0x0, brd->iobase + 0x4c); + break; + default: + break; + } + + jsm_remove_uart_port(brd); + + free_irq(brd->irq, brd); + iounmap(brd->re_map_membase); + + /* Free all allocated channels structs */ + for (i = 0; i < brd->maxports; i++) { + if (brd->channels[i]) { + kfree(brd->channels[i]->ch_rqueue); + kfree(brd->channels[i]->ch_equeue); + kfree(brd->channels[i]); + } + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(brd); +} + +static const struct pci_device_id jsm_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9), 0, 0, 0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9PRI), 0, 0, 1 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4_IBM), 0, 0, 4 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_NEO_8), 0, 0, 5 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_4), 0, 0, 6 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_1_422), 0, 0, 7 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_1_422_485), 0, 0, 8 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2_422_485), 0, 0, 9 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_8), 0, 0, 10 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4), 0, 0, 11 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4RJ45), 0, 0, 12 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_8RJ45), 0, 0, 13 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_4), 0, 0, 14 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_4_422), 0, 0, 15 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_8), 0, 0, 16 }, + { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_8_422), 0, 0, 17 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, jsm_pci_tbl); + +static struct pci_driver jsm_driver = { + .name = JSM_DRIVER_NAME, + .id_table = jsm_pci_tbl, + .probe = jsm_probe_one, + .remove = jsm_remove_one, + .err_handler = &jsm_err_handler, +}; + +static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct jsm_board *brd = pci_get_drvdata(pdev); + + jsm_remove_uart_port(brd); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev) +{ + int rc; + + rc = pci_enable_device(pdev); + + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + + pci_set_master(pdev); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void jsm_io_resume(struct pci_dev *pdev) +{ + struct jsm_board *brd = pci_get_drvdata(pdev); 
+ + pci_restore_state(pdev); + pci_save_state(pdev); + + jsm_uart_port_init(brd); +} + +static int __init jsm_init_module(void) +{ + int rc; + + rc = uart_register_driver(&jsm_uart_driver); + if (!rc) { + rc = pci_register_driver(&jsm_driver); + if (rc) + uart_unregister_driver(&jsm_uart_driver); + } + return rc; +} + +static void __exit jsm_exit_module(void) +{ + pci_unregister_driver(&jsm_driver); + uart_unregister_driver(&jsm_uart_driver); +} + +module_init(jsm_init_module); +module_exit(jsm_exit_module); diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c new file mode 100644 index 000000000..0c78f6627 --- /dev/null +++ b/drivers/tty/serial/jsm/jsm_neo.c @@ -0,0 +1,1387 @@ +// SPDX-License-Identifier: GPL-2.0+ +/************************************************************************ + * Copyright 2003 Digi International (www.digi.com) + * + * Copyright (C) 2004 IBM Corporation. All rights reserved. + * + * Contact Information: + * Scott H Kilau + * Wendy Xiong + * + ***********************************************************************/ +#include /* For udelay */ +#include /* For the various UART offsets */ +#include +#include +#include + +#include "jsm.h" /* Driver main header file */ + +static u32 jsm_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; + +/* + * This function allows calls to ensure that all outstanding + * PCI writes have been completed, by doing a PCI read against + * a non-destructive, read-only location on the Neo card. + * + * In this case, we are reading the DVID (Read-only Device Identification) + * value of the Neo card. + */ +static inline void neo_pci_posting_flush(struct jsm_board *bd) +{ + readb(bd->re_map_membase + 0x8D); +} + +static void neo_set_cts_flow_control(struct jsm_channel *ch) +{ + u8 ier, efr; + ier = readb(&ch->ch_neo_uart->ier); + efr = readb(&ch->ch_neo_uart->efr); + + jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting CTSFLOW\n"); + + /* Turn on auto CTS flow control */ + ier |= (UART_17158_IER_CTSDSR); + efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR); + + /* Turn off auto Xon flow control */ + efr &= ~(UART_17158_EFR_IXON); + + /* Why? Becuz Exar's spec says we have to zero it out before setting it */ + writeb(0, &ch->ch_neo_uart->efr); + + /* Turn on UART enhanced bits */ + writeb(efr, &ch->ch_neo_uart->efr); + + /* Turn on table D, with 8 char hi/low watermarks */ + writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); + + /* Feed the UART our trigger levels */ + writeb(8, &ch->ch_neo_uart->tfifo); + ch->ch_t_tlevel = 8; + + writeb(ier, &ch->ch_neo_uart->ier); +} + +static void neo_set_rts_flow_control(struct jsm_channel *ch) +{ + u8 ier, efr; + ier = readb(&ch->ch_neo_uart->ier); + efr = readb(&ch->ch_neo_uart->efr); + + jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting RTSFLOW\n"); + + /* Turn on auto RTS flow control */ + ier |= (UART_17158_IER_RTSDTR); + efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR); + + /* Turn off auto Xoff flow control */ + ier &= ~(UART_17158_IER_XOFF); + efr &= ~(UART_17158_EFR_IXOFF); + + /* Why? 
Becuz Exar's spec says we have to zero it out before setting it */ + writeb(0, &ch->ch_neo_uart->efr); + + /* Turn on UART enhanced bits */ + writeb(efr, &ch->ch_neo_uart->efr); + + writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); + ch->ch_r_watermark = 4; + + writeb(56, &ch->ch_neo_uart->rfifo); + ch->ch_r_tlevel = 56; + + writeb(ier, &ch->ch_neo_uart->ier); + + /* + * From the Neo UART spec sheet: + * The auto RTS/DTR function must be started by asserting + * RTS/DTR# output pin (MCR bit-0 or 1 to logic 1 after + * it is enabled. + */ + ch->ch_mostat |= (UART_MCR_RTS); +} + + +static void neo_set_ixon_flow_control(struct jsm_channel *ch) +{ + u8 ier, efr; + ier = readb(&ch->ch_neo_uart->ier); + efr = readb(&ch->ch_neo_uart->efr); + + jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting IXON FLOW\n"); + + /* Turn off auto CTS flow control */ + ier &= ~(UART_17158_IER_CTSDSR); + efr &= ~(UART_17158_EFR_CTSDSR); + + /* Turn on auto Xon flow control */ + efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON); + + /* Why? Becuz Exar's spec says we have to zero it out before setting it */ + writeb(0, &ch->ch_neo_uart->efr); + + /* Turn on UART enhanced bits */ + writeb(efr, &ch->ch_neo_uart->efr); + + writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); + ch->ch_r_watermark = 4; + + writeb(32, &ch->ch_neo_uart->rfifo); + ch->ch_r_tlevel = 32; + + /* Tell UART what start/stop chars it should be looking for */ + writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); + writeb(0, &ch->ch_neo_uart->xonchar2); + + writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); + writeb(0, &ch->ch_neo_uart->xoffchar2); + + writeb(ier, &ch->ch_neo_uart->ier); +} + +static void neo_set_ixoff_flow_control(struct jsm_channel *ch) +{ + u8 ier, efr; + ier = readb(&ch->ch_neo_uart->ier); + efr = readb(&ch->ch_neo_uart->efr); + + jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting IXOFF FLOW\n"); + + /* Turn off auto RTS flow control */ + ier &= ~(UART_17158_IER_RTSDTR); + efr &= ~(UART_17158_EFR_RTSDTR); + + /* Turn on auto Xoff flow control */ + ier |= (UART_17158_IER_XOFF); + efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); + + /* Why? Becuz Exar's spec says we have to zero it out before setting it */ + writeb(0, &ch->ch_neo_uart->efr); + + /* Turn on UART enhanced bits */ + writeb(efr, &ch->ch_neo_uart->efr); + + /* Turn on table D, with 8 char hi/low watermarks */ + writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); + + writeb(8, &ch->ch_neo_uart->tfifo); + ch->ch_t_tlevel = 8; + + /* Tell UART what start/stop chars it should be looking for */ + writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); + writeb(0, &ch->ch_neo_uart->xonchar2); + + writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); + writeb(0, &ch->ch_neo_uart->xoffchar2); + + writeb(ier, &ch->ch_neo_uart->ier); +} + +static void neo_set_no_input_flow_control(struct jsm_channel *ch) +{ + u8 ier, efr; + ier = readb(&ch->ch_neo_uart->ier); + efr = readb(&ch->ch_neo_uart->efr); + + jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Unsetting Input FLOW\n"); + + /* Turn off auto RTS flow control */ + ier &= ~(UART_17158_IER_RTSDTR); + efr &= ~(UART_17158_EFR_RTSDTR); + + /* Turn off auto Xoff flow control */ + ier &= ~(UART_17158_IER_XOFF); + if (ch->ch_c_iflag & IXON) + efr &= ~(UART_17158_EFR_IXOFF); + else + efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); + + /* Why? 
Becuz Exar's spec says we have to zero it out before setting it */ + writeb(0, &ch->ch_neo_uart->efr); + + /* Turn on UART enhanced bits */ + writeb(efr, &ch->ch_neo_uart->efr); + + /* Turn on table D, with 8 char hi/low watermarks */ + writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); + + ch->ch_r_watermark = 0; + + writeb(16, &ch->ch_neo_uart->tfifo); + ch->ch_t_tlevel = 16; + + writeb(16, &ch->ch_neo_uart->rfifo); + ch->ch_r_tlevel = 16; + + writeb(ier, &ch->ch_neo_uart->ier); +} + +static void neo_set_no_output_flow_control(struct jsm_channel *ch) +{ + u8 ier, efr; + ier = readb(&ch->ch_neo_uart->ier); + efr = readb(&ch->ch_neo_uart->efr); + + jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Unsetting Output FLOW\n"); + + /* Turn off auto CTS flow control */ + ier &= ~(UART_17158_IER_CTSDSR); + efr &= ~(UART_17158_EFR_CTSDSR); + + /* Turn off auto Xon flow control */ + if (ch->ch_c_iflag & IXOFF) + efr &= ~(UART_17158_EFR_IXON); + else + efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON); + + /* Why? Becuz Exar's spec says we have to zero it out before setting it */ + writeb(0, &ch->ch_neo_uart->efr); + + /* Turn on UART enhanced bits */ + writeb(efr, &ch->ch_neo_uart->efr); + + /* Turn on table D, with 8 char hi/low watermarks */ + writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); + + ch->ch_r_watermark = 0; + + writeb(16, &ch->ch_neo_uart->tfifo); + ch->ch_t_tlevel = 16; + + writeb(16, &ch->ch_neo_uart->rfifo); + ch->ch_r_tlevel = 16; + + writeb(ier, &ch->ch_neo_uart->ier); +} + +static inline void neo_set_new_start_stop_chars(struct jsm_channel *ch) +{ + + /* if hardware flow control is set, then skip this whole thing */ + if (ch->ch_c_cflag & CRTSCTS) + return; + + jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "start\n"); + + /* Tell UART what start/stop chars it should be looking for */ + writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); + writeb(0, &ch->ch_neo_uart->xonchar2); + + writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); + writeb(0, &ch->ch_neo_uart->xoffchar2); +} + +static void neo_copy_data_from_uart_to_queue(struct jsm_channel *ch) +{ + int qleft = 0; + u8 linestatus = 0; + u8 error_mask = 0; + int n = 0; + int total = 0; + u16 head; + u16 tail; + + /* cache head and tail of queue */ + head = ch->ch_r_head & RQUEUEMASK; + tail = ch->ch_r_tail & RQUEUEMASK; + + /* Get our cached LSR */ + linestatus = ch->ch_cached_lsr; + ch->ch_cached_lsr = 0; + + /* Store how much space we have left in the queue */ + qleft = tail - head - 1; + if (qleft < 0) + qleft += RQUEUEMASK + 1; + + /* + * If the UART is not in FIFO mode, force the FIFO copy to + * NOT be run, by setting total to 0. + * + * On the other hand, if the UART IS in FIFO mode, then ask + * the UART to give us an approximation of data it has RX'ed. + */ + if (!(ch->ch_flags & CH_FIFO_ENABLED)) + total = 0; + else { + total = readb(&ch->ch_neo_uart->rfifo); + + /* + * EXAR chip bug - RX FIFO COUNT - Fudge factor. + * + * This resolves a problem/bug with the Exar chip that sometimes + * returns a bogus value in the rfifo register. + * The count can be any where from 0-3 bytes "off". + * Bizarre, but true. + */ + total -= 3; + } + + /* + * Finally, bound the copy to make sure we don't overflow + * our own queue... + * The byte by byte copy loop below this loop this will + * deal with the queue overflow possibility. 
+ */ + total = min(total, qleft); + + while (total > 0) { + /* + * Grab the linestatus register, we need to check + * to see if there are any errors in the FIFO. + */ + linestatus = readb(&ch->ch_neo_uart->lsr); + + /* + * Break out if there is a FIFO error somewhere. + * This will allow us to go byte by byte down below, + * finding the exact location of the error. + */ + if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) + break; + + /* Make sure we don't go over the end of our queue */ + n = min(((u32) total), (RQUEUESIZE - (u32) head)); + + /* + * Cut down n even further if needed, this is to fix + * a problem with memcpy_fromio() with the Neo on the + * IBM pSeries platform. + * 15 bytes max appears to be the magic number. + */ + n = min((u32) n, (u32) 12); + + /* + * Since we are grabbing the linestatus register, which + * will reset some bits after our read, we need to ensure + * we don't miss our TX FIFO emptys. + */ + if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + + linestatus = 0; + + /* Copy data from uart to the queue */ + memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n); + /* + * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed + * that all the data currently in the FIFO is free of + * breaks and parity/frame/orun errors. + */ + memset(ch->ch_equeue + head, 0, n); + + /* Add to and flip head if needed */ + head = (head + n) & RQUEUEMASK; + total -= n; + qleft -= n; + ch->ch_rxcount += n; + } + + /* + * Create a mask to determine whether we should + * insert the character (if any) into our queue. + */ + if (ch->ch_c_iflag & IGNBRK) + error_mask |= UART_LSR_BI; + + /* + * Now cleanup any leftover bytes still in the UART. + * Also deal with any possible queue overflow here as well. + */ + while (1) { + + /* + * Its possible we have a linestatus from the loop above + * this, so we "OR" on any extra bits. + */ + linestatus |= readb(&ch->ch_neo_uart->lsr); + + /* + * If the chip tells us there is no more data pending to + * be read, we can then leave. + * But before we do, cache the linestatus, just in case. + */ + if (!(linestatus & UART_LSR_DR)) { + ch->ch_cached_lsr = linestatus; + break; + } + + /* No need to store this bit */ + linestatus &= ~UART_LSR_DR; + + /* + * Since we are grabbing the linestatus register, which + * will reset some bits after our read, we need to ensure + * we don't miss our TX FIFO emptys. + */ + if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) { + linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR); + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + } + + /* + * Discard character if we are ignoring the error mask. + */ + if (linestatus & error_mask) { + u8 discard; + linestatus = 0; + memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1); + continue; + } + + /* + * If our queue is full, we have no choice but to drop some data. + * The assumption is that HWFLOW or SWFLOW should have stopped + * things way way before we got to this point. + * + * I decided that I wanted to ditch the oldest data first, + * I hope thats okay with everyone? Yes? Good. 
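/*
 * Summary sketch (helper name illustrative) of the bounds applied by the
 * burst copy above: the chunk size is limited by the FIFO estimate, by
 * the free space in the receive queue, by the distance to the end of the
 * linear buffer, and finally by the 12-byte cap that works around the
 * memcpy_fromio() problem noted for the IBM pSeries platform.
 */
static u32 neo_burst_len(u32 fifo_total, u32 qleft, u32 head)
{
	u32 n = fifo_total < qleft ? fifo_total : qleft;

	if (n > RQUEUESIZE - head)	/* stay inside the linear buffer */
		n = RQUEUESIZE - head;
	if (n > 12)			/* pSeries memcpy_fromio() workaround */
		n = 12;
	return n;
}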
+ */ + while (qleft < 1) { + jsm_dbg(READ, &ch->ch_bd->pci_dev, + "Queue full, dropping DATA:%x LSR:%x\n", + ch->ch_rqueue[tail], ch->ch_equeue[tail]); + + ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK; + ch->ch_err_overrun++; + qleft++; + } + + memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1); + ch->ch_equeue[head] = (u8) linestatus; + + jsm_dbg(READ, &ch->ch_bd->pci_dev, "DATA/LSR pair: %x %x\n", + ch->ch_rqueue[head], ch->ch_equeue[head]); + + /* Ditch any remaining linestatus value. */ + linestatus = 0; + + /* Add to and flip head if needed */ + head = (head + 1) & RQUEUEMASK; + + qleft--; + ch->ch_rxcount++; + } + + /* + * Write new final heads to channel structure. + */ + ch->ch_r_head = head & RQUEUEMASK; + ch->ch_e_head = head & EQUEUEMASK; + jsm_input(ch); +} + +static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch) +{ + u16 head; + u16 tail; + int n; + int s; + int qlen; + u32 len_written = 0; + struct circ_buf *circ; + + if (!ch) + return; + + circ = &ch->uart_port.state->xmit; + + /* No data to write to the UART */ + if (uart_circ_empty(circ)) + return; + + /* If port is "stopped", don't send any data to the UART */ + if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) + return; + /* + * If FIFOs are disabled. Send data directly to txrx register + */ + if (!(ch->ch_flags & CH_FIFO_ENABLED)) { + u8 lsrbits = readb(&ch->ch_neo_uart->lsr); + + ch->ch_cached_lsr |= lsrbits; + if (ch->ch_cached_lsr & UART_LSR_THRE) { + ch->ch_cached_lsr &= ~(UART_LSR_THRE); + + writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx); + jsm_dbg(WRITE, &ch->ch_bd->pci_dev, + "Tx data: %x\n", circ->buf[circ->tail]); + circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1); + ch->ch_txcount++; + } + return; + } + + /* + * We have to do it this way, because of the EXAR TXFIFO count bug. + */ + if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) + return; + + n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel; + + /* cache head and tail of queue */ + head = circ->head & (UART_XMIT_SIZE - 1); + tail = circ->tail & (UART_XMIT_SIZE - 1); + qlen = uart_circ_chars_pending(circ); + + /* Find minimum of the FIFO space, versus queue length */ + n = min(n, qlen); + + while (n > 0) { + + s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail; + s = min(s, n); + + if (s <= 0) + break; + + memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s); + /* Add and flip queue if needed */ + tail = (tail + s) & (UART_XMIT_SIZE - 1); + n -= s; + ch->ch_txcount += s; + len_written += s; + } + + /* Update the final tail */ + circ->tail = tail & (UART_XMIT_SIZE - 1); + + if (len_written >= ch->ch_t_tlevel) + ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + + if (uart_circ_empty(circ)) + uart_write_wakeup(&ch->uart_port); +} + +static void neo_parse_modem(struct jsm_channel *ch, u8 signals) +{ + u8 msignals = signals; + + jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, + "neo_parse_modem: port: %d msignals: %x\n", + ch->ch_portnum, msignals); + + /* Scrub off lower bits. 
They signify delta's, which I don't care about */ + /* Keep DDCD and DDSR though */ + msignals &= 0xf8; + + if (msignals & UART_MSR_DDCD) + uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD); + if (msignals & UART_MSR_DDSR) + uart_handle_cts_change(&ch->uart_port, msignals & UART_MSR_CTS); + if (msignals & UART_MSR_DCD) + ch->ch_mistat |= UART_MSR_DCD; + else + ch->ch_mistat &= ~UART_MSR_DCD; + + if (msignals & UART_MSR_DSR) + ch->ch_mistat |= UART_MSR_DSR; + else + ch->ch_mistat &= ~UART_MSR_DSR; + + if (msignals & UART_MSR_RI) + ch->ch_mistat |= UART_MSR_RI; + else + ch->ch_mistat &= ~UART_MSR_RI; + + if (msignals & UART_MSR_CTS) + ch->ch_mistat |= UART_MSR_CTS; + else + ch->ch_mistat &= ~UART_MSR_CTS; + + jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, + "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n", + ch->ch_portnum, + !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI), + !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)); +} + +/* Make the UART raise any of the output signals we want up */ +static void neo_assert_modem_signals(struct jsm_channel *ch) +{ + if (!ch) + return; + + writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); +} + +/* + * Flush the WRITE FIFO on the Neo. + * + * NOTE: Channel lock MUST be held before calling this function! + */ +static void neo_flush_uart_write(struct jsm_channel *ch) +{ + u8 tmp = 0; + int i = 0; + + if (!ch) + return; + + writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); + + for (i = 0; i < 10; i++) { + + /* Check to see if the UART feels it completely flushed the FIFO. */ + tmp = readb(&ch->ch_neo_uart->isr_fcr); + if (tmp & UART_FCR_CLEAR_XMIT) { + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, + "Still flushing TX UART... i: %d\n", i); + udelay(10); + } + else + break; + } + + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); +} + + +/* + * Flush the READ FIFO on the Neo. + * + * NOTE: Channel lock MUST be held before calling this function! + */ +static void neo_flush_uart_read(struct jsm_channel *ch) +{ + u8 tmp = 0; + int i = 0; + + if (!ch) + return; + + writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr); + + for (i = 0; i < 10; i++) { + + /* Check to see if the UART feels it completely flushed the FIFO. */ + tmp = readb(&ch->ch_neo_uart->isr_fcr); + if (tmp & 2) { + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, + "Still flushing RX UART... i: %d\n", i); + udelay(10); + } + else + break; + } +} + +/* + * No locks are assumed to be held when calling this function. + */ +static void neo_clear_break(struct jsm_channel *ch) +{ + unsigned long lock_flags; + + spin_lock_irqsave(&ch->ch_lock, lock_flags); + + /* Turn break off, and unset some variables */ + if (ch->ch_flags & CH_BREAK_SENDING) { + u8 temp = readb(&ch->ch_neo_uart->lcr); + writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr); + + ch->ch_flags &= ~(CH_BREAK_SENDING); + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, + "clear break Finishing UART_LCR_SBC! finished: %lx\n", + jiffies); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); + } + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); +} + +/* + * Parse the ISR register. 
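/*
 * Restatement of the write-posting idiom behind neo_pci_posting_flush(),
 * used by the helpers above (the function below is illustrative): after
 * MMIO writes that must reach the chip before execution continues, a
 * read of any harmless register on the same device, here the read-only
 * DVID at offset 0x8D, forces the posted writes out.
 */
static inline void example_posting_flush(u8 __iomem *membase)
{
	readb(membase + 0x8D);
}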
+ */ +static void neo_parse_isr(struct jsm_board *brd, u32 port) +{ + struct jsm_channel *ch; + u8 isr; + u8 cause; + unsigned long lock_flags; + + if (!brd) + return; + + if (port >= brd->maxports) + return; + + ch = brd->channels[port]; + if (!ch) + return; + + /* Here we try to figure out what caused the interrupt to happen */ + while (1) { + + isr = readb(&ch->ch_neo_uart->isr_fcr); + + /* Bail if no pending interrupt */ + if (isr & UART_IIR_NO_INT) + break; + + /* + * Yank off the upper 2 bits, which just show that the FIFO's are enabled. + */ + isr &= ~(UART_17158_IIR_FIFO_ENABLED); + + jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d isr: %x\n", + __FILE__, __LINE__, isr); + + if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) { + /* Read data from uart -> queue */ + neo_copy_data_from_uart_to_queue(ch); + + /* Call our tty layer to enforce queue flow control if needed. */ + spin_lock_irqsave(&ch->ch_lock, lock_flags); + jsm_check_queue_flow_control(ch); + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + } + + if (isr & UART_IIR_THRI) { + /* Transfer data (if any) from Write Queue -> UART. */ + spin_lock_irqsave(&ch->ch_lock, lock_flags); + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + neo_copy_data_from_queue_to_uart(ch); + } + + if (isr & UART_17158_IIR_XONXOFF) { + cause = readb(&ch->ch_neo_uart->xoffchar1); + + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "Port %d. Got ISR_XONXOFF: cause:%x\n", + port, cause); + + /* + * Since the UART detected either an XON or + * XOFF match, we need to figure out which + * one it was, so we can suspend or resume data flow. + */ + spin_lock_irqsave(&ch->ch_lock, lock_flags); + if (cause == UART_17158_XON_DETECT) { + /* Is output stopped right now, if so, resume it */ + if (brd->channels[port]->ch_flags & CH_STOP) { + ch->ch_flags &= ~(CH_STOP); + } + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "Port %d. XON detected in incoming data\n", + port); + } + else if (cause == UART_17158_XOFF_DETECT) { + if (!(brd->channels[port]->ch_flags & CH_STOP)) { + ch->ch_flags |= CH_STOP; + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "Setting CH_STOP\n"); + } + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "Port: %d. XOFF detected in incoming data\n", + port); + } + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + } + + if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) { + /* + * If we get here, this means the hardware is doing auto flow control. + * Check to see whether RTS/DTR or CTS/DSR caused this interrupt. + */ + cause = readb(&ch->ch_neo_uart->mcr); + + /* Which pin is doing auto flow? RTS or DTR? 
*/ + spin_lock_irqsave(&ch->ch_lock, lock_flags); + if ((cause & 0x4) == 0) { + if (cause & UART_MCR_RTS) + ch->ch_mostat |= UART_MCR_RTS; + else + ch->ch_mostat &= ~(UART_MCR_RTS); + } else { + if (cause & UART_MCR_DTR) + ch->ch_mostat |= UART_MCR_DTR; + else + ch->ch_mostat &= ~(UART_MCR_DTR); + } + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + } + + /* Parse any modem signal changes */ + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "MOD_STAT: sending to parse_modem_sigs\n"); + spin_lock_irqsave(&ch->uart_port.lock, lock_flags); + neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); + spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags); + } +} + +static inline void neo_parse_lsr(struct jsm_board *brd, u32 port) +{ + struct jsm_channel *ch; + int linestatus; + unsigned long lock_flags; + + if (!brd) + return; + + if (port >= brd->maxports) + return; + + ch = brd->channels[port]; + if (!ch) + return; + + linestatus = readb(&ch->ch_neo_uart->lsr); + + jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d port: %d linestatus: %x\n", + __FILE__, __LINE__, port, linestatus); + + ch->ch_cached_lsr |= linestatus; + + if (ch->ch_cached_lsr & UART_LSR_DR) { + /* Read data from uart -> queue */ + neo_copy_data_from_uart_to_queue(ch); + spin_lock_irqsave(&ch->ch_lock, lock_flags); + jsm_check_queue_flow_control(ch); + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + } + + /* + * This is a special flag. It indicates that at least 1 + * RX error (parity, framing, or break) has happened. + * Mark this in our struct, which will tell me that I have + *to do the special RX+LSR read for this FIFO load. + */ + if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "%s:%d Port: %d Got an RX error, need to parse LSR\n", + __FILE__, __LINE__, port); + + /* + * The next 3 tests should *NOT* happen, as the above test + * should encapsulate all 3... At least, thats what Exar says. + */ + + if (linestatus & UART_LSR_PE) { + ch->ch_err_parity++; + jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d Port: %d. PAR ERR!\n", + __FILE__, __LINE__, port); + } + + if (linestatus & UART_LSR_FE) { + ch->ch_err_frame++; + jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d Port: %d. FRM ERR!\n", + __FILE__, __LINE__, port); + } + + if (linestatus & UART_LSR_BI) { + ch->ch_err_break++; + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "%s:%d Port: %d. BRK INTR!\n", + __FILE__, __LINE__, port); + } + + if (linestatus & UART_LSR_OE) { + /* + * Rx Oruns. Exar says that an orun will NOT corrupt + * the FIFO. It will just replace the holding register + * with this new data byte. So basically just ignore this. + * Probably we should eventually have an orun stat in our driver... + */ + ch->ch_err_overrun++; + jsm_dbg(INTR, &ch->ch_bd->pci_dev, + "%s:%d Port: %d. Rx Overrun!\n", + __FILE__, __LINE__, port); + } + + if (linestatus & UART_LSR_THRE) { + spin_lock_irqsave(&ch->ch_lock, lock_flags); + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + + /* Transfer data (if any) from Write Queue -> UART. */ + neo_copy_data_from_queue_to_uart(ch); + } + else if (linestatus & UART_17158_TX_AND_FIFO_CLR) { + spin_lock_irqsave(&ch->ch_lock, lock_flags); + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + + /* Transfer data (if any) from Write Queue -> UART. */ + neo_copy_data_from_queue_to_uart(ch); + } +} + +/* + * neo_param() + * Send any/all changes to the line to the UART. 
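The line-status handling above never trusts a single LSR read: neo_parse_lsr() ORs each readb() of the register into ch->ch_cached_lsr because, as the driver itself notes further down in neo_get_uart_bytes_left(), some LSR bits are reset by the very act of reading them, and the transmit path later clears only the bits it has consumed. A minimal standalone sketch of that sticky-cache pattern, using made-up names rather than the driver's structures:

    #include <stdint.h>

    /*
     * Illustrative only: a "sticky" cache for a clear-on-read status register.
     * The jsm driver keeps such a cache in ch->ch_cached_lsr; these names are
     * stand-ins, not the driver's.
     */
    struct status_cache {
        uint8_t cached;               /* bits accumulate here until consumed */
        uint8_t (*read_reg)(void);    /* stand-in for readb(&uart->lsr)      */
    };

    /* Fold the live register into the cache so no event bit is lost. */
    static uint8_t status_poll(struct status_cache *sc)
    {
        sc->cached |= sc->read_reg();
        return sc->cached;
    }

    /* Clear only the bits the caller has now acted on, e.g. a THRE-style bit. */
    static void status_consume(struct status_cache *sc, uint8_t handled)
    {
        sc->cached &= (uint8_t)~handled;
    }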
+ */ +static void neo_param(struct jsm_channel *ch) +{ + u8 lcr = 0; + u8 uart_lcr, ier; + u32 baud; + int quot; + struct jsm_board *bd; + + bd = ch->ch_bd; + if (!bd) + return; + + /* + * If baud rate is zero, flush queues, and set mval to drop DTR. + */ + if ((ch->ch_c_cflag & CBAUD) == B0) { + ch->ch_r_head = ch->ch_r_tail = 0; + ch->ch_e_head = ch->ch_e_tail = 0; + + neo_flush_uart_write(ch); + neo_flush_uart_read(ch); + + ch->ch_flags |= (CH_BAUD0); + ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); + neo_assert_modem_signals(ch); + return; + + } else { + int i; + unsigned int cflag; + static struct { + unsigned int rate; + unsigned int cflag; + } baud_rates[] = { + { 921600, B921600 }, + { 460800, B460800 }, + { 230400, B230400 }, + { 115200, B115200 }, + { 57600, B57600 }, + { 38400, B38400 }, + { 19200, B19200 }, + { 9600, B9600 }, + { 4800, B4800 }, + { 2400, B2400 }, + { 1200, B1200 }, + { 600, B600 }, + { 300, B300 }, + { 200, B200 }, + { 150, B150 }, + { 134, B134 }, + { 110, B110 }, + { 75, B75 }, + { 50, B50 }, + }; + + cflag = C_BAUD(ch->uart_port.state->port.tty); + baud = 9600; + for (i = 0; i < ARRAY_SIZE(baud_rates); i++) { + if (baud_rates[i].cflag == cflag) { + baud = baud_rates[i].rate; + break; + } + } + + if (ch->ch_flags & CH_BAUD0) + ch->ch_flags &= ~(CH_BAUD0); + } + + if (ch->ch_c_cflag & PARENB) + lcr |= UART_LCR_PARITY; + + if (!(ch->ch_c_cflag & PARODD)) + lcr |= UART_LCR_EPAR; + + if (ch->ch_c_cflag & CMSPAR) + lcr |= UART_LCR_SPAR; + + if (ch->ch_c_cflag & CSTOPB) + lcr |= UART_LCR_STOP; + + lcr |= UART_LCR_WLEN(tty_get_char_size(ch->ch_c_cflag)); + + ier = readb(&ch->ch_neo_uart->ier); + uart_lcr = readb(&ch->ch_neo_uart->lcr); + + quot = ch->ch_bd->bd_dividend / baud; + + if (quot != 0) { + writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr); + writeb((quot & 0xff), &ch->ch_neo_uart->txrx); + writeb((quot >> 8), &ch->ch_neo_uart->ier); + writeb(lcr, &ch->ch_neo_uart->lcr); + } + + if (uart_lcr != lcr) + writeb(lcr, &ch->ch_neo_uart->lcr); + + if (ch->ch_c_cflag & CREAD) + ier |= (UART_IER_RDI | UART_IER_RLSI); + + ier |= (UART_IER_THRI | UART_IER_MSI); + + writeb(ier, &ch->ch_neo_uart->ier); + + /* Set new start/stop chars */ + neo_set_new_start_stop_chars(ch); + + if (ch->ch_c_cflag & CRTSCTS) + neo_set_cts_flow_control(ch); + else if (ch->ch_c_iflag & IXON) { + /* If start/stop is set to disable, then we should disable flow control */ + if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) + neo_set_no_output_flow_control(ch); + else + neo_set_ixon_flow_control(ch); + } + else + neo_set_no_output_flow_control(ch); + + if (ch->ch_c_cflag & CRTSCTS) + neo_set_rts_flow_control(ch); + else if (ch->ch_c_iflag & IXOFF) { + /* If start/stop is set to disable, then we should disable flow control */ + if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) + neo_set_no_input_flow_control(ch); + else + neo_set_ixoff_flow_control(ch); + } + else + neo_set_no_input_flow_control(ch); + /* + * Adjust the RX FIFO Trigger level if baud is less than 9600. + * Not exactly elegant, but this is needed because of the Exar chip's + * delay on firing off the RX FIFO interrupt on slower baud rates. + */ + if (baud < 9600) { + writeb(1, &ch->ch_neo_uart->rfifo); + ch->ch_r_tlevel = 1; + } + + neo_assert_modem_signals(ch); + + /* Get current status of the modem signals now */ + neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); + return; +} + +/* + * jsm_neo_intr() + * + * Neo specific interrupt handler. 
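neo_param() above derives the classic 16550-style divisor as ch->ch_bd->bd_dividend / baud and, with DLAB set in the LCR, writes the low byte through the data register (acting as DLL) and the high byte through the IER slot (acting as DLM) before restoring the LCR. A compile-only sketch of that sequence, with a toy register array standing in for the MMIO accessors; the dividend value itself is board-specific and not shown in this hunk:

    #include <stdint.h>

    #define LCR_DLAB 0x80u                 /* divisor latch access, as UART_LCR_DLAB */

    enum { REG_DATA, REG_IER, REG_LCR };   /* DATA doubles as DLL, IER as DLM */

    static uint8_t regs[3];                /* toy register file in place of MMIO */

    static void uart_write(int reg, uint8_t val)
    {
        regs[reg] = val;                   /* the driver uses writeb() here */
    }

    /* Program a 16550-style divisor: divisor = clock_dividend / baud. */
    static void set_baud(uint32_t clock_dividend, uint32_t baud, uint8_t lcr)
    {
        uint32_t quot = clock_dividend / baud;

        if (quot == 0)                     /* mirror the quot != 0 guard above */
            return;

        uart_write(REG_LCR, LCR_DLAB);     /* expose the divisor latches       */
        uart_write(REG_DATA, quot & 0xff); /* DLL: low byte of the divisor     */
        uart_write(REG_IER, quot >> 8);    /* DLM: high byte of the divisor    */
        uart_write(REG_LCR, lcr);          /* restore line control, DLAB clear */
    }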
+ */ +static irqreturn_t neo_intr(int irq, void *voidbrd) +{ + struct jsm_board *brd = voidbrd; + struct jsm_channel *ch; + int port = 0; + int type = 0; + int current_port; + u32 tmp; + u32 uart_poll; + unsigned long lock_flags; + unsigned long lock_flags2; + int outofloop_count = 0; + + /* Lock out the slow poller from running on this board. */ + spin_lock_irqsave(&brd->bd_intr_lock, lock_flags); + + /* + * Read in "extended" IRQ information from the 32bit Neo register. + * Bits 0-7: What port triggered the interrupt. + * Bits 8-31: Each 3bits indicate what type of interrupt occurred. + */ + uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET); + + jsm_dbg(INTR, &brd->pci_dev, "%s:%d uart_poll: %x\n", + __FILE__, __LINE__, uart_poll); + + if (!uart_poll) { + jsm_dbg(INTR, &brd->pci_dev, + "Kernel interrupted to me, but no pending interrupts...\n"); + spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); + return IRQ_NONE; + } + + /* At this point, we have at least SOMETHING to service, dig further... */ + + current_port = 0; + + /* Loop on each port */ + while (((uart_poll & 0xff) != 0) && (outofloop_count < 0xff)){ + + tmp = uart_poll; + outofloop_count++; + + /* Check current port to see if it has interrupt pending */ + if ((tmp & jsm_offset_table[current_port]) != 0) { + port = current_port; + type = tmp >> (8 + (port * 3)); + type &= 0x7; + } else { + current_port++; + continue; + } + + jsm_dbg(INTR, &brd->pci_dev, "%s:%d port: %x type: %x\n", + __FILE__, __LINE__, port, type); + + /* Remove this port + type from uart_poll */ + uart_poll &= ~(jsm_offset_table[port]); + + if (!type) { + /* If no type, just ignore it, and move onto next port */ + jsm_dbg(INTR, &brd->pci_dev, + "Interrupt with no type! port: %d\n", port); + continue; + } + + /* Switch on type of interrupt we have */ + switch (type) { + + case UART_17158_RXRDY_TIMEOUT: + /* + * RXRDY Time-out is cleared by reading data in the + * RX FIFO until it falls below the trigger level. + */ + + /* Verify the port is in range. */ + if (port >= brd->nasync) + continue; + + ch = brd->channels[port]; + if (!ch) + continue; + + neo_copy_data_from_uart_to_queue(ch); + + /* Call our tty layer to enforce queue flow control if needed. */ + spin_lock_irqsave(&ch->ch_lock, lock_flags2); + jsm_check_queue_flow_control(ch); + spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); + + continue; + + case UART_17158_RX_LINE_STATUS: + /* + * RXRDY and RX LINE Status (logic OR of LSR[4:1]) + */ + neo_parse_lsr(brd, port); + continue; + + case UART_17158_TXRDY: + /* + * TXRDY interrupt clears after reading ISR register for the UART channel. + */ + + /* + * Yes, this is odd... + * Why would I check EVERY possibility of type of + * interrupt, when we know its TXRDY??? + * Becuz for some reason, even tho we got triggered for TXRDY, + * it seems to be occasionally wrong. Instead of TX, which + * it should be, I was getting things like RXDY too. Weird. + */ + neo_parse_isr(brd, port); + continue; + + case UART_17158_MSR: + /* + * MSR or flow control was seen. + */ + neo_parse_isr(brd, port); + continue; + + default: + /* + * The UART triggered us with a bogus interrupt type. + * It appears the Exar chip, when REALLY bogged down, will throw + * these once and awhile. + * Its harmless, just ignore it and move on. 
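The interrupt loop above decodes one 32-bit poll word per pass: the low byte flags which ports need service, and each flagged port then owns a 3-bit interrupt-type field starting at bit 8. A small sketch of that decode; it assumes the per-port bit in the low byte is simply (1 << port), which is what jsm_offset_table appears to supply but is not visible in this hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the Neo extended poll word: low byte = pending-port bits,
     * bits 8..31 = one 3-bit interrupt type per port (8 ports max). */
    static void decode_poll(uint32_t poll)
    {
        for (unsigned port = 0; port < 8; port++) {
            if (!(poll & (1u << port)))            /* assumed jsm_offset_table[port] */
                continue;
            unsigned type = (poll >> (8 + port * 3)) & 0x7;
            printf("port %u pending, type %u\n", port, type);
        }
    }

    int main(void)
    {
        /* Example: port 2 pending, type 4 encoded at bits 14..16. */
        decode_poll((1u << 2) | (4u << (8 + 2 * 3)));
        return 0;
    }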
+ */ + jsm_dbg(INTR, &brd->pci_dev, + "%s:%d Unknown Interrupt type: %x\n", + __FILE__, __LINE__, type); + continue; + } + } + + spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); + + jsm_dbg(INTR, &brd->pci_dev, "finish\n"); + return IRQ_HANDLED; +} + +/* + * Neo specific way of turning off the receiver. + * Used as a way to enforce queue flow control when in + * hardware flow control mode. + */ +static void neo_disable_receiver(struct jsm_channel *ch) +{ + u8 tmp = readb(&ch->ch_neo_uart->ier); + tmp &= ~(UART_IER_RDI); + writeb(tmp, &ch->ch_neo_uart->ier); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); +} + + +/* + * Neo specific way of turning on the receiver. + * Used as a way to un-enforce queue flow control when in + * hardware flow control mode. + */ +static void neo_enable_receiver(struct jsm_channel *ch) +{ + u8 tmp = readb(&ch->ch_neo_uart->ier); + tmp |= (UART_IER_RDI); + writeb(tmp, &ch->ch_neo_uart->ier); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); +} + +static void neo_send_start_character(struct jsm_channel *ch) +{ + if (!ch) + return; + + if (ch->ch_startc != __DISABLED_CHAR) { + ch->ch_xon_sends++; + writeb(ch->ch_startc, &ch->ch_neo_uart->txrx); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); + } +} + +static void neo_send_stop_character(struct jsm_channel *ch) +{ + if (!ch) + return; + + if (ch->ch_stopc != __DISABLED_CHAR) { + ch->ch_xoff_sends++; + writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); + } +} + +/* + * neo_uart_init + */ +static void neo_uart_init(struct jsm_channel *ch) +{ + writeb(0, &ch->ch_neo_uart->ier); + writeb(0, &ch->ch_neo_uart->efr); + writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr); + + /* Clear out UART and FIFO */ + readb(&ch->ch_neo_uart->txrx); + writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); + readb(&ch->ch_neo_uart->lsr); + readb(&ch->ch_neo_uart->msr); + + ch->ch_flags |= CH_FIFO_ENABLED; + + /* Assert any signals we want up */ + writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); +} + +/* + * Make the UART completely turn off. + */ +static void neo_uart_off(struct jsm_channel *ch) +{ + /* Turn off UART enhanced bits */ + writeb(0, &ch->ch_neo_uart->efr); + + /* Stop all interrupts from occurring. */ + writeb(0, &ch->ch_neo_uart->ier); +} + +static u32 neo_get_uart_bytes_left(struct jsm_channel *ch) +{ + u8 left = 0; + u8 lsr = readb(&ch->ch_neo_uart->lsr); + + /* We must cache the LSR as some of the bits get reset once read... */ + ch->ch_cached_lsr |= lsr; + + /* Determine whether the Transmitter is empty or not */ + if (!(lsr & UART_LSR_TEMT)) + left = 1; + else { + ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); + left = 0; + } + + return left; +} + +/* Channel lock MUST be held by the calling function! */ +static void neo_send_break(struct jsm_channel *ch) +{ + /* + * Set the time we should stop sending the break. + * If we are already sending a break, toss away the existing + * time to stop, and use this new value instead. + */ + + /* Tell the UART to start sending the break */ + if (!(ch->ch_flags & CH_BREAK_SENDING)) { + u8 temp = readb(&ch->ch_neo_uart->lcr); + writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr); + ch->ch_flags |= (CH_BREAK_SENDING); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); + } +} + +/* + * neo_send_immediate_char. 
+ * + * Sends a specific character as soon as possible to the UART, + * jumping over any bytes that might be in the write queue. + * + * The channel lock MUST be held by the calling function. + */ +static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c) +{ + if (!ch) + return; + + writeb(c, &ch->ch_neo_uart->txrx); + + /* flush write operation */ + neo_pci_posting_flush(ch->ch_bd); +} + +struct board_ops jsm_neo_ops = { + .intr = neo_intr, + .uart_init = neo_uart_init, + .uart_off = neo_uart_off, + .param = neo_param, + .assert_modem_signals = neo_assert_modem_signals, + .flush_uart_write = neo_flush_uart_write, + .flush_uart_read = neo_flush_uart_read, + .disable_receiver = neo_disable_receiver, + .enable_receiver = neo_enable_receiver, + .send_break = neo_send_break, + .clear_break = neo_clear_break, + .send_start_character = neo_send_start_character, + .send_stop_character = neo_send_stop_character, + .copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart, + .get_uart_bytes_left = neo_get_uart_bytes_left, + .send_immediate_char = neo_send_immediate_char +}; diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c new file mode 100644 index 000000000..222afc270 --- /dev/null +++ b/drivers/tty/serial/jsm/jsm_tty.c @@ -0,0 +1,828 @@ +// SPDX-License-Identifier: GPL-2.0+ +/************************************************************************ + * Copyright 2003 Digi International (www.digi.com) + * + * Copyright (C) 2004 IBM Corporation. All rights reserved. + * + * Contact Information: + * Scott H Kilau + * Ananda Venkatarman + * Modifications: + * 01/19/06: changed jsm_input routine to use the dynamically allocated + * tty_buffer changes. Contributors: Scott Kilau and Ananda V. + ***********************************************************************/ +#include +#include +#include +#include /* For udelay */ +#include +#include + +#include "jsm.h" + +static DECLARE_BITMAP(linemap, MAXLINES); + +static void jsm_carrier(struct jsm_channel *ch); + +static inline int jsm_get_mstat(struct jsm_channel *ch) +{ + unsigned char mstat; + int result; + + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n"); + + mstat = (ch->ch_mostat | ch->ch_mistat); + + result = 0; + + if (mstat & UART_MCR_DTR) + result |= TIOCM_DTR; + if (mstat & UART_MCR_RTS) + result |= TIOCM_RTS; + if (mstat & UART_MSR_CTS) + result |= TIOCM_CTS; + if (mstat & UART_MSR_DSR) + result |= TIOCM_DSR; + if (mstat & UART_MSR_RI) + result |= TIOCM_RI; + if (mstat & UART_MSR_DCD) + result |= TIOCM_CD; + + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n"); + return result; +} + +static unsigned int jsm_tty_tx_empty(struct uart_port *port) +{ + return TIOCSER_TEMT; +} + +/* + * Return modem signals to ld. + */ +static unsigned int jsm_tty_get_mctrl(struct uart_port *port) +{ + int result; + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n"); + + result = jsm_get_mstat(channel); + + if (result < 0) + return -ENXIO; + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n"); + + return result; +} + +/* + * jsm_set_modem_info() + * + * Set modem signals, called by ld. 
+ */ +static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n"); + + if (mctrl & TIOCM_RTS) + channel->ch_mostat |= UART_MCR_RTS; + else + channel->ch_mostat &= ~UART_MCR_RTS; + + if (mctrl & TIOCM_DTR) + channel->ch_mostat |= UART_MCR_DTR; + else + channel->ch_mostat &= ~UART_MCR_DTR; + + channel->ch_bd->bd_ops->assert_modem_signals(channel); + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n"); + udelay(10); +} + +/* + * jsm_tty_write() + * + * Take data from the user or kernel and send it out to the FEP. + * In here exists all the Transparent Print magic as well. + */ +static void jsm_tty_write(struct uart_port *port) +{ + struct jsm_channel *channel; + + channel = container_of(port, struct jsm_channel, uart_port); + channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel); +} + +static void jsm_tty_start_tx(struct uart_port *port) +{ + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n"); + + channel->ch_flags &= ~(CH_STOP); + jsm_tty_write(port); + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n"); +} + +static void jsm_tty_stop_tx(struct uart_port *port) +{ + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n"); + + channel->ch_flags |= (CH_STOP); + + jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n"); +} + +static void jsm_tty_send_xchar(struct uart_port *port, char ch) +{ + unsigned long lock_flags; + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + struct ktermios *termios; + + spin_lock_irqsave(&port->lock, lock_flags); + termios = &port->state->port.tty->termios; + if (ch == termios->c_cc[VSTART]) + channel->ch_bd->bd_ops->send_start_character(channel); + + if (ch == termios->c_cc[VSTOP]) + channel->ch_bd->bd_ops->send_stop_character(channel); + spin_unlock_irqrestore(&port->lock, lock_flags); +} + +static void jsm_tty_stop_rx(struct uart_port *port) +{ + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + channel->ch_bd->bd_ops->disable_receiver(channel); +} + +static void jsm_tty_break(struct uart_port *port, int break_state) +{ + unsigned long lock_flags; + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + spin_lock_irqsave(&port->lock, lock_flags); + if (break_state == -1) + channel->ch_bd->bd_ops->send_break(channel); + else + channel->ch_bd->bd_ops->clear_break(channel); + + spin_unlock_irqrestore(&port->lock, lock_flags); +} + +static int jsm_tty_open(struct uart_port *port) +{ + unsigned long lock_flags; + struct jsm_board *brd; + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + struct ktermios *termios; + + /* Get board pointer from our array of majors we have allocated */ + brd = channel->ch_bd; + + /* + * Allocate channel buffers for read/write/error. + * Set flag, so we don't get trounced on. 
+ */ + channel->ch_flags |= (CH_OPENING); + + /* Drop locks, as malloc with GFP_KERNEL can sleep */ + + if (!channel->ch_rqueue) { + channel->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL); + if (!channel->ch_rqueue) { + jsm_dbg(INIT, &channel->ch_bd->pci_dev, + "unable to allocate read queue buf\n"); + return -ENOMEM; + } + } + if (!channel->ch_equeue) { + channel->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL); + if (!channel->ch_equeue) { + jsm_dbg(INIT, &channel->ch_bd->pci_dev, + "unable to allocate error queue buf\n"); + return -ENOMEM; + } + } + + channel->ch_flags &= ~(CH_OPENING); + /* + * Initialize if neither terminal is open. + */ + jsm_dbg(OPEN, &channel->ch_bd->pci_dev, + "jsm_open: initializing channel in open...\n"); + + /* + * Flush input queues. + */ + channel->ch_r_head = channel->ch_r_tail = 0; + channel->ch_e_head = channel->ch_e_tail = 0; + + brd->bd_ops->flush_uart_write(channel); + brd->bd_ops->flush_uart_read(channel); + + channel->ch_flags = 0; + channel->ch_cached_lsr = 0; + channel->ch_stops_sent = 0; + + spin_lock_irqsave(&port->lock, lock_flags); + termios = &port->state->port.tty->termios; + channel->ch_c_cflag = termios->c_cflag; + channel->ch_c_iflag = termios->c_iflag; + channel->ch_c_oflag = termios->c_oflag; + channel->ch_c_lflag = termios->c_lflag; + channel->ch_startc = termios->c_cc[VSTART]; + channel->ch_stopc = termios->c_cc[VSTOP]; + + /* Tell UART to init itself */ + brd->bd_ops->uart_init(channel); + + /* + * Run param in case we changed anything + */ + brd->bd_ops->param(channel); + + jsm_carrier(channel); + + channel->ch_open_count++; + spin_unlock_irqrestore(&port->lock, lock_flags); + + jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n"); + return 0; +} + +static void jsm_tty_close(struct uart_port *port) +{ + struct jsm_board *bd; + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n"); + + bd = channel->ch_bd; + + channel->ch_flags &= ~(CH_STOPI); + + channel->ch_open_count--; + + /* + * If we have HUPCL set, lower DTR and RTS + */ + if (channel->ch_c_cflag & HUPCL) { + jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, + "Close. 
HUPCL set, dropping DTR/RTS\n"); + + /* Drop RTS/DTR */ + channel->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS); + bd->bd_ops->assert_modem_signals(channel); + } + + /* Turn off UART interrupts for this port */ + channel->ch_bd->bd_ops->uart_off(channel); + + jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "finish\n"); +} + +static void jsm_tty_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old_termios) +{ + unsigned long lock_flags; + struct jsm_channel *channel = + container_of(port, struct jsm_channel, uart_port); + + spin_lock_irqsave(&port->lock, lock_flags); + channel->ch_c_cflag = termios->c_cflag; + channel->ch_c_iflag = termios->c_iflag; + channel->ch_c_oflag = termios->c_oflag; + channel->ch_c_lflag = termios->c_lflag; + channel->ch_startc = termios->c_cc[VSTART]; + channel->ch_stopc = termios->c_cc[VSTOP]; + + channel->ch_bd->bd_ops->param(channel); + jsm_carrier(channel); + spin_unlock_irqrestore(&port->lock, lock_flags); +} + +static const char *jsm_tty_type(struct uart_port *port) +{ + return "jsm"; +} + +static void jsm_tty_release_port(struct uart_port *port) +{ +} + +static int jsm_tty_request_port(struct uart_port *port) +{ + return 0; +} + +static void jsm_config_port(struct uart_port *port, int flags) +{ + port->type = PORT_JSM; +} + +static const struct uart_ops jsm_ops = { + .tx_empty = jsm_tty_tx_empty, + .set_mctrl = jsm_tty_set_mctrl, + .get_mctrl = jsm_tty_get_mctrl, + .stop_tx = jsm_tty_stop_tx, + .start_tx = jsm_tty_start_tx, + .send_xchar = jsm_tty_send_xchar, + .stop_rx = jsm_tty_stop_rx, + .break_ctl = jsm_tty_break, + .startup = jsm_tty_open, + .shutdown = jsm_tty_close, + .set_termios = jsm_tty_set_termios, + .type = jsm_tty_type, + .release_port = jsm_tty_release_port, + .request_port = jsm_tty_request_port, + .config_port = jsm_config_port, +}; + +/* + * jsm_tty_init() + * + * Init the tty subsystem. Called once per board after board has been + * downloaded and init'ed. + */ +int jsm_tty_init(struct jsm_board *brd) +{ + int i; + void __iomem *vaddr; + struct jsm_channel *ch; + + if (!brd) + return -ENXIO; + + jsm_dbg(INIT, &brd->pci_dev, "start\n"); + + /* + * Initialize board structure elements. + */ + + brd->nasync = brd->maxports; + + /* + * Allocate channel memory that might not have been allocated + * when the driver was first loaded. + */ + for (i = 0; i < brd->nasync; i++) { + if (!brd->channels[i]) { + + /* + * Okay to malloc with GFP_KERNEL, we are not at + * interrupt context, and there are no locks held. + */ + brd->channels[i] = kzalloc(sizeof(struct jsm_channel), GFP_KERNEL); + if (!brd->channels[i]) { + jsm_dbg(CORE, &brd->pci_dev, + "%s:%d Unable to allocate memory for channel struct\n", + __FILE__, __LINE__); + } + } + } + + ch = brd->channels[0]; + vaddr = brd->re_map_membase; + + /* Set up channel variables */ + for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) { + + if (!brd->channels[i]) + continue; + + spin_lock_init(&ch->ch_lock); + + if (brd->bd_uart_offset == 0x200) + ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i); + else + ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i); + + ch->ch_bd = brd; + ch->ch_portnum = i; + + /* .25 second delay */ + ch->ch_close_delay = 250; + + init_waitqueue_head(&ch->ch_flags_wait); + } + + jsm_dbg(INIT, &brd->pci_dev, "finish\n"); + return 0; +} + +int jsm_uart_port_init(struct jsm_board *brd) +{ + int i, rc; + unsigned int line; + + if (!brd) + return -ENXIO; + + jsm_dbg(INIT, &brd->pci_dev, "start\n"); + + /* + * Initialize board structure elements. 
+ */ + + brd->nasync = brd->maxports; + + /* Set up channel variables */ + for (i = 0; i < brd->nasync; i++) { + + if (!brd->channels[i]) + continue; + + brd->channels[i]->uart_port.irq = brd->irq; + brd->channels[i]->uart_port.uartclk = 14745600; + brd->channels[i]->uart_port.type = PORT_JSM; + brd->channels[i]->uart_port.iotype = UPIO_MEM; + brd->channels[i]->uart_port.membase = brd->re_map_membase; + brd->channels[i]->uart_port.fifosize = 16; + brd->channels[i]->uart_port.ops = &jsm_ops; + line = find_first_zero_bit(linemap, MAXLINES); + if (line >= MAXLINES) { + printk(KERN_INFO "jsm: linemap is full, added device failed\n"); + continue; + } else + set_bit(line, linemap); + brd->channels[i]->uart_port.line = line; + rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); + if (rc) { + printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i); + return rc; + } else + printk(KERN_INFO "jsm: Port %d added\n", i); + } + + jsm_dbg(INIT, &brd->pci_dev, "finish\n"); + return 0; +} + +int jsm_remove_uart_port(struct jsm_board *brd) +{ + int i; + struct jsm_channel *ch; + + if (!brd) + return -ENXIO; + + jsm_dbg(INIT, &brd->pci_dev, "start\n"); + + /* + * Initialize board structure elements. + */ + + brd->nasync = brd->maxports; + + /* Set up channel variables */ + for (i = 0; i < brd->nasync; i++) { + + if (!brd->channels[i]) + continue; + + ch = brd->channels[i]; + + clear_bit(ch->uart_port.line, linemap); + uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); + } + + jsm_dbg(INIT, &brd->pci_dev, "finish\n"); + return 0; +} + +void jsm_input(struct jsm_channel *ch) +{ + struct jsm_board *bd; + struct tty_struct *tp; + struct tty_port *port; + u32 rmask; + u16 head; + u16 tail; + int data_len; + unsigned long lock_flags; + int len = 0; + int s = 0; + int i = 0; + + jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n"); + + port = &ch->uart_port.state->port; + tp = port->tty; + + bd = ch->ch_bd; + if (!bd) + return; + + spin_lock_irqsave(&ch->ch_lock, lock_flags); + + /* + *Figure the number of characters in the buffer. + *Exit immediately if none. + */ + + rmask = RQUEUEMASK; + + head = ch->ch_r_head & rmask; + tail = ch->ch_r_tail & rmask; + + data_len = (head - tail) & rmask; + if (data_len == 0) { + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + return; + } + + jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n"); + + /* + *If the device is not open, or CREAD is off, flush + *input data and return immediately. + */ + if (!tp || !C_CREAD(tp)) { + + jsm_dbg(READ, &ch->ch_bd->pci_dev, + "input. dropping %d bytes on port %d...\n", + data_len, ch->ch_portnum); + ch->ch_r_head = tail; + + /* Force queue flow control to be released, if needed */ + jsm_check_queue_flow_control(ch); + + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + return; + } + + /* + * If we are throttled, simply don't read any data. + */ + if (ch->ch_flags & CH_STOPI) { + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + jsm_dbg(READ, &ch->ch_bd->pci_dev, + "Port %d throttled, not reading any data. head: %x tail: %x\n", + ch->ch_portnum, head, tail); + return; + } + + jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n"); + + len = tty_buffer_request_room(port, data_len); + + /* + * len now contains the most amount of data we can copy, + * bounded either by the flip buffer size or the amount + * of data the card actually has pending... + */ + while (len) { + s = ((head >= tail) ? 
head : RQUEUESIZE) - tail; + s = min(s, len); + + if (s <= 0) + break; + + /* + * If conditions are such that ld needs to see all + * UART errors, we will have to walk each character + * and error byte and send them to the buffer one at + * a time. + */ + + if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) { + for (i = 0; i < s; i++) { + u8 chr = ch->ch_rqueue[tail + i]; + u8 error = ch->ch_equeue[tail + i]; + char flag = TTY_NORMAL; + + /* + * Give the Linux ld the flags in the format it + * likes. + */ + if (error & UART_LSR_BI) + flag = TTY_BREAK; + else if (error & UART_LSR_PE) + flag = TTY_PARITY; + else if (error & UART_LSR_FE) + flag = TTY_FRAME; + + tty_insert_flip_char(port, chr, flag); + } + } else { + tty_insert_flip_string(port, ch->ch_rqueue + tail, s); + } + tail += s; + len -= s; + /* Flip queue if needed */ + tail &= rmask; + } + + ch->ch_r_tail = tail & rmask; + ch->ch_e_tail = tail & rmask; + jsm_check_queue_flow_control(ch); + spin_unlock_irqrestore(&ch->ch_lock, lock_flags); + + /* Tell the tty layer its okay to "eat" the data now */ + tty_flip_buffer_push(port); + + jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n"); +} + +static void jsm_carrier(struct jsm_channel *ch) +{ + struct jsm_board *bd; + + int virt_carrier = 0; + int phys_carrier = 0; + + jsm_dbg(CARR, &ch->ch_bd->pci_dev, "start\n"); + + bd = ch->ch_bd; + if (!bd) + return; + + if (ch->ch_mistat & UART_MSR_DCD) { + jsm_dbg(CARR, &ch->ch_bd->pci_dev, "mistat: %x D_CD: %x\n", + ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD); + phys_carrier = 1; + } + + if (ch->ch_c_cflag & CLOCAL) + virt_carrier = 1; + + jsm_dbg(CARR, &ch->ch_bd->pci_dev, "DCD: physical: %d virt: %d\n", + phys_carrier, virt_carrier); + + /* + * Test for a VIRTUAL carrier transition to HIGH. + */ + if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) { + + /* + * When carrier rises, wake any threads waiting + * for carrier in the open routine. + */ + + jsm_dbg(CARR, &ch->ch_bd->pci_dev, "carrier: virt DCD rose\n"); + + if (waitqueue_active(&(ch->ch_flags_wait))) + wake_up_interruptible(&ch->ch_flags_wait); + } + + /* + * Test for a PHYSICAL carrier transition to HIGH. + */ + if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) { + + /* + * When carrier rises, wake any threads waiting + * for carrier in the open routine. + */ + + jsm_dbg(CARR, &ch->ch_bd->pci_dev, + "carrier: physical DCD rose\n"); + + if (waitqueue_active(&(ch->ch_flags_wait))) + wake_up_interruptible(&ch->ch_flags_wait); + } + + /* + * Test for a PHYSICAL transition to low, so long as we aren't + * currently ignoring physical transitions (which is what "virtual + * carrier" indicates). + * + * The transition of the virtual carrier to low really doesn't + * matter... it really only means "ignore carrier state", not + * "make pretend that carrier is there". + */ + if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) + && (phys_carrier == 0)) { + /* + * When carrier drops: + * + * Drop carrier on all open units. + * + * Flush queues, waking up any task waiting in the + * line discipline. + * + * Send a hangup to the control terminal. + * + * Enable all select calls. + */ + if (waitqueue_active(&(ch->ch_flags_wait))) + wake_up_interruptible(&ch->ch_flags_wait); + } + + /* + * Make sure that our cached values reflect the current reality. 
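jsm_input() above uses the usual power-of-two ring-buffer arithmetic on ch_rqueue: pending data is (head - tail) masked, the copy is split into at most two contiguous runs so it never crosses the wrap point, and the flow-control helper that follows computes free space as tail - head - 1 with wrap-around. A standalone sketch of that index arithmetic, with an assumed size standing in for RQUEUESIZE:

    #include <stdint.h>

    #define RING_SIZE 8192u                 /* assumed power of two, like RQUEUESIZE */
    #define RING_MASK (RING_SIZE - 1u)

    /* Bytes queued and waiting to be consumed. */
    static uint32_t ring_pending(uint16_t head, uint16_t tail)
    {
        return (head - tail) & RING_MASK;
    }

    /* Free slots, keeping one byte unused so head == tail always means "empty". */
    static uint32_t ring_space(uint16_t head, uint16_t tail)
    {
        return (tail - head - 1u) & RING_MASK;
    }

    /* Length of the first contiguous run starting at tail; a second copy
     * from index 0 picks up whatever wrapped past the end of the buffer. */
    static uint32_t ring_contig(uint16_t head, uint16_t tail)
    {
        return ((head >= tail) ? head : RING_SIZE) - tail;
    }

Reserving the one empty slot is the usual trade-off that lets head == tail unambiguously mean "empty" without tracking a separate element count.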
+ */ + if (virt_carrier == 1) + ch->ch_flags |= CH_FCAR; + else + ch->ch_flags &= ~CH_FCAR; + + if (phys_carrier == 1) + ch->ch_flags |= CH_CD; + else + ch->ch_flags &= ~CH_CD; +} + + +void jsm_check_queue_flow_control(struct jsm_channel *ch) +{ + struct board_ops *bd_ops = ch->ch_bd->bd_ops; + int qleft; + + /* Store how much space we have left in the queue */ + qleft = ch->ch_r_tail - ch->ch_r_head - 1; + if (qleft < 0) + qleft += RQUEUEMASK + 1; + + /* + * Check to see if we should enforce flow control on our queue because + * the ld (or user) isn't reading data out of our queue fast enuf. + * + * NOTE: This is done based on what the current flow control of the + * port is set for. + * + * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt. + * This will cause the UART's FIFO to back up, and force + * the RTS signal to be dropped. + * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to + * the other side, in hopes it will stop sending data to us. + * 3) NONE - Nothing we can do. We will simply drop any extra data + * that gets sent into us when the queue fills up. + */ + if (qleft < 256) { + /* HWFLOW */ + if (ch->ch_c_cflag & CRTSCTS) { + if (!(ch->ch_flags & CH_RECEIVER_OFF)) { + bd_ops->disable_receiver(ch); + ch->ch_flags |= (CH_RECEIVER_OFF); + jsm_dbg(READ, &ch->ch_bd->pci_dev, + "Internal queue hit hilevel mark (%d)! Turning off interrupts\n", + qleft); + } + } + /* SWFLOW */ + else if (ch->ch_c_iflag & IXOFF) { + if (ch->ch_stops_sent <= MAX_STOPS_SENT) { + bd_ops->send_stop_character(ch); + ch->ch_stops_sent++; + jsm_dbg(READ, &ch->ch_bd->pci_dev, + "Sending stop char! Times sent: %x\n", + ch->ch_stops_sent); + } + } + } + + /* + * Check to see if we should unenforce flow control because + * ld (or user) finally read enuf data out of our queue. + * + * NOTE: This is done based on what the current flow control of the + * port is set for. + * + * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt. + * This will cause the UART's FIFO to raise RTS back up, + * which will allow the other side to start sending data again. + * 2) SWFLOW (IXOFF) - Send a start character to + * the other side, so it will start sending data to us again. + * 3) NONE - Do nothing. Since we didn't do anything to turn off the + * other side, we don't need to do anything now. + */ + if (qleft > (RQUEUESIZE / 2)) { + /* HWFLOW */ + if (ch->ch_c_cflag & CRTSCTS) { + if (ch->ch_flags & CH_RECEIVER_OFF) { + bd_ops->enable_receiver(ch); + ch->ch_flags &= ~(CH_RECEIVER_OFF); + jsm_dbg(READ, &ch->ch_bd->pci_dev, + "Internal queue hit lowlevel mark (%d)! Turning on interrupts\n", + qleft); + } + } + /* SWFLOW */ + else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) { + ch->ch_stops_sent = 0; + bd_ops->send_start_character(ch); + jsm_dbg(READ, &ch->ch_bd->pci_dev, + "Sending start char!\n"); + } + } +} diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c new file mode 100644 index 000000000..55c3c9db7 --- /dev/null +++ b/drivers/tty/serial/kgdb_nmi.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KGDB NMI serial console + * + * Copyright 2010 Google, Inc. + * Arve HjønnevÃ¥g + * Colin Cross + * Copyright 2012 Linaro Ltd. 
+ * Anton Vorontsov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int kgdb_nmi_knock = 1; +module_param_named(knock, kgdb_nmi_knock, int, 0600); +MODULE_PARM_DESC(knock, "if set to 1 (default), the special '$3#33' command " \ + "must be used to enter the debugger; when set to 0, " \ + "hitting return key is enough to enter the debugger; " \ + "when set to -1, the debugger is entered immediately " \ + "upon NMI"); + +static char *kgdb_nmi_magic = "$3#33"; +module_param_named(magic, kgdb_nmi_magic, charp, 0600); +MODULE_PARM_DESC(magic, "magic sequence to enter NMI debugger (default $3#33)"); + +static atomic_t kgdb_nmi_num_readers = ATOMIC_INIT(0); + +static int kgdb_nmi_console_setup(struct console *co, char *options) +{ + arch_kgdb_ops.enable_nmi(1); + + /* The NMI console uses the dbg_io_ops to issue console messages. To + * avoid duplicate messages during kdb sessions we must inform kdb's + * I/O utilities that messages sent to the console will automatically + * be displayed on the dbg_io. + */ + dbg_io_ops->cons = co; + + return 0; +} + +static void kgdb_nmi_console_write(struct console *co, const char *s, uint c) +{ + int i; + + for (i = 0; i < c; i++) + dbg_io_ops->write_char(s[i]); +} + +static struct tty_driver *kgdb_nmi_tty_driver; + +static struct tty_driver *kgdb_nmi_console_device(struct console *co, int *idx) +{ + *idx = co->index; + return kgdb_nmi_tty_driver; +} + +static struct console kgdb_nmi_console = { + .name = "ttyNMI", + .setup = kgdb_nmi_console_setup, + .write = kgdb_nmi_console_write, + .device = kgdb_nmi_console_device, + .flags = CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, +}; + +/* + * This is usually the maximum rate on debug ports. We make fifo large enough + * to make copy-pasting to the terminal usable. + */ +#define KGDB_NMI_BAUD 115200 +#define KGDB_NMI_FIFO_SIZE roundup_pow_of_two(KGDB_NMI_BAUD / 8 / HZ) + +struct kgdb_nmi_tty_priv { + struct tty_port port; + struct timer_list timer; + STRUCT_KFIFO(char, KGDB_NMI_FIFO_SIZE) fifo; +}; + +static struct tty_port *kgdb_nmi_port; + +static void kgdb_tty_recv(int ch) +{ + struct kgdb_nmi_tty_priv *priv; + char c = ch; + + if (!kgdb_nmi_port || ch < 0) + return; + /* + * Can't use port->tty->driver_data as tty might be not there. Timer + * will check for tty and will get the ref, but here we don't have to + * do that, and actually, we can't: we're in NMI context, no locks are + * possible. + */ + priv = container_of(kgdb_nmi_port, struct kgdb_nmi_tty_priv, port); + kfifo_in(&priv->fifo, &c, 1); +} + +static int kgdb_nmi_poll_one_knock(void) +{ + static int n; + int c; + const char *magic = kgdb_nmi_magic; + size_t m = strlen(magic); + bool printch = false; + + c = dbg_io_ops->read_char(); + if (c == NO_POLL_CHAR) + return c; + + if (!kgdb_nmi_knock && (c == '\r' || c == '\n')) { + return 1; + } else if (c == magic[n]) { + n = (n + 1) % m; + if (!n) + return 1; + printch = true; + } else { + n = 0; + } + + if (atomic_read(&kgdb_nmi_num_readers)) { + kgdb_tty_recv(c); + return 0; + } + + if (printch) { + kdb_printf("%c", c); + return 0; + } + + kdb_printf("\r%s %s to enter the debugger> %*s", + kgdb_nmi_knock ? "Type" : "Hit", + kgdb_nmi_knock ? 
magic : "", (int)m, ""); + while (m--) + kdb_printf("\b"); + return 0; +} + +/** + * kgdb_nmi_poll_knock - Check if it is time to enter the debugger + * + * "Serial ports are often noisy, especially when muxed over another port (we + * often use serial over the headset connector). Noise on the async command + * line just causes characters that are ignored, on a command line that blocked + * execution noise would be catastrophic." -- Colin Cross + * + * So, this function implements KGDB/KDB knocking on the serial line: we won't + * enter the debugger until we receive a known magic phrase (which is actually + * "$3#33", known as "escape to KDB" command. There is also a relaxed variant + * of knocking, i.e. just pressing the return key is enough to enter the + * debugger. And if knocking is disabled, the function always returns 1. + */ +bool kgdb_nmi_poll_knock(void) +{ + if (kgdb_nmi_knock < 0) + return true; + + while (1) { + int ret; + + ret = kgdb_nmi_poll_one_knock(); + if (ret == NO_POLL_CHAR) + return false; + else if (ret == 1) + break; + } + return true; +} + +/* + * The tasklet is cheap, it does not cause wakeups when reschedules itself, + * instead it waits for the next tick. + */ +static void kgdb_nmi_tty_receiver(struct timer_list *t) +{ + struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer); + char ch; + + priv->timer.expires = jiffies + (HZ/100); + add_timer(&priv->timer); + + if (likely(!atomic_read(&kgdb_nmi_num_readers) || + !kfifo_len(&priv->fifo))) + return; + + while (kfifo_out(&priv->fifo, &ch, 1)) + tty_insert_flip_char(&priv->port, ch, TTY_NORMAL); + tty_flip_buffer_push(&priv->port); +} + +static int kgdb_nmi_tty_activate(struct tty_port *port, struct tty_struct *tty) +{ + struct kgdb_nmi_tty_priv *priv = + container_of(port, struct kgdb_nmi_tty_priv, port); + + kgdb_nmi_port = port; + priv->timer.expires = jiffies + (HZ/100); + add_timer(&priv->timer); + + return 0; +} + +static void kgdb_nmi_tty_shutdown(struct tty_port *port) +{ + struct kgdb_nmi_tty_priv *priv = + container_of(port, struct kgdb_nmi_tty_priv, port); + + del_timer(&priv->timer); + kgdb_nmi_port = NULL; +} + +static const struct tty_port_operations kgdb_nmi_tty_port_ops = { + .activate = kgdb_nmi_tty_activate, + .shutdown = kgdb_nmi_tty_shutdown, +}; + +static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty) +{ + struct kgdb_nmi_tty_priv *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + INIT_KFIFO(priv->fifo); + timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0); + tty_port_init(&priv->port); + priv->port.ops = &kgdb_nmi_tty_port_ops; + tty->driver_data = priv; + + ret = tty_port_install(&priv->port, drv, tty); + if (ret) { + pr_err("%s: can't install tty port: %d\n", __func__, ret); + goto err; + } + return 0; +err: + tty_port_destroy(&priv->port); + kfree(priv); + return ret; +} + +static void kgdb_nmi_tty_cleanup(struct tty_struct *tty) +{ + struct kgdb_nmi_tty_priv *priv = tty->driver_data; + + tty->driver_data = NULL; + tty_port_destroy(&priv->port); + kfree(priv); +} + +static int kgdb_nmi_tty_open(struct tty_struct *tty, struct file *file) +{ + struct kgdb_nmi_tty_priv *priv = tty->driver_data; + unsigned int mode = file->f_flags & O_ACCMODE; + int ret; + + ret = tty_port_open(&priv->port, tty, file); + if (!ret && (mode == O_RDONLY || mode == O_RDWR)) + atomic_inc(&kgdb_nmi_num_readers); + + return ret; +} + +static void kgdb_nmi_tty_close(struct tty_struct *tty, struct file *file) +{ + struct kgdb_nmi_tty_priv 
*priv = tty->driver_data; + unsigned int mode = file->f_flags & O_ACCMODE; + + if (mode == O_RDONLY || mode == O_RDWR) + atomic_dec(&kgdb_nmi_num_readers); + + tty_port_close(&priv->port, tty, file); +} + +static void kgdb_nmi_tty_hangup(struct tty_struct *tty) +{ + struct kgdb_nmi_tty_priv *priv = tty->driver_data; + + tty_port_hangup(&priv->port); +} + +static unsigned int kgdb_nmi_tty_write_room(struct tty_struct *tty) +{ + /* Actually, we can handle any amount as we use polled writes. */ + return 2048; +} + +static int kgdb_nmi_tty_write(struct tty_struct *tty, const unchar *buf, int c) +{ + int i; + + for (i = 0; i < c; i++) + dbg_io_ops->write_char(buf[i]); + return c; +} + +static const struct tty_operations kgdb_nmi_tty_ops = { + .open = kgdb_nmi_tty_open, + .close = kgdb_nmi_tty_close, + .install = kgdb_nmi_tty_install, + .cleanup = kgdb_nmi_tty_cleanup, + .hangup = kgdb_nmi_tty_hangup, + .write_room = kgdb_nmi_tty_write_room, + .write = kgdb_nmi_tty_write, +}; + +int kgdb_register_nmi_console(void) +{ + int ret; + + if (!arch_kgdb_ops.enable_nmi) + return 0; + + kgdb_nmi_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW); + if (IS_ERR(kgdb_nmi_tty_driver)) { + pr_err("%s: cannot allocate tty\n", __func__); + return PTR_ERR(kgdb_nmi_tty_driver); + } + kgdb_nmi_tty_driver->driver_name = "ttyNMI"; + kgdb_nmi_tty_driver->name = "ttyNMI"; + kgdb_nmi_tty_driver->num = 1; + kgdb_nmi_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + kgdb_nmi_tty_driver->subtype = SERIAL_TYPE_NORMAL; + kgdb_nmi_tty_driver->init_termios = tty_std_termios; + tty_termios_encode_baud_rate(&kgdb_nmi_tty_driver->init_termios, + KGDB_NMI_BAUD, KGDB_NMI_BAUD); + tty_set_operations(kgdb_nmi_tty_driver, &kgdb_nmi_tty_ops); + + ret = tty_register_driver(kgdb_nmi_tty_driver); + if (ret) { + pr_err("%s: can't register tty driver: %d\n", __func__, ret); + goto err_drv_reg; + } + + register_console(&kgdb_nmi_console); + + return 0; +err_drv_reg: + tty_driver_kref_put(kgdb_nmi_tty_driver); + return ret; +} +EXPORT_SYMBOL_GPL(kgdb_register_nmi_console); + +int kgdb_unregister_nmi_console(void) +{ + int ret; + + if (!arch_kgdb_ops.enable_nmi) + return 0; + arch_kgdb_ops.enable_nmi(0); + + ret = unregister_console(&kgdb_nmi_console); + if (ret) + return ret; + + tty_unregister_driver(kgdb_nmi_tty_driver); + tty_driver_kref_put(kgdb_nmi_tty_driver); + + return 0; +} +EXPORT_SYMBOL_GPL(kgdb_unregister_nmi_console); diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c new file mode 100644 index 000000000..7aa37be32 --- /dev/null +++ b/drivers/tty/serial/kgdboc.c @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on the same principle as kgdboe using the NETPOLL api, this + * driver uses a console polling api to implement a gdb serial inteface + * which is multiplexed on a console port. + * + * Maintainer: Jason Wessel + * + * 2007-2008 (c) Jason Wessel - Wind River Systems, Inc. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_CONFIG_LEN 40 + +static struct kgdb_io kgdboc_io_ops; + +/* -1 = init not run yet, 0 = unconfigured, 1 = configured. 
*/ +static int configured = -1; +static DEFINE_MUTEX(config_mutex); + +static char config[MAX_CONFIG_LEN]; +static struct kparam_string kps = { + .string = config, + .maxlen = MAX_CONFIG_LEN, +}; + +static int kgdboc_use_kms; /* 1 if we use kernel mode switching */ +static struct tty_driver *kgdb_tty_driver; +static int kgdb_tty_line; + +static struct platform_device *kgdboc_pdev; + +#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) +static struct kgdb_io kgdboc_earlycon_io_ops; +static int (*earlycon_orig_exit)(struct console *con); +#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ + +#ifdef CONFIG_KDB_KEYBOARD +static int kgdboc_reset_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + input_reset_device(dev); + + /* Return an error - we do not want to bind, just to reset */ + return -ENODEV; +} + +static void kgdboc_reset_disconnect(struct input_handle *handle) +{ + /* We do not expect anyone to actually bind to us */ + BUG(); +} + +static const struct input_device_id kgdboc_reset_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT, + .evbit = { BIT_MASK(EV_KEY) }, + }, + { } +}; + +static struct input_handler kgdboc_reset_handler = { + .connect = kgdboc_reset_connect, + .disconnect = kgdboc_reset_disconnect, + .name = "kgdboc_reset", + .id_table = kgdboc_reset_ids, +}; + +static DEFINE_MUTEX(kgdboc_reset_mutex); + +static void kgdboc_restore_input_helper(struct work_struct *dummy) +{ + /* + * We need to take a mutex to prevent several instances of + * this work running on different CPUs so they don't try + * to register again already registered handler. + */ + mutex_lock(&kgdboc_reset_mutex); + + if (input_register_handler(&kgdboc_reset_handler) == 0) + input_unregister_handler(&kgdboc_reset_handler); + + mutex_unlock(&kgdboc_reset_mutex); +} + +static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper); + +static void kgdboc_restore_input(void) +{ + if (likely(system_state == SYSTEM_RUNNING)) + schedule_work(&kgdboc_restore_input_work); +} + +static int kgdboc_register_kbd(char **cptr) +{ + if (strncmp(*cptr, "kbd", 3) == 0 || + strncmp(*cptr, "kdb", 3) == 0) { + if (kdb_poll_idx < KDB_POLL_FUNC_MAX) { + kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char; + kdb_poll_idx++; + if (cptr[0][3] == ',') + *cptr += 4; + else + return 1; + } + } + return 0; +} + +static void kgdboc_unregister_kbd(void) +{ + int i; + + for (i = 0; i < kdb_poll_idx; i++) { + if (kdb_poll_funcs[i] == kdb_get_kbd_char) { + kdb_poll_idx--; + kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx]; + kdb_poll_funcs[kdb_poll_idx] = NULL; + i--; + } + } + flush_work(&kgdboc_restore_input_work); +} +#else /* ! CONFIG_KDB_KEYBOARD */ +#define kgdboc_register_kbd(x) 0 +#define kgdboc_unregister_kbd() +#define kgdboc_restore_input() +#endif /* ! 
CONFIG_KDB_KEYBOARD */ + +#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) +static void cleanup_earlycon(void) +{ + if (kgdboc_earlycon_io_ops.cons) + kgdb_unregister_io_module(&kgdboc_earlycon_io_ops); +} +#else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ +static inline void cleanup_earlycon(void) { } +#endif /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ + +static void cleanup_kgdboc(void) +{ + cleanup_earlycon(); + + if (configured != 1) + return; + + if (kgdb_unregister_nmi_console()) + return; + kgdboc_unregister_kbd(); + kgdb_unregister_io_module(&kgdboc_io_ops); +} + +static int configure_kgdboc(void) +{ + struct tty_driver *p; + int tty_line = 0; + int err = -ENODEV; + char *cptr = config; + struct console *cons; + + if (!strlen(config) || isspace(config[0])) { + err = 0; + goto noconfig; + } + + kgdboc_io_ops.cons = NULL; + kgdb_tty_driver = NULL; + + kgdboc_use_kms = 0; + if (strncmp(cptr, "kms,", 4) == 0) { + cptr += 4; + kgdboc_use_kms = 1; + } + + if (kgdboc_register_kbd(&cptr)) + goto do_register; + + p = tty_find_polling_driver(cptr, &tty_line); + if (!p) + goto noconfig; + + for_each_console(cons) { + int idx; + if (cons->device && cons->device(cons, &idx) == p && + idx == tty_line) { + kgdboc_io_ops.cons = cons; + break; + } + } + + kgdb_tty_driver = p; + kgdb_tty_line = tty_line; + +do_register: + err = kgdb_register_io_module(&kgdboc_io_ops); + if (err) + goto noconfig; + + err = kgdb_register_nmi_console(); + if (err) + goto nmi_con_failed; + + configured = 1; + + return 0; + +nmi_con_failed: + kgdb_unregister_io_module(&kgdboc_io_ops); +noconfig: + kgdboc_unregister_kbd(); + configured = 0; + + return err; +} + +static int kgdboc_probe(struct platform_device *pdev) +{ + int ret = 0; + + mutex_lock(&config_mutex); + if (configured != 1) { + ret = configure_kgdboc(); + + /* Convert "no device" to "defer" so we'll keep trying */ + if (ret == -ENODEV) + ret = -EPROBE_DEFER; + } + mutex_unlock(&config_mutex); + + return ret; +} + +static struct platform_driver kgdboc_platform_driver = { + .probe = kgdboc_probe, + .driver = { + .name = "kgdboc", + .suppress_bind_attrs = true, + }, +}; + +static int __init init_kgdboc(void) +{ + int ret; + + /* + * kgdboc is a little bit of an odd "platform_driver". It can be + * up and running long before the platform_driver object is + * created and thus doesn't actually store anything in it. There's + * only one instance of kgdb so anything is stored as global state. + * The platform_driver is only created so that we can leverage the + * kernel's mechanisms (like -EPROBE_DEFER) to call us when our + * underlying tty is ready. Here we init our platform driver and + * then create the single kgdboc instance. 
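configure_kgdboc() above peels the config string apart in a fixed order: an optional "kms," prefix, an optional "kbd"/"kdb" keyboard token, and whatever remains is handed to tty_find_polling_driver() as the console name. A rough sketch of that tokenizing in plain C string handling; the names are illustrative, and the keyboard-only case that the driver short-circuits straight to registration is only approximated here:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct kgdboc_cfg {
        bool use_kms;       /* "kms," prefix requests kernel mode setting hooks */
        bool use_kbd;       /* "kbd"/"kdb" token adds the keyboard poll hook    */
        const char *tty;    /* remainder, e.g. "ttyS0,115200"                   */
    };

    static void parse_kgdboc(const char *cfg, struct kgdboc_cfg *out)
    {
        out->use_kms = out->use_kbd = false;

        if (strncmp(cfg, "kms,", 4) == 0) {     /* same test the driver makes */
            out->use_kms = true;
            cfg += 4;
        }
        if (strncmp(cfg, "kbd", 3) == 0 || strncmp(cfg, "kdb", 3) == 0) {
            out->use_kbd = true;
            cfg += (cfg[3] == ',') ? 4 : 3;     /* keyboard may be the only entry */
        }
        out->tty = cfg;                         /* may be empty: keyboard-only setup */
    }

    int main(void)
    {
        struct kgdboc_cfg cfg;
        parse_kgdboc("kms,kbd,ttyS0,115200", &cfg);
        printf("kms=%d kbd=%d tty=%s\n", cfg.use_kms, cfg.use_kbd, cfg.tty);
        return 0;
    }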
+ */ + ret = platform_driver_register(&kgdboc_platform_driver); + if (ret) + return ret; + + kgdboc_pdev = platform_device_alloc("kgdboc", PLATFORM_DEVID_NONE); + if (!kgdboc_pdev) { + ret = -ENOMEM; + goto err_did_register; + } + + ret = platform_device_add(kgdboc_pdev); + if (!ret) + return 0; + + platform_device_put(kgdboc_pdev); + +err_did_register: + platform_driver_unregister(&kgdboc_platform_driver); + return ret; +} + +static void exit_kgdboc(void) +{ + mutex_lock(&config_mutex); + cleanup_kgdboc(); + mutex_unlock(&config_mutex); + + platform_device_unregister(kgdboc_pdev); + platform_driver_unregister(&kgdboc_platform_driver); +} + +static int kgdboc_get_char(void) +{ + if (!kgdb_tty_driver) + return -1; + return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver, + kgdb_tty_line); +} + +static void kgdboc_put_char(u8 chr) +{ + if (!kgdb_tty_driver) + return; + kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver, + kgdb_tty_line, chr); +} + +static int param_set_kgdboc_var(const char *kmessage, + const struct kernel_param *kp) +{ + size_t len = strlen(kmessage); + int ret = 0; + + if (len >= MAX_CONFIG_LEN) { + pr_err("config string too long\n"); + return -ENOSPC; + } + + if (kgdb_connected) { + pr_err("Cannot reconfigure while KGDB is connected.\n"); + return -EBUSY; + } + + mutex_lock(&config_mutex); + + strcpy(config, kmessage); + /* Chop out \n char as a result of echo */ + if (len && config[len - 1] == '\n') + config[len - 1] = '\0'; + + if (configured == 1) + cleanup_kgdboc(); + + /* + * Configure with the new params as long as init already ran. + * Note that we can get called before init if someone loads us + * with "modprobe kgdboc kgdboc=..." or if they happen to use + * the odd syntax of "kgdboc.kgdboc=..." on the kernel command. + */ + if (configured >= 0) + ret = configure_kgdboc(); + + /* + * If we couldn't configure then clear out the config. Note that + * specifying an invalid config on the kernel command line vs. + * through sysfs have slightly different behaviors. If we fail + * to configure what was specified on the kernel command line + * we'll leave it in the 'config' and return -EPROBE_DEFER from + * our probe. When specified through sysfs userspace is + * responsible for loading the tty driver before setting up. 
+ */ + if (ret) + config[0] = '\0'; + + mutex_unlock(&config_mutex); + + return ret; +} + +static int dbg_restore_graphics; + +static void kgdboc_pre_exp_handler(void) +{ + if (!dbg_restore_graphics && kgdboc_use_kms) { + dbg_restore_graphics = 1; + con_debug_enter(vc_cons[fg_console].d); + } + /* Increment the module count when the debugger is active */ + if (!kgdb_connected) + try_module_get(THIS_MODULE); +} + +static void kgdboc_post_exp_handler(void) +{ + /* decrement the module count when the debugger detaches */ + if (!kgdb_connected) + module_put(THIS_MODULE); + if (kgdboc_use_kms && dbg_restore_graphics) { + dbg_restore_graphics = 0; + con_debug_leave(); + } + kgdboc_restore_input(); +} + +static struct kgdb_io kgdboc_io_ops = { + .name = "kgdboc", + .read_char = kgdboc_get_char, + .write_char = kgdboc_put_char, + .pre_exception = kgdboc_pre_exp_handler, + .post_exception = kgdboc_post_exp_handler, +}; + +#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) +static int kgdboc_option_setup(char *opt) +{ + if (!opt) { + pr_err("config string not provided\n"); + return 1; + } + + if (strlen(opt) >= MAX_CONFIG_LEN) { + pr_err("config string too long\n"); + return 1; + } + strcpy(config, opt); + + return 1; +} + +__setup("kgdboc=", kgdboc_option_setup); + + +/* This is only available if kgdboc is a built in for early debugging */ +static int __init kgdboc_early_init(char *opt) +{ + kgdboc_option_setup(opt); + configure_kgdboc(); + return 0; +} + +early_param("ekgdboc", kgdboc_early_init); + +static int kgdboc_earlycon_get_char(void) +{ + char c; + + if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons, + &c, 1)) + return NO_POLL_CHAR; + + return c; +} + +static void kgdboc_earlycon_put_char(u8 chr) +{ + kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr, + 1); +} + +static void kgdboc_earlycon_pre_exp_handler(void) +{ + struct console *con; + static bool already_warned; + + if (already_warned) + return; + + /* + * When the first normal console comes up the kernel will take all + * the boot consoles out of the list. Really, we should stop using + * the boot console when it does that but until a TTY is registered + * we have no other choice so we keep using it. Since not all + * serial drivers might be OK with this, print a warning once per + * boot if we detect this case. + */ + for_each_console(con) + if (con == kgdboc_earlycon_io_ops.cons) + return; + + already_warned = true; + pr_warn("kgdboc_earlycon is still using bootconsole\n"); +} + +static int kgdboc_earlycon_deferred_exit(struct console *con) +{ + /* + * If we get here it means the boot console is going away but we + * don't yet have a suitable replacement. Don't pass through to + * the original exit routine. We'll call it later in our deinit() + * function. For now, restore the original exit() function pointer + * as a sentinal that we've hit this point. + */ + con->exit = earlycon_orig_exit; + + return 0; +} + +static void kgdboc_earlycon_deinit(void) +{ + if (!kgdboc_earlycon_io_ops.cons) + return; + + if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit) + /* + * kgdboc_earlycon is exiting but original boot console exit + * was never called (AKA kgdboc_earlycon_deferred_exit() + * didn't ever run). Undo our trap. + */ + kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit; + else if (kgdboc_earlycon_io_ops.cons->exit) + /* + * We skipped calling the exit() routine so we could try to + * keep using the boot console even after it went away. 
We're + * finally done so call the function now. + */ + kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons); + + kgdboc_earlycon_io_ops.cons = NULL; +} + +static struct kgdb_io kgdboc_earlycon_io_ops = { + .name = "kgdboc_earlycon", + .read_char = kgdboc_earlycon_get_char, + .write_char = kgdboc_earlycon_put_char, + .pre_exception = kgdboc_earlycon_pre_exp_handler, + .deinit = kgdboc_earlycon_deinit, +}; + +#define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name) +static char kgdboc_earlycon_param[MAX_CONSOLE_NAME_LEN] __initdata; +static bool kgdboc_earlycon_late_enable __initdata; + +static int __init kgdboc_earlycon_init(char *opt) +{ + struct console *con; + + kdb_init(KDB_INIT_EARLY); + + /* + * Look for a matching console, or if the name was left blank just + * pick the first one we find. + */ + console_lock(); + for_each_console(con) { + if (con->write && con->read && + (con->flags & (CON_BOOT | CON_ENABLED)) && + (!opt || !opt[0] || strcmp(con->name, opt) == 0)) + break; + } + + if (!con) { + /* + * Both earlycon and kgdboc_earlycon are initialized during + * early parameter parsing. We cannot guarantee earlycon gets + * in first and, in any case, on ACPI systems earlycon may + * defer its own initialization (usually to somewhere within + * setup_arch() ). To cope with either of these situations + * we can defer our own initialization to a little later in + * the boot. + */ + if (!kgdboc_earlycon_late_enable) { + pr_info("No suitable earlycon yet, will try later\n"); + if (opt) + strscpy(kgdboc_earlycon_param, opt, + sizeof(kgdboc_earlycon_param)); + kgdboc_earlycon_late_enable = true; + } else { + pr_info("Couldn't find kgdb earlycon\n"); + } + goto unlock; + } + + kgdboc_earlycon_io_ops.cons = con; + pr_info("Going to register kgdb with earlycon '%s'\n", con->name); + if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) { + kgdboc_earlycon_io_ops.cons = NULL; + pr_info("Failed to register kgdb with earlycon\n"); + } else { + /* Trap exit so we can keep earlycon longer if needed. */ + earlycon_orig_exit = con->exit; + con->exit = kgdboc_earlycon_deferred_exit; + } + +unlock: + console_unlock(); + + /* Non-zero means malformed option so we always return zero */ + return 0; +} + +early_param("kgdboc_earlycon", kgdboc_earlycon_init); + +/* + * This is only intended for the late adoption of an early console. + * + * It is not a reliable way to adopt regular consoles because we can not + * control what order console initcalls are made and, in any case, many + * regular consoles are registered much later in the boot process than + * the console initcalls! + */ +static int __init kgdboc_earlycon_late_init(void) +{ + if (kgdboc_earlycon_late_enable) + kgdboc_earlycon_init(kgdboc_earlycon_param); + return 0; +} +console_initcall(kgdboc_earlycon_late_init); + +#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ + +module_init(init_kgdboc); +module_exit(exit_kgdboc); +module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644); +MODULE_PARM_DESC(kgdboc, "[,baud]"); +MODULE_DESCRIPTION("KGDB Console TTY Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c new file mode 100644 index 000000000..112a2f5f6 --- /dev/null +++ b/drivers/tty/serial/lantiq.c @@ -0,0 +1,978 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 
+ * + * Copyright (C) 2004 Infineon IFAP DC COM CPE + * Copyright (C) 2007 Felix Fietkau + * Copyright (C) 2007 John Crispin + * Copyright (C) 2010 Thomas Langer, + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PORT_LTQ_ASC 111 +#define MAXPORTS 2 +#define UART_DUMMY_UER_RX 1 +#define DRVNAME "lantiq,asc" +#ifdef __BIG_ENDIAN +#define LTQ_ASC_TBUF (0x0020 + 3) +#define LTQ_ASC_RBUF (0x0024 + 3) +#else +#define LTQ_ASC_TBUF 0x0020 +#define LTQ_ASC_RBUF 0x0024 +#endif +#define LTQ_ASC_FSTAT 0x0048 +#define LTQ_ASC_WHBSTATE 0x0018 +#define LTQ_ASC_STATE 0x0014 +#define LTQ_ASC_IRNCR 0x00F8 +#define LTQ_ASC_CLC 0x0000 +#define LTQ_ASC_ID 0x0008 +#define LTQ_ASC_PISEL 0x0004 +#define LTQ_ASC_TXFCON 0x0044 +#define LTQ_ASC_RXFCON 0x0040 +#define LTQ_ASC_CON 0x0010 +#define LTQ_ASC_BG 0x0050 +#define LTQ_ASC_IRNREN 0x00F4 + +#define ASC_IRNREN_TX 0x1 +#define ASC_IRNREN_RX 0x2 +#define ASC_IRNREN_ERR 0x4 +#define ASC_IRNREN_TX_BUF 0x8 +#define ASC_IRNCR_TIR 0x1 +#define ASC_IRNCR_RIR 0x2 +#define ASC_IRNCR_EIR 0x4 +#define ASC_IRNCR_MASK GENMASK(2, 0) + +#define ASCOPT_CSIZE 0x3 +#define TXFIFO_FL 1 +#define RXFIFO_FL 1 +#define ASCCLC_DISS 0x2 +#define ASCCLC_RMCMASK 0x0000FF00 +#define ASCCLC_RMCOFFSET 8 +#define ASCCON_M_8ASYNC 0x0 +#define ASCCON_M_7ASYNC 0x2 +#define ASCCON_ODD 0x00000020 +#define ASCCON_STP 0x00000080 +#define ASCCON_BRS 0x00000100 +#define ASCCON_FDE 0x00000200 +#define ASCCON_R 0x00008000 +#define ASCCON_FEN 0x00020000 +#define ASCCON_ROEN 0x00080000 +#define ASCCON_TOEN 0x00100000 +#define ASCSTATE_PE 0x00010000 +#define ASCSTATE_FE 0x00020000 +#define ASCSTATE_ROE 0x00080000 +#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) +#define ASCWHBSTATE_CLRREN 0x00000001 +#define ASCWHBSTATE_SETREN 0x00000002 +#define ASCWHBSTATE_CLRPE 0x00000004 +#define ASCWHBSTATE_CLRFE 0x00000008 +#define ASCWHBSTATE_CLRROE 0x00000020 +#define ASCTXFCON_TXFEN 0x0001 +#define ASCTXFCON_TXFFLU 0x0002 +#define ASCTXFCON_TXFITLMASK 0x3F00 +#define ASCTXFCON_TXFITLOFF 8 +#define ASCRXFCON_RXFEN 0x0001 +#define ASCRXFCON_RXFFLU 0x0002 +#define ASCRXFCON_RXFITLMASK 0x3F00 +#define ASCRXFCON_RXFITLOFF 8 +#define ASCFSTAT_RXFFLMASK 0x003F +#define ASCFSTAT_TXFFLMASK 0x3F00 +#define ASCFSTAT_TXFREEMASK 0x3F000000 + +static void lqasc_tx_chars(struct uart_port *port); +static struct ltq_uart_port *lqasc_port[MAXPORTS]; +static struct uart_driver lqasc_reg; + +struct ltq_soc_data { + int (*fetch_irq)(struct device *dev, struct ltq_uart_port *ltq_port); + int (*request_irq)(struct uart_port *port); + void (*free_irq)(struct uart_port *port); +}; + +struct ltq_uart_port { + struct uart_port port; + /* clock used to derive divider */ + struct clk *freqclk; + /* clock gating of the ASC core */ + struct clk *clk; + unsigned int tx_irq; + unsigned int rx_irq; + unsigned int err_irq; + unsigned int common_irq; + spinlock_t lock; /* exclusive access for multi core */ + + const struct ltq_soc_data *soc; +}; + +static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg) +{ + u32 tmp = __raw_readl(reg); + + __raw_writel((tmp & ~clear) | set, reg); +} + +static inline struct +ltq_uart_port *to_ltq_uart_port(struct uart_port *port) +{ + return container_of(port, struct ltq_uart_port, port); +} + +static void +lqasc_stop_tx(struct uart_port *port) +{ + return; +} + +static bool lqasc_tx_ready(struct uart_port *port) +{ + u32 fstat = __raw_readl(port->membase + 
LTQ_ASC_FSTAT); + + return FIELD_GET(ASCFSTAT_TXFREEMASK, fstat); +} + +static void +lqasc_start_tx(struct uart_port *port) +{ + unsigned long flags; + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + spin_lock_irqsave(<q_port->lock, flags); + lqasc_tx_chars(port); + spin_unlock_irqrestore(<q_port->lock, flags); + return; +} + +static void +lqasc_stop_rx(struct uart_port *port) +{ + __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); +} + +static int +lqasc_rx_chars(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + unsigned int ch = 0, rsr = 0, fifocnt; + + fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) & + ASCFSTAT_RXFFLMASK; + while (fifocnt--) { + u8 flag = TTY_NORMAL; + ch = readb(port->membase + LTQ_ASC_RBUF); + rsr = (__raw_readl(port->membase + LTQ_ASC_STATE) + & ASCSTATE_ANY) | UART_DUMMY_UER_RX; + tty_flip_buffer_push(tport); + port->icount.rx++; + + /* + * Note that the error handling code is + * out of the main execution path + */ + if (rsr & ASCSTATE_ANY) { + if (rsr & ASCSTATE_PE) { + port->icount.parity++; + asc_update_bits(0, ASCWHBSTATE_CLRPE, + port->membase + LTQ_ASC_WHBSTATE); + } else if (rsr & ASCSTATE_FE) { + port->icount.frame++; + asc_update_bits(0, ASCWHBSTATE_CLRFE, + port->membase + LTQ_ASC_WHBSTATE); + } + if (rsr & ASCSTATE_ROE) { + port->icount.overrun++; + asc_update_bits(0, ASCWHBSTATE_CLRROE, + port->membase + LTQ_ASC_WHBSTATE); + } + + rsr &= port->read_status_mask; + + if (rsr & ASCSTATE_PE) + flag = TTY_PARITY; + else if (rsr & ASCSTATE_FE) + flag = TTY_FRAME; + } + + if ((rsr & port->ignore_status_mask) == 0) + tty_insert_flip_char(tport, ch, flag); + + if (rsr & ASCSTATE_ROE) + /* + * Overrun is special, since it's reported + * immediately, and doesn't affect the current + * character + */ + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + } + + if (ch != 0) + tty_flip_buffer_push(tport); + + return 0; +} + +static void +lqasc_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + if (uart_tx_stopped(port)) { + lqasc_stop_tx(port); + return; + } + + while (lqasc_tx_ready(port)) { + if (port->x_char) { + writeb(port->x_char, port->membase + LTQ_ASC_TBUF); + port->icount.tx++; + port->x_char = 0; + continue; + } + + if (uart_circ_empty(xmit)) + break; + + writeb(port->state->xmit.buf[port->state->xmit.tail], + port->membase + LTQ_ASC_TBUF); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static irqreturn_t +lqasc_tx_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + spin_lock_irqsave(<q_port->lock, flags); + __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); + spin_unlock_irqrestore(<q_port->lock, flags); + lqasc_start_tx(port); + return IRQ_HANDLED; +} + +static irqreturn_t +lqasc_err_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + spin_lock_irqsave(<q_port->lock, flags); + __raw_writel(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR); + /* clear any pending interrupts */ + asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | + ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); + spin_unlock_irqrestore(<q_port->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t +lqasc_rx_int(int irq, void 
*_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + spin_lock_irqsave(<q_port->lock, flags); + __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); + lqasc_rx_chars(port); + spin_unlock_irqrestore(<q_port->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t lqasc_irq(int irq, void *p) +{ + unsigned long flags; + u32 stat; + struct uart_port *port = p; + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + spin_lock_irqsave(<q_port->lock, flags); + stat = readl(port->membase + LTQ_ASC_IRNCR); + spin_unlock_irqrestore(<q_port->lock, flags); + if (!(stat & ASC_IRNCR_MASK)) + return IRQ_NONE; + + if (stat & ASC_IRNCR_TIR) + lqasc_tx_int(irq, p); + + if (stat & ASC_IRNCR_RIR) + lqasc_rx_int(irq, p); + + if (stat & ASC_IRNCR_EIR) + lqasc_err_int(irq, p); + + return IRQ_HANDLED; +} + +static unsigned int +lqasc_tx_empty(struct uart_port *port) +{ + int status; + status = __raw_readl(port->membase + LTQ_ASC_FSTAT) & + ASCFSTAT_TXFFLMASK; + return status ? 0 : TIOCSER_TEMT; +} + +static unsigned int +lqasc_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; +} + +static void +lqasc_set_mctrl(struct uart_port *port, u_int mctrl) +{ +} + +static void +lqasc_break_ctl(struct uart_port *port, int break_state) +{ +} + +static int +lqasc_startup(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + int retval; + unsigned long flags; + + if (!IS_ERR(ltq_port->clk)) + clk_prepare_enable(ltq_port->clk); + port->uartclk = clk_get_rate(ltq_port->freqclk); + + spin_lock_irqsave(<q_port->lock, flags); + asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), + port->membase + LTQ_ASC_CLC); + + __raw_writel(0, port->membase + LTQ_ASC_PISEL); + __raw_writel( + ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | + ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, + port->membase + LTQ_ASC_TXFCON); + __raw_writel( + ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) + | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, + port->membase + LTQ_ASC_RXFCON); + /* make sure other settings are written to hardware before + * setting enable bits + */ + wmb(); + asc_update_bits(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | + ASCCON_ROEN, port->membase + LTQ_ASC_CON); + + spin_unlock_irqrestore(<q_port->lock, flags); + + retval = ltq_port->soc->request_irq(port); + if (retval) + return retval; + + __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, + port->membase + LTQ_ASC_IRNREN); + return retval; +} + +static void +lqasc_shutdown(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + unsigned long flags; + + ltq_port->soc->free_irq(port); + + spin_lock_irqsave(<q_port->lock, flags); + __raw_writel(0, port->membase + LTQ_ASC_CON); + asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, + port->membase + LTQ_ASC_RXFCON); + asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, + port->membase + LTQ_ASC_TXFCON); + spin_unlock_irqrestore(<q_port->lock, flags); + if (!IS_ERR(ltq_port->clk)) + clk_disable_unprepare(ltq_port->clk); +} + +static void +lqasc_set_termios(struct uart_port *port, struct ktermios *new, + const struct ktermios *old) +{ + unsigned int cflag; + unsigned int iflag; + unsigned int divisor; + unsigned int baud; + unsigned int con = 0; + unsigned long flags; + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + cflag = new->c_cflag; + iflag = new->c_iflag; + + switch (cflag & 
CSIZE) { + case CS7: + con = ASCCON_M_7ASYNC; + break; + + case CS5: + case CS6: + default: + new->c_cflag &= ~ CSIZE; + new->c_cflag |= CS8; + con = ASCCON_M_8ASYNC; + break; + } + + cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ + + if (cflag & CSTOPB) + con |= ASCCON_STP; + + if (cflag & PARENB) { + if (!(cflag & PARODD)) + con &= ~ASCCON_ODD; + else + con |= ASCCON_ODD; + } + + port->read_status_mask = ASCSTATE_ROE; + if (iflag & INPCK) + port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; + + port->ignore_status_mask = 0; + if (iflag & IGNPAR) + port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; + + if (iflag & IGNBRK) { + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (iflag & IGNPAR) + port->ignore_status_mask |= ASCSTATE_ROE; + } + + if ((cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_UER_RX; + + /* set error signals - framing, parity and overrun, enable receiver */ + con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; + + spin_lock_irqsave(<q_port->lock, flags); + + /* set up CON */ + asc_update_bits(0, con, port->membase + LTQ_ASC_CON); + + /* Set baud rate - take a divider of 2 into account */ + baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); + divisor = uart_get_divisor(port, baud); + divisor = divisor / 2 - 1; + + /* disable the baudrate generator */ + asc_update_bits(ASCCON_R, 0, port->membase + LTQ_ASC_CON); + + /* make sure the fractional divider is off */ + asc_update_bits(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); + + /* set up to use divisor of 2 */ + asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); + + /* now we can write the new baudrate into the register */ + __raw_writel(divisor, port->membase + LTQ_ASC_BG); + + /* turn the baudrate generator back on */ + asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON); + + /* enable rx */ + __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); + + spin_unlock_irqrestore(<q_port->lock, flags); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(new)) + tty_termios_encode_baud_rate(new, baud, baud); + + uart_update_timeout(port, cflag, baud); +} + +static const char* +lqasc_type(struct uart_port *port) +{ + if (port->type == PORT_LTQ_ASC) + return DRVNAME; + else + return NULL; +} + +static void +lqasc_release_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + + if (port->flags & UPF_IOREMAP) { + devm_iounmap(&pdev->dev, port->membase); + port->membase = NULL; + } +} + +static int +lqasc_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + int size; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "cannot obtain I/O memory region"); + return -ENODEV; + } + size = resource_size(res); + + res = devm_request_mem_region(&pdev->dev, res->start, + size, dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "cannot request I/O memory region"); + return -EBUSY; + } + + if (port->flags & UPF_IOREMAP) { + port->membase = devm_ioremap(&pdev->dev, + port->mapbase, size); + if (port->membase == NULL) + return -ENOMEM; + } + return 0; +} + +static void +lqasc_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_LTQ_ASC; + lqasc_request_port(port); + } +} + +static int +lqasc_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != 
PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +static const struct uart_ops lqasc_pops = { + .tx_empty = lqasc_tx_empty, + .set_mctrl = lqasc_set_mctrl, + .get_mctrl = lqasc_get_mctrl, + .stop_tx = lqasc_stop_tx, + .start_tx = lqasc_start_tx, + .stop_rx = lqasc_stop_rx, + .break_ctl = lqasc_break_ctl, + .startup = lqasc_startup, + .shutdown = lqasc_shutdown, + .set_termios = lqasc_set_termios, + .type = lqasc_type, + .release_port = lqasc_release_port, + .request_port = lqasc_request_port, + .config_port = lqasc_config_port, + .verify_port = lqasc_verify_port, +}; + +#ifdef CONFIG_SERIAL_LANTIQ_CONSOLE +static void +lqasc_console_putchar(struct uart_port *port, unsigned char ch) +{ + if (!port->membase) + return; + + while (!lqasc_tx_ready(port)) + ; + + writeb(ch, port->membase + LTQ_ASC_TBUF); +} + +static void lqasc_serial_port_write(struct uart_port *port, const char *s, + u_int count) +{ + uart_console_write(port, s, count, lqasc_console_putchar); +} + +static void +lqasc_console_write(struct console *co, const char *s, u_int count) +{ + struct ltq_uart_port *ltq_port; + unsigned long flags; + + if (co->index >= MAXPORTS) + return; + + ltq_port = lqasc_port[co->index]; + if (!ltq_port) + return; + + spin_lock_irqsave(<q_port->lock, flags); + lqasc_serial_port_write(<q_port->port, s, count); + spin_unlock_irqrestore(<q_port->lock, flags); +} + +static int __init +lqasc_console_setup(struct console *co, char *options) +{ + struct ltq_uart_port *ltq_port; + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= MAXPORTS) + return -ENODEV; + + ltq_port = lqasc_port[co->index]; + if (!ltq_port) + return -ENODEV; + + port = <q_port->port; + + if (!IS_ERR(ltq_port->clk)) + clk_prepare_enable(ltq_port->clk); + + port->uartclk = clk_get_rate(ltq_port->freqclk); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console lqasc_console = { + .name = "ttyLTQ", + .write = lqasc_console_write, + .device = uart_console_device, + .setup = lqasc_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &lqasc_reg, +}; + +static int __init +lqasc_console_init(void) +{ + register_console(&lqasc_console); + return 0; +} +console_initcall(lqasc_console_init); + +static void lqasc_serial_early_console_write(struct console *co, + const char *s, + u_int count) +{ + struct earlycon_device *dev = co->data; + + lqasc_serial_port_write(&dev->port, s, count); +} + +static int __init +lqasc_serial_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = lqasc_serial_early_console_write; + return 0; +} +OF_EARLYCON_DECLARE(lantiq, "lantiq,asc", lqasc_serial_early_console_setup); +OF_EARLYCON_DECLARE(lantiq, "intel,lgm-asc", lqasc_serial_early_console_setup); + +#define LANTIQ_SERIAL_CONSOLE (&lqasc_console) + +#else + +#define LANTIQ_SERIAL_CONSOLE NULL + +#endif /* CONFIG_SERIAL_LANTIQ_CONSOLE */ + +static struct uart_driver lqasc_reg = { + .owner = THIS_MODULE, + .driver_name = DRVNAME, + .dev_name = "ttyLTQ", + .major = 0, + .minor = 0, + .nr = MAXPORTS, + .cons = LANTIQ_SERIAL_CONSOLE, +}; + +static int fetch_irq_lantiq(struct device *dev, struct ltq_uart_port *ltq_port) +{ + struct uart_port *port = 
<q_port->port; + struct platform_device *pdev = to_platform_device(dev); + int irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + ltq_port->tx_irq = irq; + irq = platform_get_irq(pdev, 1); + if (irq < 0) + return irq; + ltq_port->rx_irq = irq; + irq = platform_get_irq(pdev, 2); + if (irq < 0) + return irq; + ltq_port->err_irq = irq; + + port->irq = ltq_port->tx_irq; + + return 0; +} + +static int request_irq_lantiq(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + int retval; + + retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, + 0, "asc_tx", port); + if (retval) { + dev_err(port->dev, "failed to request asc_tx\n"); + return retval; + } + + retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, + 0, "asc_rx", port); + if (retval) { + dev_err(port->dev, "failed to request asc_rx\n"); + goto err1; + } + + retval = request_irq(ltq_port->err_irq, lqasc_err_int, + 0, "asc_err", port); + if (retval) { + dev_err(port->dev, "failed to request asc_err\n"); + goto err2; + } + return 0; + +err2: + free_irq(ltq_port->rx_irq, port); +err1: + free_irq(ltq_port->tx_irq, port); + return retval; +} + +static void free_irq_lantiq(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + free_irq(ltq_port->tx_irq, port); + free_irq(ltq_port->rx_irq, port); + free_irq(ltq_port->err_irq, port); +} + +static int fetch_irq_intel(struct device *dev, struct ltq_uart_port *ltq_port) +{ + struct uart_port *port = <q_port->port; + int ret; + + ret = platform_get_irq(to_platform_device(dev), 0); + if (ret < 0) { + dev_err(dev, "failed to fetch IRQ for serial port\n"); + return ret; + } + ltq_port->common_irq = ret; + port->irq = ret; + + return 0; +} + +static int request_irq_intel(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + int retval; + + retval = request_irq(ltq_port->common_irq, lqasc_irq, 0, + "asc_irq", port); + if (retval) + dev_err(port->dev, "failed to request asc_irq\n"); + + return retval; +} + +static void free_irq_intel(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + + free_irq(ltq_port->common_irq, port); +} + +static int lqasc_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct ltq_uart_port *ltq_port; + struct uart_port *port; + struct resource *mmres; + int line; + int ret; + + mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mmres) { + dev_err(&pdev->dev, + "failed to get memory for serial port\n"); + return -ENODEV; + } + + ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port), + GFP_KERNEL); + if (!ltq_port) + return -ENOMEM; + + port = <q_port->port; + + ltq_port->soc = of_device_get_match_data(&pdev->dev); + ret = ltq_port->soc->fetch_irq(&pdev->dev, ltq_port); + if (ret) + return ret; + + /* get serial id */ + line = of_alias_get_id(node, "serial"); + if (line < 0) { + if (IS_ENABLED(CONFIG_LANTIQ)) { + if (mmres->start == CPHYSADDR(LTQ_EARLY_ASC)) + line = 0; + else + line = 1; + } else { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", + line); + return line; + } + } + + if (lqasc_port[line]) { + dev_err(&pdev->dev, "port %d already allocated\n", line); + return -EBUSY; + } + + port->iotype = SERIAL_IO_MEM; + port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; + port->ops = &lqasc_pops; + port->fifosize = 16; + port->type = PORT_LTQ_ASC; + port->line = line; + port->dev = &pdev->dev; + /* unused, just to be backward-compatible */ + port->mapbase 
= mmres->start; + + if (IS_ENABLED(CONFIG_LANTIQ) && !IS_ENABLED(CONFIG_COMMON_CLK)) + ltq_port->freqclk = clk_get_fpi(); + else + ltq_port->freqclk = devm_clk_get(&pdev->dev, "freq"); + + + if (IS_ERR(ltq_port->freqclk)) { + pr_err("failed to get fpi clk\n"); + return -ENOENT; + } + + /* not all asc ports have clock gates, lets ignore the return code */ + if (IS_ENABLED(CONFIG_LANTIQ) && !IS_ENABLED(CONFIG_COMMON_CLK)) + ltq_port->clk = clk_get(&pdev->dev, NULL); + else + ltq_port->clk = devm_clk_get(&pdev->dev, "asc"); + + spin_lock_init(<q_port->lock); + lqasc_port[line] = ltq_port; + platform_set_drvdata(pdev, ltq_port); + + ret = uart_add_one_port(&lqasc_reg, port); + + return ret; +} + +static int lqasc_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + + return uart_remove_one_port(&lqasc_reg, port); +} + +static const struct ltq_soc_data soc_data_lantiq = { + .fetch_irq = fetch_irq_lantiq, + .request_irq = request_irq_lantiq, + .free_irq = free_irq_lantiq, +}; + +static const struct ltq_soc_data soc_data_intel = { + .fetch_irq = fetch_irq_intel, + .request_irq = request_irq_intel, + .free_irq = free_irq_intel, +}; + +static const struct of_device_id ltq_asc_match[] = { + { .compatible = "lantiq,asc", .data = &soc_data_lantiq }, + { .compatible = "intel,lgm-asc", .data = &soc_data_intel }, + {}, +}; +MODULE_DEVICE_TABLE(of, ltq_asc_match); + +static struct platform_driver lqasc_driver = { + .probe = lqasc_probe, + .remove = lqasc_remove, + .driver = { + .name = DRVNAME, + .of_match_table = ltq_asc_match, + }, +}; + +static int __init +init_lqasc(void) +{ + int ret; + + ret = uart_register_driver(&lqasc_reg); + if (ret != 0) + return ret; + + ret = platform_driver_register(&lqasc_driver); + if (ret != 0) + uart_unregister_driver(&lqasc_reg); + + return ret; +} + +static void __exit exit_lqasc(void) +{ + platform_driver_unregister(&lqasc_driver); + uart_unregister_driver(&lqasc_reg); +} + +module_init(init_lqasc); +module_exit(exit_lqasc); + +MODULE_DESCRIPTION("Serial driver for Lantiq & Intel gateway SoCs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c new file mode 100644 index 000000000..4c0604325 --- /dev/null +++ b/drivers/tty/serial/liteuart.c @@ -0,0 +1,439 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * LiteUART serial controller (LiteX) Driver + * + * Copyright (C) 2019-2020 Antmicro + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * CSRs definitions (base address offsets + width) + * + * The definitions below are true for LiteX SoC configured for 8-bit CSR Bus, + * 32-bit aligned. + * + * Supporting other configurations might require new definitions or a more + * generic way of indexing the LiteX CSRs. 
+ * + * For more details on how CSRs are defined and handled in LiteX, see comments + * in the LiteX SoC Driver: drivers/soc/litex/litex_soc_ctrl.c + */ +#define OFF_RXTX 0x00 +#define OFF_TXFULL 0x04 +#define OFF_RXEMPTY 0x08 +#define OFF_EV_STATUS 0x0c +#define OFF_EV_PENDING 0x10 +#define OFF_EV_ENABLE 0x14 + +/* events */ +#define EV_TX 0x1 +#define EV_RX 0x2 + +struct liteuart_port { + struct uart_port port; + struct timer_list timer; + u32 id; +}; + +#define to_liteuart_port(port) container_of(port, struct liteuart_port, port) + +static DEFINE_XARRAY_FLAGS(liteuart_array, XA_FLAGS_ALLOC); + +#ifdef CONFIG_SERIAL_LITEUART_CONSOLE +static struct console liteuart_console; +#endif + +static struct uart_driver liteuart_driver = { + .owner = THIS_MODULE, + .driver_name = "liteuart", + .dev_name = "ttyLXU", + .major = 0, + .minor = 0, + .nr = CONFIG_SERIAL_LITEUART_MAX_PORTS, +#ifdef CONFIG_SERIAL_LITEUART_CONSOLE + .cons = &liteuart_console, +#endif +}; + +static void liteuart_timer(struct timer_list *t) +{ + struct liteuart_port *uart = from_timer(uart, t, timer); + struct uart_port *port = &uart->port; + unsigned char __iomem *membase = port->membase; + unsigned int flg = TTY_NORMAL; + int ch; + unsigned long status; + + while ((status = !litex_read8(membase + OFF_RXEMPTY)) == 1) { + ch = litex_read8(membase + OFF_RXTX); + port->icount.rx++; + + /* necessary for RXEMPTY to refresh its value */ + litex_write8(membase + OFF_EV_PENDING, EV_TX | EV_RX); + + /* no overflow bits in status */ + if (!(uart_handle_sysrq_char(port, ch))) + uart_insert_char(port, status, 0, ch, flg); + + tty_flip_buffer_push(&port->state->port); + } + + mod_timer(&uart->timer, jiffies + uart_poll_timeout(port)); +} + +static void liteuart_putchar(struct uart_port *port, unsigned char ch) +{ + while (litex_read8(port->membase + OFF_TXFULL)) + cpu_relax(); + + litex_write8(port->membase + OFF_RXTX, ch); +} + +static unsigned int liteuart_tx_empty(struct uart_port *port) +{ + /* not really tx empty, just checking if tx is not full */ + if (!litex_read8(port->membase + OFF_TXFULL)) + return TIOCSER_TEMT; + + return 0; +} + +static void liteuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* modem control register is not present in LiteUART */ +} + +static unsigned int liteuart_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +} + +static void liteuart_stop_tx(struct uart_port *port) +{ +} + +static void liteuart_start_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned char ch; + + if (unlikely(port->x_char)) { + litex_write8(port->membase + OFF_RXTX, port->x_char); + port->icount.tx++; + port->x_char = 0; + } else if (!uart_circ_empty(xmit)) { + while (xmit->head != xmit->tail) { + ch = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + liteuart_putchar(port, ch); + } + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void liteuart_stop_rx(struct uart_port *port) +{ + struct liteuart_port *uart = to_liteuart_port(port); + + /* just delete timer */ + del_timer(&uart->timer); +} + +static void liteuart_break_ctl(struct uart_port *port, int break_state) +{ + /* LiteUART doesn't support sending break signal */ +} + +static int liteuart_startup(struct uart_port *port) +{ + struct liteuart_port *uart = to_liteuart_port(port); + + /* disable events */ + litex_write8(port->membase + OFF_EV_ENABLE, 0); + + /* prepare timer for polling */ + 
timer_setup(&uart->timer, liteuart_timer, 0); + mod_timer(&uart->timer, jiffies + uart_poll_timeout(port)); + + return 0; +} + +static void liteuart_shutdown(struct uart_port *port) +{ +} + +static void liteuart_set_termios(struct uart_port *port, struct ktermios *new, + const struct ktermios *old) +{ + unsigned int baud; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* update baudrate */ + baud = uart_get_baud_rate(port, new, old, 0, 460800); + uart_update_timeout(port, new->c_cflag, baud); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *liteuart_type(struct uart_port *port) +{ + return "liteuart"; +} + +static void liteuart_release_port(struct uart_port *port) +{ +} + +static int liteuart_request_port(struct uart_port *port) +{ + return 0; +} + +static void liteuart_config_port(struct uart_port *port, int flags) +{ + /* + * Driver core for serial ports forces a non-zero value for port type. + * Write an arbitrary value here to accommodate the serial core driver, + * as ID part of UAPI is redundant. + */ + port->type = 1; +} + +static int liteuart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (port->type != PORT_UNKNOWN && ser->type != 1) + return -EINVAL; + + return 0; +} + +static const struct uart_ops liteuart_ops = { + .tx_empty = liteuart_tx_empty, + .set_mctrl = liteuart_set_mctrl, + .get_mctrl = liteuart_get_mctrl, + .stop_tx = liteuart_stop_tx, + .start_tx = liteuart_start_tx, + .stop_rx = liteuart_stop_rx, + .break_ctl = liteuart_break_ctl, + .startup = liteuart_startup, + .shutdown = liteuart_shutdown, + .set_termios = liteuart_set_termios, + .type = liteuart_type, + .release_port = liteuart_release_port, + .request_port = liteuart_request_port, + .config_port = liteuart_config_port, + .verify_port = liteuart_verify_port, +}; + +static int liteuart_probe(struct platform_device *pdev) +{ + struct liteuart_port *uart; + struct uart_port *port; + struct xa_limit limit; + int dev_id, ret; + + /* look for aliases; auto-enumerate for free index if not found */ + dev_id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (dev_id < 0) + limit = XA_LIMIT(0, CONFIG_SERIAL_LITEUART_MAX_PORTS); + else + limit = XA_LIMIT(dev_id, dev_id); + + uart = devm_kzalloc(&pdev->dev, sizeof(struct liteuart_port), GFP_KERNEL); + if (!uart) + return -ENOMEM; + + ret = xa_alloc(&liteuart_array, &dev_id, uart, limit, GFP_KERNEL); + if (ret) + return ret; + + uart->id = dev_id; + port = &uart->port; + + /* get membase */ + port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(port->membase)) { + ret = PTR_ERR(port->membase); + goto err_erase_id; + } + + /* values not from device tree */ + port->dev = &pdev->dev; + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF; + port->ops = &liteuart_ops; + port->regshift = 2; + port->fifosize = 16; + port->iobase = 1; + port->type = PORT_UNKNOWN; + port->line = dev_id; + spin_lock_init(&port->lock); + + platform_set_drvdata(pdev, port); + + ret = uart_add_one_port(&liteuart_driver, &uart->port); + if (ret) + goto err_erase_id; + + return 0; + +err_erase_id: + xa_erase(&liteuart_array, uart->id); + + return ret; +} + +static int liteuart_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + struct liteuart_port *uart = to_liteuart_port(port); + + uart_remove_one_port(&liteuart_driver, port); + xa_erase(&liteuart_array, uart->id); + + return 0; +} + +static const struct of_device_id liteuart_of_match[] = { + { 
.compatible = "litex,liteuart" }, + {} +}; +MODULE_DEVICE_TABLE(of, liteuart_of_match); + +static struct platform_driver liteuart_platform_driver = { + .probe = liteuart_probe, + .remove = liteuart_remove, + .driver = { + .name = "liteuart", + .of_match_table = liteuart_of_match, + }, +}; + +#ifdef CONFIG_SERIAL_LITEUART_CONSOLE + +static void liteuart_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct liteuart_port *uart; + struct uart_port *port; + unsigned long flags; + + uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index); + port = &uart->port; + + spin_lock_irqsave(&port->lock, flags); + uart_console_write(port, s, count, liteuart_putchar); + spin_unlock_irqrestore(&port->lock, flags); +} + +static int liteuart_console_setup(struct console *co, char *options) +{ + struct liteuart_port *uart; + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index); + if (!uart) + return -ENODEV; + + port = &uart->port; + if (!port->membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console liteuart_console = { + .name = "liteuart", + .write = liteuart_console_write, + .device = uart_console_device, + .setup = liteuart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &liteuart_driver, +}; + +static int __init liteuart_console_init(void) +{ + register_console(&liteuart_console); + + return 0; +} +console_initcall(liteuart_console_init); + +static void early_liteuart_write(struct console *console, const char *s, + unsigned int count) +{ + struct earlycon_device *device = console->data; + struct uart_port *port = &device->port; + + uart_console_write(port, s, count, liteuart_putchar); +} + +static int __init early_liteuart_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = early_liteuart_write; + return 0; +} + +OF_EARLYCON_DECLARE(liteuart, "litex,liteuart", early_liteuart_setup); +#endif /* CONFIG_SERIAL_LITEUART_CONSOLE */ + +static int __init liteuart_init(void) +{ + int res; + + res = uart_register_driver(&liteuart_driver); + if (res) + return res; + + res = platform_driver_register(&liteuart_platform_driver); + if (res) { + uart_unregister_driver(&liteuart_driver); + return res; + } + + return 0; +} + +static void __exit liteuart_exit(void) +{ + platform_driver_unregister(&liteuart_platform_driver); + uart_unregister_driver(&liteuart_driver); +} + +module_init(liteuart_init); +module_exit(liteuart_exit); + +MODULE_AUTHOR("Antmicro "); +MODULE_DESCRIPTION("LiteUART serial driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:liteuart"); diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c new file mode 100644 index 000000000..ed47f4768 --- /dev/null +++ b/drivers/tty/serial/lpc32xx_hs.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * High Speed Serial Ports on NXP LPC32xx SoC + * + * Authors: Kevin Wells + * Roland Stigge + * + * Copyright (C) 2010 NXP Semiconductors + * Copyright (C) 2012 Roland Stigge + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * High Speed UART register offsets + */ +#define LPC32XX_HSUART_FIFO(x) 
((x) + 0x00) +#define LPC32XX_HSUART_LEVEL(x) ((x) + 0x04) +#define LPC32XX_HSUART_IIR(x) ((x) + 0x08) +#define LPC32XX_HSUART_CTRL(x) ((x) + 0x0C) +#define LPC32XX_HSUART_RATE(x) ((x) + 0x10) + +#define LPC32XX_HSU_BREAK_DATA (1 << 10) +#define LPC32XX_HSU_ERROR_DATA (1 << 9) +#define LPC32XX_HSU_RX_EMPTY (1 << 8) + +#define LPC32XX_HSU_TX_LEV(n) (((n) >> 8) & 0xFF) +#define LPC32XX_HSU_RX_LEV(n) ((n) & 0xFF) + +#define LPC32XX_HSU_TX_INT_SET (1 << 6) +#define LPC32XX_HSU_RX_OE_INT (1 << 5) +#define LPC32XX_HSU_BRK_INT (1 << 4) +#define LPC32XX_HSU_FE_INT (1 << 3) +#define LPC32XX_HSU_RX_TIMEOUT_INT (1 << 2) +#define LPC32XX_HSU_RX_TRIG_INT (1 << 1) +#define LPC32XX_HSU_TX_INT (1 << 0) + +#define LPC32XX_HSU_HRTS_INV (1 << 21) +#define LPC32XX_HSU_HRTS_TRIG_8B (0x0 << 19) +#define LPC32XX_HSU_HRTS_TRIG_16B (0x1 << 19) +#define LPC32XX_HSU_HRTS_TRIG_32B (0x2 << 19) +#define LPC32XX_HSU_HRTS_TRIG_48B (0x3 << 19) +#define LPC32XX_HSU_HRTS_EN (1 << 18) +#define LPC32XX_HSU_TMO_DISABLED (0x0 << 16) +#define LPC32XX_HSU_TMO_INACT_4B (0x1 << 16) +#define LPC32XX_HSU_TMO_INACT_8B (0x2 << 16) +#define LPC32XX_HSU_TMO_INACT_16B (0x3 << 16) +#define LPC32XX_HSU_HCTS_INV (1 << 15) +#define LPC32XX_HSU_HCTS_EN (1 << 14) +#define LPC32XX_HSU_OFFSET(n) ((n) << 9) +#define LPC32XX_HSU_BREAK (1 << 8) +#define LPC32XX_HSU_ERR_INT_EN (1 << 7) +#define LPC32XX_HSU_RX_INT_EN (1 << 6) +#define LPC32XX_HSU_TX_INT_EN (1 << 5) +#define LPC32XX_HSU_RX_TL1B (0x0 << 2) +#define LPC32XX_HSU_RX_TL4B (0x1 << 2) +#define LPC32XX_HSU_RX_TL8B (0x2 << 2) +#define LPC32XX_HSU_RX_TL16B (0x3 << 2) +#define LPC32XX_HSU_RX_TL32B (0x4 << 2) +#define LPC32XX_HSU_RX_TL48B (0x5 << 2) +#define LPC32XX_HSU_TX_TLEMPTY (0x0 << 0) +#define LPC32XX_HSU_TX_TL0B (0x0 << 0) +#define LPC32XX_HSU_TX_TL4B (0x1 << 0) +#define LPC32XX_HSU_TX_TL8B (0x2 << 0) +#define LPC32XX_HSU_TX_TL16B (0x3 << 0) + +#define LPC32XX_MAIN_OSC_FREQ 13000000 + +#define MODNAME "lpc32xx_hsuart" + +struct lpc32xx_hsuart_port { + struct uart_port port; +}; + +#define FIFO_READ_LIMIT 128 +#define MAX_PORTS 3 +#define LPC32XX_TTY_NAME "ttyTX" +static struct lpc32xx_hsuart_port lpc32xx_hs_ports[MAX_PORTS]; + +#ifdef CONFIG_SERIAL_HS_LPC32XX_CONSOLE +static void wait_for_xmit_empty(struct uart_port *port) +{ + unsigned int timeout = 10000; + + do { + if (LPC32XX_HSU_TX_LEV(readl(LPC32XX_HSUART_LEVEL( + port->membase))) == 0) + break; + if (--timeout == 0) + break; + udelay(1); + } while (1); +} + +static void wait_for_xmit_ready(struct uart_port *port) +{ + unsigned int timeout = 10000; + + while (1) { + if (LPC32XX_HSU_TX_LEV(readl(LPC32XX_HSUART_LEVEL( + port->membase))) < 32) + break; + if (--timeout == 0) + break; + udelay(1); + } +} + +static void lpc32xx_hsuart_console_putchar(struct uart_port *port, unsigned char ch) +{ + wait_for_xmit_ready(port); + writel((u32)ch, LPC32XX_HSUART_FIFO(port->membase)); +} + +static void lpc32xx_hsuart_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct lpc32xx_hsuart_port *up = &lpc32xx_hs_ports[co->index]; + unsigned long flags; + int locked = 1; + + touch_nmi_watchdog(); + local_irq_save(flags); + if (up->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&up->port.lock); + else + spin_lock(&up->port.lock); + + uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar); + wait_for_xmit_empty(&up->port); + + if (locked) + spin_unlock(&up->port.lock); + local_irq_restore(flags); +} + +static int __init lpc32xx_hsuart_console_setup(struct console *co, + char 
*options) +{ + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= MAX_PORTS) + co->index = 0; + + port = &lpc32xx_hs_ports[co->index].port; + if (!port->membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */ + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver lpc32xx_hsuart_reg; +static struct console lpc32xx_hsuart_console = { + .name = LPC32XX_TTY_NAME, + .write = lpc32xx_hsuart_console_write, + .device = uart_console_device, + .setup = lpc32xx_hsuart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &lpc32xx_hsuart_reg, +}; + +static int __init lpc32xx_hsuart_console_init(void) +{ + register_console(&lpc32xx_hsuart_console); + return 0; +} +console_initcall(lpc32xx_hsuart_console_init); + +#define LPC32XX_HSUART_CONSOLE (&lpc32xx_hsuart_console) +#else +#define LPC32XX_HSUART_CONSOLE NULL +#endif + +static struct uart_driver lpc32xx_hs_reg = { + .owner = THIS_MODULE, + .driver_name = MODNAME, + .dev_name = LPC32XX_TTY_NAME, + .nr = MAX_PORTS, + .cons = LPC32XX_HSUART_CONSOLE, +}; +static int uarts_registered; + +static unsigned int __serial_get_clock_div(unsigned long uartclk, + unsigned long rate) +{ + u32 div, goodrate, hsu_rate, l_hsu_rate, comprate; + u32 rate_diff; + + /* Find the closest divider to get the desired clock rate */ + div = uartclk / rate; + goodrate = hsu_rate = (div / 14) - 1; + if (hsu_rate != 0) + hsu_rate--; + + /* Tweak divider */ + l_hsu_rate = hsu_rate + 3; + rate_diff = 0xFFFFFFFF; + + while (hsu_rate < l_hsu_rate) { + comprate = uartclk / ((hsu_rate + 1) * 14); + if (abs(comprate - rate) < rate_diff) { + goodrate = hsu_rate; + rate_diff = abs(comprate - rate); + } + + hsu_rate++; + } + if (hsu_rate > 0xFF) + hsu_rate = 0xFF; + + return goodrate; +} + +static void __serial_uart_flush(struct uart_port *port) +{ + int cnt = 0; + + while ((readl(LPC32XX_HSUART_LEVEL(port->membase)) > 0) && + (cnt++ < FIFO_READ_LIMIT)) + readl(LPC32XX_HSUART_FIFO(port->membase)); +} + +static void __serial_lpc32xx_rx(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + unsigned int tmp, flag; + + /* Read data from FIFO and push into terminal */ + tmp = readl(LPC32XX_HSUART_FIFO(port->membase)); + while (!(tmp & LPC32XX_HSU_RX_EMPTY)) { + flag = TTY_NORMAL; + port->icount.rx++; + + if (tmp & LPC32XX_HSU_ERROR_DATA) { + /* Framing error */ + writel(LPC32XX_HSU_FE_INT, + LPC32XX_HSUART_IIR(port->membase)); + port->icount.frame++; + flag = TTY_FRAME; + tty_insert_flip_char(tport, 0, TTY_FRAME); + } + + tty_insert_flip_char(tport, (tmp & 0xFF), flag); + + tmp = readl(LPC32XX_HSUART_FIFO(port->membase)); + } + + tty_flip_buffer_push(tport); +} + +static void serial_lpc32xx_stop_tx(struct uart_port *port); + +static bool serial_lpc32xx_tx_ready(struct uart_port *port) +{ + u32 level = readl(LPC32XX_HSUART_LEVEL(port->membase)); + + return LPC32XX_HSU_TX_LEV(level) < 64; +} + +static void __serial_lpc32xx_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + writel((u32)port->x_char, LPC32XX_HSUART_FIFO(port->membase)); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + goto exit_tx; + + /* Transfer data */ + while (serial_lpc32xx_tx_ready(port)) { + writel((u32) xmit->buf[xmit->tail], + 
LPC32XX_HSUART_FIFO(port->membase)); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + +exit_tx: + if (uart_circ_empty(xmit)) + serial_lpc32xx_stop_tx(port); +} + +static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct tty_port *tport = &port->state->port; + u32 status; + + spin_lock(&port->lock); + + /* Read UART status and clear latched interrupts */ + status = readl(LPC32XX_HSUART_IIR(port->membase)); + + if (status & LPC32XX_HSU_BRK_INT) { + /* Break received */ + writel(LPC32XX_HSU_BRK_INT, LPC32XX_HSUART_IIR(port->membase)); + port->icount.brk++; + uart_handle_break(port); + } + + /* Framing error */ + if (status & LPC32XX_HSU_FE_INT) + writel(LPC32XX_HSU_FE_INT, LPC32XX_HSUART_IIR(port->membase)); + + if (status & LPC32XX_HSU_RX_OE_INT) { + /* Receive FIFO overrun */ + writel(LPC32XX_HSU_RX_OE_INT, + LPC32XX_HSUART_IIR(port->membase)); + port->icount.overrun++; + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + tty_flip_buffer_push(tport); + } + + /* Data received? */ + if (status & (LPC32XX_HSU_RX_TIMEOUT_INT | LPC32XX_HSU_RX_TRIG_INT)) + __serial_lpc32xx_rx(port); + + /* Transmit data request? */ + if ((status & LPC32XX_HSU_TX_INT) && (!uart_tx_stopped(port))) { + writel(LPC32XX_HSU_TX_INT, LPC32XX_HSUART_IIR(port->membase)); + __serial_lpc32xx_tx(port); + } + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +/* port->lock is not held. */ +static unsigned int serial_lpc32xx_tx_empty(struct uart_port *port) +{ + unsigned int ret = 0; + + if (LPC32XX_HSU_TX_LEV(readl(LPC32XX_HSUART_LEVEL(port->membase))) == 0) + ret = TIOCSER_TEMT; + + return ret; +} + +/* port->lock held by caller. */ +static void serial_lpc32xx_set_mctrl(struct uart_port *port, + unsigned int mctrl) +{ + /* No signals are supported on HS UARTs */ +} + +/* port->lock is held by caller and interrupts are disabled. */ +static unsigned int serial_lpc32xx_get_mctrl(struct uart_port *port) +{ + /* No signals are supported on HS UARTs */ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +/* port->lock held by caller. */ +static void serial_lpc32xx_stop_tx(struct uart_port *port) +{ + u32 tmp; + + tmp = readl(LPC32XX_HSUART_CTRL(port->membase)); + tmp &= ~LPC32XX_HSU_TX_INT_EN; + writel(tmp, LPC32XX_HSUART_CTRL(port->membase)); +} + +/* port->lock held by caller. */ +static void serial_lpc32xx_start_tx(struct uart_port *port) +{ + u32 tmp; + + __serial_lpc32xx_tx(port); + tmp = readl(LPC32XX_HSUART_CTRL(port->membase)); + tmp |= LPC32XX_HSU_TX_INT_EN; + writel(tmp, LPC32XX_HSUART_CTRL(port->membase)); +} + +/* port->lock held by caller. */ +static void serial_lpc32xx_stop_rx(struct uart_port *port) +{ + u32 tmp; + + tmp = readl(LPC32XX_HSUART_CTRL(port->membase)); + tmp &= ~(LPC32XX_HSU_RX_INT_EN | LPC32XX_HSU_ERR_INT_EN); + writel(tmp, LPC32XX_HSUART_CTRL(port->membase)); + + writel((LPC32XX_HSU_BRK_INT | LPC32XX_HSU_RX_OE_INT | + LPC32XX_HSU_FE_INT), LPC32XX_HSUART_IIR(port->membase)); +} + +/* port->lock is not held. 
*/ +static void serial_lpc32xx_break_ctl(struct uart_port *port, + int break_state) +{ + unsigned long flags; + u32 tmp; + + spin_lock_irqsave(&port->lock, flags); + tmp = readl(LPC32XX_HSUART_CTRL(port->membase)); + if (break_state != 0) + tmp |= LPC32XX_HSU_BREAK; + else + tmp &= ~LPC32XX_HSU_BREAK; + writel(tmp, LPC32XX_HSUART_CTRL(port->membase)); + spin_unlock_irqrestore(&port->lock, flags); +} + +/* port->lock is not held. */ +static int serial_lpc32xx_startup(struct uart_port *port) +{ + int retval; + unsigned long flags; + u32 tmp; + + spin_lock_irqsave(&port->lock, flags); + + __serial_uart_flush(port); + + writel((LPC32XX_HSU_TX_INT | LPC32XX_HSU_FE_INT | + LPC32XX_HSU_BRK_INT | LPC32XX_HSU_RX_OE_INT), + LPC32XX_HSUART_IIR(port->membase)); + + writel(0xFF, LPC32XX_HSUART_RATE(port->membase)); + + /* + * Set receiver timeout, HSU offset of 20, no break, no interrupts, + * and default FIFO trigger levels + */ + tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B | + LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B; + writel(tmp, LPC32XX_HSUART_CTRL(port->membase)); + + lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */ + + spin_unlock_irqrestore(&port->lock, flags); + + retval = request_irq(port->irq, serial_lpc32xx_interrupt, + 0, MODNAME, port); + if (!retval) + writel((tmp | LPC32XX_HSU_RX_INT_EN | LPC32XX_HSU_ERR_INT_EN), + LPC32XX_HSUART_CTRL(port->membase)); + + return retval; +} + +/* port->lock is not held. */ +static void serial_lpc32xx_shutdown(struct uart_port *port) +{ + u32 tmp; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B | + LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B; + writel(tmp, LPC32XX_HSUART_CTRL(port->membase)); + + lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */ + + spin_unlock_irqrestore(&port->lock, flags); + + free_irq(port->irq, port); +} + +/* port->lock is not held. */ +static void serial_lpc32xx_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + unsigned int baud, quot; + u32 tmp; + + /* Always 8-bit, no parity, 1 stop bit */ + termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD); + termios->c_cflag |= CS8; + + termios->c_cflag &= ~(HUPCL | CMSPAR | CLOCAL | CRTSCTS); + + baud = uart_get_baud_rate(port, termios, old, 0, + port->uartclk / 14); + + quot = __serial_get_clock_div(port->uartclk, baud); + + spin_lock_irqsave(&port->lock, flags); + + /* Ignore characters? 
*/ + tmp = readl(LPC32XX_HSUART_CTRL(port->membase)); + if ((termios->c_cflag & CREAD) == 0) + tmp &= ~(LPC32XX_HSU_RX_INT_EN | LPC32XX_HSU_ERR_INT_EN); + else + tmp |= LPC32XX_HSU_RX_INT_EN | LPC32XX_HSU_ERR_INT_EN; + writel(tmp, LPC32XX_HSUART_CTRL(port->membase)); + + writel(quot, LPC32XX_HSUART_RATE(port->membase)); + + uart_update_timeout(port, termios->c_cflag, baud); + + spin_unlock_irqrestore(&port->lock, flags); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static const char *serial_lpc32xx_type(struct uart_port *port) +{ + return MODNAME; +} + +static void serial_lpc32xx_release_port(struct uart_port *port) +{ + if ((port->iotype == UPIO_MEM32) && (port->mapbase)) { + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } + + release_mem_region(port->mapbase, SZ_4K); + } +} + +static int serial_lpc32xx_request_port(struct uart_port *port) +{ + int ret = -ENODEV; + + if ((port->iotype == UPIO_MEM32) && (port->mapbase)) { + ret = 0; + + if (!request_mem_region(port->mapbase, SZ_4K, MODNAME)) + ret = -EBUSY; + else if (port->flags & UPF_IOREMAP) { + port->membase = ioremap(port->mapbase, SZ_4K); + if (!port->membase) { + release_mem_region(port->mapbase, SZ_4K); + ret = -ENOMEM; + } + } + } + + return ret; +} + +static void serial_lpc32xx_config_port(struct uart_port *port, int uflags) +{ + int ret; + + ret = serial_lpc32xx_request_port(port); + if (ret < 0) + return; + port->type = PORT_UART00; + port->fifosize = 64; + + __serial_uart_flush(port); + + writel((LPC32XX_HSU_TX_INT | LPC32XX_HSU_FE_INT | + LPC32XX_HSU_BRK_INT | LPC32XX_HSU_RX_OE_INT), + LPC32XX_HSUART_IIR(port->membase)); + + writel(0xFF, LPC32XX_HSUART_RATE(port->membase)); + + /* Set receiver timeout, HSU offset of 20, no break, no interrupts, + and default FIFO trigger levels */ + writel(LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B | + LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B, + LPC32XX_HSUART_CTRL(port->membase)); +} + +static int serial_lpc32xx_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UART00) + ret = -EINVAL; + + return ret; +} + +static const struct uart_ops serial_lpc32xx_pops = { + .tx_empty = serial_lpc32xx_tx_empty, + .set_mctrl = serial_lpc32xx_set_mctrl, + .get_mctrl = serial_lpc32xx_get_mctrl, + .stop_tx = serial_lpc32xx_stop_tx, + .start_tx = serial_lpc32xx_start_tx, + .stop_rx = serial_lpc32xx_stop_rx, + .break_ctl = serial_lpc32xx_break_ctl, + .startup = serial_lpc32xx_startup, + .shutdown = serial_lpc32xx_shutdown, + .set_termios = serial_lpc32xx_set_termios, + .type = serial_lpc32xx_type, + .release_port = serial_lpc32xx_release_port, + .request_port = serial_lpc32xx_request_port, + .config_port = serial_lpc32xx_config_port, + .verify_port = serial_lpc32xx_verify_port, +}; + +/* + * Register a set of serial devices attached to a platform device + */ +static int serial_hs_lpc32xx_probe(struct platform_device *pdev) +{ + struct lpc32xx_hsuart_port *p = &lpc32xx_hs_ports[uarts_registered]; + int ret = 0; + struct resource *res; + + if (uarts_registered >= MAX_PORTS) { + dev_err(&pdev->dev, + "Error: Number of possible ports exceeded (%d)!\n", + uarts_registered + 1); + return -ENXIO; + } + + memset(p, 0, sizeof(*p)); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, + "Error getting mem resource for HS UART port %d\n", + uarts_registered); + return -ENXIO; + } + p->port.mapbase = 
res->start; + p->port.membase = NULL; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + p->port.irq = ret; + + p->port.iotype = UPIO_MEM32; + p->port.uartclk = LPC32XX_MAIN_OSC_FREQ; + p->port.regshift = 2; + p->port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP; + p->port.dev = &pdev->dev; + p->port.ops = &serial_lpc32xx_pops; + p->port.line = uarts_registered++; + spin_lock_init(&p->port.lock); + + /* send port to loopback mode by default */ + lpc32xx_loopback_set(p->port.mapbase, 1); + + ret = uart_add_one_port(&lpc32xx_hs_reg, &p->port); + + platform_set_drvdata(pdev, p); + + return ret; +} + +/* + * Remove serial ports registered against a platform device. + */ +static int serial_hs_lpc32xx_remove(struct platform_device *pdev) +{ + struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev); + + uart_remove_one_port(&lpc32xx_hs_reg, &p->port); + + return 0; +} + + +#ifdef CONFIG_PM +static int serial_hs_lpc32xx_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev); + + uart_suspend_port(&lpc32xx_hs_reg, &p->port); + + return 0; +} + +static int serial_hs_lpc32xx_resume(struct platform_device *pdev) +{ + struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev); + + uart_resume_port(&lpc32xx_hs_reg, &p->port); + + return 0; +} +#else +#define serial_hs_lpc32xx_suspend NULL +#define serial_hs_lpc32xx_resume NULL +#endif + +static const struct of_device_id serial_hs_lpc32xx_dt_ids[] = { + { .compatible = "nxp,lpc3220-hsuart" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, serial_hs_lpc32xx_dt_ids); + +static struct platform_driver serial_hs_lpc32xx_driver = { + .probe = serial_hs_lpc32xx_probe, + .remove = serial_hs_lpc32xx_remove, + .suspend = serial_hs_lpc32xx_suspend, + .resume = serial_hs_lpc32xx_resume, + .driver = { + .name = MODNAME, + .of_match_table = serial_hs_lpc32xx_dt_ids, + }, +}; + +static int __init lpc32xx_hsuart_init(void) +{ + int ret; + + ret = uart_register_driver(&lpc32xx_hs_reg); + if (ret) + return ret; + + ret = platform_driver_register(&serial_hs_lpc32xx_driver); + if (ret) + uart_unregister_driver(&lpc32xx_hs_reg); + + return ret; +} + +static void __exit lpc32xx_hsuart_exit(void) +{ + platform_driver_unregister(&serial_hs_lpc32xx_driver); + uart_unregister_driver(&lpc32xx_hs_reg); +} + +module_init(lpc32xx_hsuart_init); +module_exit(lpc32xx_hsuart_exit); + +MODULE_AUTHOR("Kevin Wells "); +MODULE_AUTHOR("Roland Stigge "); +MODULE_DESCRIPTION("NXP LPC32XX High Speed UART driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c new file mode 100644 index 000000000..c69602f35 --- /dev/null +++ b/drivers/tty/serial/max3100.c @@ -0,0 +1,904 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * + * Copyright (C) 2008 Christian Pellegrin + * + * Notes: the MAX3100 doesn't provide an interrupt on CTS so we have + * to use polling for flow control. TX empty IRQ is unusable, since + * writing conf clears FIFO buffer and we cannot have this interrupt + * always asking us for attention. 
+ * + * Example platform data: + + static struct plat_max3100 max3100_plat_data = { + .loopback = 0, + .crystal = 0, + .poll_time = 100, + }; + + static struct spi_board_info spi_board_info[] = { + { + .modalias = "max3100", + .platform_data = &max3100_plat_data, + .irq = IRQ_EINT12, + .max_speed_hz = 5*1000*1000, + .chip_select = 0, + }, + }; + + * The initial minor number is 209 in the low-density serial port: + * mknod /dev/ttyMAX0 c 204 209 + */ + +#define MAX3100_MAJOR 204 +#define MAX3100_MINOR 209 +/* 4 MAX3100s should be enough for everyone */ +#define MAX_MAX3100 4 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define MAX3100_C (1<<14) +#define MAX3100_D (0<<14) +#define MAX3100_W (1<<15) +#define MAX3100_RX (0<<15) + +#define MAX3100_WC (MAX3100_W | MAX3100_C) +#define MAX3100_RC (MAX3100_RX | MAX3100_C) +#define MAX3100_WD (MAX3100_W | MAX3100_D) +#define MAX3100_RD (MAX3100_RX | MAX3100_D) +#define MAX3100_CMD (3 << 14) + +#define MAX3100_T (1<<14) +#define MAX3100_R (1<<15) + +#define MAX3100_FEN (1<<13) +#define MAX3100_SHDN (1<<12) +#define MAX3100_TM (1<<11) +#define MAX3100_RM (1<<10) +#define MAX3100_PM (1<<9) +#define MAX3100_RAM (1<<8) +#define MAX3100_IR (1<<7) +#define MAX3100_ST (1<<6) +#define MAX3100_PE (1<<5) +#define MAX3100_L (1<<4) +#define MAX3100_BAUD (0xf) + +#define MAX3100_TE (1<<10) +#define MAX3100_RAFE (1<<10) +#define MAX3100_RTS (1<<9) +#define MAX3100_CTS (1<<9) +#define MAX3100_PT (1<<8) +#define MAX3100_DATA (0xff) + +#define MAX3100_RT (MAX3100_R | MAX3100_T) +#define MAX3100_RTC (MAX3100_RT | MAX3100_CTS | MAX3100_RAFE) + +/* the following simulate a status reg for ignore_status_mask */ +#define MAX3100_STATUS_PE 1 +#define MAX3100_STATUS_FE 2 +#define MAX3100_STATUS_OE 4 + +struct max3100_port { + struct uart_port port; + struct spi_device *spi; + + int cts; /* last CTS received for flow ctrl */ + int tx_empty; /* last TX empty bit */ + + spinlock_t conf_lock; /* shared data */ + int conf_commit; /* need to make changes */ + int conf; /* configuration for the MAX31000 + * (bits 0-7, bits 8-11 are irqs) */ + int rts_commit; /* need to change rts */ + int rts; /* rts status */ + int baud; /* current baud rate */ + + int parity; /* keeps track if we should send parity */ +#define MAX3100_PARITY_ON 1 +#define MAX3100_PARITY_ODD 2 +#define MAX3100_7BIT 4 + int rx_enabled; /* if we should rx chars */ + + int irq; /* irq assigned to the max3100 */ + + int minor; /* minor number */ + int crystal; /* 1 if 3.6864Mhz crystal 0 for 1.8432 */ + int loopback; /* 1 if we are in loopback mode */ + + /* for handling irqs: need workqueue since we do spi_sync */ + struct workqueue_struct *workqueue; + struct work_struct work; + /* set to 1 to make the workhandler exit as soon as possible */ + int force_end_work; + /* need to know we are suspending to avoid deadlock on workqueue */ + int suspending; + + /* hook for suspending MAX3100 via dedicated pin */ + void (*max3100_hw_suspend) (int suspend); + + /* poll time (in ms) for ctrl lines */ + int poll_time; + /* and its timer */ + struct timer_list timer; +}; + +static struct max3100_port *max3100s[MAX_MAX3100]; /* the chips */ +static DEFINE_MUTEX(max3100s_lock); /* race on probe */ + +static int max3100_do_parity(struct max3100_port *s, u16 c) +{ + int parity; + + if (s->parity & MAX3100_PARITY_ODD) + parity = 1; + else + parity = 0; + + if (s->parity & MAX3100_7BIT) + c &= 0x7f; + else + c &= 0xff; + + parity = parity ^ (hweight8(c) & 1); + 
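/* (editorial note, not in the original) parity now holds the bit value
 * that yields the configured odd/even parity across the data bits */
+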
return parity; +} + +static int max3100_check_parity(struct max3100_port *s, u16 c) +{ + return max3100_do_parity(s, c) == ((c >> 8) & 1); +} + +static void max3100_calc_parity(struct max3100_port *s, u16 *c) +{ + if (s->parity & MAX3100_7BIT) + *c &= 0x7f; + else + *c &= 0xff; + + if (s->parity & MAX3100_PARITY_ON) + *c |= max3100_do_parity(s, *c) << 8; +} + +static void max3100_work(struct work_struct *w); + +static void max3100_dowork(struct max3100_port *s) +{ + if (!s->force_end_work && !freezing(current) && !s->suspending) + queue_work(s->workqueue, &s->work); +} + +static void max3100_timeout(struct timer_list *t) +{ + struct max3100_port *s = from_timer(s, t, timer); + + if (s->port.state) { + max3100_dowork(s); + mod_timer(&s->timer, jiffies + s->poll_time); + } +} + +static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) +{ + struct spi_message message; + u16 etx, erx; + int status; + struct spi_transfer tran = { + .tx_buf = &etx, + .rx_buf = &erx, + .len = 2, + }; + + etx = cpu_to_be16(tx); + spi_message_init(&message); + spi_message_add_tail(&tran, &message); + status = spi_sync(s->spi, &message); + if (status) { + dev_warn(&s->spi->dev, "error while calling spi_sync\n"); + return -EIO; + } + *rx = be16_to_cpu(erx); + s->tx_empty = (*rx & MAX3100_T) > 0; + dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx); + return 0; +} + +static int max3100_handlerx(struct max3100_port *s, u16 rx) +{ + unsigned int ch, flg, status = 0; + int ret = 0, cts; + + if (rx & MAX3100_R && s->rx_enabled) { + dev_dbg(&s->spi->dev, "%s\n", __func__); + ch = rx & (s->parity & MAX3100_7BIT ? 0x7f : 0xff); + if (rx & MAX3100_RAFE) { + s->port.icount.frame++; + flg = TTY_FRAME; + status |= MAX3100_STATUS_FE; + } else { + if (s->parity & MAX3100_PARITY_ON) { + if (max3100_check_parity(s, rx)) { + s->port.icount.rx++; + flg = TTY_NORMAL; + } else { + s->port.icount.parity++; + flg = TTY_PARITY; + status |= MAX3100_STATUS_PE; + } + } else { + s->port.icount.rx++; + flg = TTY_NORMAL; + } + } + uart_insert_char(&s->port, status, MAX3100_STATUS_OE, ch, flg); + ret = 1; + } + + cts = (rx & MAX3100_CTS) > 0; + if (s->cts != cts) { + s->cts = cts; + uart_handle_cts_change(&s->port, cts ? TIOCM_CTS : 0); + } + + return ret; +} + +static void max3100_work(struct work_struct *w) +{ + struct max3100_port *s = container_of(w, struct max3100_port, work); + int rxchars; + u16 tx, rx; + int conf, cconf, crts; + struct circ_buf *xmit = &s->port.state->xmit; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + rxchars = 0; + do { + spin_lock(&s->conf_lock); + conf = s->conf; + cconf = s->conf_commit; + s->conf_commit = 0; + crts = s->rts_commit; + s->rts_commit = 0; + spin_unlock(&s->conf_lock); + if (cconf) + max3100_sr(s, MAX3100_WC | conf, &rx); + if (crts) { + max3100_sr(s, MAX3100_WD | MAX3100_TE | + (s->rts ? MAX3100_RTS : 0), &rx); + rxchars += max3100_handlerx(s, rx); + } + + max3100_sr(s, MAX3100_RD, &rx); + rxchars += max3100_handlerx(s, rx); + + if (rx & MAX3100_T) { + tx = 0xffff; + if (s->port.x_char) { + tx = s->port.x_char; + s->port.icount.tx++; + s->port.x_char = 0; + } else if (!uart_circ_empty(xmit) && + !uart_tx_stopped(&s->port)) { + tx = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & + (UART_XMIT_SIZE - 1); + s->port.icount.tx++; + } + if (tx != 0xffff) { + max3100_calc_parity(s, &tx); + tx |= MAX3100_WD | (s->rts ? 
MAX3100_RTS : 0); + max3100_sr(s, tx, &rx); + rxchars += max3100_handlerx(s, rx); + } + } + + if (rxchars > 16) { + tty_flip_buffer_push(&s->port.state->port); + rxchars = 0; + } + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&s->port); + + } while (!s->force_end_work && + !freezing(current) && + ((rx & MAX3100_R) || + (!uart_circ_empty(xmit) && + !uart_tx_stopped(&s->port)))); + + if (rxchars > 0) + tty_flip_buffer_push(&s->port.state->port); +} + +static irqreturn_t max3100_irq(int irqno, void *dev_id) +{ + struct max3100_port *s = dev_id; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + max3100_dowork(s); + return IRQ_HANDLED; +} + +static void max3100_enable_ms(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + if (s->poll_time > 0) + mod_timer(&s->timer, jiffies); + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static void max3100_start_tx(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + max3100_dowork(s); +} + +static void max3100_stop_rx(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + s->rx_enabled = 0; + spin_lock(&s->conf_lock); + s->conf &= ~MAX3100_RM; + s->conf_commit = 1; + spin_unlock(&s->conf_lock); + max3100_dowork(s); +} + +static unsigned int max3100_tx_empty(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + /* may not be truly up-to-date */ + max3100_dowork(s); + return s->tx_empty; +} + +static unsigned int max3100_get_mctrl(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + /* may not be truly up-to-date */ + max3100_dowork(s); + /* always assert DCD and DSR since these lines are not wired */ + return (s->cts ? 
TIOCM_CTS : 0) | TIOCM_DSR | TIOCM_CAR; +} + +static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + int rts; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + rts = (mctrl & TIOCM_RTS) > 0; + + spin_lock(&s->conf_lock); + if (s->rts != rts) { + s->rts = rts; + s->rts_commit = 1; + max3100_dowork(s); + } + spin_unlock(&s->conf_lock); +} + +static void +max3100_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + int baud = 0; + unsigned cflag; + u32 param_new, param_mask, parity = 0; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + cflag = termios->c_cflag; + param_mask = 0; + + baud = tty_termios_baud_rate(termios); + param_new = s->conf & MAX3100_BAUD; + switch (baud) { + case 300: + if (s->crystal) + baud = s->baud; + else + param_new = 15; + break; + case 600: + param_new = 14 + s->crystal; + break; + case 1200: + param_new = 13 + s->crystal; + break; + case 2400: + param_new = 12 + s->crystal; + break; + case 4800: + param_new = 11 + s->crystal; + break; + case 9600: + param_new = 10 + s->crystal; + break; + case 19200: + param_new = 9 + s->crystal; + break; + case 38400: + param_new = 8 + s->crystal; + break; + case 57600: + param_new = 1 + s->crystal; + break; + case 115200: + param_new = 0 + s->crystal; + break; + case 230400: + if (s->crystal) + param_new = 0; + else + baud = s->baud; + break; + default: + baud = s->baud; + } + tty_termios_encode_baud_rate(termios, baud, baud); + s->baud = baud; + param_mask |= MAX3100_BAUD; + + if ((cflag & CSIZE) == CS8) { + param_new &= ~MAX3100_L; + parity &= ~MAX3100_7BIT; + } else { + param_new |= MAX3100_L; + parity |= MAX3100_7BIT; + cflag = (cflag & ~CSIZE) | CS7; + } + param_mask |= MAX3100_L; + + if (cflag & CSTOPB) + param_new |= MAX3100_ST; + else + param_new &= ~MAX3100_ST; + param_mask |= MAX3100_ST; + + if (cflag & PARENB) { + param_new |= MAX3100_PE; + parity |= MAX3100_PARITY_ON; + } else { + param_new &= ~MAX3100_PE; + parity &= ~MAX3100_PARITY_ON; + } + param_mask |= MAX3100_PE; + + if (cflag & PARODD) + parity |= MAX3100_PARITY_ODD; + else + parity &= ~MAX3100_PARITY_ODD; + + /* mask termios capabilities we don't support */ + cflag &= ~CMSPAR; + termios->c_cflag = cflag; + + s->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + s->port.ignore_status_mask |= + MAX3100_STATUS_PE | MAX3100_STATUS_FE | + MAX3100_STATUS_OE; + + if (s->poll_time > 0) + del_timer_sync(&s->timer); + + uart_update_timeout(port, termios->c_cflag, baud); + + spin_lock(&s->conf_lock); + s->conf = (s->conf & ~param_mask) | (param_new & param_mask); + s->conf_commit = 1; + s->parity = parity; + spin_unlock(&s->conf_lock); + max3100_dowork(s); + + if (UART_ENABLE_MS(&s->port, termios->c_cflag)) + max3100_enable_ms(&s->port); +} + +static void max3100_shutdown(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (s->suspending) + return; + + s->force_end_work = 1; + + if (s->poll_time > 0) + del_timer_sync(&s->timer); + + if (s->workqueue) { + destroy_workqueue(s->workqueue); + s->workqueue = NULL; + } + if (s->irq) + free_irq(s->irq, s); + + /* set shutdown mode to save power */ + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(1); + else { + u16 tx, rx; + + tx = MAX3100_WC | MAX3100_SHDN; + max3100_sr(s, tx, &rx); + } +} 
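Editorial aside (not part of the patch): the shutdown/startup paths here, like the rest of the driver, talk to the chip only through the 16-bit SPI words exchanged by max3100_sr(). The sketch below restates the relevant bit positions locally and uses made-up helper names (max3100_frame_tx, max3100_parse_rx) to show how a write-data word is composed and how the word clocked back during the same transfer is interpreted, mirroring what max3100_work() does; in the real driver max3100_calc_parity() additionally folds the parity bit into bit 8 before the word is sent.

#include <stdbool.h>
#include <stdint.h>

/* Same bit positions as the MAX3100_* defines in the driver above. */
#define EX_WD	(1u << 15)	/* write-data command (MAX3100_WD)    */
#define EX_RTS	(1u << 9)	/* RTS level to drive (MAX3100_RTS)   */
#define EX_R	(1u << 15)	/* reply: RX data valid (MAX3100_R)   */
#define EX_T	(1u << 14)	/* reply: TX buffer empty (MAX3100_T) */
#define EX_CTS	(1u << 9)	/* reply: CTS pin state (MAX3100_CTS) */

/* Build the MOSI word for one character, as max3100_work() does. */
static uint16_t max3100_frame_tx(uint8_t ch, bool rts)
{
	return EX_WD | (rts ? EX_RTS : 0) | ch;
}

/* Decode the MISO word returned during the same 16-bit transfer. */
static void max3100_parse_rx(uint16_t rx, bool *rx_valid, bool *tx_empty,
			     bool *cts, uint8_t *ch)
{
	*rx_valid = rx & EX_R;
	*tx_empty = rx & EX_T;
	*cts = rx & EX_CTS;
	*ch = rx & 0xff;
}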
+ +static int max3100_startup(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + char b[12]; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + s->conf = MAX3100_RM; + s->baud = s->crystal ? 230400 : 115200; + s->rx_enabled = 1; + + if (s->suspending) + return 0; + + s->force_end_work = 0; + s->parity = 0; + s->rts = 0; + + sprintf(b, "max3100-%d", s->minor); + s->workqueue = create_freezable_workqueue(b); + if (!s->workqueue) { + dev_warn(&s->spi->dev, "cannot create workqueue\n"); + return -EBUSY; + } + INIT_WORK(&s->work, max3100_work); + + if (request_irq(s->irq, max3100_irq, + IRQF_TRIGGER_FALLING, "max3100", s) < 0) { + dev_warn(&s->spi->dev, "cannot allocate irq %d\n", s->irq); + s->irq = 0; + destroy_workqueue(s->workqueue); + s->workqueue = NULL; + return -EBUSY; + } + + if (s->loopback) { + u16 tx, rx; + tx = 0x4001; + max3100_sr(s, tx, &rx); + } + + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(0); + s->conf_commit = 1; + max3100_dowork(s); + /* wait for clock to settle */ + msleep(50); + + max3100_enable_ms(&s->port); + + return 0; +} + +static const char *max3100_type(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + return s->port.type == PORT_MAX3100 ? "MAX3100" : NULL; +} + +static void max3100_release_port(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static void max3100_config_port(struct uart_port *port, int flags) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (flags & UART_CONFIG_TYPE) + s->port.type = PORT_MAX3100; +} + +static int max3100_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + int ret = -EINVAL; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100) + ret = 0; + return ret; +} + +static void max3100_stop_tx(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static int max3100_request_port(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + return 0; +} + +static void max3100_break_ctl(struct uart_port *port, int break_state) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static const struct uart_ops max3100_ops = { + .tx_empty = max3100_tx_empty, + .set_mctrl = max3100_set_mctrl, + .get_mctrl = max3100_get_mctrl, + .stop_tx = max3100_stop_tx, + .start_tx = max3100_start_tx, + .stop_rx = max3100_stop_rx, + .enable_ms = max3100_enable_ms, + .break_ctl = max3100_break_ctl, + .startup = max3100_startup, + .shutdown = max3100_shutdown, + .set_termios = max3100_set_termios, + .type = max3100_type, + .release_port = max3100_release_port, + .request_port = max3100_request_port, + .config_port = max3100_config_port, + .verify_port = max3100_verify_port, +}; + +static struct uart_driver max3100_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "ttyMAX", + .dev_name = "ttyMAX", + .major = MAX3100_MAJOR, + .minor = MAX3100_MINOR, + .nr = MAX_MAX3100, +}; +static int 
uart_driver_registered; + +static int max3100_probe(struct spi_device *spi) +{ + int i, retval; + struct plat_max3100 *pdata; + u16 tx, rx; + + mutex_lock(&max3100s_lock); + + if (!uart_driver_registered) { + uart_driver_registered = 1; + retval = uart_register_driver(&max3100_uart_driver); + if (retval) { + printk(KERN_ERR "Couldn't register max3100 uart driver\n"); + mutex_unlock(&max3100s_lock); + return retval; + } + } + + for (i = 0; i < MAX_MAX3100; i++) + if (!max3100s[i]) + break; + if (i == MAX_MAX3100) { + dev_warn(&spi->dev, "too many MAX3100 chips\n"); + mutex_unlock(&max3100s_lock); + return -ENOMEM; + } + + max3100s[i] = kzalloc(sizeof(struct max3100_port), GFP_KERNEL); + if (!max3100s[i]) { + dev_warn(&spi->dev, + "kmalloc for max3100 structure %d failed!\n", i); + mutex_unlock(&max3100s_lock); + return -ENOMEM; + } + max3100s[i]->spi = spi; + max3100s[i]->irq = spi->irq; + spin_lock_init(&max3100s[i]->conf_lock); + spi_set_drvdata(spi, max3100s[i]); + pdata = dev_get_platdata(&spi->dev); + max3100s[i]->crystal = pdata->crystal; + max3100s[i]->loopback = pdata->loopback; + max3100s[i]->poll_time = msecs_to_jiffies(pdata->poll_time); + if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0) + max3100s[i]->poll_time = 1; + max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend; + max3100s[i]->minor = i; + timer_setup(&max3100s[i]->timer, max3100_timeout, 0); + + dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i); + max3100s[i]->port.irq = max3100s[i]->irq; + max3100s[i]->port.uartclk = max3100s[i]->crystal ? 3686400 : 1843200; + max3100s[i]->port.fifosize = 16; + max3100s[i]->port.ops = &max3100_ops; + max3100s[i]->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; + max3100s[i]->port.line = i; + max3100s[i]->port.type = PORT_MAX3100; + max3100s[i]->port.dev = &spi->dev; + retval = uart_add_one_port(&max3100_uart_driver, &max3100s[i]->port); + if (retval < 0) + dev_warn(&spi->dev, + "uart_add_one_port failed for line %d with error %d\n", + i, retval); + + /* set shutdown mode to save power. 
Will be woken-up on open */ + if (max3100s[i]->max3100_hw_suspend) + max3100s[i]->max3100_hw_suspend(1); + else { + tx = MAX3100_WC | MAX3100_SHDN; + max3100_sr(max3100s[i], tx, &rx); + } + mutex_unlock(&max3100s_lock); + return 0; +} + +static void max3100_remove(struct spi_device *spi) +{ + struct max3100_port *s = spi_get_drvdata(spi); + int i; + + mutex_lock(&max3100s_lock); + + /* find out the index for the chip we are removing */ + for (i = 0; i < MAX_MAX3100; i++) + if (max3100s[i] == s) { + dev_dbg(&spi->dev, "%s: removing port %d\n", __func__, i); + uart_remove_one_port(&max3100_uart_driver, &max3100s[i]->port); + kfree(max3100s[i]); + max3100s[i] = NULL; + break; + } + + WARN_ON(i == MAX_MAX3100); + + /* check if this is the last chip we have */ + for (i = 0; i < MAX_MAX3100; i++) + if (max3100s[i]) { + mutex_unlock(&max3100s_lock); + return; + } + pr_debug("removing max3100 driver\n"); + uart_unregister_driver(&max3100_uart_driver); + + mutex_unlock(&max3100s_lock); +} + +#ifdef CONFIG_PM_SLEEP + +static int max3100_suspend(struct device *dev) +{ + struct max3100_port *s = dev_get_drvdata(dev); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + disable_irq(s->irq); + + s->suspending = 1; + uart_suspend_port(&max3100_uart_driver, &s->port); + + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(1); + else { + /* no HW suspend, so do SW one */ + u16 tx, rx; + + tx = MAX3100_WC | MAX3100_SHDN; + max3100_sr(s, tx, &rx); + } + return 0; +} + +static int max3100_resume(struct device *dev) +{ + struct max3100_port *s = dev_get_drvdata(dev); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(0); + uart_resume_port(&max3100_uart_driver, &s->port); + s->suspending = 0; + + enable_irq(s->irq); + + s->conf_commit = 1; + if (s->workqueue) + max3100_dowork(s); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(max3100_pm_ops, max3100_suspend, max3100_resume); +#define MAX3100_PM_OPS (&max3100_pm_ops) + +#else +#define MAX3100_PM_OPS NULL +#endif + +static struct spi_driver max3100_driver = { + .driver = { + .name = "max3100", + .pm = MAX3100_PM_OPS, + }, + .probe = max3100_probe, + .remove = max3100_remove, +}; + +module_spi_driver(max3100_driver); + +MODULE_DESCRIPTION("MAX3100 driver"); +MODULE_AUTHOR("Christian Pellegrin "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:max3100"); diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c new file mode 100644 index 000000000..338cb19de --- /dev/null +++ b/drivers/tty/serial/max310x.c @@ -0,0 +1,1700 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver + * + * Copyright (C) 2012-2016 Alexander Shiyan + * + * Based on max3100.c, by Christian Pellegrin + * Based on max3110.c, by Feng Tang + * Based on max3107.c, by Aavamobile + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX310X_NAME "max310x" +#define MAX310X_MAJOR 204 +#define MAX310X_MINOR 209 +#define MAX310X_UART_NRMAX 16 + +/* MAX310X register definitions */ +#define MAX310X_RHR_REG (0x00) /* RX FIFO */ +#define MAX310X_THR_REG (0x00) /* TX FIFO */ +#define MAX310X_IRQEN_REG (0x01) /* IRQ enable */ +#define MAX310X_IRQSTS_REG (0x02) /* IRQ status */ +#define MAX310X_LSR_IRQEN_REG (0x03) /* LSR IRQ enable */ +#define MAX310X_LSR_IRQSTS_REG (0x04) /* LSR IRQ status */ +#define MAX310X_REG_05 (0x05) +#define MAX310X_SPCHR_IRQEN_REG MAX310X_REG_05 /* Special char 
IRQ en */ +#define MAX310X_SPCHR_IRQSTS_REG (0x06) /* Special char IRQ status */ +#define MAX310X_STS_IRQEN_REG (0x07) /* Status IRQ enable */ +#define MAX310X_STS_IRQSTS_REG (0x08) /* Status IRQ status */ +#define MAX310X_MODE1_REG (0x09) /* MODE1 */ +#define MAX310X_MODE2_REG (0x0a) /* MODE2 */ +#define MAX310X_LCR_REG (0x0b) /* LCR */ +#define MAX310X_RXTO_REG (0x0c) /* RX timeout */ +#define MAX310X_HDPIXDELAY_REG (0x0d) /* Auto transceiver delays */ +#define MAX310X_IRDA_REG (0x0e) /* IRDA settings */ +#define MAX310X_FLOWLVL_REG (0x0f) /* Flow control levels */ +#define MAX310X_FIFOTRIGLVL_REG (0x10) /* FIFO IRQ trigger levels */ +#define MAX310X_TXFIFOLVL_REG (0x11) /* TX FIFO level */ +#define MAX310X_RXFIFOLVL_REG (0x12) /* RX FIFO level */ +#define MAX310X_FLOWCTRL_REG (0x13) /* Flow control */ +#define MAX310X_XON1_REG (0x14) /* XON1 character */ +#define MAX310X_XON2_REG (0x15) /* XON2 character */ +#define MAX310X_XOFF1_REG (0x16) /* XOFF1 character */ +#define MAX310X_XOFF2_REG (0x17) /* XOFF2 character */ +#define MAX310X_GPIOCFG_REG (0x18) /* GPIO config */ +#define MAX310X_GPIODATA_REG (0x19) /* GPIO data */ +#define MAX310X_PLLCFG_REG (0x1a) /* PLL config */ +#define MAX310X_BRGCFG_REG (0x1b) /* Baud rate generator conf */ +#define MAX310X_BRGDIVLSB_REG (0x1c) /* Baud rate divisor LSB */ +#define MAX310X_BRGDIVMSB_REG (0x1d) /* Baud rate divisor MSB */ +#define MAX310X_CLKSRC_REG (0x1e) /* Clock source */ +#define MAX310X_REG_1F (0x1f) + +#define MAX310X_REVID_REG MAX310X_REG_1F /* Revision ID */ + +#define MAX310X_GLOBALIRQ_REG MAX310X_REG_1F /* Global IRQ (RO) */ +#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */ + +/* Extended registers */ +#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */ +#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */ + +/* IRQ register bits */ +#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */ +#define MAX310X_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */ +#define MAX310X_IRQ_STS_BIT (1 << 2) /* Status interrupt */ +#define MAX310X_IRQ_RXFIFO_BIT (1 << 3) /* RX FIFO interrupt */ +#define MAX310X_IRQ_TXFIFO_BIT (1 << 4) /* TX FIFO interrupt */ +#define MAX310X_IRQ_TXEMPTY_BIT (1 << 5) /* TX FIFO empty interrupt */ +#define MAX310X_IRQ_RXEMPTY_BIT (1 << 6) /* RX FIFO empty interrupt */ +#define MAX310X_IRQ_CTS_BIT (1 << 7) /* CTS interrupt */ + +/* LSR register bits */ +#define MAX310X_LSR_RXTO_BIT (1 << 0) /* RX timeout */ +#define MAX310X_LSR_RXOVR_BIT (1 << 1) /* RX overrun */ +#define MAX310X_LSR_RXPAR_BIT (1 << 2) /* RX parity error */ +#define MAX310X_LSR_FRERR_BIT (1 << 3) /* Frame error */ +#define MAX310X_LSR_RXBRK_BIT (1 << 4) /* RX break */ +#define MAX310X_LSR_RXNOISE_BIT (1 << 5) /* RX noise */ +#define MAX310X_LSR_CTS_BIT (1 << 7) /* CTS pin state */ + +/* Special character register bits */ +#define MAX310X_SPCHR_XON1_BIT (1 << 0) /* XON1 character */ +#define MAX310X_SPCHR_XON2_BIT (1 << 1) /* XON2 character */ +#define MAX310X_SPCHR_XOFF1_BIT (1 << 2) /* XOFF1 character */ +#define MAX310X_SPCHR_XOFF2_BIT (1 << 3) /* XOFF2 character */ +#define MAX310X_SPCHR_BREAK_BIT (1 << 4) /* RX break */ +#define MAX310X_SPCHR_MULTIDROP_BIT (1 << 5) /* 9-bit multidrop addr char */ + +/* Status register bits */ +#define MAX310X_STS_GPIO0_BIT (1 << 0) /* GPIO 0 interrupt */ +#define MAX310X_STS_GPIO1_BIT (1 << 1) /* GPIO 1 interrupt */ +#define MAX310X_STS_GPIO2_BIT (1 << 2) /* GPIO 2 interrupt */ +#define MAX310X_STS_GPIO3_BIT (1 << 3) /* GPIO 3 interrupt */ +#define 
MAX310X_STS_CLKREADY_BIT (1 << 5) /* Clock ready */ +#define MAX310X_STS_SLEEP_BIT (1 << 6) /* Sleep interrupt */ + +/* MODE1 register bits */ +#define MAX310X_MODE1_RXDIS_BIT (1 << 0) /* RX disable */ +#define MAX310X_MODE1_TXDIS_BIT (1 << 1) /* TX disable */ +#define MAX310X_MODE1_TXHIZ_BIT (1 << 2) /* TX pin three-state */ +#define MAX310X_MODE1_RTSHIZ_BIT (1 << 3) /* RTS pin three-state */ +#define MAX310X_MODE1_TRNSCVCTRL_BIT (1 << 4) /* Transceiver ctrl enable */ +#define MAX310X_MODE1_FORCESLEEP_BIT (1 << 5) /* Force sleep mode */ +#define MAX310X_MODE1_AUTOSLEEP_BIT (1 << 6) /* Auto sleep enable */ +#define MAX310X_MODE1_IRQSEL_BIT (1 << 7) /* IRQ pin enable */ + +/* MODE2 register bits */ +#define MAX310X_MODE2_RST_BIT (1 << 0) /* Chip reset */ +#define MAX310X_MODE2_FIFORST_BIT (1 << 1) /* FIFO reset */ +#define MAX310X_MODE2_RXTRIGINV_BIT (1 << 2) /* RX FIFO INT invert */ +#define MAX310X_MODE2_RXEMPTINV_BIT (1 << 3) /* RX FIFO empty INT invert */ +#define MAX310X_MODE2_SPCHR_BIT (1 << 4) /* Special chr detect enable */ +#define MAX310X_MODE2_LOOPBACK_BIT (1 << 5) /* Internal loopback enable */ +#define MAX310X_MODE2_MULTIDROP_BIT (1 << 6) /* 9-bit multidrop enable */ +#define MAX310X_MODE2_ECHOSUPR_BIT (1 << 7) /* ECHO suppression enable */ + +/* LCR register bits */ +#define MAX310X_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */ +#define MAX310X_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1 + * + * Word length bits table: + * 00 -> 5 bit words + * 01 -> 6 bit words + * 10 -> 7 bit words + * 11 -> 8 bit words + */ +#define MAX310X_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit + * + * STOP length bit table: + * 0 -> 1 stop bit + * 1 -> 1-1.5 stop bits if + * word length is 5, + * 2 stop bits otherwise + */ +#define MAX310X_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */ +#define MAX310X_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */ +#define MAX310X_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */ +#define MAX310X_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */ +#define MAX310X_LCR_RTS_BIT (1 << 7) /* RTS pin control */ + +/* IRDA register bits */ +#define MAX310X_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */ +#define MAX310X_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */ + +/* Flow control trigger level register masks */ +#define MAX310X_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */ +#define MAX310X_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */ +#define MAX310X_FLOWLVL_HALT(words) ((words / 8) & 0x0f) +#define MAX310X_FLOWLVL_RES(words) (((words / 8) & 0x0f) << 4) + +/* FIFO interrupt trigger level register masks */ +#define MAX310X_FIFOTRIGLVL_TX_MASK (0x0f) /* TX FIFO trigger level */ +#define MAX310X_FIFOTRIGLVL_RX_MASK (0xf0) /* RX FIFO trigger level */ +#define MAX310X_FIFOTRIGLVL_TX(words) ((words / 8) & 0x0f) +#define MAX310X_FIFOTRIGLVL_RX(words) (((words / 8) & 0x0f) << 4) + +/* Flow control register bits */ +#define MAX310X_FLOWCTRL_AUTORTS_BIT (1 << 0) /* Auto RTS flow ctrl enable */ +#define MAX310X_FLOWCTRL_AUTOCTS_BIT (1 << 1) /* Auto CTS flow ctrl enable */ +#define MAX310X_FLOWCTRL_GPIADDR_BIT (1 << 2) /* Enables that GPIO inputs + * are used in conjunction with + * XOFF2 for definition of + * special character */ +#define MAX310X_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */ +#define MAX310X_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */ +#define MAX310X_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1 + * + * SWFLOW bits 1 & 0 table: + * 00 -> no transmitter flow + * control + * 01 -> receiver compares + * 
XON2 and XOFF2 + * and controls + * transmitter + * 10 -> receiver compares + * XON1 and XOFF1 + * and controls + * transmitter + * 11 -> receiver compares + * XON1, XON2, XOFF1 and + * XOFF2 and controls + * transmitter + */ +#define MAX310X_FLOWCTRL_SWFLOW2_BIT (1 << 6) /* SWFLOW bit 2 */ +#define MAX310X_FLOWCTRL_SWFLOW3_BIT (1 << 7) /* SWFLOW bit 3 + * + * SWFLOW bits 3 & 2 table: + * 00 -> no received flow + * control + * 01 -> transmitter generates + * XON2 and XOFF2 + * 10 -> transmitter generates + * XON1 and XOFF1 + * 11 -> transmitter generates + * XON1, XON2, XOFF1 and + * XOFF2 + */ + +/* PLL configuration register masks */ +#define MAX310X_PLLCFG_PREDIV_MASK (0x3f) /* PLL predivision value */ +#define MAX310X_PLLCFG_PLLFACTOR_MASK (0xc0) /* PLL multiplication factor */ + +/* Baud rate generator configuration register bits */ +#define MAX310X_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */ +#define MAX310X_BRGCFG_4XMODE_BIT (1 << 5) /* Quadruple baud rate */ + +/* Clock source register bits */ +#define MAX310X_CLKSRC_CRYST_BIT (1 << 1) /* Crystal osc enable */ +#define MAX310X_CLKSRC_PLL_BIT (1 << 2) /* PLL enable */ +#define MAX310X_CLKSRC_PLLBYP_BIT (1 << 3) /* PLL bypass */ +#define MAX310X_CLKSRC_EXTCLK_BIT (1 << 4) /* External clock enable */ +#define MAX310X_CLKSRC_CLK2RTS_BIT (1 << 7) /* Baud clk to RTS pin */ + +/* Global commands */ +#define MAX310X_EXTREG_ENBL (0xce) +#define MAX310X_EXTREG_DSBL (0xcd) + +/* Misc definitions */ +#define MAX310X_FIFO_SIZE (128) +#define MAX310x_REV_MASK (0xf8) +#define MAX310X_WRITE_BIT 0x80 + +/* MAX3107 specific */ +#define MAX3107_REV_ID (0xa0) + +/* MAX3109 specific */ +#define MAX3109_REV_ID (0xc0) + +/* MAX14830 specific */ +#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */ +#define MAX14830_REV_ID (0xb0) + +struct max310x_if_cfg { + int (*extended_reg_enable)(struct device *dev, bool enable); + + unsigned int rev_id_reg; +}; + +struct max310x_devtype { + struct { + unsigned short min; + unsigned short max; + } slave_addr; + char name[9]; + int nr; + u8 mode1; + int (*detect)(struct device *); + void (*power)(struct uart_port *, int); +}; + +struct max310x_one { + struct uart_port port; + struct work_struct tx_work; + struct work_struct md_work; + struct work_struct rs_work; + struct regmap *regmap; + + u8 rx_buf[MAX310X_FIFO_SIZE]; +}; +#define to_max310x_port(_port) \ + container_of(_port, struct max310x_one, port) + +struct max310x_port { + const struct max310x_devtype *devtype; + const struct max310x_if_cfg *if_cfg; + struct regmap *regmap; + struct clk *clk; +#ifdef CONFIG_GPIOLIB + struct gpio_chip gpio; +#endif + struct max310x_one p[]; +}; + +static struct uart_driver max310x_uart = { + .owner = THIS_MODULE, + .driver_name = MAX310X_NAME, + .dev_name = "ttyMAX", + .major = MAX310X_MAJOR, + .minor = MAX310X_MINOR, + .nr = MAX310X_UART_NRMAX, +}; + +static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX); + +static u8 max310x_port_read(struct uart_port *port, u8 reg) +{ + struct max310x_one *one = to_max310x_port(port); + unsigned int val = 0; + + regmap_read(one->regmap, reg, &val); + + return val; +} + +static void max310x_port_write(struct uart_port *port, u8 reg, u8 val) +{ + struct max310x_one *one = to_max310x_port(port); + + regmap_write(one->regmap, reg, val); +} + +static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val) +{ + struct max310x_one *one = to_max310x_port(port); + + regmap_update_bits(one->regmap, reg, mask, val); +} + +static int max3107_detect(struct device 
*dev) +{ + struct max310x_port *s = dev_get_drvdata(dev); + unsigned int val = 0; + int ret; + + ret = regmap_read(s->regmap, MAX310X_REVID_REG, &val); + if (ret) + return ret; + + if (((val & MAX310x_REV_MASK) != MAX3107_REV_ID)) { + dev_err(dev, + "%s ID 0x%02x does not match\n", s->devtype->name, val); + return -ENODEV; + } + + return 0; +} + +static int max3108_detect(struct device *dev) +{ + struct max310x_port *s = dev_get_drvdata(dev); + unsigned int val = 0; + int ret; + + /* MAX3108 have not REV ID register, we just check default value + * from clocksource register to make sure everything works. + */ + ret = regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val); + if (ret) + return ret; + + if (val != (MAX310X_CLKSRC_EXTCLK_BIT | MAX310X_CLKSRC_PLLBYP_BIT)) { + dev_err(dev, "%s not present\n", s->devtype->name); + return -ENODEV; + } + + return 0; +} + +static int max3109_detect(struct device *dev) +{ + struct max310x_port *s = dev_get_drvdata(dev); + unsigned int val = 0; + int ret; + + ret = s->if_cfg->extended_reg_enable(dev, true); + if (ret) + return ret; + + regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val); + s->if_cfg->extended_reg_enable(dev, false); + if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) { + dev_err(dev, + "%s ID 0x%02x does not match\n", s->devtype->name, val); + return -ENODEV; + } + + return 0; +} + +static void max310x_power(struct uart_port *port, int on) +{ + max310x_port_update(port, MAX310X_MODE1_REG, + MAX310X_MODE1_FORCESLEEP_BIT, + on ? 0 : MAX310X_MODE1_FORCESLEEP_BIT); + if (on) + msleep(50); +} + +static int max14830_detect(struct device *dev) +{ + struct max310x_port *s = dev_get_drvdata(dev); + unsigned int val = 0; + int ret; + + ret = s->if_cfg->extended_reg_enable(dev, true); + if (ret) + return ret; + + regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val); + s->if_cfg->extended_reg_enable(dev, false); + if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) { + dev_err(dev, + "%s ID 0x%02x does not match\n", s->devtype->name, val); + return -ENODEV; + } + + return 0; +} + +static void max14830_power(struct uart_port *port, int on) +{ + max310x_port_update(port, MAX310X_BRGCFG_REG, + MAX14830_BRGCFG_CLKDIS_BIT, + on ? 
0 : MAX14830_BRGCFG_CLKDIS_BIT); + if (on) + msleep(50); +} + +static const struct max310x_devtype max3107_devtype = { + .name = "MAX3107", + .nr = 1, + .mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT, + .detect = max3107_detect, + .power = max310x_power, + .slave_addr = { + .min = 0x2c, + .max = 0x2f, + }, +}; + +static const struct max310x_devtype max3108_devtype = { + .name = "MAX3108", + .nr = 1, + .mode1 = MAX310X_MODE1_AUTOSLEEP_BIT, + .detect = max3108_detect, + .power = max310x_power, + .slave_addr = { + .min = 0x60, + .max = 0x6f, + }, +}; + +static const struct max310x_devtype max3109_devtype = { + .name = "MAX3109", + .nr = 2, + .mode1 = MAX310X_MODE1_AUTOSLEEP_BIT, + .detect = max3109_detect, + .power = max310x_power, + .slave_addr = { + .min = 0x60, + .max = 0x6f, + }, +}; + +static const struct max310x_devtype max14830_devtype = { + .name = "MAX14830", + .nr = 4, + .mode1 = MAX310X_MODE1_IRQSEL_BIT, + .detect = max14830_detect, + .power = max14830_power, + .slave_addr = { + .min = 0x60, + .max = 0x6f, + }, +}; + +static bool max310x_reg_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case MAX310X_IRQSTS_REG: + case MAX310X_LSR_IRQSTS_REG: + case MAX310X_SPCHR_IRQSTS_REG: + case MAX310X_STS_IRQSTS_REG: + case MAX310X_TXFIFOLVL_REG: + case MAX310X_RXFIFOLVL_REG: + return false; + default: + break; + } + + return true; +} + +static bool max310x_reg_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case MAX310X_RHR_REG: + case MAX310X_IRQSTS_REG: + case MAX310X_LSR_IRQSTS_REG: + case MAX310X_SPCHR_IRQSTS_REG: + case MAX310X_STS_IRQSTS_REG: + case MAX310X_TXFIFOLVL_REG: + case MAX310X_RXFIFOLVL_REG: + case MAX310X_GPIODATA_REG: + case MAX310X_BRGDIVLSB_REG: + case MAX310X_REG_05: + case MAX310X_REG_1F: + return true; + default: + break; + } + + return false; +} + +static bool max310x_reg_precious(struct device *dev, unsigned int reg) +{ + switch (reg) { + case MAX310X_RHR_REG: + case MAX310X_IRQSTS_REG: + case MAX310X_SPCHR_IRQSTS_REG: + case MAX310X_STS_IRQSTS_REG: + return true; + default: + break; + } + + return false; +} + +static bool max310x_reg_noinc(struct device *dev, unsigned int reg) +{ + return reg == MAX310X_RHR_REG; +} + +static int max310x_set_baud(struct uart_port *port, int baud) +{ + unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0; + + /* + * Calculate the integer divisor first. Select a proper mode + * in case if the requested baud is too high for the pre-defined + * clocks frequency. 
+ */ + div = port->uartclk / baud; + if (div < 8) { + /* Mode x4 */ + c = 4; + mode = MAX310X_BRGCFG_4XMODE_BIT; + } else if (div < 16) { + /* Mode x2 */ + c = 8; + mode = MAX310X_BRGCFG_2XMODE_BIT; + } else { + c = 16; + } + + /* Calculate the divisor in accordance with the fraction coefficient */ + div /= c; + F = c*baud; + + /* Calculate the baud rate fraction */ + if (div > 0) + frac = (16*(port->uartclk % F)) / F; + else + div = 1; + + max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8); + max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div); + max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode); + + /* Return the actual baud rate we just programmed */ + return (16*port->uartclk) / (c*(16*div + frac)); +} + +static int max310x_update_best_err(unsigned long f, long *besterr) +{ + /* Use baudrate 115200 for calculate error */ + long err = f % (460800 * 16); + + if ((*besterr < 0) || (*besterr > err)) { + *besterr = err; + return 0; + } + + return 1; +} + +static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s, + unsigned long freq, bool xtal) +{ + unsigned int div, clksrc, pllcfg = 0; + long besterr = -1; + unsigned long fdiv, fmul, bestfreq = freq; + + /* First, update error without PLL */ + max310x_update_best_err(freq, &besterr); + + /* Try all possible PLL dividers */ + for (div = 1; (div <= 63) && besterr; div++) { + fdiv = DIV_ROUND_CLOSEST(freq, div); + + /* Try multiplier 6 */ + fmul = fdiv * 6; + if ((fdiv >= 500000) && (fdiv <= 800000)) + if (!max310x_update_best_err(fmul, &besterr)) { + pllcfg = (0 << 6) | div; + bestfreq = fmul; + } + /* Try multiplier 48 */ + fmul = fdiv * 48; + if ((fdiv >= 850000) && (fdiv <= 1200000)) + if (!max310x_update_best_err(fmul, &besterr)) { + pllcfg = (1 << 6) | div; + bestfreq = fmul; + } + /* Try multiplier 96 */ + fmul = fdiv * 96; + if ((fdiv >= 425000) && (fdiv <= 1000000)) + if (!max310x_update_best_err(fmul, &besterr)) { + pllcfg = (2 << 6) | div; + bestfreq = fmul; + } + /* Try multiplier 144 */ + fmul = fdiv * 144; + if ((fdiv >= 390000) && (fdiv <= 667000)) + if (!max310x_update_best_err(fmul, &besterr)) { + pllcfg = (3 << 6) | div; + bestfreq = fmul; + } + } + + /* Configure clock source */ + clksrc = MAX310X_CLKSRC_EXTCLK_BIT | (xtal ? MAX310X_CLKSRC_CRYST_BIT : 0); + + /* Configure PLL */ + if (pllcfg) { + clksrc |= MAX310X_CLKSRC_PLL_BIT; + regmap_write(s->regmap, MAX310X_PLLCFG_REG, pllcfg); + } else + clksrc |= MAX310X_CLKSRC_PLLBYP_BIT; + + regmap_write(s->regmap, MAX310X_CLKSRC_REG, clksrc); + + /* Wait for crystal */ + if (xtal) { + unsigned int val; + msleep(10); + regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val); + if (!(val & MAX310X_STS_CLKREADY_BIT)) { + dev_warn(dev, "clock is not stable yet\n"); + } + } + + return bestfreq; +} + +static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len) +{ + struct max310x_one *one = to_max310x_port(port); + + regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len); +} + +static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len) +{ + struct max310x_one *one = to_max310x_port(port); + + regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len); +} + +static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen) +{ + struct max310x_one *one = to_max310x_port(port); + unsigned int sts, ch, flag, i; + + if (port->read_status_mask == MAX310X_LSR_RXOVR_BIT) { + /* We are just reading, happily ignoring any error conditions. 
+ * Break condition, parity checking, framing errors -- they + * are all ignored. That means that we can do a batch-read. + * + * There is a small opportunity for race if the RX FIFO + * overruns while we're reading the buffer; the datasheets says + * that the LSR register applies to the "current" character. + * That's also the reason why we cannot do batched reads when + * asked to check the individual statuses. + * */ + + sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG); + max310x_batch_read(port, one->rx_buf, rxlen); + + port->icount.rx += rxlen; + flag = TTY_NORMAL; + sts &= port->read_status_mask; + + if (sts & MAX310X_LSR_RXOVR_BIT) { + dev_warn_ratelimited(port->dev, "Hardware RX FIFO overrun\n"); + port->icount.overrun++; + } + + for (i = 0; i < (rxlen - 1); ++i) + uart_insert_char(port, sts, 0, one->rx_buf[i], flag); + + /* + * Handle the overrun case for the last character only, since + * the RxFIFO overflow happens after it is pushed to the FIFO + * tail. + */ + uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, + one->rx_buf[rxlen-1], flag); + + } else { + if (unlikely(rxlen >= port->fifosize)) { + dev_warn_ratelimited(port->dev, "Possible RX FIFO overrun\n"); + port->icount.buf_overrun++; + /* Ensure sanity of RX level */ + rxlen = port->fifosize; + } + + while (rxlen--) { + ch = max310x_port_read(port, MAX310X_RHR_REG); + sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG); + + sts &= MAX310X_LSR_RXPAR_BIT | MAX310X_LSR_FRERR_BIT | + MAX310X_LSR_RXOVR_BIT | MAX310X_LSR_RXBRK_BIT; + + port->icount.rx++; + flag = TTY_NORMAL; + + if (unlikely(sts)) { + if (sts & MAX310X_LSR_RXBRK_BIT) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } else if (sts & MAX310X_LSR_RXPAR_BIT) + port->icount.parity++; + else if (sts & MAX310X_LSR_FRERR_BIT) + port->icount.frame++; + else if (sts & MAX310X_LSR_RXOVR_BIT) + port->icount.overrun++; + + sts &= port->read_status_mask; + if (sts & MAX310X_LSR_RXBRK_BIT) + flag = TTY_BREAK; + else if (sts & MAX310X_LSR_RXPAR_BIT) + flag = TTY_PARITY; + else if (sts & MAX310X_LSR_FRERR_BIT) + flag = TTY_FRAME; + else if (sts & MAX310X_LSR_RXOVR_BIT) + flag = TTY_OVERRUN; + } + + if (uart_handle_sysrq_char(port, ch)) + continue; + + if (sts & port->ignore_status_mask) + continue; + + uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, ch, flag); + } + } + + tty_flip_buffer_push(&port->state->port); +} + +static void max310x_handle_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned int txlen, to_send, until_end; + + if (unlikely(port->x_char)) { + max310x_port_write(port, MAX310X_THR_REG, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + return; + + /* Get length of data pending in circular buffer */ + to_send = uart_circ_chars_pending(xmit); + until_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + if (likely(to_send)) { + /* Limit to size of TX FIFO */ + txlen = max310x_port_read(port, MAX310X_TXFIFOLVL_REG); + txlen = port->fifosize - txlen; + to_send = (to_send > txlen) ? txlen : to_send; + + if (until_end < to_send) { + /* It's a circ buffer -- wrap around. + * We could do that in one SPI transaction, but meh. 
*/ + max310x_batch_write(port, xmit->buf + xmit->tail, until_end); + max310x_batch_write(port, xmit->buf, to_send - until_end); + } else { + max310x_batch_write(port, xmit->buf + xmit->tail, to_send); + } + + /* Add data to send */ + port->icount.tx += to_send; + xmit->tail = (xmit->tail + to_send) & (UART_XMIT_SIZE - 1); + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void max310x_start_tx(struct uart_port *port) +{ + struct max310x_one *one = to_max310x_port(port); + + schedule_work(&one->tx_work); +} + +static irqreturn_t max310x_port_irq(struct max310x_port *s, int portno) +{ + struct uart_port *port = &s->p[portno].port; + irqreturn_t res = IRQ_NONE; + + do { + unsigned int ists, lsr, rxlen; + + /* Read IRQ status & RX FIFO level */ + ists = max310x_port_read(port, MAX310X_IRQSTS_REG); + rxlen = max310x_port_read(port, MAX310X_RXFIFOLVL_REG); + if (!ists && !rxlen) + break; + + res = IRQ_HANDLED; + + if (ists & MAX310X_IRQ_CTS_BIT) { + lsr = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG); + uart_handle_cts_change(port, + !!(lsr & MAX310X_LSR_CTS_BIT)); + } + if (rxlen) + max310x_handle_rx(port, rxlen); + if (ists & MAX310X_IRQ_TXEMPTY_BIT) + max310x_start_tx(port); + } while (1); + return res; +} + +static irqreturn_t max310x_ist(int irq, void *dev_id) +{ + struct max310x_port *s = (struct max310x_port *)dev_id; + bool handled = false; + + if (s->devtype->nr > 1) { + do { + unsigned int val = ~0; + + WARN_ON_ONCE(regmap_read(s->regmap, + MAX310X_GLOBALIRQ_REG, &val)); + val = ((1 << s->devtype->nr) - 1) & ~val; + if (!val) + break; + if (max310x_port_irq(s, fls(val) - 1) == IRQ_HANDLED) + handled = true; + } while (1); + } else { + if (max310x_port_irq(s, 0) == IRQ_HANDLED) + handled = true; + } + + return IRQ_RETVAL(handled); +} + +static void max310x_tx_proc(struct work_struct *ws) +{ + struct max310x_one *one = container_of(ws, struct max310x_one, tx_work); + + max310x_handle_tx(&one->port); +} + +static unsigned int max310x_tx_empty(struct uart_port *port) +{ + u8 lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG); + + return lvl ? 0 : TIOCSER_TEMT; +} + +static unsigned int max310x_get_mctrl(struct uart_port *port) +{ + /* DCD and DSR are not wired and CTS/RTS is handled automatically + * so just indicate DSR and CAR asserted + */ + return TIOCM_DSR | TIOCM_CAR; +} + +static void max310x_md_proc(struct work_struct *ws) +{ + struct max310x_one *one = container_of(ws, struct max310x_one, md_work); + + max310x_port_update(&one->port, MAX310X_MODE2_REG, + MAX310X_MODE2_LOOPBACK_BIT, + (one->port.mctrl & TIOCM_LOOP) ? + MAX310X_MODE2_LOOPBACK_BIT : 0); +} + +static void max310x_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct max310x_one *one = to_max310x_port(port); + + schedule_work(&one->md_work); +} + +static void max310x_break_ctl(struct uart_port *port, int break_state) +{ + max310x_port_update(port, MAX310X_LCR_REG, + MAX310X_LCR_TXBREAK_BIT, + break_state ? 
MAX310X_LCR_TXBREAK_BIT : 0); +} + +static void max310x_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int lcr = 0, flow = 0; + int baud; + + /* Mask termios capabilities we don't support */ + termios->c_cflag &= ~CMSPAR; + + /* Word size */ + switch (termios->c_cflag & CSIZE) { + case CS5: + break; + case CS6: + lcr = MAX310X_LCR_LENGTH0_BIT; + break; + case CS7: + lcr = MAX310X_LCR_LENGTH1_BIT; + break; + case CS8: + default: + lcr = MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_LENGTH0_BIT; + break; + } + + /* Parity */ + if (termios->c_cflag & PARENB) { + lcr |= MAX310X_LCR_PARITY_BIT; + if (!(termios->c_cflag & PARODD)) + lcr |= MAX310X_LCR_EVENPARITY_BIT; + } + + /* Stop bits */ + if (termios->c_cflag & CSTOPB) + lcr |= MAX310X_LCR_STOPLEN_BIT; /* 2 stops */ + + /* Update LCR register */ + max310x_port_write(port, MAX310X_LCR_REG, lcr); + + /* Set read status mask */ + port->read_status_mask = MAX310X_LSR_RXOVR_BIT; + if (termios->c_iflag & INPCK) + port->read_status_mask |= MAX310X_LSR_RXPAR_BIT | + MAX310X_LSR_FRERR_BIT; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= MAX310X_LSR_RXBRK_BIT; + + /* Set status ignore mask */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNBRK) + port->ignore_status_mask |= MAX310X_LSR_RXBRK_BIT; + if (!(termios->c_cflag & CREAD)) + port->ignore_status_mask |= MAX310X_LSR_RXPAR_BIT | + MAX310X_LSR_RXOVR_BIT | + MAX310X_LSR_FRERR_BIT | + MAX310X_LSR_RXBRK_BIT; + + /* Configure flow control */ + max310x_port_write(port, MAX310X_XON1_REG, termios->c_cc[VSTART]); + max310x_port_write(port, MAX310X_XOFF1_REG, termios->c_cc[VSTOP]); + + /* Disable transmitter before enabling AutoCTS or auto transmitter + * flow control + */ + if (termios->c_cflag & CRTSCTS || termios->c_iflag & IXOFF) { + max310x_port_update(port, MAX310X_MODE1_REG, + MAX310X_MODE1_TXDIS_BIT, + MAX310X_MODE1_TXDIS_BIT); + } + + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); + + if (termios->c_cflag & CRTSCTS) { + /* Enable AUTORTS and AUTOCTS */ + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + flow |= MAX310X_FLOWCTRL_AUTOCTS_BIT | + MAX310X_FLOWCTRL_AUTORTS_BIT; + } + if (termios->c_iflag & IXON) + flow |= MAX310X_FLOWCTRL_SWFLOW3_BIT | + MAX310X_FLOWCTRL_SWFLOWEN_BIT; + if (termios->c_iflag & IXOFF) { + port->status |= UPSTAT_AUTOXOFF; + flow |= MAX310X_FLOWCTRL_SWFLOW1_BIT | + MAX310X_FLOWCTRL_SWFLOWEN_BIT; + } + max310x_port_write(port, MAX310X_FLOWCTRL_REG, flow); + + /* Enable transmitter after disabling AutoCTS and auto transmitter + * flow control + */ + if (!(termios->c_cflag & CRTSCTS) && !(termios->c_iflag & IXOFF)) { + max310x_port_update(port, MAX310X_MODE1_REG, + MAX310X_MODE1_TXDIS_BIT, + 0); + } + + /* Get baud rate generator configuration */ + baud = uart_get_baud_rate(port, termios, old, + port->uartclk / 16 / 0xffff, + port->uartclk / 4); + + /* Setup baudrate generator */ + baud = max310x_set_baud(port, baud); + + /* Update timeout according to new baud rate */ + uart_update_timeout(port, termios->c_cflag, baud); +} + +static void max310x_rs_proc(struct work_struct *ws) +{ + struct max310x_one *one = container_of(ws, struct max310x_one, rs_work); + unsigned int delay, mode1 = 0, mode2 = 0; + + delay = (one->port.rs485.delay_rts_before_send << 4) | + one->port.rs485.delay_rts_after_send; + max310x_port_write(&one->port, MAX310X_HDPIXDELAY_REG, delay); + + if (one->port.rs485.flags & SER_RS485_ENABLED) { + mode1 = MAX310X_MODE1_TRNSCVCTRL_BIT; + + if 
(!(one->port.rs485.flags & SER_RS485_RX_DURING_TX)) + mode2 = MAX310X_MODE2_ECHOSUPR_BIT; + } + + max310x_port_update(&one->port, MAX310X_MODE1_REG, + MAX310X_MODE1_TRNSCVCTRL_BIT, mode1); + max310x_port_update(&one->port, MAX310X_MODE2_REG, + MAX310X_MODE2_ECHOSUPR_BIT, mode2); +} + +static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct max310x_one *one = to_max310x_port(port); + + if ((rs485->delay_rts_before_send > 0x0f) || + (rs485->delay_rts_after_send > 0x0f)) + return -ERANGE; + + port->rs485 = *rs485; + + schedule_work(&one->rs_work); + + return 0; +} + +static int max310x_startup(struct uart_port *port) +{ + struct max310x_port *s = dev_get_drvdata(port->dev); + unsigned int val; + + s->devtype->power(port, 1); + + /* Configure MODE1 register */ + max310x_port_update(port, MAX310X_MODE1_REG, + MAX310X_MODE1_TRNSCVCTRL_BIT, 0); + + /* Configure MODE2 register & Reset FIFOs*/ + val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT; + max310x_port_write(port, MAX310X_MODE2_REG, val); + max310x_port_update(port, MAX310X_MODE2_REG, + MAX310X_MODE2_FIFORST_BIT, 0); + + /* Configure mode1/mode2 to have rs485/rs232 enabled at startup */ + val = (clamp(port->rs485.delay_rts_before_send, 0U, 15U) << 4) | + clamp(port->rs485.delay_rts_after_send, 0U, 15U); + max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val); + + if (port->rs485.flags & SER_RS485_ENABLED) { + max310x_port_update(port, MAX310X_MODE1_REG, + MAX310X_MODE1_TRNSCVCTRL_BIT, + MAX310X_MODE1_TRNSCVCTRL_BIT); + + if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) + max310x_port_update(port, MAX310X_MODE2_REG, + MAX310X_MODE2_ECHOSUPR_BIT, + MAX310X_MODE2_ECHOSUPR_BIT); + } + + /* Configure flow control levels */ + /* Flow control halt level 96, resume level 48 */ + max310x_port_write(port, MAX310X_FLOWLVL_REG, + MAX310X_FLOWLVL_RES(48) | MAX310X_FLOWLVL_HALT(96)); + + /* Clear IRQ status register */ + max310x_port_read(port, MAX310X_IRQSTS_REG); + + /* Enable RX, TX, CTS change interrupts */ + val = MAX310X_IRQ_RXEMPTY_BIT | MAX310X_IRQ_TXEMPTY_BIT; + max310x_port_write(port, MAX310X_IRQEN_REG, val | MAX310X_IRQ_CTS_BIT); + + return 0; +} + +static void max310x_shutdown(struct uart_port *port) +{ + struct max310x_port *s = dev_get_drvdata(port->dev); + + /* Disable all interrupts */ + max310x_port_write(port, MAX310X_IRQEN_REG, 0); + + s->devtype->power(port, 0); +} + +static const char *max310x_type(struct uart_port *port) +{ + struct max310x_port *s = dev_get_drvdata(port->dev); + + return (port->type == PORT_MAX310X) ? 
s->devtype->name : NULL; +} + +static int max310x_request_port(struct uart_port *port) +{ + /* Do nothing */ + return 0; +} + +static void max310x_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_MAX310X; +} + +static int max310x_verify_port(struct uart_port *port, struct serial_struct *s) +{ + if ((s->type != PORT_UNKNOWN) && (s->type != PORT_MAX310X)) + return -EINVAL; + if (s->irq != port->irq) + return -EINVAL; + + return 0; +} + +static void max310x_null_void(struct uart_port *port) +{ + /* Do nothing */ +} + +static const struct uart_ops max310x_ops = { + .tx_empty = max310x_tx_empty, + .set_mctrl = max310x_set_mctrl, + .get_mctrl = max310x_get_mctrl, + .stop_tx = max310x_null_void, + .start_tx = max310x_start_tx, + .stop_rx = max310x_null_void, + .break_ctl = max310x_break_ctl, + .startup = max310x_startup, + .shutdown = max310x_shutdown, + .set_termios = max310x_set_termios, + .type = max310x_type, + .request_port = max310x_request_port, + .release_port = max310x_null_void, + .config_port = max310x_config_port, + .verify_port = max310x_verify_port, +}; + +static int __maybe_unused max310x_suspend(struct device *dev) +{ + struct max310x_port *s = dev_get_drvdata(dev); + int i; + + for (i = 0; i < s->devtype->nr; i++) { + uart_suspend_port(&max310x_uart, &s->p[i].port); + s->devtype->power(&s->p[i].port, 0); + } + + return 0; +} + +static int __maybe_unused max310x_resume(struct device *dev) +{ + struct max310x_port *s = dev_get_drvdata(dev); + int i; + + for (i = 0; i < s->devtype->nr; i++) { + s->devtype->power(&s->p[i].port, 1); + uart_resume_port(&max310x_uart, &s->p[i].port); + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume); + +#ifdef CONFIG_GPIOLIB +static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + unsigned int val; + struct max310x_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[offset / 4].port; + + val = max310x_port_read(port, MAX310X_GPIODATA_REG); + + return !!((val >> 4) & (1 << (offset % 4))); +} + +static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct max310x_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[offset / 4].port; + + max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4), + value ? 1 << (offset % 4) : 0); +} + +static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + struct max310x_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[offset / 4].port; + + max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4), 0); + + return 0; +} + +static int max310x_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct max310x_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[offset / 4].port; + + max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4), + value ? 
1 << (offset % 4) : 0); + max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4), + 1 << (offset % 4)); + + return 0; +} + +static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset, + unsigned long config) +{ + struct max310x_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[offset / 4].port; + + switch (pinconf_to_config_param(config)) { + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + max310x_port_update(port, MAX310X_GPIOCFG_REG, + 1 << ((offset % 4) + 4), + 1 << ((offset % 4) + 4)); + return 0; + case PIN_CONFIG_DRIVE_PUSH_PULL: + max310x_port_update(port, MAX310X_GPIOCFG_REG, + 1 << ((offset % 4) + 4), 0); + return 0; + default: + return -ENOTSUPP; + } +} +#endif + +static const struct serial_rs485 max310x_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + +static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype, + const struct max310x_if_cfg *if_cfg, + struct regmap *regmaps[], int irq) +{ + int i, ret, fmin, fmax, freq; + struct max310x_port *s; + u32 uartclk = 0; + bool xtal; + + for (i = 0; i < devtype->nr; i++) + if (IS_ERR(regmaps[i])) + return PTR_ERR(regmaps[i]); + + /* Alloc port structure */ + s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL); + if (!s) { + dev_err(dev, "Error allocating port structure\n"); + return -ENOMEM; + } + + /* Always ask for fixed clock rate from a property. */ + device_property_read_u32(dev, "clock-frequency", &uartclk); + + xtal = device_property_match_string(dev, "clock-names", "osc") < 0; + if (xtal) + s->clk = devm_clk_get_optional(dev, "xtal"); + else + s->clk = devm_clk_get_optional(dev, "osc"); + if (IS_ERR(s->clk)) + return PTR_ERR(s->clk); + + ret = clk_prepare_enable(s->clk); + if (ret) + return ret; + + freq = clk_get_rate(s->clk); + if (freq == 0) + freq = uartclk; + if (freq == 0) { + dev_err(dev, "Cannot get clock rate\n"); + ret = -EINVAL; + goto out_clk; + } + + if (xtal) { + fmin = 1000000; + fmax = 4000000; + } else { + fmin = 500000; + fmax = 35000000; + } + + /* Check frequency limits */ + if (freq < fmin || freq > fmax) { + ret = -ERANGE; + goto out_clk; + } + + s->regmap = regmaps[0]; + s->devtype = devtype; + s->if_cfg = if_cfg; + dev_set_drvdata(dev, s); + + /* Check device to ensure we are talking to what we expect */ + ret = devtype->detect(dev); + if (ret) + goto out_clk; + + for (i = 0; i < devtype->nr; i++) { + /* Reset port */ + regmap_write(regmaps[i], MAX310X_MODE2_REG, + MAX310X_MODE2_RST_BIT); + /* Clear port reset */ + regmap_write(regmaps[i], MAX310X_MODE2_REG, 0); + + /* Wait for port startup */ + do { + regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret); + } while (ret != 0x01); + + regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1); + } + + uartclk = max310x_set_ref_clk(dev, s, freq, xtal); + dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk); + + for (i = 0; i < devtype->nr; i++) { + unsigned int line; + + line = find_first_zero_bit(max310x_lines, MAX310X_UART_NRMAX); + if (line == MAX310X_UART_NRMAX) { + ret = -ERANGE; + goto out_uart; + } + + /* Initialize port data */ + s->p[i].port.line = line; + s->p[i].port.dev = dev; + s->p[i].port.irq = irq; + s->p[i].port.type = PORT_MAX310X; + s->p[i].port.fifosize = MAX310X_FIFO_SIZE; + s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY; + s->p[i].port.iotype = UPIO_PORT; + s->p[i].port.iobase = i; + s->p[i].port.membase = (void __iomem *)~0; + 
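/* (editorial note, not in the original) the port is not memory
 * mapped; the dummy non-NULL membase keeps the serial core from
 * skipping the port, while iobase carries the channel index */
+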
s->p[i].port.uartclk = uartclk; + s->p[i].port.rs485_config = max310x_rs485_config; + s->p[i].port.rs485_supported = max310x_rs485_supported; + s->p[i].port.ops = &max310x_ops; + s->p[i].regmap = regmaps[i]; + + /* Disable all interrupts */ + max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0); + /* Clear IRQ status register */ + max310x_port_read(&s->p[i].port, MAX310X_IRQSTS_REG); + /* Initialize queue for start TX */ + INIT_WORK(&s->p[i].tx_work, max310x_tx_proc); + /* Initialize queue for changing LOOPBACK mode */ + INIT_WORK(&s->p[i].md_work, max310x_md_proc); + /* Initialize queue for changing RS485 mode */ + INIT_WORK(&s->p[i].rs_work, max310x_rs_proc); + + /* Register port */ + ret = uart_add_one_port(&max310x_uart, &s->p[i].port); + if (ret) { + s->p[i].port.dev = NULL; + goto out_uart; + } + set_bit(line, max310x_lines); + + /* Go to suspend mode */ + devtype->power(&s->p[i].port, 0); + } + +#ifdef CONFIG_GPIOLIB + /* Setup GPIO controller */ + s->gpio.owner = THIS_MODULE; + s->gpio.parent = dev; + s->gpio.label = devtype->name; + s->gpio.direction_input = max310x_gpio_direction_input; + s->gpio.get = max310x_gpio_get; + s->gpio.direction_output= max310x_gpio_direction_output; + s->gpio.set = max310x_gpio_set; + s->gpio.set_config = max310x_gpio_set_config; + s->gpio.base = -1; + s->gpio.ngpio = devtype->nr * 4; + s->gpio.can_sleep = 1; + ret = devm_gpiochip_add_data(dev, &s->gpio, s); + if (ret) + goto out_uart; +#endif + + /* Setup interrupt */ + ret = devm_request_threaded_irq(dev, irq, NULL, max310x_ist, + IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), s); + if (!ret) + return 0; + + dev_err(dev, "Unable to request IRQ %i\n", irq); + +out_uart: + for (i = 0; i < devtype->nr; i++) { + if (s->p[i].port.dev) { + uart_remove_one_port(&max310x_uart, &s->p[i].port); + clear_bit(s->p[i].port.line, max310x_lines); + } + } + +out_clk: + clk_disable_unprepare(s->clk); + + return ret; +} + +static void max310x_remove(struct device *dev) +{ + struct max310x_port *s = dev_get_drvdata(dev); + int i; + + for (i = 0; i < s->devtype->nr; i++) { + cancel_work_sync(&s->p[i].tx_work); + cancel_work_sync(&s->p[i].md_work); + cancel_work_sync(&s->p[i].rs_work); + uart_remove_one_port(&max310x_uart, &s->p[i].port); + clear_bit(s->p[i].port.line, max310x_lines); + s->devtype->power(&s->p[i].port, 0); + } + + clk_disable_unprepare(s->clk); +} + +static const struct of_device_id __maybe_unused max310x_dt_ids[] = { + { .compatible = "maxim,max3107", .data = &max3107_devtype, }, + { .compatible = "maxim,max3108", .data = &max3108_devtype, }, + { .compatible = "maxim,max3109", .data = &max3109_devtype, }, + { .compatible = "maxim,max14830", .data = &max14830_devtype }, + { } +}; +MODULE_DEVICE_TABLE(of, max310x_dt_ids); + +static struct regmap_config regcfg = { + .reg_bits = 8, + .val_bits = 8, + .write_flag_mask = MAX310X_WRITE_BIT, + .cache_type = REGCACHE_RBTREE, + .max_register = MAX310X_REG_1F, + .writeable_reg = max310x_reg_writeable, + .volatile_reg = max310x_reg_volatile, + .precious_reg = max310x_reg_precious, + .writeable_noinc_reg = max310x_reg_noinc, + .readable_noinc_reg = max310x_reg_noinc, + .max_raw_read = MAX310X_FIFO_SIZE, + .max_raw_write = MAX310X_FIFO_SIZE, +}; + +#ifdef CONFIG_SPI_MASTER +static int max310x_spi_extended_reg_enable(struct device *dev, bool enable) +{ + struct max310x_port *s = dev_get_drvdata(dev); + + return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, + enable ?
MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL); +} + +static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = { + .extended_reg_enable = max310x_spi_extended_reg_enable, + .rev_id_reg = MAX310X_SPI_REVID_EXTREG, +}; + +static int max310x_spi_probe(struct spi_device *spi) +{ + const struct max310x_devtype *devtype; + struct regmap *regmaps[4]; + unsigned int i; + int ret; + + /* Setup SPI bus */ + spi->bits_per_word = 8; + spi->mode = spi->mode ? : SPI_MODE_0; + spi->max_speed_hz = spi->max_speed_hz ? : 26000000; + ret = spi_setup(spi); + if (ret) + return ret; + + devtype = device_get_match_data(&spi->dev); + if (!devtype) + devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data; + + for (i = 0; i < devtype->nr; i++) { + u8 port_mask = i * 0x20; + regcfg.read_flag_mask = port_mask; + regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT; + regmaps[i] = devm_regmap_init_spi(spi, ®cfg); + } + + return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq); +} + +static void max310x_spi_remove(struct spi_device *spi) +{ + max310x_remove(&spi->dev); +} + +static const struct spi_device_id max310x_id_table[] = { + { "max3107", (kernel_ulong_t)&max3107_devtype, }, + { "max3108", (kernel_ulong_t)&max3108_devtype, }, + { "max3109", (kernel_ulong_t)&max3109_devtype, }, + { "max14830", (kernel_ulong_t)&max14830_devtype, }, + { } +}; +MODULE_DEVICE_TABLE(spi, max310x_id_table); + +static struct spi_driver max310x_spi_driver = { + .driver = { + .name = MAX310X_NAME, + .of_match_table = max310x_dt_ids, + .pm = &max310x_pm_ops, + }, + .probe = max310x_spi_probe, + .remove = max310x_spi_remove, + .id_table = max310x_id_table, +}; +#endif + +#ifdef CONFIG_I2C +static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable) +{ + return 0; +} + +static struct regmap_config regcfg_i2c = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .writeable_reg = max310x_reg_writeable, + .volatile_reg = max310x_reg_volatile, + .precious_reg = max310x_reg_precious, + .max_register = MAX310X_I2C_REVID_EXTREG, + .writeable_noinc_reg = max310x_reg_noinc, + .readable_noinc_reg = max310x_reg_noinc, + .max_raw_read = MAX310X_FIFO_SIZE, + .max_raw_write = MAX310X_FIFO_SIZE, +}; + +static const struct max310x_if_cfg max310x_i2c_if_cfg = { + .extended_reg_enable = max310x_i2c_extended_reg_enable, + .rev_id_reg = MAX310X_I2C_REVID_EXTREG, +}; + +static unsigned short max310x_i2c_slave_addr(unsigned short addr, + unsigned int nr) +{ + /* + * For MAX14830 and MAX3109, the slave address depends on what the + * A0 and A1 pins are tied to. + * See Table I2C Address Map of the datasheet. + * Based on that table, the following formulas were determined. 
+ * UART1 - UART0 = 0x10 + * UART2 - UART1 = 0x20 + 0x10 + * UART3 - UART2 = 0x10 + */ + + addr -= nr * 0x10; + + if (nr >= 2) + addr -= 0x20; + + return addr; +} + +static int max310x_i2c_probe(struct i2c_client *client) +{ + const struct max310x_devtype *devtype = + device_get_match_data(&client->dev); + struct i2c_client *port_client; + struct regmap *regmaps[4]; + unsigned int i; + u8 port_addr; + + if (client->addr < devtype->slave_addr.min || + client->addr > devtype->slave_addr.max) + return dev_err_probe(&client->dev, -EINVAL, + "Slave addr 0x%x outside of range [0x%x, 0x%x]\n", + client->addr, devtype->slave_addr.min, + devtype->slave_addr.max); + + regmaps[0] = devm_regmap_init_i2c(client, ®cfg_i2c); + + for (i = 1; i < devtype->nr; i++) { + port_addr = max310x_i2c_slave_addr(client->addr, i); + port_client = devm_i2c_new_dummy_device(&client->dev, + client->adapter, + port_addr); + + regmaps[i] = devm_regmap_init_i2c(port_client, ®cfg_i2c); + } + + return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg, + regmaps, client->irq); +} + +static void max310x_i2c_remove(struct i2c_client *client) +{ + max310x_remove(&client->dev); +} + +static struct i2c_driver max310x_i2c_driver = { + .driver = { + .name = MAX310X_NAME, + .of_match_table = max310x_dt_ids, + .pm = &max310x_pm_ops, + }, + .probe_new = max310x_i2c_probe, + .remove = max310x_i2c_remove, +}; +#endif + +static int __init max310x_uart_init(void) +{ + int ret; + + bitmap_zero(max310x_lines, MAX310X_UART_NRMAX); + + ret = uart_register_driver(&max310x_uart); + if (ret) + return ret; + +#ifdef CONFIG_SPI_MASTER + ret = spi_register_driver(&max310x_spi_driver); + if (ret) + goto err_spi_register; +#endif + +#ifdef CONFIG_I2C + ret = i2c_add_driver(&max310x_i2c_driver); + if (ret) + goto err_i2c_register; +#endif + + return 0; + +#ifdef CONFIG_I2C +err_i2c_register: + spi_unregister_driver(&max310x_spi_driver); +#endif + +err_spi_register: + uart_unregister_driver(&max310x_uart); + + return ret; +} +module_init(max310x_uart_init); + +static void __exit max310x_uart_exit(void) +{ +#ifdef CONFIG_I2C + i2c_del_driver(&max310x_i2c_driver); +#endif + +#ifdef CONFIG_SPI_MASTER + spi_unregister_driver(&max310x_spi_driver); +#endif + + uart_unregister_driver(&max310x_uart); +} +module_exit(max310x_uart_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alexander Shiyan "); +MODULE_DESCRIPTION("MAX310X serial driver"); diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c new file mode 100644 index 000000000..b1cd9a76d --- /dev/null +++ b/drivers/tty/serial/mcf.c @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0+ +/****************************************************************************/ + +/* + * mcf.c -- Freescale ColdFire UART driver + * + * (C) Copyright 2003-2007, Greg Ungerer + */ + +/****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/****************************************************************************/ + +/* + * Some boards implement the DTR/DCD lines using GPIO lines, most + * don't. Dummy out the access macros for those that don't. Those + * that do should define these macros somewhere in there board + * specific inlude files. 
+ */ +#if !defined(mcf_getppdcd) +#define mcf_getppdcd(p) (1) +#endif +#if !defined(mcf_getppdtr) +#define mcf_getppdtr(p) (1) +#endif +#if !defined(mcf_setppdtr) +#define mcf_setppdtr(p, v) do { } while (0) +#endif + +/****************************************************************************/ + +/* + * Local per-uart structure. + */ +struct mcf_uart { + struct uart_port port; + unsigned int sigs; /* Local copy of line sigs */ + unsigned char imr; /* Local IMR mirror */ +}; + +/****************************************************************************/ + +static unsigned int mcf_tx_empty(struct uart_port *port) +{ + return (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXEMPTY) ? + TIOCSER_TEMT : 0; +} + +/****************************************************************************/ + +static unsigned int mcf_get_mctrl(struct uart_port *port) +{ + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + unsigned int sigs; + + sigs = (readb(port->membase + MCFUART_UIPR) & MCFUART_UIPR_CTS) ? + 0 : TIOCM_CTS; + sigs |= (pp->sigs & TIOCM_RTS); + sigs |= (mcf_getppdcd(port->line) ? TIOCM_CD : 0); + sigs |= (mcf_getppdtr(port->line) ? TIOCM_DTR : 0); + + return sigs; +} + +/****************************************************************************/ + +static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs) +{ + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + + pp->sigs = sigs; + mcf_setppdtr(port->line, (sigs & TIOCM_DTR)); + if (sigs & TIOCM_RTS) + writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1); + else + writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP0); +} + +/****************************************************************************/ + +static void mcf_start_tx(struct uart_port *port) +{ + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + + if (port->rs485.flags & SER_RS485_ENABLED) { + /* Enable Transmitter */ + writeb(MCFUART_UCR_TXENABLE, port->membase + MCFUART_UCR); + /* Manually assert RTS */ + writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1); + } + pp->imr |= MCFUART_UIR_TXREADY; + writeb(pp->imr, port->membase + MCFUART_UIMR); +} + +/****************************************************************************/ + +static void mcf_stop_tx(struct uart_port *port) +{ + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + + pp->imr &= ~MCFUART_UIR_TXREADY; + writeb(pp->imr, port->membase + MCFUART_UIMR); +} + +/****************************************************************************/ + +static void mcf_stop_rx(struct uart_port *port) +{ + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + + pp->imr &= ~MCFUART_UIR_RXREADY; + writeb(pp->imr, port->membase + MCFUART_UIMR); +} + +/****************************************************************************/ + +static void mcf_break_ctl(struct uart_port *port, int break_state) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + if (break_state == -1) + writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR); + else + writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR); + spin_unlock_irqrestore(&port->lock, flags); +} + +/****************************************************************************/ + +static int mcf_startup(struct uart_port *port) +{ + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Reset UART, get it into known state... 
*/ + writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); + writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); + + /* Enable the UART transmitter and receiver */ + writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE, + port->membase + MCFUART_UCR); + + /* Enable RX interrupts now */ + pp->imr = MCFUART_UIR_RXREADY; + writeb(pp->imr, port->membase + MCFUART_UIMR); + + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +/****************************************************************************/ + +static void mcf_shutdown(struct uart_port *port) +{ + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable all interrupts now */ + pp->imr = 0; + writeb(pp->imr, port->membase + MCFUART_UIMR); + + /* Disable UART transmitter and receiver */ + writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); + writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/****************************************************************************/ + +static void mcf_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + unsigned int baud, baudclk; +#if defined(CONFIG_M5272) + unsigned int baudfr; +#endif + unsigned char mr1, mr2; + + baud = uart_get_baud_rate(port, termios, old, 0, 230400); +#if defined(CONFIG_M5272) + baudclk = (MCF_BUSCLK / baud) / 32; + baudfr = (((MCF_BUSCLK / baud) + 1) / 2) % 16; +#else + baudclk = ((MCF_BUSCLK / baud) + 16) / 32; +#endif + + mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR; + mr2 = 0; + + switch (termios->c_cflag & CSIZE) { + case CS5: mr1 |= MCFUART_MR1_CS5; break; + case CS6: mr1 |= MCFUART_MR1_CS6; break; + case CS7: mr1 |= MCFUART_MR1_CS7; break; + case CS8: + default: mr1 |= MCFUART_MR1_CS8; break; + } + + if (termios->c_cflag & PARENB) { + if (termios->c_cflag & CMSPAR) { + if (termios->c_cflag & PARODD) + mr1 |= MCFUART_MR1_PARITYMARK; + else + mr1 |= MCFUART_MR1_PARITYSPACE; + } else { + if (termios->c_cflag & PARODD) + mr1 |= MCFUART_MR1_PARITYODD; + else + mr1 |= MCFUART_MR1_PARITYEVEN; + } + } else { + mr1 |= MCFUART_MR1_PARITYNONE; + } + + /* + * FIXME: port->read_status_mask and port->ignore_status_mask + * need to be initialized based on termios settings for + * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT + */ + + if (termios->c_cflag & CSTOPB) + mr2 |= MCFUART_MR2_STOP2; + else + mr2 |= MCFUART_MR2_STOP1; + + if (termios->c_cflag & CRTSCTS) { + mr1 |= MCFUART_MR1_RXRTS; + mr2 |= MCFUART_MR2_TXCTS; + } + + spin_lock_irqsave(&port->lock, flags); + if (port->rs485.flags & SER_RS485_ENABLED) { + dev_dbg(port->dev, "Setting UART to RS485\n"); + mr2 |= MCFUART_MR2_TXRTS; + } + + uart_update_timeout(port, termios->c_cflag, baud); + writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); + writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); + writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); + writeb(mr1, port->membase + MCFUART_UMR); + writeb(mr2, port->membase + MCFUART_UMR); + writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1); + writeb((baudclk & 0xff), port->membase + MCFUART_UBG2); +#if defined(CONFIG_M5272) + writeb((baudfr & 0x0f), port->membase + MCFUART_UFPD); +#endif + writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER, + port->membase + MCFUART_UCSR); + writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE, + port->membase + MCFUART_UCR); + 
spin_unlock_irqrestore(&port->lock, flags); +} + +/****************************************************************************/ + +static void mcf_rx_chars(struct mcf_uart *pp) +{ + struct uart_port *port = &pp->port; + unsigned char status, ch, flag; + + while ((status = readb(port->membase + MCFUART_USR)) & MCFUART_USR_RXREADY) { + ch = readb(port->membase + MCFUART_URB); + flag = TTY_NORMAL; + port->icount.rx++; + + if (status & MCFUART_USR_RXERR) { + writeb(MCFUART_UCR_CMDRESETERR, + port->membase + MCFUART_UCR); + + if (status & MCFUART_USR_RXBREAK) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } else if (status & MCFUART_USR_RXPARITY) { + port->icount.parity++; + } else if (status & MCFUART_USR_RXOVERRUN) { + port->icount.overrun++; + } else if (status & MCFUART_USR_RXFRAMING) { + port->icount.frame++; + } + + status &= port->read_status_mask; + + if (status & MCFUART_USR_RXBREAK) + flag = TTY_BREAK; + else if (status & MCFUART_USR_RXPARITY) + flag = TTY_PARITY; + else if (status & MCFUART_USR_RXFRAMING) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(port, ch)) + continue; + uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag); + } + + tty_flip_buffer_push(&port->state->port); +} + +/****************************************************************************/ + +static void mcf_tx_chars(struct mcf_uart *pp) +{ + struct uart_port *port = &pp->port; + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + /* Send special char - probably flow control */ + writeb(port->x_char, port->membase + MCFUART_UTB); + port->x_char = 0; + port->icount.tx++; + return; + } + + while (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) { + if (uart_circ_empty(xmit)) + break; + writeb(xmit->buf[xmit->tail], port->membase + MCFUART_UTB); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE -1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) { + mcf_stop_tx(port); + /* Disable TX to negate RTS automatically */ + if (port->rs485.flags & SER_RS485_ENABLED) + writeb(MCFUART_UCR_TXDISABLE, + port->membase + MCFUART_UCR); + } +} + +/****************************************************************************/ + +static irqreturn_t mcf_interrupt(int irq, void *data) +{ + struct uart_port *port = data; + struct mcf_uart *pp = container_of(port, struct mcf_uart, port); + unsigned int isr; + irqreturn_t ret = IRQ_NONE; + + isr = readb(port->membase + MCFUART_UISR) & pp->imr; + + spin_lock(&port->lock); + if (isr & MCFUART_UIR_RXREADY) { + mcf_rx_chars(pp); + ret = IRQ_HANDLED; + } + if (isr & MCFUART_UIR_TXREADY) { + mcf_tx_chars(pp); + ret = IRQ_HANDLED; + } + spin_unlock(&port->lock); + + return ret; +} + +/****************************************************************************/ + +static void mcf_config_port(struct uart_port *port, int flags) +{ + port->type = PORT_MCF; + port->fifosize = MCFUART_TXFIFOSIZE; + + /* Clear mask, so no surprise interrupts. */ + writeb(0, port->membase + MCFUART_UIMR); + + if (request_irq(port->irq, mcf_interrupt, 0, "UART", port)) + printk(KERN_ERR "MCF: unable to attach ColdFire UART %d " + "interrupt vector=%d\n", port->line, port->irq); +} + +/****************************************************************************/ + +static const char *mcf_type(struct uart_port *port) +{ + return (port->type == PORT_MCF) ? 
"ColdFire UART" : NULL; +} + +/****************************************************************************/ + +static int mcf_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +/****************************************************************************/ + +static void mcf_release_port(struct uart_port *port) +{ + /* Nothing to release... */ +} + +/****************************************************************************/ + +static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_MCF)) + return -EINVAL; + return 0; +} + +/****************************************************************************/ + +/* Enable or disable the RS485 support */ +static int mcf_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + unsigned char mr1, mr2; + + /* Get mode registers */ + mr1 = readb(port->membase + MCFUART_UMR); + mr2 = readb(port->membase + MCFUART_UMR); + if (rs485->flags & SER_RS485_ENABLED) { + dev_dbg(port->dev, "Setting UART to RS485\n"); + /* Automatically negate RTS after TX completes */ + mr2 |= MCFUART_MR2_TXRTS; + } else { + dev_dbg(port->dev, "Setting UART to RS232\n"); + mr2 &= ~MCFUART_MR2_TXRTS; + } + writeb(mr1, port->membase + MCFUART_UMR); + writeb(mr2, port->membase + MCFUART_UMR); + + return 0; +} + +static const struct serial_rs485 mcf_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND, +}; + +/****************************************************************************/ + +/* + * Define the basic serial functions we support. + */ +static const struct uart_ops mcf_uart_ops = { + .tx_empty = mcf_tx_empty, + .get_mctrl = mcf_get_mctrl, + .set_mctrl = mcf_set_mctrl, + .start_tx = mcf_start_tx, + .stop_tx = mcf_stop_tx, + .stop_rx = mcf_stop_rx, + .break_ctl = mcf_break_ctl, + .startup = mcf_startup, + .shutdown = mcf_shutdown, + .set_termios = mcf_set_termios, + .type = mcf_type, + .request_port = mcf_request_port, + .release_port = mcf_release_port, + .config_port = mcf_config_port, + .verify_port = mcf_verify_port, +}; + +static struct mcf_uart mcf_ports[4]; + +#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) + +/****************************************************************************/ +#if defined(CONFIG_SERIAL_MCF_CONSOLE) +/****************************************************************************/ + +int __init early_mcf_setup(struct mcf_platform_uart *platp) +{ + struct uart_port *port; + int i; + + for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) { + port = &mcf_ports[i].port; + + port->line = i; + port->type = PORT_MCF; + port->mapbase = platp[i].mapbase; + port->membase = (platp[i].membase) ? 
platp[i].membase : + (unsigned char __iomem *) port->mapbase; + port->iotype = SERIAL_IO_MEM; + port->irq = platp[i].irq; + port->uartclk = MCF_BUSCLK; + port->flags = UPF_BOOT_AUTOCONF; + port->rs485_config = mcf_config_rs485; + port->rs485_supported = mcf_rs485_supported; + port->ops = &mcf_uart_ops; + } + + return 0; +} + +/****************************************************************************/ + +static void mcf_console_putc(struct console *co, const char c) +{ + struct uart_port *port = &(mcf_ports + co->index)->port; + int i; + + for (i = 0; (i < 0x10000); i++) { + if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) + break; + } + writeb(c, port->membase + MCFUART_UTB); + for (i = 0; (i < 0x10000); i++) { + if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) + break; + } +} + +/****************************************************************************/ + +static void mcf_console_write(struct console *co, const char *s, unsigned int count) +{ + for (; (count); count--, s++) { + mcf_console_putc(co, *s); + if (*s == '\n') + mcf_console_putc(co, '\r'); + } +} + +/****************************************************************************/ + +static int __init mcf_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = CONFIG_SERIAL_MCF_BAUDRATE; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if ((co->index < 0) || (co->index >= MCF_MAXPORTS)) + co->index = 0; + port = &mcf_ports[co->index].port; + if (port->membase == 0) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +/****************************************************************************/ + +static struct uart_driver mcf_driver; + +static struct console mcf_console = { + .name = "ttyS", + .write = mcf_console_write, + .device = uart_console_device, + .setup = mcf_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &mcf_driver, +}; + +static int __init mcf_console_init(void) +{ + register_console(&mcf_console); + return 0; +} + +console_initcall(mcf_console_init); + +#define MCF_CONSOLE &mcf_console + +/****************************************************************************/ +#else +/****************************************************************************/ + +#define MCF_CONSOLE NULL + +/****************************************************************************/ +#endif /* CONFIG_SERIAL_MCF_CONSOLE */ +/****************************************************************************/ + +/* + * Define the mcf UART driver structure. + */ +static struct uart_driver mcf_driver = { + .owner = THIS_MODULE, + .driver_name = "mcf", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = MCF_MAXPORTS, + .cons = MCF_CONSOLE, +}; + +/****************************************************************************/ + +static int mcf_probe(struct platform_device *pdev) +{ + struct mcf_platform_uart *platp = dev_get_platdata(&pdev->dev); + struct uart_port *port; + int i; + + for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) { + port = &mcf_ports[i].port; + + port->line = i; + port->type = PORT_MCF; + port->mapbase = platp[i].mapbase; + port->membase = (platp[i].membase) ? 
platp[i].membase : + (unsigned char __iomem *) platp[i].mapbase; + port->dev = &pdev->dev; + port->iotype = SERIAL_IO_MEM; + port->irq = platp[i].irq; + port->uartclk = MCF_BUSCLK; + port->ops = &mcf_uart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->rs485_config = mcf_config_rs485; + port->rs485_supported = mcf_rs485_supported; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MCF_CONSOLE); + + uart_add_one_port(&mcf_driver, port); + } + + return 0; +} + +/****************************************************************************/ + +static int mcf_remove(struct platform_device *pdev) +{ + struct uart_port *port; + int i; + + for (i = 0; (i < MCF_MAXPORTS); i++) { + port = &mcf_ports[i].port; + if (port) + uart_remove_one_port(&mcf_driver, port); + } + + return 0; +} + +/****************************************************************************/ + +static struct platform_driver mcf_platform_driver = { + .probe = mcf_probe, + .remove = mcf_remove, + .driver = { + .name = "mcfuart", + }, +}; + +/****************************************************************************/ + +static int __init mcf_init(void) +{ + int rc; + + printk("ColdFire internal UART serial driver\n"); + + rc = uart_register_driver(&mcf_driver); + if (rc) + return rc; + rc = platform_driver_register(&mcf_platform_driver); + if (rc) { + uart_unregister_driver(&mcf_driver); + return rc; + } + return 0; +} + +/****************************************************************************/ + +static void __exit mcf_exit(void) +{ + platform_driver_unregister(&mcf_platform_driver); + uart_unregister_driver(&mcf_driver); +} + +/****************************************************************************/ + +module_init(mcf_init); +module_exit(mcf_exit); + +MODULE_AUTHOR("Greg Ungerer "); +MODULE_DESCRIPTION("Freescale ColdFire UART driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mcfuart"); + +/****************************************************************************/ diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c new file mode 100644 index 000000000..3690f5cf0 --- /dev/null +++ b/drivers/tty/serial/men_z135_uart.c @@ -0,0 +1,932 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MEN 16z135 High Speed UART + * + * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de) + * Author: Johannes Thumshirn + */ +#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MEN_Z135_MAX_PORTS 12 +#define MEN_Z135_BASECLK 29491200 +#define MEN_Z135_FIFO_SIZE 1024 +#define MEN_Z135_FIFO_WATERMARK 1020 + +#define MEN_Z135_STAT_REG 0x0 +#define MEN_Z135_RX_RAM 0x4 +#define MEN_Z135_TX_RAM 0x400 +#define MEN_Z135_RX_CTRL 0x800 +#define MEN_Z135_TX_CTRL 0x804 +#define MEN_Z135_CONF_REG 0x808 +#define MEN_Z135_UART_FREQ 0x80c +#define MEN_Z135_BAUD_REG 0x810 +#define MEN_Z135_TIMEOUT 0x814 + +#define IRQ_ID(x) ((x) & 0x1f) + +#define MEN_Z135_IER_RXCIEN BIT(0) /* RX Space IRQ */ +#define MEN_Z135_IER_TXCIEN BIT(1) /* TX Space IRQ */ +#define MEN_Z135_IER_RLSIEN BIT(2) /* Receiver Line Status IRQ */ +#define MEN_Z135_IER_MSIEN BIT(3) /* Modem Status IRQ */ +#define MEN_Z135_ALL_IRQS (MEN_Z135_IER_RXCIEN \ + | MEN_Z135_IER_RLSIEN \ + | MEN_Z135_IER_MSIEN \ + | MEN_Z135_IER_TXCIEN) + +#define MEN_Z135_MCR_DTR BIT(24) +#define MEN_Z135_MCR_RTS BIT(25) +#define MEN_Z135_MCR_OUT1 BIT(26) +#define MEN_Z135_MCR_OUT2 BIT(27) +#define MEN_Z135_MCR_LOOP BIT(28) +#define MEN_Z135_MCR_RCFC BIT(29) + +#define MEN_Z135_MSR_DCTS 
BIT(0) +#define MEN_Z135_MSR_DDSR BIT(1) +#define MEN_Z135_MSR_DRI BIT(2) +#define MEN_Z135_MSR_DDCD BIT(3) +#define MEN_Z135_MSR_CTS BIT(4) +#define MEN_Z135_MSR_DSR BIT(5) +#define MEN_Z135_MSR_RI BIT(6) +#define MEN_Z135_MSR_DCD BIT(7) + +#define MEN_Z135_LCR_SHIFT 8 /* LCR shift mask */ + +#define MEN_Z135_WL5 0 /* CS5 */ +#define MEN_Z135_WL6 1 /* CS6 */ +#define MEN_Z135_WL7 2 /* CS7 */ +#define MEN_Z135_WL8 3 /* CS8 */ + +#define MEN_Z135_STB_SHIFT 2 /* Stopbits */ +#define MEN_Z135_NSTB1 0 +#define MEN_Z135_NSTB2 1 + +#define MEN_Z135_PEN_SHIFT 3 /* Parity enable */ +#define MEN_Z135_PAR_DIS 0 +#define MEN_Z135_PAR_ENA 1 + +#define MEN_Z135_PTY_SHIFT 4 /* Parity type */ +#define MEN_Z135_PTY_ODD 0 +#define MEN_Z135_PTY_EVN 1 + +#define MEN_Z135_LSR_DR BIT(0) +#define MEN_Z135_LSR_OE BIT(1) +#define MEN_Z135_LSR_PE BIT(2) +#define MEN_Z135_LSR_FE BIT(3) +#define MEN_Z135_LSR_BI BIT(4) +#define MEN_Z135_LSR_THEP BIT(5) +#define MEN_Z135_LSR_TEXP BIT(6) +#define MEN_Z135_LSR_RXFIFOERR BIT(7) + +#define MEN_Z135_IRQ_ID_RLS BIT(0) +#define MEN_Z135_IRQ_ID_RDA BIT(1) +#define MEN_Z135_IRQ_ID_CTI BIT(2) +#define MEN_Z135_IRQ_ID_TSA BIT(3) +#define MEN_Z135_IRQ_ID_MST BIT(4) + +#define LCR(x) (((x) >> MEN_Z135_LCR_SHIFT) & 0xff) + +#define BYTES_TO_ALIGN(x) ((x) & 0x3) + +static int line; + +static int txlvl = 5; +module_param(txlvl, int, S_IRUGO); +MODULE_PARM_DESC(txlvl, "TX IRQ trigger level 0-7, default 5 (128 byte)"); + +static int rxlvl = 6; +module_param(rxlvl, int, S_IRUGO); +MODULE_PARM_DESC(rxlvl, "RX IRQ trigger level 0-7, default 6 (256 byte)"); + +static int align; +module_param(align, int, S_IRUGO); +MODULE_PARM_DESC(align, "Keep hardware FIFO write pointer aligned, default 0"); + +static uint rx_timeout; +module_param(rx_timeout, uint, S_IRUGO); +MODULE_PARM_DESC(rx_timeout, "RX timeout. " + "Timeout in seconds = (timeout_reg * baud_reg * 4) / freq_reg"); + +struct men_z135_port { + struct uart_port port; + struct mcb_device *mdev; + struct resource *mem; + unsigned char *rxbuf; + u32 stat_reg; + spinlock_t lock; + bool automode; +}; +#define to_men_z135(port) container_of((port), struct men_z135_port, port) + +/** + * men_z135_reg_set() - Set value in register + * @uart: The UART port + * @addr: Register address + * @val: value to set + */ +static inline void men_z135_reg_set(struct men_z135_port *uart, + u32 addr, u32 val) +{ + struct uart_port *port = &uart->port; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&uart->lock, flags); + + reg = ioread32(port->membase + addr); + reg |= val; + iowrite32(reg, port->membase + addr); + + spin_unlock_irqrestore(&uart->lock, flags); +} + +/** + * men_z135_reg_clr() - Unset value in register + * @uart: The UART port + * @addr: Register address + * @val: value to clear + */ +static void men_z135_reg_clr(struct men_z135_port *uart, + u32 addr, u32 val) +{ + struct uart_port *port = &uart->port; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&uart->lock, flags); + + reg = ioread32(port->membase + addr); + reg &= ~val; + iowrite32(reg, port->membase + addr); + + spin_unlock_irqrestore(&uart->lock, flags); +} + +/** + * men_z135_handle_modem_status() - Handle change of modem status + * @uart: The UART port + * + * Handle change of modem status register. This is done by reading the "delta" + * versions of DCD (Data Carrier Detect) and CTS (Clear To Send). 
+ */ +static void men_z135_handle_modem_status(struct men_z135_port *uart) +{ + u8 msr; + + msr = (uart->stat_reg >> 8) & 0xff; + + if (msr & MEN_Z135_MSR_DDCD) + uart_handle_dcd_change(&uart->port, + msr & MEN_Z135_MSR_DCD); + if (msr & MEN_Z135_MSR_DCTS) + uart_handle_cts_change(&uart->port, + msr & MEN_Z135_MSR_CTS); +} + +static void men_z135_handle_lsr(struct men_z135_port *uart) +{ + struct uart_port *port = &uart->port; + u8 lsr; + + lsr = (uart->stat_reg >> 16) & 0xff; + + if (lsr & MEN_Z135_LSR_OE) + port->icount.overrun++; + if (lsr & MEN_Z135_LSR_PE) + port->icount.parity++; + if (lsr & MEN_Z135_LSR_FE) + port->icount.frame++; + if (lsr & MEN_Z135_LSR_BI) { + port->icount.brk++; + uart_handle_break(port); + } +} + +/** + * get_rx_fifo_content() - Get the number of bytes in RX FIFO + * @uart: The UART port + * + * Read RXC register from hardware and return current FIFO fill size. + */ +static u16 get_rx_fifo_content(struct men_z135_port *uart) +{ + struct uart_port *port = &uart->port; + u32 stat_reg; + u16 rxc; + u8 rxc_lo; + u8 rxc_hi; + + stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG); + rxc_lo = stat_reg >> 24; + rxc_hi = (stat_reg & 0xC0) >> 6; + + rxc = rxc_lo | (rxc_hi << 8); + + return rxc; +} + +/** + * men_z135_handle_rx() - RX tasklet routine + * @uart: Pointer to struct men_z135_port + * + * Copy from RX FIFO and acknowledge number of bytes copied. + */ +static void men_z135_handle_rx(struct men_z135_port *uart) +{ + struct uart_port *port = &uart->port; + struct tty_port *tport = &port->state->port; + int copied; + u16 size; + int room; + + size = get_rx_fifo_content(uart); + + if (size == 0) + return; + + /* Avoid accidently accessing TX FIFO instead of RX FIFO. Last + * longword in RX FIFO cannot be read.(0x004-0x3FF) + */ + if (size > MEN_Z135_FIFO_WATERMARK) + size = MEN_Z135_FIFO_WATERMARK; + + room = tty_buffer_request_room(tport, size); + if (room != size) + dev_warn(&uart->mdev->dev, + "Not enough room in flip buffer, truncating to %d\n", + room); + + if (room == 0) + return; + + memcpy_fromio(uart->rxbuf, port->membase + MEN_Z135_RX_RAM, room); + /* Be sure to first copy all data and then acknowledge it */ + mb(); + iowrite32(room, port->membase + MEN_Z135_RX_CTRL); + + copied = tty_insert_flip_string(tport, uart->rxbuf, room); + if (copied != room) + dev_warn(&uart->mdev->dev, + "Only copied %d instead of %d bytes\n", + copied, room); + + port->icount.rx += copied; + + tty_flip_buffer_push(tport); + +} + +/** + * men_z135_handle_tx() - TX tasklet routine + * @uart: Pointer to struct men_z135_port + * + */ +static void men_z135_handle_tx(struct men_z135_port *uart) +{ + struct uart_port *port = &uart->port; + struct circ_buf *xmit = &port->state->xmit; + u32 txc; + u32 wptr; + int qlen; + int n; + int txfree; + int head; + int tail; + int s; + + if (uart_circ_empty(xmit)) + goto out; + + if (uart_tx_stopped(port)) + goto out; + + if (port->x_char) + goto out; + + /* calculate bytes to copy */ + qlen = uart_circ_chars_pending(xmit); + if (qlen <= 0) + goto out; + + wptr = ioread32(port->membase + MEN_Z135_TX_CTRL); + txc = (wptr >> 16) & 0x3ff; + wptr &= 0x3ff; + + if (txc > MEN_Z135_FIFO_WATERMARK) + txc = MEN_Z135_FIFO_WATERMARK; + + txfree = MEN_Z135_FIFO_WATERMARK - txc; + if (txfree <= 0) { + dev_err(&uart->mdev->dev, + "Not enough room in TX FIFO have %d, need %d\n", + txfree, qlen); + goto irq_en; + } + + /* if we're not aligned, it's better to copy only 1 or 2 bytes and + * then the rest. 
+ */ + if (align && qlen >= 3 && BYTES_TO_ALIGN(wptr)) + n = 4 - BYTES_TO_ALIGN(wptr); + else if (qlen > txfree) + n = txfree; + else + n = qlen; + + if (n <= 0) + goto irq_en; + + head = xmit->head & (UART_XMIT_SIZE - 1); + tail = xmit->tail & (UART_XMIT_SIZE - 1); + + s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail; + n = min(n, s); + + memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n); + xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1); + + iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL); + + port->icount.tx += n; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + +irq_en: + if (!uart_circ_empty(xmit)) + men_z135_reg_set(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN); + else + men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN); + +out: + return; + +} + +/** + * men_z135_intr() - Handle legacy IRQs + * @irq: The IRQ number + * @data: Pointer to UART port + * + * Check IIR register to find the cause of the interrupt and handle it. + * It is possible that multiple interrupt reason bits are set and reading + * the IIR is a destructive read, so we always need to check for all possible + * interrupts and handle them. + */ +static irqreturn_t men_z135_intr(int irq, void *data) +{ + struct men_z135_port *uart = (struct men_z135_port *)data; + struct uart_port *port = &uart->port; + bool handled = false; + int irq_id; + + uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG); + irq_id = IRQ_ID(uart->stat_reg); + + if (!irq_id) + goto out; + + spin_lock(&port->lock); + /* It's safe to write to IIR[7:6] RXC[9:8] */ + iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG); + + if (irq_id & MEN_Z135_IRQ_ID_RLS) { + men_z135_handle_lsr(uart); + handled = true; + } + + if (irq_id & (MEN_Z135_IRQ_ID_RDA | MEN_Z135_IRQ_ID_CTI)) { + if (irq_id & MEN_Z135_IRQ_ID_CTI) + dev_dbg(&uart->mdev->dev, "Character Timeout Indication\n"); + men_z135_handle_rx(uart); + handled = true; + } + + if (irq_id & MEN_Z135_IRQ_ID_TSA) { + men_z135_handle_tx(uart); + handled = true; + } + + if (irq_id & MEN_Z135_IRQ_ID_MST) { + men_z135_handle_modem_status(uart); + handled = true; + } + + spin_unlock(&port->lock); +out: + return IRQ_RETVAL(handled); +} + +/** + * men_z135_request_irq() - Request IRQ for 16z135 core + * @uart: z135 private uart port structure + * + * Request an IRQ for 16z135 to use. First try using MSI; if that fails, + * fall back to using legacy interrupts. + */ +static int men_z135_request_irq(struct men_z135_port *uart) +{ + struct device *dev = &uart->mdev->dev; + struct uart_port *port = &uart->port; + int err = 0; + + err = request_irq(port->irq, men_z135_intr, IRQF_SHARED, + "men_z135_intr", uart); + if (err) + dev_err(dev, "Error %d getting interrupt\n", err); + + return err; +} + +/** + * men_z135_tx_empty() - Handle tx_empty call + * @port: The UART port + * + * This function tests whether the TX FIFO and shifter for the port + * described by @port are empty.
+ */ +static unsigned int men_z135_tx_empty(struct uart_port *port) +{ + u32 wptr; + u16 txc; + + wptr = ioread32(port->membase + MEN_Z135_TX_CTRL); + txc = (wptr >> 16) & 0x3ff; + + if (txc == 0) + return TIOCSER_TEMT; + else + return 0; +} + +/** + * men_z135_set_mctrl() - Set modem control lines + * @port: The UART port + * @mctrl: The modem control lines + * + * This function sets the modem control lines for a port described by @port + * to the state described by @mctrl + */ +static void men_z135_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 old; + u32 conf_reg; + + conf_reg = old = ioread32(port->membase + MEN_Z135_CONF_REG); + if (mctrl & TIOCM_RTS) + conf_reg |= MEN_Z135_MCR_RTS; + else + conf_reg &= ~MEN_Z135_MCR_RTS; + + if (mctrl & TIOCM_DTR) + conf_reg |= MEN_Z135_MCR_DTR; + else + conf_reg &= ~MEN_Z135_MCR_DTR; + + if (mctrl & TIOCM_OUT1) + conf_reg |= MEN_Z135_MCR_OUT1; + else + conf_reg &= ~MEN_Z135_MCR_OUT1; + + if (mctrl & TIOCM_OUT2) + conf_reg |= MEN_Z135_MCR_OUT2; + else + conf_reg &= ~MEN_Z135_MCR_OUT2; + + if (mctrl & TIOCM_LOOP) + conf_reg |= MEN_Z135_MCR_LOOP; + else + conf_reg &= ~MEN_Z135_MCR_LOOP; + + if (conf_reg != old) + iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG); +} + +/** + * men_z135_get_mctrl() - Get modem control lines + * @port: The UART port + * + * Returns the current state of modem control inputs. + */ +static unsigned int men_z135_get_mctrl(struct uart_port *port) +{ + unsigned int mctrl = 0; + u8 msr; + + msr = ioread8(port->membase + MEN_Z135_STAT_REG + 1); + + if (msr & MEN_Z135_MSR_CTS) + mctrl |= TIOCM_CTS; + if (msr & MEN_Z135_MSR_DSR) + mctrl |= TIOCM_DSR; + if (msr & MEN_Z135_MSR_RI) + mctrl |= TIOCM_RI; + if (msr & MEN_Z135_MSR_DCD) + mctrl |= TIOCM_CAR; + + return mctrl; +} + +/** + * men_z135_stop_tx() - Stop transmitting characters + * @port: The UART port + * + * Stop transmitting characters. This might be due to CTS line becoming + * inactive or the tty layer indicating we want to stop transmission due to + * an XOFF character. + */ +static void men_z135_stop_tx(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + + men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN); +} + +/* + * men_z135_disable_ms() - Disable Modem Status + * port: The UART port + * + * Disable Modem Status IRQ. + */ +static void men_z135_disable_ms(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + + men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_MSIEN); +} + +/** + * men_z135_start_tx() - Start transmitting characters + * @port: The UART port + * + * Start transmitting characters. This actually doesn't transmit anything, but + * fires off the TX tasklet. + */ +static void men_z135_start_tx(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + + if (uart->automode) + men_z135_disable_ms(port); + + men_z135_handle_tx(uart); +} + +/** + * men_z135_stop_rx() - Stop receiving characters + * @port: The UART port + * + * Stop receiving characters; the port is in the process of being closed. + */ +static void men_z135_stop_rx(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + + men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_RXCIEN); +} + +/** + * men_z135_enable_ms() - Enable Modem Status + * @port: the port + * + * Enable Modem Status IRQ.
+ */ +static void men_z135_enable_ms(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + + men_z135_reg_set(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_MSIEN); +} + +static int men_z135_startup(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + int err; + u32 conf_reg = 0; + + err = men_z135_request_irq(uart); + if (err) + return -ENODEV; + + conf_reg = ioread32(port->membase + MEN_Z135_CONF_REG); + + /* Activate all but TX space available IRQ */ + conf_reg |= MEN_Z135_ALL_IRQS & ~MEN_Z135_IER_TXCIEN; + conf_reg &= ~(0xff << 16); + conf_reg |= (txlvl << 16); + conf_reg |= (rxlvl << 20); + + iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG); + + if (rx_timeout) + iowrite32(rx_timeout, port->membase + MEN_Z135_TIMEOUT); + + return 0; +} + +static void men_z135_shutdown(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + u32 conf_reg = 0; + + conf_reg |= MEN_Z135_ALL_IRQS; + + men_z135_reg_clr(uart, MEN_Z135_CONF_REG, conf_reg); + + free_irq(uart->port.irq, uart); +} + +static void men_z135_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct men_z135_port *uart = to_men_z135(port); + unsigned int baud; + u32 conf_reg; + u32 bd_reg; + u32 uart_freq; + u8 lcr; + + conf_reg = ioread32(port->membase + MEN_Z135_CONF_REG); + lcr = LCR(conf_reg); + + /* byte size */ + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr |= MEN_Z135_WL5; + break; + case CS6: + lcr |= MEN_Z135_WL6; + break; + case CS7: + lcr |= MEN_Z135_WL7; + break; + case CS8: + lcr |= MEN_Z135_WL8; + break; + } + + /* stop bits */ + if (termios->c_cflag & CSTOPB) + lcr |= MEN_Z135_NSTB2 << MEN_Z135_STB_SHIFT; + + /* parity */ + if (termios->c_cflag & PARENB) { + lcr |= MEN_Z135_PAR_ENA << MEN_Z135_PEN_SHIFT; + + if (termios->c_cflag & PARODD) + lcr |= MEN_Z135_PTY_ODD << MEN_Z135_PTY_SHIFT; + else + lcr |= MEN_Z135_PTY_EVN << MEN_Z135_PTY_SHIFT; + } else + lcr |= MEN_Z135_PAR_DIS << MEN_Z135_PEN_SHIFT; + + conf_reg |= MEN_Z135_IER_MSIEN; + if (termios->c_cflag & CRTSCTS) { + conf_reg |= MEN_Z135_MCR_RCFC; + uart->automode = true; + termios->c_cflag &= ~CLOCAL; + } else { + conf_reg &= ~MEN_Z135_MCR_RCFC; + uart->automode = false; + } + + termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ + + conf_reg |= lcr << MEN_Z135_LCR_SHIFT; + iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG); + + uart_freq = ioread32(port->membase + MEN_Z135_UART_FREQ); + if (uart_freq == 0) + uart_freq = MEN_Z135_BASECLK; + + baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16); + + spin_lock_irq(&port->lock); + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + bd_reg = uart_freq / (4 * baud); + iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG); + + uart_update_timeout(port, termios->c_cflag, baud); + spin_unlock_irq(&port->lock); +} + +static const char *men_z135_type(struct uart_port *port) +{ + return KBUILD_MODNAME; +} + +static void men_z135_release_port(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + + iounmap(port->membase); + port->membase = NULL; + + mcb_release_mem(uart->mem); +} + +static int men_z135_request_port(struct uart_port *port) +{ + struct men_z135_port *uart = to_men_z135(port); + struct mcb_device *mdev = uart->mdev; + struct resource *mem; + + mem = mcb_request_mem(uart->mdev, dev_name(&mdev->dev)); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + port->mapbase = mem->start; + uart->mem = mem; 
+ + port->membase = ioremap(mem->start, resource_size(mem)); + if (port->membase == NULL) { + mcb_release_mem(mem); + return -ENOMEM; + } + + return 0; +} + +static void men_z135_config_port(struct uart_port *port, int type) +{ + port->type = PORT_MEN_Z135; + men_z135_request_port(port); +} + +static int men_z135_verify_port(struct uart_port *port, + struct serial_struct *serinfo) +{ + return -EINVAL; +} + +static const struct uart_ops men_z135_ops = { + .tx_empty = men_z135_tx_empty, + .set_mctrl = men_z135_set_mctrl, + .get_mctrl = men_z135_get_mctrl, + .stop_tx = men_z135_stop_tx, + .start_tx = men_z135_start_tx, + .stop_rx = men_z135_stop_rx, + .enable_ms = men_z135_enable_ms, + .startup = men_z135_startup, + .shutdown = men_z135_shutdown, + .set_termios = men_z135_set_termios, + .type = men_z135_type, + .release_port = men_z135_release_port, + .request_port = men_z135_request_port, + .config_port = men_z135_config_port, + .verify_port = men_z135_verify_port, +}; + +static struct uart_driver men_z135_driver = { + .owner = THIS_MODULE, + .driver_name = KBUILD_MODNAME, + .dev_name = "ttyHSU", + .major = 0, + .minor = 0, + .nr = MEN_Z135_MAX_PORTS, +}; + +/** + * men_z135_probe() - Probe a z135 instance + * @mdev: The MCB device + * @id: The MCB device ID + * + * men_z135_probe does the basic setup of hardware resources and registers the + * new uart port to the tty layer. + */ +static int men_z135_probe(struct mcb_device *mdev, + const struct mcb_device_id *id) +{ + struct men_z135_port *uart; + struct resource *mem; + struct device *dev; + int err; + + dev = &mdev->dev; + + uart = devm_kzalloc(dev, sizeof(struct men_z135_port), GFP_KERNEL); + if (!uart) + return -ENOMEM; + + uart->rxbuf = (unsigned char *)__get_free_page(GFP_KERNEL); + if (!uart->rxbuf) + return -ENOMEM; + + mem = &mdev->mem; + + mcb_set_drvdata(mdev, uart); + + uart->port.uartclk = MEN_Z135_BASECLK * 16; + uart->port.fifosize = MEN_Z135_FIFO_SIZE; + uart->port.iotype = UPIO_MEM; + uart->port.ops = &men_z135_ops; + uart->port.irq = mcb_get_irq(mdev); + uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; + uart->port.line = line++; + uart->port.dev = dev; + uart->port.type = PORT_MEN_Z135; + uart->port.mapbase = mem->start; + uart->port.membase = NULL; + uart->mdev = mdev; + + spin_lock_init(&uart->lock); + + err = uart_add_one_port(&men_z135_driver, &uart->port); + if (err) + goto err; + + return 0; + +err: + free_page((unsigned long) uart->rxbuf); + dev_err(dev, "Failed to add UART: %d\n", err); + + return err; +} + +/** + * men_z135_remove() - Remove a z135 instance from the system + * + * @mdev: The MCB device + */ +static void men_z135_remove(struct mcb_device *mdev) +{ + struct men_z135_port *uart = mcb_get_drvdata(mdev); + + line--; + uart_remove_one_port(&men_z135_driver, &uart->port); + free_page((unsigned long) uart->rxbuf); +} + +static const struct mcb_device_id men_z135_ids[] = { + { .device = 0x87 }, + { } +}; +MODULE_DEVICE_TABLE(mcb, men_z135_ids); + +static struct mcb_driver mcb_driver = { + .driver = { + .name = "z135-uart", + .owner = THIS_MODULE, + }, + .probe = men_z135_probe, + .remove = men_z135_remove, + .id_table = men_z135_ids, +}; + +/** + * men_z135_init() - Driver Registration Routine + * + * men_z135_init is the first routine called when the driver is loaded. All it + * does is register with the legacy MEN Chameleon subsystem. 
+ */ +static int __init men_z135_init(void) +{ + int err; + + err = uart_register_driver(&men_z135_driver); + if (err) { + pr_err("Failed to register UART: %d\n", err); + return err; + } + + err = mcb_register_driver(&mcb_driver); + if (err) { + pr_err("Failed to register MCB driver: %d\n", err); + uart_unregister_driver(&men_z135_driver); + return err; + } + + return 0; +} +module_init(men_z135_init); + +/** + * men_z135_exit() - Driver Exit Routine + * + * men_z135_exit is called just before the driver is removed from memory. + */ +static void __exit men_z135_exit(void) +{ + mcb_unregister_driver(&mcb_driver); + uart_unregister_driver(&men_z135_driver); +} +module_exit(men_z135_exit); + +MODULE_AUTHOR("Johannes Thumshirn "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MEN 16z135 High Speed UART"); +MODULE_ALIAS("mcb:16z135"); +MODULE_IMPORT_NS(MCB); diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c new file mode 100644 index 000000000..8f6d54c04 --- /dev/null +++ b/drivers/tty/serial/meson_uart.c @@ -0,0 +1,847 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on meson_uart.c, by AMLOGIC, INC. + * + * Copyright (C) 2014 Carlo Caione + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register offsets */ +#define AML_UART_WFIFO 0x00 +#define AML_UART_RFIFO 0x04 +#define AML_UART_CONTROL 0x08 +#define AML_UART_STATUS 0x0c +#define AML_UART_MISC 0x10 +#define AML_UART_REG5 0x14 + +/* AML_UART_CONTROL bits */ +#define AML_UART_TX_EN BIT(12) +#define AML_UART_RX_EN BIT(13) +#define AML_UART_TWO_WIRE_EN BIT(15) +#define AML_UART_STOP_BIT_LEN_MASK (0x03 << 16) +#define AML_UART_STOP_BIT_1SB (0x00 << 16) +#define AML_UART_STOP_BIT_2SB (0x01 << 16) +#define AML_UART_PARITY_TYPE BIT(18) +#define AML_UART_PARITY_EN BIT(19) +#define AML_UART_TX_RST BIT(22) +#define AML_UART_RX_RST BIT(23) +#define AML_UART_CLEAR_ERR BIT(24) +#define AML_UART_RX_INT_EN BIT(27) +#define AML_UART_TX_INT_EN BIT(28) +#define AML_UART_DATA_LEN_MASK (0x03 << 20) +#define AML_UART_DATA_LEN_8BIT (0x00 << 20) +#define AML_UART_DATA_LEN_7BIT (0x01 << 20) +#define AML_UART_DATA_LEN_6BIT (0x02 << 20) +#define AML_UART_DATA_LEN_5BIT (0x03 << 20) + +/* AML_UART_STATUS bits */ +#define AML_UART_PARITY_ERR BIT(16) +#define AML_UART_FRAME_ERR BIT(17) +#define AML_UART_TX_FIFO_WERR BIT(18) +#define AML_UART_RX_EMPTY BIT(20) +#define AML_UART_TX_FULL BIT(21) +#define AML_UART_TX_EMPTY BIT(22) +#define AML_UART_XMIT_BUSY BIT(25) +#define AML_UART_ERR (AML_UART_PARITY_ERR | \ + AML_UART_FRAME_ERR | \ + AML_UART_TX_FIFO_WERR) + +/* AML_UART_MISC bits */ +#define AML_UART_XMIT_IRQ(c) (((c) & 0xff) << 8) +#define AML_UART_RECV_IRQ(c) ((c) & 0xff) + +/* AML_UART_REG5 bits */ +#define AML_UART_BAUD_MASK 0x7fffff +#define AML_UART_BAUD_USE BIT(23) +#define AML_UART_BAUD_XTAL BIT(24) +#define AML_UART_BAUD_XTAL_DIV2 BIT(27) + +#define AML_UART_PORT_NUM 12 +#define AML_UART_PORT_OFFSET 6 +#define AML_UART_DEV_NAME "ttyAML" + +#define AML_UART_POLL_USEC 5 +#define AML_UART_TIMEOUT_USEC 10000 + +static struct uart_driver meson_uart_driver; + +static struct uart_port *meson_ports[AML_UART_PORT_NUM]; + +struct meson_uart_data { + bool has_xtal_div2; +}; + +static void meson_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int meson_uart_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS; +} + +static unsigned int meson_uart_tx_empty(struct uart_port *port) +{ + u32 val; + + 
val = readl(port->membase + AML_UART_STATUS); + val &= (AML_UART_TX_EMPTY | AML_UART_XMIT_BUSY); + return (val == AML_UART_TX_EMPTY) ? TIOCSER_TEMT : 0; +} + +static void meson_uart_stop_tx(struct uart_port *port) +{ + u32 val; + + val = readl(port->membase + AML_UART_CONTROL); + val &= ~AML_UART_TX_INT_EN; + writel(val, port->membase + AML_UART_CONTROL); +} + +static void meson_uart_stop_rx(struct uart_port *port) +{ + u32 val; + + val = readl(port->membase + AML_UART_CONTROL); + val &= ~AML_UART_RX_EN; + writel(val, port->membase + AML_UART_CONTROL); +} + +static void meson_uart_shutdown(struct uart_port *port) +{ + unsigned long flags; + u32 val; + + free_irq(port->irq, port); + + spin_lock_irqsave(&port->lock, flags); + + val = readl(port->membase + AML_UART_CONTROL); + val &= ~AML_UART_RX_EN; + val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN); + writel(val, port->membase + AML_UART_CONTROL); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void meson_uart_start_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned int ch; + u32 val; + + if (uart_tx_stopped(port)) { + meson_uart_stop_tx(port); + return; + } + + while (!(readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL)) { + if (port->x_char) { + writel(port->x_char, port->membase + AML_UART_WFIFO); + port->icount.tx++; + port->x_char = 0; + continue; + } + + if (uart_circ_empty(xmit)) + break; + + ch = xmit->buf[xmit->tail]; + writel(ch, port->membase + AML_UART_WFIFO); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (!uart_circ_empty(xmit)) { + val = readl(port->membase + AML_UART_CONTROL); + val |= AML_UART_TX_INT_EN; + writel(val, port->membase + AML_UART_CONTROL); + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void meson_receive_chars(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + char flag; + u32 ostatus, status, ch, mode; + + do { + flag = TTY_NORMAL; + port->icount.rx++; + ostatus = status = readl(port->membase + AML_UART_STATUS); + + if (status & AML_UART_ERR) { + if (status & AML_UART_TX_FIFO_WERR) + port->icount.overrun++; + else if (status & AML_UART_FRAME_ERR) + port->icount.frame++; + else if (status & AML_UART_PARITY_ERR) + port->icount.frame++; + + mode = readl(port->membase + AML_UART_CONTROL); + mode |= AML_UART_CLEAR_ERR; + writel(mode, port->membase + AML_UART_CONTROL); + + /* It doesn't clear to 0 automatically */ + mode &= ~AML_UART_CLEAR_ERR; + writel(mode, port->membase + AML_UART_CONTROL); + + status &= port->read_status_mask; + if (status & AML_UART_FRAME_ERR) + flag = TTY_FRAME; + else if (status & AML_UART_PARITY_ERR) + flag = TTY_PARITY; + } + + ch = readl(port->membase + AML_UART_RFIFO); + ch &= 0xff; + + if ((ostatus & AML_UART_FRAME_ERR) && (ch == 0)) { + port->icount.brk++; + flag = TTY_BREAK; + if (uart_handle_break(port)) + continue; + } + + if (uart_handle_sysrq_char(port, ch)) + continue; + + if ((status & port->ignore_status_mask) == 0) + tty_insert_flip_char(tport, ch, flag); + + if (status & AML_UART_TX_FIFO_WERR) + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + + } while (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)); + + tty_flip_buffer_push(tport); +} + +static irqreturn_t meson_uart_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = (struct uart_port *)dev_id; + + spin_lock(&port->lock); + + if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)) + meson_receive_chars(port); + + if 
(!(readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL)) { + if (readl(port->membase + AML_UART_CONTROL) & AML_UART_TX_INT_EN) + meson_uart_start_tx(port); + } + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static const char *meson_uart_type(struct uart_port *port) +{ + return (port->type == PORT_MESON) ? "meson_uart" : NULL; +} + +/* + * This function is called only from probe() using a temporary io mapping + * in order to perform a reset before setting up the device. Since the + * temporarily mapped region was successfully requested, there can be no + * console on this port at this time. Hence it is not necessary for this + * function to acquire the port->lock. (Since there is no console on this + * port at this time, the port->lock is not initialized yet.) + */ +static void meson_uart_reset(struct uart_port *port) +{ + u32 val; + + val = readl(port->membase + AML_UART_CONTROL); + val |= (AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLEAR_ERR); + writel(val, port->membase + AML_UART_CONTROL); + + val &= ~(AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLEAR_ERR); + writel(val, port->membase + AML_UART_CONTROL); +} + +static int meson_uart_startup(struct uart_port *port) +{ + unsigned long flags; + u32 val; + int ret = 0; + + spin_lock_irqsave(&port->lock, flags); + + val = readl(port->membase + AML_UART_CONTROL); + val |= AML_UART_CLEAR_ERR; + writel(val, port->membase + AML_UART_CONTROL); + val &= ~AML_UART_CLEAR_ERR; + writel(val, port->membase + AML_UART_CONTROL); + + val |= (AML_UART_RX_EN | AML_UART_TX_EN); + writel(val, port->membase + AML_UART_CONTROL); + + val |= (AML_UART_RX_INT_EN | AML_UART_TX_INT_EN); + writel(val, port->membase + AML_UART_CONTROL); + + val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2)); + writel(val, port->membase + AML_UART_MISC); + + spin_unlock_irqrestore(&port->lock, flags); + + ret = request_irq(port->irq, meson_uart_interrupt, 0, + port->name, port); + + return ret; +} + +static void meson_uart_change_speed(struct uart_port *port, unsigned long baud) +{ + const struct meson_uart_data *private_data = port->private_data; + u32 val = 0; + + while (!meson_uart_tx_empty(port)) + cpu_relax(); + + if (port->uartclk == 24000000) { + unsigned int xtal_div = 3; + + if (private_data && private_data->has_xtal_div2) { + xtal_div = 2; + val |= AML_UART_BAUD_XTAL_DIV2; + } + val |= DIV_ROUND_CLOSEST(port->uartclk / xtal_div, baud) - 1; + val |= AML_UART_BAUD_XTAL; + } else { + val = DIV_ROUND_CLOSEST(port->uartclk / 4, baud) - 1; + } + val |= AML_UART_BAUD_USE; + writel(val, port->membase + AML_UART_REG5); +} + +static void meson_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int cflags, iflags, baud; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + + cflags = termios->c_cflag; + iflags = termios->c_iflag; + + val = readl(port->membase + AML_UART_CONTROL); + + val &= ~AML_UART_DATA_LEN_MASK; + switch (cflags & CSIZE) { + case CS8: + val |= AML_UART_DATA_LEN_8BIT; + break; + case CS7: + val |= AML_UART_DATA_LEN_7BIT; + break; + case CS6: + val |= AML_UART_DATA_LEN_6BIT; + break; + case CS5: + val |= AML_UART_DATA_LEN_5BIT; + break; + } + + if (cflags & PARENB) + val |= AML_UART_PARITY_EN; + else + val &= ~AML_UART_PARITY_EN; + + if (cflags & PARODD) + val |= AML_UART_PARITY_TYPE; + else + val &= ~AML_UART_PARITY_TYPE; + + val &= ~AML_UART_STOP_BIT_LEN_MASK; + if (cflags & CSTOPB) + val |= AML_UART_STOP_BIT_2SB; + else + val |= 
AML_UART_STOP_BIT_1SB; + + if (cflags & CRTSCTS) { + if (port->flags & UPF_HARD_FLOW) + val &= ~AML_UART_TWO_WIRE_EN; + else + termios->c_cflag &= ~CRTSCTS; + } else { + val |= AML_UART_TWO_WIRE_EN; + } + + writel(val, port->membase + AML_UART_CONTROL); + + baud = uart_get_baud_rate(port, termios, old, 50, 4000000); + meson_uart_change_speed(port, baud); + + port->read_status_mask = AML_UART_TX_FIFO_WERR; + if (iflags & INPCK) + port->read_status_mask |= AML_UART_PARITY_ERR | + AML_UART_FRAME_ERR; + + port->ignore_status_mask = 0; + if (iflags & IGNPAR) + port->ignore_status_mask |= AML_UART_PARITY_ERR | + AML_UART_FRAME_ERR; + + uart_update_timeout(port, termios->c_cflag, baud); + spin_unlock_irqrestore(&port->lock, flags); +} + +static int meson_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + + if (port->type != PORT_MESON) + ret = -EINVAL; + if (port->irq != ser->irq) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +static void meson_uart_release_port(struct uart_port *port) +{ + devm_iounmap(port->dev, port->membase); + port->membase = NULL; + devm_release_mem_region(port->dev, port->mapbase, port->mapsize); +} + +static int meson_uart_request_port(struct uart_port *port) +{ + if (!devm_request_mem_region(port->dev, port->mapbase, port->mapsize, + dev_name(port->dev))) { + dev_err(port->dev, "Memory region busy\n"); + return -EBUSY; + } + + port->membase = devm_ioremap(port->dev, port->mapbase, + port->mapsize); + if (!port->membase) + return -ENOMEM; + + return 0; +} + +static void meson_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_MESON; + meson_uart_request_port(port); + } +} + +#ifdef CONFIG_CONSOLE_POLL +/* + * Console polling routines for writing and reading from the uart while + * in an interrupt or debug context (i.e. kgdb). 
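+ * They poll AML_UART_STATUS directly instead of relying on the RX/TX
+ * interrupts, so they remain usable while interrupts are masked (for
+ * example when kgdboc hands this port to the kgdb polled I/O path).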
+ */ + +static int meson_uart_poll_get_char(struct uart_port *port) +{ + u32 c; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY) + c = NO_POLL_CHAR; + else + c = readl(port->membase + AML_UART_RFIFO); + + spin_unlock_irqrestore(&port->lock, flags); + + return c; +} + +static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c) +{ + unsigned long flags; + u32 reg; + int ret; + + spin_lock_irqsave(&port->lock, flags); + + /* Wait until FIFO is empty or timeout */ + ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg, + reg & AML_UART_TX_EMPTY, + AML_UART_POLL_USEC, + AML_UART_TIMEOUT_USEC); + if (ret == -ETIMEDOUT) { + dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n"); + goto out; + } + + /* Write the character */ + writel(c, port->membase + AML_UART_WFIFO); + + /* Wait until FIFO is empty or timeout */ + ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg, + reg & AML_UART_TX_EMPTY, + AML_UART_POLL_USEC, + AML_UART_TIMEOUT_USEC); + if (ret == -ETIMEDOUT) + dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n"); + +out: + spin_unlock_irqrestore(&port->lock, flags); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +static const struct uart_ops meson_uart_ops = { + .set_mctrl = meson_uart_set_mctrl, + .get_mctrl = meson_uart_get_mctrl, + .tx_empty = meson_uart_tx_empty, + .start_tx = meson_uart_start_tx, + .stop_tx = meson_uart_stop_tx, + .stop_rx = meson_uart_stop_rx, + .startup = meson_uart_startup, + .shutdown = meson_uart_shutdown, + .set_termios = meson_uart_set_termios, + .type = meson_uart_type, + .config_port = meson_uart_config_port, + .request_port = meson_uart_request_port, + .release_port = meson_uart_release_port, + .verify_port = meson_uart_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = meson_uart_poll_get_char, + .poll_put_char = meson_uart_poll_put_char, +#endif +}; + +#ifdef CONFIG_SERIAL_MESON_CONSOLE +static void meson_uart_enable_tx_engine(struct uart_port *port) +{ + u32 val; + + val = readl(port->membase + AML_UART_CONTROL); + val |= AML_UART_TX_EN; + writel(val, port->membase + AML_UART_CONTROL); +} + +static void meson_console_putchar(struct uart_port *port, unsigned char ch) +{ + if (!port->membase) + return; + + while (readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL) + cpu_relax(); + writel(ch, port->membase + AML_UART_WFIFO); +} + +static void meson_serial_port_write(struct uart_port *port, const char *s, + u_int count) +{ + unsigned long flags; + int locked; + u32 val, tmp; + + local_irq_save(flags); + if (port->sysrq) { + locked = 0; + } else if (oops_in_progress) { + locked = spin_trylock(&port->lock); + } else { + spin_lock(&port->lock); + locked = 1; + } + + val = readl(port->membase + AML_UART_CONTROL); + tmp = val & ~(AML_UART_TX_INT_EN | AML_UART_RX_INT_EN); + writel(tmp, port->membase + AML_UART_CONTROL); + + uart_console_write(port, s, count, meson_console_putchar); + writel(val, port->membase + AML_UART_CONTROL); + + if (locked) + spin_unlock(&port->lock); + local_irq_restore(flags); +} + +static void meson_serial_console_write(struct console *co, const char *s, + u_int count) +{ + struct uart_port *port; + + port = meson_ports[co->index]; + if (!port) + return; + + meson_serial_port_write(port, s, count); +} + +static int meson_serial_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if 
(co->index < 0 || co->index >= AML_UART_PORT_NUM) + return -EINVAL; + + port = meson_ports[co->index]; + if (!port || !port->membase) + return -ENODEV; + + meson_uart_enable_tx_engine(port); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console meson_serial_console = { + .name = AML_UART_DEV_NAME, + .write = meson_serial_console_write, + .device = uart_console_device, + .setup = meson_serial_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &meson_uart_driver, +}; + +static int __init meson_serial_console_init(void) +{ + register_console(&meson_serial_console); + return 0; +} + +static void meson_serial_early_console_write(struct console *co, + const char *s, + u_int count) +{ + struct earlycon_device *dev = co->data; + + meson_serial_port_write(&dev->port, s, count); +} + +static int __init +meson_serial_early_console_setup(struct earlycon_device *device, const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + meson_uart_enable_tx_engine(&device->port); + device->con->write = meson_serial_early_console_write; + return 0; +} + +OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart", + meson_serial_early_console_setup); + +#define MESON_SERIAL_CONSOLE (&meson_serial_console) +#else +static int __init meson_serial_console_init(void) { + return 0; +} +#define MESON_SERIAL_CONSOLE NULL +#endif + +static struct uart_driver meson_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "meson_uart", + .dev_name = AML_UART_DEV_NAME, + .nr = AML_UART_PORT_NUM, + .cons = MESON_SERIAL_CONSOLE, +}; + +static int meson_uart_probe_clocks(struct platform_device *pdev, + struct uart_port *port) +{ + struct clk *clk_xtal = NULL; + struct clk *clk_pclk = NULL; + struct clk *clk_baud = NULL; + + clk_pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); + if (IS_ERR(clk_pclk)) + return PTR_ERR(clk_pclk); + + clk_xtal = devm_clk_get_enabled(&pdev->dev, "xtal"); + if (IS_ERR(clk_xtal)) + return PTR_ERR(clk_xtal); + + clk_baud = devm_clk_get_enabled(&pdev->dev, "baud"); + if (IS_ERR(clk_baud)) + return PTR_ERR(clk_baud); + + port->uartclk = clk_get_rate(clk_baud); + + return 0; +} + +static int meson_uart_probe(struct platform_device *pdev) +{ + struct resource *res_mem; + struct uart_port *port; + u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */ + int ret = 0; + int irq; + bool has_rtscts; + + if (pdev->dev.of_node) + pdev->id = of_alias_get_id(pdev->dev.of_node, "serial"); + + if (pdev->id < 0) { + int id; + + for (id = AML_UART_PORT_OFFSET; id < AML_UART_PORT_NUM; id++) { + if (!meson_ports[id]) { + pdev->id = id; + break; + } + } + } + + if (pdev->id < 0 || pdev->id >= AML_UART_PORT_NUM) + return -EINVAL; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize); + has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts"); + + if (meson_ports[pdev->id]) { + dev_err(&pdev->dev, "port %d already allocated\n", pdev->id); + return -EBUSY; + } + + port = devm_kzalloc(&pdev->dev, sizeof(struct uart_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + ret = meson_uart_probe_clocks(pdev, port); + if (ret) + return ret; + + port->iotype = UPIO_MEM; + port->mapbase = res_mem->start; + port->mapsize = resource_size(res_mem); + port->irq = irq; + port->flags = UPF_BOOT_AUTOCONF | 
UPF_LOW_LATENCY; + if (has_rtscts) + port->flags |= UPF_HARD_FLOW; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE); + port->dev = &pdev->dev; + port->line = pdev->id; + port->type = PORT_MESON; + port->x_char = 0; + port->ops = &meson_uart_ops; + port->fifosize = fifosize; + port->private_data = (void *)device_get_match_data(&pdev->dev); + + meson_ports[pdev->id] = port; + platform_set_drvdata(pdev, port); + + /* reset port before registering (and possibly registering console) */ + if (meson_uart_request_port(port) >= 0) { + meson_uart_reset(port); + meson_uart_release_port(port); + } + + ret = uart_add_one_port(&meson_uart_driver, port); + if (ret) + meson_ports[pdev->id] = NULL; + + return ret; +} + +static int meson_uart_remove(struct platform_device *pdev) +{ + struct uart_port *port; + + port = platform_get_drvdata(pdev); + uart_remove_one_port(&meson_uart_driver, port); + meson_ports[pdev->id] = NULL; + + return 0; +} + +static struct meson_uart_data s4_uart_data = { + .has_xtal_div2 = true, +}; + +static const struct of_device_id meson_uart_dt_match[] = { + { .compatible = "amlogic,meson6-uart" }, + { .compatible = "amlogic,meson8-uart" }, + { .compatible = "amlogic,meson8b-uart" }, + { .compatible = "amlogic,meson-gx-uart" }, + { + .compatible = "amlogic,meson-s4-uart", + .data = (void *)&s4_uart_data, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_uart_dt_match); + +static struct platform_driver meson_uart_platform_driver = { + .probe = meson_uart_probe, + .remove = meson_uart_remove, + .driver = { + .name = "meson_uart", + .of_match_table = meson_uart_dt_match, + }, +}; + +static int __init meson_uart_init(void) +{ + int ret; + + ret = meson_serial_console_init(); + if (ret) + return ret; + + ret = uart_register_driver(&meson_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&meson_uart_platform_driver); + if (ret) + uart_unregister_driver(&meson_uart_driver); + + return ret; +} + +static void __exit meson_uart_exit(void) +{ + platform_driver_unregister(&meson_uart_platform_driver); + uart_unregister_driver(&meson_uart_driver); +} + +module_init(meson_uart_init); +module_exit(meson_uart_exit); + +MODULE_AUTHOR("Carlo Caione "); +MODULE_DESCRIPTION("Amlogic Meson serial port driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c new file mode 100644 index 000000000..c15e0d84d --- /dev/null +++ b/drivers/tty/serial/milbeaut_usio.c @@ -0,0 +1,612 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Socionext Inc. 
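+ *
+ * Serial driver for the USIO blocks on Socionext Milbeaut SoCs
+ * ("socionext,milbeaut-usio-uart"), registered as ttyUSI ports.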
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define USIO_NAME "mlb-usio-uart" +#define USIO_UART_DEV_NAME "ttyUSI" + +static struct uart_port mlb_usio_ports[CONFIG_SERIAL_MILBEAUT_USIO_PORTS]; + +#define RX 0 +#define TX 1 +static int mlb_usio_irq[CONFIG_SERIAL_MILBEAUT_USIO_PORTS][2]; + +#define MLB_USIO_REG_SMR 0 +#define MLB_USIO_REG_SCR 1 +#define MLB_USIO_REG_ESCR 2 +#define MLB_USIO_REG_SSR 3 +#define MLB_USIO_REG_DR 4 +#define MLB_USIO_REG_BGR 6 +#define MLB_USIO_REG_FCR 12 +#define MLB_USIO_REG_FBYTE 14 + +#define MLB_USIO_SMR_SOE BIT(0) +#define MLB_USIO_SMR_SBL BIT(3) +#define MLB_USIO_SCR_TXE BIT(0) +#define MLB_USIO_SCR_RXE BIT(1) +#define MLB_USIO_SCR_TBIE BIT(2) +#define MLB_USIO_SCR_TIE BIT(3) +#define MLB_USIO_SCR_RIE BIT(4) +#define MLB_USIO_SCR_UPCL BIT(7) +#define MLB_USIO_ESCR_L_8BIT 0 +#define MLB_USIO_ESCR_L_5BIT 1 +#define MLB_USIO_ESCR_L_6BIT 2 +#define MLB_USIO_ESCR_L_7BIT 3 +#define MLB_USIO_ESCR_P BIT(3) +#define MLB_USIO_ESCR_PEN BIT(4) +#define MLB_USIO_ESCR_FLWEN BIT(7) +#define MLB_USIO_SSR_TBI BIT(0) +#define MLB_USIO_SSR_TDRE BIT(1) +#define MLB_USIO_SSR_RDRF BIT(2) +#define MLB_USIO_SSR_ORE BIT(3) +#define MLB_USIO_SSR_FRE BIT(4) +#define MLB_USIO_SSR_PE BIT(5) +#define MLB_USIO_SSR_REC BIT(7) +#define MLB_USIO_SSR_BRK BIT(8) +#define MLB_USIO_FCR_FE1 BIT(0) +#define MLB_USIO_FCR_FE2 BIT(1) +#define MLB_USIO_FCR_FCL1 BIT(2) +#define MLB_USIO_FCR_FCL2 BIT(3) +#define MLB_USIO_FCR_FSET BIT(4) +#define MLB_USIO_FCR_FTIE BIT(9) +#define MLB_USIO_FCR_FDRQ BIT(10) +#define MLB_USIO_FCR_FRIIE BIT(11) + +static void mlb_usio_stop_tx(struct uart_port *port) +{ + writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FTIE, + port->membase + MLB_USIO_REG_FCR); + writeb(readb(port->membase + MLB_USIO_REG_SCR) & ~MLB_USIO_SCR_TBIE, + port->membase + MLB_USIO_REG_SCR); +} + +static void mlb_usio_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + int count; + + writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FTIE, + port->membase + MLB_USIO_REG_FCR); + writeb(readb(port->membase + MLB_USIO_REG_SCR) & + ~(MLB_USIO_SCR_TIE | MLB_USIO_SCR_TBIE), + port->membase + MLB_USIO_REG_SCR); + + if (port->x_char) { + writew(port->x_char, port->membase + MLB_USIO_REG_DR); + port->icount.tx++; + port->x_char = 0; + return; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + mlb_usio_stop_tx(port); + return; + } + + count = port->fifosize - + (readw(port->membase + MLB_USIO_REG_FBYTE) & 0xff); + + do { + writew(xmit->buf[xmit->tail], port->membase + MLB_USIO_REG_DR); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + + } while (--count > 0); + + writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FDRQ, + port->membase + MLB_USIO_REG_FCR); + + writeb(readb(port->membase + MLB_USIO_REG_SCR) | MLB_USIO_SCR_TBIE, + port->membase + MLB_USIO_REG_SCR); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + mlb_usio_stop_tx(port); +} + +static void mlb_usio_start_tx(struct uart_port *port) +{ + u16 fcr = readw(port->membase + MLB_USIO_REG_FCR); + + writew(fcr | MLB_USIO_FCR_FTIE, port->membase + MLB_USIO_REG_FCR); + if (!(fcr & MLB_USIO_FCR_FDRQ)) + return; + + writeb(readb(port->membase + MLB_USIO_REG_SCR) | MLB_USIO_SCR_TBIE, + port->membase + MLB_USIO_REG_SCR); + + if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI) + 
mlb_usio_tx_chars(port); +} + +static void mlb_usio_stop_rx(struct uart_port *port) +{ + writeb(readb(port->membase + MLB_USIO_REG_SCR) & ~MLB_USIO_SCR_RIE, + port->membase + MLB_USIO_REG_SCR); +} + +static void mlb_usio_enable_ms(struct uart_port *port) +{ + writeb(readb(port->membase + MLB_USIO_REG_SCR) | + MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE, + port->membase + MLB_USIO_REG_SCR); +} + +static void mlb_usio_rx_chars(struct uart_port *port) +{ + struct tty_port *ttyport = &port->state->port; + unsigned long flag = 0; + char ch = 0; + u8 status; + int max_count = 2; + + while (max_count--) { + status = readb(port->membase + MLB_USIO_REG_SSR); + + if (!(status & MLB_USIO_SSR_RDRF)) + break; + + if (!(status & (MLB_USIO_SSR_ORE | MLB_USIO_SSR_FRE | + MLB_USIO_SSR_PE))) { + ch = readw(port->membase + MLB_USIO_REG_DR); + flag = TTY_NORMAL; + port->icount.rx++; + if (uart_handle_sysrq_char(port, ch)) + continue; + uart_insert_char(port, status, MLB_USIO_SSR_ORE, + ch, flag); + continue; + } + if (status & MLB_USIO_SSR_PE) + port->icount.parity++; + if (status & MLB_USIO_SSR_ORE) + port->icount.overrun++; + status &= port->read_status_mask; + if (status & MLB_USIO_SSR_BRK) { + flag = TTY_BREAK; + ch = 0; + } else + if (status & MLB_USIO_SSR_PE) { + flag = TTY_PARITY; + ch = 0; + } else + if (status & MLB_USIO_SSR_FRE) { + flag = TTY_FRAME; + ch = 0; + } + if (flag) + uart_insert_char(port, status, MLB_USIO_SSR_ORE, + ch, flag); + + writeb(readb(port->membase + MLB_USIO_REG_SSR) | + MLB_USIO_SSR_REC, + port->membase + MLB_USIO_REG_SSR); + + max_count = readw(port->membase + MLB_USIO_REG_FBYTE) >> 8; + writew(readw(port->membase + MLB_USIO_REG_FCR) | + MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE, + port->membase + MLB_USIO_REG_FCR); + } + + tty_flip_buffer_push(ttyport); +} + +static irqreturn_t mlb_usio_rx_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + + spin_lock(&port->lock); + mlb_usio_rx_chars(port); + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mlb_usio_tx_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + + spin_lock(&port->lock); + if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI) + mlb_usio_tx_chars(port); + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static unsigned int mlb_usio_tx_empty(struct uart_port *port) +{ + return (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI) ? 
+ TIOCSER_TEMT : 0; +} + +static void mlb_usio_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int mlb_usio_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; + +} + +static void mlb_usio_break_ctl(struct uart_port *port, int break_state) +{ +} + +static int mlb_usio_startup(struct uart_port *port) +{ + const char *portname = to_platform_device(port->dev)->name; + unsigned long flags; + int ret, index = port->line; + unsigned char escr; + + ret = request_irq(mlb_usio_irq[index][RX], mlb_usio_rx_irq, + 0, portname, port); + if (ret) + return ret; + ret = request_irq(mlb_usio_irq[index][TX], mlb_usio_tx_irq, + 0, portname, port); + if (ret) { + free_irq(mlb_usio_irq[index][RX], port); + return ret; + } + + escr = readb(port->membase + MLB_USIO_REG_ESCR); + if (of_property_read_bool(port->dev->of_node, "auto-flow-control")) + escr |= MLB_USIO_ESCR_FLWEN; + spin_lock_irqsave(&port->lock, flags); + writeb(0, port->membase + MLB_USIO_REG_SCR); + writeb(escr, port->membase + MLB_USIO_REG_ESCR); + writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR); + writeb(MLB_USIO_SSR_REC, port->membase + MLB_USIO_REG_SSR); + writew(0, port->membase + MLB_USIO_REG_FCR); + writew(MLB_USIO_FCR_FCL1 | MLB_USIO_FCR_FCL2, + port->membase + MLB_USIO_REG_FCR); + writew(MLB_USIO_FCR_FE1 | MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE, + port->membase + MLB_USIO_REG_FCR); + writew(0, port->membase + MLB_USIO_REG_FBYTE); + writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE); + + writeb(MLB_USIO_SCR_TXE | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE | + MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR); + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void mlb_usio_shutdown(struct uart_port *port) +{ + int index = port->line; + + free_irq(mlb_usio_irq[index][RX], port); + free_irq(mlb_usio_irq[index][TX], port); +} + +static void mlb_usio_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int escr, smr = MLB_USIO_SMR_SOE; + unsigned long flags, baud, quot; + + switch (termios->c_cflag & CSIZE) { + case CS5: + escr = MLB_USIO_ESCR_L_5BIT; + break; + case CS6: + escr = MLB_USIO_ESCR_L_6BIT; + break; + case CS7: + escr = MLB_USIO_ESCR_L_7BIT; + break; + case CS8: + default: + escr = MLB_USIO_ESCR_L_8BIT; + break; + } + + if (termios->c_cflag & CSTOPB) + smr |= MLB_USIO_SMR_SBL; + + if (termios->c_cflag & PARENB) { + escr |= MLB_USIO_ESCR_PEN; + if (termios->c_cflag & PARODD) + escr |= MLB_USIO_ESCR_P; + } + /* Set hard flow control */ + if (of_property_read_bool(port->dev->of_node, "auto-flow-control") || + (termios->c_cflag & CRTSCTS)) + escr |= MLB_USIO_ESCR_FLWEN; + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk); + if (baud > 1) + quot = port->uartclk / baud - 1; + else + quot = 0; + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); + port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF | + MLB_USIO_SSR_TDRE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= MLB_USIO_SSR_FRE | MLB_USIO_SSR_PE; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= MLB_USIO_SSR_FRE | MLB_USIO_SSR_PE; + if ((termios->c_iflag & IGNBRK) && (termios->c_iflag & IGNPAR)) + port->ignore_status_mask |= MLB_USIO_SSR_ORE; + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= MLB_USIO_SSR_RDRF; + + writeb(0, port->membase + MLB_USIO_REG_SCR); + writeb(MLB_USIO_SCR_UPCL, 
port->membase + MLB_USIO_REG_SCR); + writeb(MLB_USIO_SSR_REC, port->membase + MLB_USIO_REG_SSR); + writew(0, port->membase + MLB_USIO_REG_FCR); + writeb(smr, port->membase + MLB_USIO_REG_SMR); + writeb(escr, port->membase + MLB_USIO_REG_ESCR); + writew(quot, port->membase + MLB_USIO_REG_BGR); + writew(0, port->membase + MLB_USIO_REG_FCR); + writew(MLB_USIO_FCR_FCL1 | MLB_USIO_FCR_FCL2 | MLB_USIO_FCR_FE1 | + MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE, + port->membase + MLB_USIO_REG_FCR); + writew(0, port->membase + MLB_USIO_REG_FBYTE); + writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE); + writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE | + MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR); + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *mlb_usio_type(struct uart_port *port) +{ + return ((port->type == PORT_MLB_USIO) ? USIO_NAME : NULL); +} + +static void mlb_usio_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_MLB_USIO; +} + +static const struct uart_ops mlb_usio_ops = { + .tx_empty = mlb_usio_tx_empty, + .set_mctrl = mlb_usio_set_mctrl, + .get_mctrl = mlb_usio_get_mctrl, + .stop_tx = mlb_usio_stop_tx, + .start_tx = mlb_usio_start_tx, + .stop_rx = mlb_usio_stop_rx, + .enable_ms = mlb_usio_enable_ms, + .break_ctl = mlb_usio_break_ctl, + .startup = mlb_usio_startup, + .shutdown = mlb_usio_shutdown, + .set_termios = mlb_usio_set_termios, + .type = mlb_usio_type, + .config_port = mlb_usio_config_port, +}; + +#ifdef CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE + +static void mlb_usio_console_putchar(struct uart_port *port, unsigned char c) +{ + while (!(readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TDRE)) + cpu_relax(); + + writew(c, port->membase + MLB_USIO_REG_DR); +} + +static void mlb_usio_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = &mlb_usio_ports[co->index]; + + uart_console_write(port, s, count, mlb_usio_console_putchar); +} + +static int __init mlb_usio_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 115200; + int parity = 'n'; + int flow = 'n'; + int bits = 8; + + if (co->index >= CONFIG_SERIAL_MILBEAUT_USIO_PORTS) + return -ENODEV; + + port = &mlb_usio_ports[co->index]; + if (!port->membase) + return -ENODEV; + + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + if (of_property_read_bool(port->dev->of_node, "auto-flow-control")) + flow = 'r'; + + return uart_set_options(port, co, baud, parity, bits, flow); +} + + +static struct uart_driver mlb_usio_uart_driver; +static struct console mlb_usio_console = { + .name = USIO_UART_DEV_NAME, + .write = mlb_usio_console_write, + .device = uart_console_device, + .setup = mlb_usio_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &mlb_usio_uart_driver, +}; + +static int __init mlb_usio_console_init(void) +{ + register_console(&mlb_usio_console); + return 0; +} +console_initcall(mlb_usio_console_init); + + +static void mlb_usio_early_console_write(struct console *co, const char *s, + u_int count) +{ + struct earlycon_device *dev = co->data; + + uart_console_write(&dev->port, s, count, mlb_usio_console_putchar); +} + +static int __init mlb_usio_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + device->con->write = mlb_usio_early_console_write; + return 0; +} + +OF_EARLYCON_DECLARE(mlb_usio, "socionext,milbeaut-usio-uart", + 
mlb_usio_early_console_setup); + +#define USIO_CONSOLE (&mlb_usio_console) +#else +#define USIO_CONSOLE NULL +#endif + +static struct uart_driver mlb_usio_uart_driver = { + .owner = THIS_MODULE, + .driver_name = USIO_NAME, + .dev_name = USIO_UART_DEV_NAME, + .cons = USIO_CONSOLE, + .nr = CONFIG_SERIAL_MILBEAUT_USIO_PORTS, +}; + +static int mlb_usio_probe(struct platform_device *pdev) +{ + struct clk *clk = devm_clk_get(&pdev->dev, NULL); + struct uart_port *port; + struct resource *res; + int index = 0; + int ret; + + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Missing clock\n"); + return PTR_ERR(clk); + } + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(&pdev->dev, "Clock enable failed: %d\n", ret); + return ret; + } + of_property_read_u32(pdev->dev.of_node, "index", &index); + port = &mlb_usio_ports[index]; + + port->private_data = (void *)clk; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "Missing regs\n"); + ret = -ENODEV; + goto failed; + } + port->membase = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + + ret = platform_get_irq_byname(pdev, "rx"); + mlb_usio_irq[index][RX] = ret; + + ret = platform_get_irq_byname(pdev, "tx"); + mlb_usio_irq[index][TX] = ret; + + port->irq = mlb_usio_irq[index][RX]; + port->uartclk = clk_get_rate(clk); + port->fifosize = 128; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE); + port->iotype = UPIO_MEM32; + port->flags = UPF_BOOT_AUTOCONF | UPF_SPD_VHI; + port->line = index; + port->ops = &mlb_usio_ops; + port->dev = &pdev->dev; + + ret = uart_add_one_port(&mlb_usio_uart_driver, port); + if (ret) { + dev_err(&pdev->dev, "Adding port failed: %d\n", ret); + goto failed; + } + return 0; + +failed: + clk_disable_unprepare(clk); + + return ret; +} + +static int mlb_usio_remove(struct platform_device *pdev) +{ + struct uart_port *port = &mlb_usio_ports[pdev->id]; + struct clk *clk = port->private_data; + + uart_remove_one_port(&mlb_usio_uart_driver, port); + clk_disable_unprepare(clk); + + return 0; +} + +static const struct of_device_id mlb_usio_dt_ids[] = { + { .compatible = "socionext,milbeaut-usio-uart" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mlb_usio_dt_ids); + +static struct platform_driver mlb_usio_driver = { + .probe = mlb_usio_probe, + .remove = mlb_usio_remove, + .driver = { + .name = USIO_NAME, + .of_match_table = mlb_usio_dt_ids, + }, +}; + +static int __init mlb_usio_init(void) +{ + int ret = uart_register_driver(&mlb_usio_uart_driver); + + if (ret) { + pr_err("%s: uart registration failed: %d\n", __func__, ret); + return ret; + } + ret = platform_driver_register(&mlb_usio_driver); + if (ret) { + uart_unregister_driver(&mlb_usio_uart_driver); + pr_err("%s: drv registration failed: %d\n", __func__, ret); + return ret; + } + + return 0; +} + +static void __exit mlb_usio_exit(void) +{ + platform_driver_unregister(&mlb_usio_driver); + uart_unregister_driver(&mlb_usio_uart_driver); +} + +module_init(mlb_usio_init); +module_exit(mlb_usio_exit); + +MODULE_AUTHOR("SOCIONEXT"); +MODULE_DESCRIPTION("MILBEAUT_USIO/UART Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c new file mode 100644 index 000000000..73362d4bc --- /dev/null +++ b/drivers/tty/serial/mpc52xx_uart.c @@ -0,0 +1,1953 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs. 
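+ * The same driver also covers the FIFO-based PSC variants found on the
+ * MPC512x and MPC5125 parts; the differences are isolated behind the
+ * psc_ops callback table defined below.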
+ * + * FIXME According to the usermanual the status bits in the status register + * are only updated when the peripherals access the FIFO and not when the + * CPU access them. So since we use this bits to know when we stop writing + * and reading, they may not be updated in-time and a race condition may + * exists. But I haven't be able to prove this and I don't care. But if + * any problem arises, it might worth checking. The TX/RX FIFO Stats + * registers should be used in addition. + * Update: Actually, they seem updated ... At least the bits we use. + * + * + * Maintainer : Sylvain Munaut + * + * Some of the code has been inspired/copied from the 2.4 code written + * by Dale Farnsworth . + * + * Copyright (C) 2008 Freescale Semiconductor Inc. + * John Rigby + * Added support for MPC5121 + * Copyright (C) 2006 Secret Lab Technologies Ltd. + * Grant Likely + * Copyright (C) 2004-2006 Sylvain Munaut + * Copyright (C) 2003 MontaVista, Software, Inc. + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + + +/* We've been assigned a range on the "Low-density serial ports" major */ +#define SERIAL_PSC_MAJOR 204 +#define SERIAL_PSC_MINOR 148 + + +#define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */ + + +static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM]; + /* Rem: - We use the read_status_mask as a shadow of + * psc->mpc52xx_psc_imr + * - It's important that is array is all zero on start as we + * use it to know if it's initialized or not ! If it's not sure + * it's cleared, then a memset(...,0,...) should be added to + * the console_init + */ + +/* lookup table for matching device nodes to index numbers */ +static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM]; + +static void mpc52xx_uart_of_enumerate(void); + + +#define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase)) + + +/* Forward declaration of the interruption handling routine */ +static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id); +static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port); + +/* ======================================================================== */ +/* PSC fifo operations for isolating differences between 52xx and 512x */ +/* ======================================================================== */ + +struct psc_ops { + void (*fifo_init)(struct uart_port *port); + unsigned int (*raw_rx_rdy)(struct uart_port *port); + unsigned int (*raw_tx_rdy)(struct uart_port *port); + unsigned int (*rx_rdy)(struct uart_port *port); + unsigned int (*tx_rdy)(struct uart_port *port); + unsigned int (*tx_empty)(struct uart_port *port); + void (*stop_rx)(struct uart_port *port); + void (*start_tx)(struct uart_port *port); + void (*stop_tx)(struct uart_port *port); + void (*rx_clr_irq)(struct uart_port *port); + void (*tx_clr_irq)(struct uart_port *port); + void (*write_char)(struct uart_port *port, unsigned char c); + unsigned char (*read_char)(struct uart_port *port); + void (*cw_disable_ints)(struct uart_port *port); + void (*cw_restore_ints)(struct uart_port *port); + unsigned int (*set_baudrate)(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old); + int (*clock_alloc)(struct uart_port *port); + void (*clock_relse)(struct uart_port *port); + int (*clock)(struct uart_port *port, int enable); + int (*fifoc_init)(void); + void (*fifoc_uninit)(void); + void (*get_irq)(struct uart_port *, 
struct device_node *); + irqreturn_t (*handle_irq)(struct uart_port *port); + u16 (*get_status)(struct uart_port *port); + u8 (*get_ipcr)(struct uart_port *port); + void (*command)(struct uart_port *port, u8 cmd); + void (*set_mode)(struct uart_port *port, u8 mr1, u8 mr2); + void (*set_rts)(struct uart_port *port, int state); + void (*enable_ms)(struct uart_port *port); + void (*set_sicr)(struct uart_port *port, u32 val); + void (*set_imr)(struct uart_port *port, u16 val); + u8 (*get_mr1)(struct uart_port *port); +}; + +/* setting the prescaler and divisor reg is common for all chips */ +static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc, + u16 prescaler, unsigned int divisor) +{ + /* select prescaler */ + out_be16(&psc->mpc52xx_psc_clock_select, prescaler); + out_8(&psc->ctur, divisor >> 8); + out_8(&psc->ctlr, divisor & 0xff); +} + +static u16 mpc52xx_psc_get_status(struct uart_port *port) +{ + return in_be16(&PSC(port)->mpc52xx_psc_status); +} + +static u8 mpc52xx_psc_get_ipcr(struct uart_port *port) +{ + return in_8(&PSC(port)->mpc52xx_psc_ipcr); +} + +static void mpc52xx_psc_command(struct uart_port *port, u8 cmd) +{ + out_8(&PSC(port)->command, cmd); +} + +static void mpc52xx_psc_set_mode(struct uart_port *port, u8 mr1, u8 mr2) +{ + out_8(&PSC(port)->command, MPC52xx_PSC_SEL_MODE_REG_1); + out_8(&PSC(port)->mode, mr1); + out_8(&PSC(port)->mode, mr2); +} + +static void mpc52xx_psc_set_rts(struct uart_port *port, int state) +{ + if (state) + out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS); + else + out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS); +} + +static void mpc52xx_psc_enable_ms(struct uart_port *port) +{ + struct mpc52xx_psc __iomem *psc = PSC(port); + + /* clear D_*-bits by reading them */ + in_8(&psc->mpc52xx_psc_ipcr); + /* enable CTS and DCD as IPC interrupts */ + out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD); + + port->read_status_mask |= MPC52xx_PSC_IMR_IPC; + out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); +} + +static void mpc52xx_psc_set_sicr(struct uart_port *port, u32 val) +{ + out_be32(&PSC(port)->sicr, val); +} + +static void mpc52xx_psc_set_imr(struct uart_port *port, u16 val) +{ + out_be16(&PSC(port)->mpc52xx_psc_imr, val); +} + +static u8 mpc52xx_psc_get_mr1(struct uart_port *port) +{ + out_8(&PSC(port)->command, MPC52xx_PSC_SEL_MODE_REG_1); + return in_8(&PSC(port)->mode); +} + +#ifdef CONFIG_PPC_MPC52xx +#define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1)) +static void mpc52xx_psc_fifo_init(struct uart_port *port) +{ + struct mpc52xx_psc __iomem *psc = PSC(port); + struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port); + + out_8(&fifo->rfcntl, 0x00); + out_be16(&fifo->rfalarm, 0x1ff); + out_8(&fifo->tfcntl, 0x07); + out_be16(&fifo->tfalarm, 0x80); + + port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY; + out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); +} + +static unsigned int mpc52xx_psc_raw_rx_rdy(struct uart_port *port) +{ + return in_be16(&PSC(port)->mpc52xx_psc_status) + & MPC52xx_PSC_SR_RXRDY; +} + +static unsigned int mpc52xx_psc_raw_tx_rdy(struct uart_port *port) +{ + return in_be16(&PSC(port)->mpc52xx_psc_status) + & MPC52xx_PSC_SR_TXRDY; +} + + +static unsigned int mpc52xx_psc_rx_rdy(struct uart_port *port) +{ + return in_be16(&PSC(port)->mpc52xx_psc_isr) + & port->read_status_mask + & MPC52xx_PSC_IMR_RXRDY; +} + +static unsigned int mpc52xx_psc_tx_rdy(struct uart_port *port) +{ + return in_be16(&PSC(port)->mpc52xx_psc_isr) + & port->read_status_mask + 
& MPC52xx_PSC_IMR_TXRDY; +} + +static unsigned int mpc52xx_psc_tx_empty(struct uart_port *port) +{ + u16 sts = in_be16(&PSC(port)->mpc52xx_psc_status); + + return (sts & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0; +} + +static void mpc52xx_psc_start_tx(struct uart_port *port) +{ + port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY; + out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); +} + +static void mpc52xx_psc_stop_tx(struct uart_port *port) +{ + port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY; + out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); +} + +static void mpc52xx_psc_stop_rx(struct uart_port *port) +{ + port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY; + out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); +} + +static void mpc52xx_psc_rx_clr_irq(struct uart_port *port) +{ +} + +static void mpc52xx_psc_tx_clr_irq(struct uart_port *port) +{ +} + +static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c) +{ + out_8(&PSC(port)->mpc52xx_psc_buffer_8, c); +} + +static unsigned char mpc52xx_psc_read_char(struct uart_port *port) +{ + return in_8(&PSC(port)->mpc52xx_psc_buffer_8); +} + +static void mpc52xx_psc_cw_disable_ints(struct uart_port *port) +{ + out_be16(&PSC(port)->mpc52xx_psc_imr, 0); +} + +static void mpc52xx_psc_cw_restore_ints(struct uart_port *port) +{ + out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); +} + +static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old) +{ + unsigned int baud; + unsigned int divisor; + + /* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */ + baud = uart_get_baud_rate(port, new, old, + port->uartclk / (32 * 0xffff) + 1, + port->uartclk / 32); + divisor = (port->uartclk + 16 * baud) / (32 * baud); + + /* enable the /32 prescaler and set the divisor */ + mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); + return baud; +} + +static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old) +{ + unsigned int baud; + unsigned int divisor; + u16 prescaler; + + /* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the + * ipb freq */ + baud = uart_get_baud_rate(port, new, old, + port->uartclk / (32 * 0xffff) + 1, + port->uartclk / 4); + divisor = (port->uartclk + 2 * baud) / (4 * baud); + + /* select the proper prescaler and set the divisor + * prefer high prescaler for more tolerance on low baudrates */ + if (divisor > 0xffff || baud <= 115200) { + divisor = (divisor + 4) / 8; + prescaler = 0xdd00; /* /32 */ + } else + prescaler = 0xff00; /* /4 */ + mpc52xx_set_divisor(PSC(port), prescaler, divisor); + return baud; +} + +static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np) +{ + port->irqflags = 0; + port->irq = irq_of_parse_and_map(np, 0); +} + +/* 52xx specific interrupt handler. 
The caller holds the port lock */ +static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port) +{ + return mpc5xxx_uart_process_int(port); +} + +static const struct psc_ops mpc52xx_psc_ops = { + .fifo_init = mpc52xx_psc_fifo_init, + .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, + .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy, + .rx_rdy = mpc52xx_psc_rx_rdy, + .tx_rdy = mpc52xx_psc_tx_rdy, + .tx_empty = mpc52xx_psc_tx_empty, + .stop_rx = mpc52xx_psc_stop_rx, + .start_tx = mpc52xx_psc_start_tx, + .stop_tx = mpc52xx_psc_stop_tx, + .rx_clr_irq = mpc52xx_psc_rx_clr_irq, + .tx_clr_irq = mpc52xx_psc_tx_clr_irq, + .write_char = mpc52xx_psc_write_char, + .read_char = mpc52xx_psc_read_char, + .cw_disable_ints = mpc52xx_psc_cw_disable_ints, + .cw_restore_ints = mpc52xx_psc_cw_restore_ints, + .set_baudrate = mpc5200_psc_set_baudrate, + .get_irq = mpc52xx_psc_get_irq, + .handle_irq = mpc52xx_psc_handle_irq, + .get_status = mpc52xx_psc_get_status, + .get_ipcr = mpc52xx_psc_get_ipcr, + .command = mpc52xx_psc_command, + .set_mode = mpc52xx_psc_set_mode, + .set_rts = mpc52xx_psc_set_rts, + .enable_ms = mpc52xx_psc_enable_ms, + .set_sicr = mpc52xx_psc_set_sicr, + .set_imr = mpc52xx_psc_set_imr, + .get_mr1 = mpc52xx_psc_get_mr1, +}; + +static const struct psc_ops mpc5200b_psc_ops = { + .fifo_init = mpc52xx_psc_fifo_init, + .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, + .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy, + .rx_rdy = mpc52xx_psc_rx_rdy, + .tx_rdy = mpc52xx_psc_tx_rdy, + .tx_empty = mpc52xx_psc_tx_empty, + .stop_rx = mpc52xx_psc_stop_rx, + .start_tx = mpc52xx_psc_start_tx, + .stop_tx = mpc52xx_psc_stop_tx, + .rx_clr_irq = mpc52xx_psc_rx_clr_irq, + .tx_clr_irq = mpc52xx_psc_tx_clr_irq, + .write_char = mpc52xx_psc_write_char, + .read_char = mpc52xx_psc_read_char, + .cw_disable_ints = mpc52xx_psc_cw_disable_ints, + .cw_restore_ints = mpc52xx_psc_cw_restore_ints, + .set_baudrate = mpc5200b_psc_set_baudrate, + .get_irq = mpc52xx_psc_get_irq, + .handle_irq = mpc52xx_psc_handle_irq, + .get_status = mpc52xx_psc_get_status, + .get_ipcr = mpc52xx_psc_get_ipcr, + .command = mpc52xx_psc_command, + .set_mode = mpc52xx_psc_set_mode, + .set_rts = mpc52xx_psc_set_rts, + .enable_ms = mpc52xx_psc_enable_ms, + .set_sicr = mpc52xx_psc_set_sicr, + .set_imr = mpc52xx_psc_set_imr, + .get_mr1 = mpc52xx_psc_get_mr1, +}; + +#endif /* CONFIG_PPC_MPC52xx */ + +#ifdef CONFIG_PPC_MPC512x +#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1)) + +/* PSC FIFO Controller for mpc512x */ +struct psc_fifoc { + u32 fifoc_cmd; + u32 fifoc_int; + u32 fifoc_dma; + u32 fifoc_axe; + u32 fifoc_debug; +}; + +static struct psc_fifoc __iomem *psc_fifoc; +static unsigned int psc_fifoc_irq; +static struct clk *psc_fifoc_clk; + +static void mpc512x_psc_fifo_init(struct uart_port *port) +{ + /* /32 prescaler */ + out_be16(&PSC(port)->mpc52xx_psc_clock_select, 0xdd00); + + out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE); + out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); + out_be32(&FIFO_512x(port)->txalarm, 1); + out_be32(&FIFO_512x(port)->tximr, 0); + + out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE); + out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); + out_be32(&FIFO_512x(port)->rxalarm, 1); + out_be32(&FIFO_512x(port)->rximr, 0); + + out_be32(&FIFO_512x(port)->tximr, MPC512x_PSC_FIFO_ALARM); + out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM); +} + +static unsigned int mpc512x_psc_raw_rx_rdy(struct uart_port *port) +{ + return !(in_be32(&FIFO_512x(port)->rxsr) & 
MPC512x_PSC_FIFO_EMPTY); +} + +static unsigned int mpc512x_psc_raw_tx_rdy(struct uart_port *port) +{ + return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL); +} + +static unsigned int mpc512x_psc_rx_rdy(struct uart_port *port) +{ + return in_be32(&FIFO_512x(port)->rxsr) + & in_be32(&FIFO_512x(port)->rximr) + & MPC512x_PSC_FIFO_ALARM; +} + +static unsigned int mpc512x_psc_tx_rdy(struct uart_port *port) +{ + return in_be32(&FIFO_512x(port)->txsr) + & in_be32(&FIFO_512x(port)->tximr) + & MPC512x_PSC_FIFO_ALARM; +} + +static unsigned int mpc512x_psc_tx_empty(struct uart_port *port) +{ + return in_be32(&FIFO_512x(port)->txsr) + & MPC512x_PSC_FIFO_EMPTY; +} + +static void mpc512x_psc_stop_rx(struct uart_port *port) +{ + unsigned long rx_fifo_imr; + + rx_fifo_imr = in_be32(&FIFO_512x(port)->rximr); + rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM; + out_be32(&FIFO_512x(port)->rximr, rx_fifo_imr); +} + +static void mpc512x_psc_start_tx(struct uart_port *port) +{ + unsigned long tx_fifo_imr; + + tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr); + tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM; + out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr); +} + +static void mpc512x_psc_stop_tx(struct uart_port *port) +{ + unsigned long tx_fifo_imr; + + tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr); + tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM; + out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr); +} + +static void mpc512x_psc_rx_clr_irq(struct uart_port *port) +{ + out_be32(&FIFO_512x(port)->rxisr, in_be32(&FIFO_512x(port)->rxisr)); +} + +static void mpc512x_psc_tx_clr_irq(struct uart_port *port) +{ + out_be32(&FIFO_512x(port)->txisr, in_be32(&FIFO_512x(port)->txisr)); +} + +static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c) +{ + out_8(&FIFO_512x(port)->txdata_8, c); +} + +static unsigned char mpc512x_psc_read_char(struct uart_port *port) +{ + return in_8(&FIFO_512x(port)->rxdata_8); +} + +static void mpc512x_psc_cw_disable_ints(struct uart_port *port) +{ + port->read_status_mask = + in_be32(&FIFO_512x(port)->tximr) << 16 | + in_be32(&FIFO_512x(port)->rximr); + out_be32(&FIFO_512x(port)->tximr, 0); + out_be32(&FIFO_512x(port)->rximr, 0); +} + +static void mpc512x_psc_cw_restore_ints(struct uart_port *port) +{ + out_be32(&FIFO_512x(port)->tximr, + (port->read_status_mask >> 16) & 0x7f); + out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f); +} + +static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old) +{ + unsigned int baud; + unsigned int divisor; + + /* + * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on + * pg. 30-10 that the chip supports a /32 and a /10 prescaler. + * Furthermore, it states that "After reset, the prescaler by 10 + * for the UART mode is selected", but the reset register value is + * 0x0000 which means a /32 prescaler. This is wrong. + * + * In reality using /32 prescaler doesn't work, as it is not supported! + * Use /16 or /10 prescaler, see "MPC5121e Hardware Design Guide", + * Chapter 4.1 PSC in UART Mode. + * Calculate with a /16 prescaler here. 
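+ *
+ * Worked example (illustrative ips clock, not taken from the manual):
+ * with uartclk = 66 MHz and a requested 115200 baud, the code below gives
+ * divisor = (66000000 + 8 * 115200) / (16 * 115200) = 36, i.e. the
+ * /16-prescaled divider rounded to the nearest integer.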
+ */ + + /* uartclk contains the ips freq */ + baud = uart_get_baud_rate(port, new, old, + port->uartclk / (16 * 0xffff) + 1, + port->uartclk / 16); + divisor = (port->uartclk + 8 * baud) / (16 * baud); + + /* enable the /16 prescaler and set the divisor */ + mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); + return baud; +} + +/* Init PSC FIFO Controller */ +static int __init mpc512x_psc_fifoc_init(void) +{ + int err; + struct device_node *np; + struct clk *clk; + + /* default error code, potentially overwritten by clock calls */ + err = -ENODEV; + + np = of_find_compatible_node(NULL, NULL, + "fsl,mpc5121-psc-fifo"); + if (!np) { + pr_err("%s: Can't find FIFOC node\n", __func__); + goto out_err; + } + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + /* backwards compat with device trees that lack clock specs */ + clk = clk_get_sys(np->name, "ipg"); + } + if (IS_ERR(clk)) { + pr_err("%s: Can't lookup FIFO clock\n", __func__); + err = PTR_ERR(clk); + goto out_ofnode_put; + } + if (clk_prepare_enable(clk)) { + pr_err("%s: Can't enable FIFO clock\n", __func__); + clk_put(clk); + goto out_ofnode_put; + } + psc_fifoc_clk = clk; + + psc_fifoc = of_iomap(np, 0); + if (!psc_fifoc) { + pr_err("%s: Can't map FIFOC\n", __func__); + goto out_clk_disable; + } + + psc_fifoc_irq = irq_of_parse_and_map(np, 0); + if (psc_fifoc_irq == 0) { + pr_err("%s: Can't get FIFOC irq\n", __func__); + goto out_unmap; + } + + of_node_put(np); + return 0; + +out_unmap: + iounmap(psc_fifoc); +out_clk_disable: + clk_disable_unprepare(psc_fifoc_clk); + clk_put(psc_fifoc_clk); +out_ofnode_put: + of_node_put(np); +out_err: + return err; +} + +static void __exit mpc512x_psc_fifoc_uninit(void) +{ + iounmap(psc_fifoc); + + /* disable the clock, errors are not fatal */ + if (psc_fifoc_clk) { + clk_disable_unprepare(psc_fifoc_clk); + clk_put(psc_fifoc_clk); + psc_fifoc_clk = NULL; + } +} + +/* 512x specific interrupt handler. 
The caller holds the port lock */ +static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port) +{ + unsigned long fifoc_int; + int psc_num; + + /* Read pending PSC FIFOC interrupts */ + fifoc_int = in_be32(&psc_fifoc->fifoc_int); + + /* Check if it is an interrupt for this port */ + psc_num = (port->mapbase & 0xf00) >> 8; + if (test_bit(psc_num, &fifoc_int) || + test_bit(psc_num + 16, &fifoc_int)) + return mpc5xxx_uart_process_int(port); + + return IRQ_NONE; +} + +static struct clk *psc_mclk_clk[MPC52xx_PSC_MAXNUM]; +static struct clk *psc_ipg_clk[MPC52xx_PSC_MAXNUM]; + +/* called from within the .request_port() callback (allocation) */ +static int mpc512x_psc_alloc_clock(struct uart_port *port) +{ + int psc_num; + struct clk *clk; + int err; + + psc_num = (port->mapbase & 0xf00) >> 8; + + clk = devm_clk_get(port->dev, "mclk"); + if (IS_ERR(clk)) { + dev_err(port->dev, "Failed to get MCLK!\n"); + err = PTR_ERR(clk); + goto out_err; + } + err = clk_prepare_enable(clk); + if (err) { + dev_err(port->dev, "Failed to enable MCLK!\n"); + goto out_err; + } + psc_mclk_clk[psc_num] = clk; + + clk = devm_clk_get(port->dev, "ipg"); + if (IS_ERR(clk)) { + dev_err(port->dev, "Failed to get IPG clock!\n"); + err = PTR_ERR(clk); + goto out_err; + } + err = clk_prepare_enable(clk); + if (err) { + dev_err(port->dev, "Failed to enable IPG clock!\n"); + goto out_err; + } + psc_ipg_clk[psc_num] = clk; + + return 0; + +out_err: + if (psc_mclk_clk[psc_num]) { + clk_disable_unprepare(psc_mclk_clk[psc_num]); + psc_mclk_clk[psc_num] = NULL; + } + if (psc_ipg_clk[psc_num]) { + clk_disable_unprepare(psc_ipg_clk[psc_num]); + psc_ipg_clk[psc_num] = NULL; + } + return err; +} + +/* called from within the .release_port() callback (release) */ +static void mpc512x_psc_relse_clock(struct uart_port *port) +{ + int psc_num; + struct clk *clk; + + psc_num = (port->mapbase & 0xf00) >> 8; + clk = psc_mclk_clk[psc_num]; + if (clk) { + clk_disable_unprepare(clk); + psc_mclk_clk[psc_num] = NULL; + } + if (psc_ipg_clk[psc_num]) { + clk_disable_unprepare(psc_ipg_clk[psc_num]); + psc_ipg_clk[psc_num] = NULL; + } +} + +/* implementation of the .clock() callback (enable/disable) */ +static int mpc512x_psc_endis_clock(struct uart_port *port, int enable) +{ + int psc_num; + struct clk *psc_clk; + int ret; + + if (uart_console(port)) + return 0; + + psc_num = (port->mapbase & 0xf00) >> 8; + psc_clk = psc_mclk_clk[psc_num]; + if (!psc_clk) { + dev_err(port->dev, "Failed to get PSC clock entry!\n"); + return -ENODEV; + } + + dev_dbg(port->dev, "mclk %sable\n", enable ? 
"en" : "dis"); + if (enable) { + ret = clk_enable(psc_clk); + if (ret) + dev_err(port->dev, "Failed to enable MCLK!\n"); + return ret; + } else { + clk_disable(psc_clk); + return 0; + } +} + +static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np) +{ + port->irqflags = IRQF_SHARED; + port->irq = psc_fifoc_irq; +} + +#define PSC_5125(port) ((struct mpc5125_psc __iomem *)((port)->membase)) +#define FIFO_5125(port) ((struct mpc512x_psc_fifo __iomem *)(PSC_5125(port)+1)) + +static void mpc5125_psc_fifo_init(struct uart_port *port) +{ + /* /32 prescaler */ + out_8(&PSC_5125(port)->mpc52xx_psc_clock_select, 0xdd); + + out_be32(&FIFO_5125(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE); + out_be32(&FIFO_5125(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); + out_be32(&FIFO_5125(port)->txalarm, 1); + out_be32(&FIFO_5125(port)->tximr, 0); + + out_be32(&FIFO_5125(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE); + out_be32(&FIFO_5125(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); + out_be32(&FIFO_5125(port)->rxalarm, 1); + out_be32(&FIFO_5125(port)->rximr, 0); + + out_be32(&FIFO_5125(port)->tximr, MPC512x_PSC_FIFO_ALARM); + out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM); +} + +static unsigned int mpc5125_psc_raw_rx_rdy(struct uart_port *port) +{ + return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY); +} + +static unsigned int mpc5125_psc_raw_tx_rdy(struct uart_port *port) +{ + return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL); +} + +static unsigned int mpc5125_psc_rx_rdy(struct uart_port *port) +{ + return in_be32(&FIFO_5125(port)->rxsr) & + in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM; +} + +static unsigned int mpc5125_psc_tx_rdy(struct uart_port *port) +{ + return in_be32(&FIFO_5125(port)->txsr) & + in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM; +} + +static unsigned int mpc5125_psc_tx_empty(struct uart_port *port) +{ + return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY; +} + +static void mpc5125_psc_stop_rx(struct uart_port *port) +{ + unsigned long rx_fifo_imr; + + rx_fifo_imr = in_be32(&FIFO_5125(port)->rximr); + rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM; + out_be32(&FIFO_5125(port)->rximr, rx_fifo_imr); +} + +static void mpc5125_psc_start_tx(struct uart_port *port) +{ + unsigned long tx_fifo_imr; + + tx_fifo_imr = in_be32(&FIFO_5125(port)->tximr); + tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM; + out_be32(&FIFO_5125(port)->tximr, tx_fifo_imr); +} + +static void mpc5125_psc_stop_tx(struct uart_port *port) +{ + unsigned long tx_fifo_imr; + + tx_fifo_imr = in_be32(&FIFO_5125(port)->tximr); + tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM; + out_be32(&FIFO_5125(port)->tximr, tx_fifo_imr); +} + +static void mpc5125_psc_rx_clr_irq(struct uart_port *port) +{ + out_be32(&FIFO_5125(port)->rxisr, in_be32(&FIFO_5125(port)->rxisr)); +} + +static void mpc5125_psc_tx_clr_irq(struct uart_port *port) +{ + out_be32(&FIFO_5125(port)->txisr, in_be32(&FIFO_5125(port)->txisr)); +} + +static void mpc5125_psc_write_char(struct uart_port *port, unsigned char c) +{ + out_8(&FIFO_5125(port)->txdata_8, c); +} + +static unsigned char mpc5125_psc_read_char(struct uart_port *port) +{ + return in_8(&FIFO_5125(port)->rxdata_8); +} + +static void mpc5125_psc_cw_disable_ints(struct uart_port *port) +{ + port->read_status_mask = + in_be32(&FIFO_5125(port)->tximr) << 16 | + in_be32(&FIFO_5125(port)->rximr); + out_be32(&FIFO_5125(port)->tximr, 0); + out_be32(&FIFO_5125(port)->rximr, 0); +} + +static void mpc5125_psc_cw_restore_ints(struct uart_port *port) +{ 
+ out_be32(&FIFO_5125(port)->tximr, + (port->read_status_mask >> 16) & 0x7f); + out_be32(&FIFO_5125(port)->rximr, port->read_status_mask & 0x7f); +} + +static inline void mpc5125_set_divisor(struct mpc5125_psc __iomem *psc, + u8 prescaler, unsigned int divisor) +{ + /* select prescaler */ + out_8(&psc->mpc52xx_psc_clock_select, prescaler); + out_8(&psc->ctur, divisor >> 8); + out_8(&psc->ctlr, divisor & 0xff); +} + +static unsigned int mpc5125_psc_set_baudrate(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old) +{ + unsigned int baud; + unsigned int divisor; + + /* + * Calculate with a /16 prescaler here. + */ + + /* uartclk contains the ips freq */ + baud = uart_get_baud_rate(port, new, old, + port->uartclk / (16 * 0xffff) + 1, + port->uartclk / 16); + divisor = (port->uartclk + 8 * baud) / (16 * baud); + + /* enable the /16 prescaler and set the divisor */ + mpc5125_set_divisor(PSC_5125(port), 0xdd, divisor); + return baud; +} + +/* + * MPC5125 have compatible PSC FIFO Controller. + * Special init not needed. + */ +static u16 mpc5125_psc_get_status(struct uart_port *port) +{ + return in_be16(&PSC_5125(port)->mpc52xx_psc_status); +} + +static u8 mpc5125_psc_get_ipcr(struct uart_port *port) +{ + return in_8(&PSC_5125(port)->mpc52xx_psc_ipcr); +} + +static void mpc5125_psc_command(struct uart_port *port, u8 cmd) +{ + out_8(&PSC_5125(port)->command, cmd); +} + +static void mpc5125_psc_set_mode(struct uart_port *port, u8 mr1, u8 mr2) +{ + out_8(&PSC_5125(port)->mr1, mr1); + out_8(&PSC_5125(port)->mr2, mr2); +} + +static void mpc5125_psc_set_rts(struct uart_port *port, int state) +{ + if (state & TIOCM_RTS) + out_8(&PSC_5125(port)->op1, MPC52xx_PSC_OP_RTS); + else + out_8(&PSC_5125(port)->op0, MPC52xx_PSC_OP_RTS); +} + +static void mpc5125_psc_enable_ms(struct uart_port *port) +{ + struct mpc5125_psc __iomem *psc = PSC_5125(port); + + /* clear D_*-bits by reading them */ + in_8(&psc->mpc52xx_psc_ipcr); + /* enable CTS and DCD as IPC interrupts */ + out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD); + + port->read_status_mask |= MPC52xx_PSC_IMR_IPC; + out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); +} + +static void mpc5125_psc_set_sicr(struct uart_port *port, u32 val) +{ + out_be32(&PSC_5125(port)->sicr, val); +} + +static void mpc5125_psc_set_imr(struct uart_port *port, u16 val) +{ + out_be16(&PSC_5125(port)->mpc52xx_psc_imr, val); +} + +static u8 mpc5125_psc_get_mr1(struct uart_port *port) +{ + return in_8(&PSC_5125(port)->mr1); +} + +static const struct psc_ops mpc5125_psc_ops = { + .fifo_init = mpc5125_psc_fifo_init, + .raw_rx_rdy = mpc5125_psc_raw_rx_rdy, + .raw_tx_rdy = mpc5125_psc_raw_tx_rdy, + .rx_rdy = mpc5125_psc_rx_rdy, + .tx_rdy = mpc5125_psc_tx_rdy, + .tx_empty = mpc5125_psc_tx_empty, + .stop_rx = mpc5125_psc_stop_rx, + .start_tx = mpc5125_psc_start_tx, + .stop_tx = mpc5125_psc_stop_tx, + .rx_clr_irq = mpc5125_psc_rx_clr_irq, + .tx_clr_irq = mpc5125_psc_tx_clr_irq, + .write_char = mpc5125_psc_write_char, + .read_char = mpc5125_psc_read_char, + .cw_disable_ints = mpc5125_psc_cw_disable_ints, + .cw_restore_ints = mpc5125_psc_cw_restore_ints, + .set_baudrate = mpc5125_psc_set_baudrate, + .clock_alloc = mpc512x_psc_alloc_clock, + .clock_relse = mpc512x_psc_relse_clock, + .clock = mpc512x_psc_endis_clock, + .fifoc_init = mpc512x_psc_fifoc_init, + .fifoc_uninit = mpc512x_psc_fifoc_uninit, + .get_irq = mpc512x_psc_get_irq, + .handle_irq = mpc512x_psc_handle_irq, + .get_status = mpc5125_psc_get_status, + .get_ipcr = 
mpc5125_psc_get_ipcr, + .command = mpc5125_psc_command, + .set_mode = mpc5125_psc_set_mode, + .set_rts = mpc5125_psc_set_rts, + .enable_ms = mpc5125_psc_enable_ms, + .set_sicr = mpc5125_psc_set_sicr, + .set_imr = mpc5125_psc_set_imr, + .get_mr1 = mpc5125_psc_get_mr1, +}; + +static const struct psc_ops mpc512x_psc_ops = { + .fifo_init = mpc512x_psc_fifo_init, + .raw_rx_rdy = mpc512x_psc_raw_rx_rdy, + .raw_tx_rdy = mpc512x_psc_raw_tx_rdy, + .rx_rdy = mpc512x_psc_rx_rdy, + .tx_rdy = mpc512x_psc_tx_rdy, + .tx_empty = mpc512x_psc_tx_empty, + .stop_rx = mpc512x_psc_stop_rx, + .start_tx = mpc512x_psc_start_tx, + .stop_tx = mpc512x_psc_stop_tx, + .rx_clr_irq = mpc512x_psc_rx_clr_irq, + .tx_clr_irq = mpc512x_psc_tx_clr_irq, + .write_char = mpc512x_psc_write_char, + .read_char = mpc512x_psc_read_char, + .cw_disable_ints = mpc512x_psc_cw_disable_ints, + .cw_restore_ints = mpc512x_psc_cw_restore_ints, + .set_baudrate = mpc512x_psc_set_baudrate, + .clock_alloc = mpc512x_psc_alloc_clock, + .clock_relse = mpc512x_psc_relse_clock, + .clock = mpc512x_psc_endis_clock, + .fifoc_init = mpc512x_psc_fifoc_init, + .fifoc_uninit = mpc512x_psc_fifoc_uninit, + .get_irq = mpc512x_psc_get_irq, + .handle_irq = mpc512x_psc_handle_irq, + .get_status = mpc52xx_psc_get_status, + .get_ipcr = mpc52xx_psc_get_ipcr, + .command = mpc52xx_psc_command, + .set_mode = mpc52xx_psc_set_mode, + .set_rts = mpc52xx_psc_set_rts, + .enable_ms = mpc52xx_psc_enable_ms, + .set_sicr = mpc52xx_psc_set_sicr, + .set_imr = mpc52xx_psc_set_imr, + .get_mr1 = mpc52xx_psc_get_mr1, +}; +#endif /* CONFIG_PPC_MPC512x */ + + +static const struct psc_ops *psc_ops; + +/* ======================================================================== */ +/* UART operations */ +/* ======================================================================== */ + +static unsigned int +mpc52xx_uart_tx_empty(struct uart_port *port) +{ + return psc_ops->tx_empty(port) ? 
TIOCSER_TEMT : 0; +} + +static void +mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + psc_ops->set_rts(port, mctrl & TIOCM_RTS); +} + +static unsigned int +mpc52xx_uart_get_mctrl(struct uart_port *port) +{ + unsigned int ret = TIOCM_DSR; + u8 status = psc_ops->get_ipcr(port); + + if (!(status & MPC52xx_PSC_CTS)) + ret |= TIOCM_CTS; + if (!(status & MPC52xx_PSC_DCD)) + ret |= TIOCM_CAR; + + return ret; +} + +static void +mpc52xx_uart_stop_tx(struct uart_port *port) +{ + /* port->lock taken by caller */ + psc_ops->stop_tx(port); +} + +static void +mpc52xx_uart_start_tx(struct uart_port *port) +{ + /* port->lock taken by caller */ + psc_ops->start_tx(port); +} + +static void +mpc52xx_uart_stop_rx(struct uart_port *port) +{ + /* port->lock taken by caller */ + psc_ops->stop_rx(port); +} + +static void +mpc52xx_uart_enable_ms(struct uart_port *port) +{ + psc_ops->enable_ms(port); +} + +static void +mpc52xx_uart_break_ctl(struct uart_port *port, int ctl) +{ + unsigned long flags; + spin_lock_irqsave(&port->lock, flags); + + if (ctl == -1) + psc_ops->command(port, MPC52xx_PSC_START_BRK); + else + psc_ops->command(port, MPC52xx_PSC_STOP_BRK); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static int +mpc52xx_uart_startup(struct uart_port *port) +{ + int ret; + + if (psc_ops->clock) { + ret = psc_ops->clock(port, 1); + if (ret) + return ret; + } + + /* Request IRQ */ + ret = request_irq(port->irq, mpc52xx_uart_int, + port->irqflags, "mpc52xx_psc_uart", port); + if (ret) + return ret; + + /* Reset/activate the port, clear and enable interrupts */ + psc_ops->command(port, MPC52xx_PSC_RST_RX); + psc_ops->command(port, MPC52xx_PSC_RST_TX); + + /* + * According to Freescale's support the RST_TX command can produce a + * spike on the TX pin. So they recommend to delay "for one character". + * One millisecond should be enough for everyone. + */ + msleep(1); + + psc_ops->set_sicr(port, 0); /* UART mode DCD ignored */ + + psc_ops->fifo_init(port); + + psc_ops->command(port, MPC52xx_PSC_TX_ENABLE); + psc_ops->command(port, MPC52xx_PSC_RX_ENABLE); + + return 0; +} + +static void +mpc52xx_uart_shutdown(struct uart_port *port) +{ + /* Shut down the port. Leave TX active if on a console port */ + psc_ops->command(port, MPC52xx_PSC_RST_RX); + if (!uart_console(port)) + psc_ops->command(port, MPC52xx_PSC_RST_TX); + + port->read_status_mask = 0; + psc_ops->set_imr(port, port->read_status_mask); + + if (psc_ops->clock) + psc_ops->clock(port, 0); + + /* Disable interrupt */ + psc_ops->cw_disable_ints(port); + + /* Release interrupt */ + free_irq(port->irq, port); +} + +static void +mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, + const struct ktermios *old) +{ + unsigned long flags; + unsigned char mr1, mr2; + unsigned int j; + unsigned int baud; + + /* Prepare what we're gonna write */ + mr1 = 0; + + switch (new->c_cflag & CSIZE) { + case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS; + break; + case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS; + break; + case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS; + break; + case CS8: + default: mr1 |= MPC52xx_PSC_MODE_8_BITS; + } + + if (new->c_cflag & PARENB) { + if (new->c_cflag & CMSPAR) + mr1 |= MPC52xx_PSC_MODE_PARFORCE; + + /* With CMSPAR, PARODD also means high parity (same as termios) */ + mr1 |= (new->c_cflag & PARODD) ? 
+ MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN; + } else { + mr1 |= MPC52xx_PSC_MODE_PARNONE; + } + + mr2 = 0; + + if (new->c_cflag & CSTOPB) + mr2 |= MPC52xx_PSC_MODE_TWO_STOP; + else + mr2 |= ((new->c_cflag & CSIZE) == CS5) ? + MPC52xx_PSC_MODE_ONE_STOP_5_BITS : + MPC52xx_PSC_MODE_ONE_STOP; + + if (new->c_cflag & CRTSCTS) { + mr1 |= MPC52xx_PSC_MODE_RXRTS; + mr2 |= MPC52xx_PSC_MODE_TXCTS; + } + + /* Get the lock */ + spin_lock_irqsave(&port->lock, flags); + + /* Do our best to flush TX & RX, so we don't lose anything */ + /* But we don't wait indefinitely ! */ + j = 5000000; /* Maximum wait */ + /* FIXME Can't receive chars since set_termios might be called at early + * boot for the console, all stuff is not yet ready to receive at that + * time and that just makes the kernel oops */ + /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */ + while (!mpc52xx_uart_tx_empty(port) && --j) + udelay(1); + + if (!j) + printk(KERN_ERR "mpc52xx_uart.c: " + "Unable to flush RX & TX fifos in-time in set_termios." + "Some chars may have been lost.\n"); + + /* Reset the TX & RX */ + psc_ops->command(port, MPC52xx_PSC_RST_RX); + psc_ops->command(port, MPC52xx_PSC_RST_TX); + + /* Send new mode settings */ + psc_ops->set_mode(port, mr1, mr2); + baud = psc_ops->set_baudrate(port, new, old); + + /* Update the per-port timeout */ + uart_update_timeout(port, new->c_cflag, baud); + + if (UART_ENABLE_MS(port, new->c_cflag)) + mpc52xx_uart_enable_ms(port); + + /* Reenable TX & RX */ + psc_ops->command(port, MPC52xx_PSC_TX_ENABLE); + psc_ops->command(port, MPC52xx_PSC_RX_ENABLE); + + /* We're all set, release the lock */ + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char * +mpc52xx_uart_type(struct uart_port *port) +{ + /* + * We keep using PORT_MPC52xx for historic reasons although it applies + * for MPC512x, too, but print "MPC5xxx" to not irritate users + */ + return port->type == PORT_MPC52xx ? "MPC5xxx PSC" : NULL; +} + +static void +mpc52xx_uart_release_port(struct uart_port *port) +{ + if (psc_ops->clock_relse) + psc_ops->clock_relse(port); + + /* remapped by us ? */ + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } + + release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc)); +} + +static int +mpc52xx_uart_request_port(struct uart_port *port) +{ + int err; + + if (port->flags & UPF_IOREMAP) /* Need to remap ? */ + port->membase = ioremap(port->mapbase, + sizeof(struct mpc52xx_psc)); + + if (!port->membase) + return -EINVAL; + + err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc), + "mpc52xx_psc_uart") != NULL ? 
0 : -EBUSY; + + if (err) + goto out_membase; + + if (psc_ops->clock_alloc) { + err = psc_ops->clock_alloc(port); + if (err) + goto out_mapregion; + } + + return 0; + +out_mapregion: + release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc)); +out_membase: + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } + return err; +} + +static void +mpc52xx_uart_config_port(struct uart_port *port, int flags) +{ + if ((flags & UART_CONFIG_TYPE) + && (mpc52xx_uart_request_port(port) == 0)) + port->type = PORT_MPC52xx; +} + +static int +mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx) + return -EINVAL; + + if ((ser->irq != port->irq) || + (ser->io_type != UPIO_MEM) || + (ser->baud_base != port->uartclk) || + (ser->iomem_base != (void *)port->mapbase) || + (ser->hub6 != 0)) + return -EINVAL; + + return 0; +} + + +static const struct uart_ops mpc52xx_uart_ops = { + .tx_empty = mpc52xx_uart_tx_empty, + .set_mctrl = mpc52xx_uart_set_mctrl, + .get_mctrl = mpc52xx_uart_get_mctrl, + .stop_tx = mpc52xx_uart_stop_tx, + .start_tx = mpc52xx_uart_start_tx, + .stop_rx = mpc52xx_uart_stop_rx, + .enable_ms = mpc52xx_uart_enable_ms, + .break_ctl = mpc52xx_uart_break_ctl, + .startup = mpc52xx_uart_startup, + .shutdown = mpc52xx_uart_shutdown, + .set_termios = mpc52xx_uart_set_termios, +/* .pm = mpc52xx_uart_pm, Not supported yet */ + .type = mpc52xx_uart_type, + .release_port = mpc52xx_uart_release_port, + .request_port = mpc52xx_uart_request_port, + .config_port = mpc52xx_uart_config_port, + .verify_port = mpc52xx_uart_verify_port +}; + + +/* ======================================================================== */ +/* Interrupt handling */ +/* ======================================================================== */ + +static inline bool +mpc52xx_uart_int_rx_chars(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + unsigned char ch, flag; + unsigned short status; + + /* While we can read, do so ! */ + while (psc_ops->raw_rx_rdy(port)) { + /* Get the char */ + ch = psc_ops->read_char(port); + + /* Handle sysreq char */ + if (uart_handle_sysrq_char(port, ch)) + continue; + + /* Store it */ + + flag = TTY_NORMAL; + port->icount.rx++; + + status = psc_ops->get_status(port); + + if (status & (MPC52xx_PSC_SR_PE | + MPC52xx_PSC_SR_FE | + MPC52xx_PSC_SR_RB)) { + + if (status & MPC52xx_PSC_SR_RB) { + flag = TTY_BREAK; + uart_handle_break(port); + port->icount.brk++; + } else if (status & MPC52xx_PSC_SR_PE) { + flag = TTY_PARITY; + port->icount.parity++; + } + else if (status & MPC52xx_PSC_SR_FE) { + flag = TTY_FRAME; + port->icount.frame++; + } + + /* Clear error condition */ + psc_ops->command(port, MPC52xx_PSC_RST_ERR_STAT); + + } + tty_insert_flip_char(tport, ch, flag); + if (status & MPC52xx_PSC_SR_OE) { + /* + * Overrun is special, since it's + * reported immediately, and doesn't + * affect the current character + */ + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + port->icount.overrun++; + } + } + + tty_flip_buffer_push(tport); + + return psc_ops->raw_rx_rdy(port); +} + +static inline bool +mpc52xx_uart_int_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + /* Process out of band chars */ + if (port->x_char) { + psc_ops->write_char(port, port->x_char); + port->icount.tx++; + port->x_char = 0; + return true; + } + + /* Nothing to do ? 
*/ + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + mpc52xx_uart_stop_tx(port); + return false; + } + + /* Send chars */ + while (psc_ops->raw_tx_rdy(port)) { + psc_ops->write_char(port, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + /* Wake up */ + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + /* Maybe we're done after all */ + if (uart_circ_empty(xmit)) { + mpc52xx_uart_stop_tx(port); + return false; + } + + return true; +} + +static irqreturn_t +mpc5xxx_uart_process_int(struct uart_port *port) +{ + unsigned long pass = ISR_PASS_LIMIT; + bool keepgoing; + u8 status; + + /* While we have stuff to do, we continue */ + do { + /* If we don't find anything to do, we stop */ + keepgoing = false; + + psc_ops->rx_clr_irq(port); + if (psc_ops->rx_rdy(port)) + keepgoing |= mpc52xx_uart_int_rx_chars(port); + + psc_ops->tx_clr_irq(port); + if (psc_ops->tx_rdy(port)) + keepgoing |= mpc52xx_uart_int_tx_chars(port); + + status = psc_ops->get_ipcr(port); + if (status & MPC52xx_PSC_D_DCD) + uart_handle_dcd_change(port, !(status & MPC52xx_PSC_DCD)); + + if (status & MPC52xx_PSC_D_CTS) + uart_handle_cts_change(port, !(status & MPC52xx_PSC_CTS)); + + /* Limit number of iteration */ + if (!(--pass)) + keepgoing = false; + + } while (keepgoing); + + return IRQ_HANDLED; +} + +static irqreturn_t +mpc52xx_uart_int(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + irqreturn_t ret; + + spin_lock(&port->lock); + + ret = psc_ops->handle_irq(port); + + spin_unlock(&port->lock); + + return ret; +} + +/* ======================================================================== */ +/* Console ( if applicable ) */ +/* ======================================================================== */ + +#ifdef CONFIG_SERIAL_MPC52xx_CONSOLE + +static void __init +mpc52xx_console_get_options(struct uart_port *port, + int *baud, int *parity, int *bits, int *flow) +{ + unsigned char mr1; + + pr_debug("mpc52xx_console_get_options(port=%p)\n", port); + + /* Read the mode registers */ + mr1 = psc_ops->get_mr1(port); + + /* CT{U,L}R are write-only ! */ + *baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; + + /* Parse them */ + switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) { + case MPC52xx_PSC_MODE_5_BITS: + *bits = 5; + break; + case MPC52xx_PSC_MODE_6_BITS: + *bits = 6; + break; + case MPC52xx_PSC_MODE_7_BITS: + *bits = 7; + break; + case MPC52xx_PSC_MODE_8_BITS: + default: + *bits = 8; + } + + if (mr1 & MPC52xx_PSC_MODE_PARNONE) + *parity = 'n'; + else + *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 
'o' : 'e'; +} + +static void +mpc52xx_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_port *port = &mpc52xx_uart_ports[co->index]; + unsigned int i, j; + + /* Disable interrupts */ + psc_ops->cw_disable_ints(port); + + /* Wait the TX buffer to be empty */ + j = 5000000; /* Maximum wait */ + while (!mpc52xx_uart_tx_empty(port) && --j) + udelay(1); + + /* Write all the chars */ + for (i = 0; i < count; i++, s++) { + /* Line return handling */ + if (*s == '\n') + psc_ops->write_char(port, '\r'); + + /* Send the char */ + psc_ops->write_char(port, *s); + + /* Wait the TX buffer to be empty */ + j = 20000; /* Maximum wait */ + while (!mpc52xx_uart_tx_empty(port) && --j) + udelay(1); + } + + /* Restore interrupt state */ + psc_ops->cw_restore_ints(port); +} + + +static int __init +mpc52xx_console_setup(struct console *co, char *options) +{ + struct uart_port *port = &mpc52xx_uart_ports[co->index]; + struct device_node *np = mpc52xx_uart_nodes[co->index]; + unsigned int uartclk; + struct resource res; + int ret; + + int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n", + co, co->index, options); + + if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) { + pr_debug("PSC%x out of range\n", co->index); + return -EINVAL; + } + + if (!np) { + pr_debug("PSC%x not found in device tree\n", co->index); + return -EINVAL; + } + + pr_debug("Console on ttyPSC%x is %pOF\n", + co->index, mpc52xx_uart_nodes[co->index]); + + /* Fetch register locations */ + ret = of_address_to_resource(np, 0, &res); + if (ret) { + pr_debug("Could not get resources for PSC%x\n", co->index); + return ret; + } + + uartclk = mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np)); + if (uartclk == 0) { + pr_debug("Could not find uart clock frequency!\n"); + return -EINVAL; + } + + /* Basic port init. Needed since we use some uart_??? func before + * real init for early access */ + spin_lock_init(&port->lock); + port->uartclk = uartclk; + port->ops = &mpc52xx_uart_ops; + port->mapbase = res.start; + port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc)); + port->irq = irq_of_parse_and_map(np, 0); + + if (port->membase == NULL) + return -EINVAL; + + pr_debug("mpc52xx-psc uart at %p, mapped to %p, irq=%x, freq=%i\n", + (void *)port->mapbase, port->membase, + port->irq, port->uartclk); + + /* Setup the port parameters accoding to options */ + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow); + + pr_debug("Setting console parameters: %i %i%c1 flow=%c\n", + baud, bits, parity, flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + + +static struct uart_driver mpc52xx_uart_driver; + +static struct console mpc52xx_console = { + .name = "ttyPSC", + .write = mpc52xx_console_write, + .device = uart_console_device, + .setup = mpc52xx_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, /* Specified on the cmdline (e.g. 
console=ttyPSC0) */ + .data = &mpc52xx_uart_driver, +}; + + +static int __init +mpc52xx_console_init(void) +{ + mpc52xx_uart_of_enumerate(); + register_console(&mpc52xx_console); + return 0; +} + +console_initcall(mpc52xx_console_init); + +#define MPC52xx_PSC_CONSOLE &mpc52xx_console +#else +#define MPC52xx_PSC_CONSOLE NULL +#endif + + +/* ======================================================================== */ +/* UART Driver */ +/* ======================================================================== */ + +static struct uart_driver mpc52xx_uart_driver = { + .driver_name = "mpc52xx_psc_uart", + .dev_name = "ttyPSC", + .major = SERIAL_PSC_MAJOR, + .minor = SERIAL_PSC_MINOR, + .nr = MPC52xx_PSC_MAXNUM, + .cons = MPC52xx_PSC_CONSOLE, +}; + +/* ======================================================================== */ +/* OF Platform Driver */ +/* ======================================================================== */ + +static const struct of_device_id mpc52xx_uart_of_match[] = { +#ifdef CONFIG_PPC_MPC52xx + { .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, }, + { .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, + /* binding used by old lite5200 device trees: */ + { .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, + /* binding used by efika: */ + { .compatible = "mpc5200-serial", .data = &mpc52xx_psc_ops, }, +#endif +#ifdef CONFIG_PPC_MPC512x + { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, }, + { .compatible = "fsl,mpc5125-psc-uart", .data = &mpc5125_psc_ops, }, +#endif + {}, +}; + +static int mpc52xx_uart_of_probe(struct platform_device *op) +{ + int idx = -1; + unsigned int uartclk; + struct uart_port *port = NULL; + struct resource res; + int ret; + + /* Check validity & presence */ + for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++) + if (mpc52xx_uart_nodes[idx] == op->dev.of_node) + break; + if (idx >= MPC52xx_PSC_MAXNUM) + return -EINVAL; + pr_debug("Found %pOF assigned to ttyPSC%x\n", + mpc52xx_uart_nodes[idx], idx); + + /* set the uart clock to the input clock of the psc, the different + * prescalers are taken into account in the set_baudrate() methods + * of the respective chip */ + uartclk = mpc5xxx_get_bus_frequency(&op->dev); + if (uartclk == 0) { + dev_dbg(&op->dev, "Could not find uart clock frequency!\n"); + return -EINVAL; + } + + /* Init the port structure */ + port = &mpc52xx_uart_ports[idx]; + + spin_lock_init(&port->lock); + port->uartclk = uartclk; + port->fifosize = 512; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MPC52xx_CONSOLE); + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF | + (uart_console(port) ? 
0 : UPF_IOREMAP); + port->line = idx; + port->ops = &mpc52xx_uart_ops; + port->dev = &op->dev; + + /* Search for IRQ and mapbase */ + ret = of_address_to_resource(op->dev.of_node, 0, &res); + if (ret) + return ret; + + port->mapbase = res.start; + if (!port->mapbase) { + dev_dbg(&op->dev, "Could not allocate resources for PSC\n"); + return -EINVAL; + } + + psc_ops->get_irq(port, op->dev.of_node); + if (port->irq == 0) { + dev_dbg(&op->dev, "Could not get irq\n"); + return -EINVAL; + } + + dev_dbg(&op->dev, "mpc52xx-psc uart at %p, irq=%x, freq=%i\n", + (void *)port->mapbase, port->irq, port->uartclk); + + /* Add the port to the uart sub-system */ + ret = uart_add_one_port(&mpc52xx_uart_driver, port); + if (ret) + return ret; + + platform_set_drvdata(op, (void *)port); + return 0; +} + +static int +mpc52xx_uart_of_remove(struct platform_device *op) +{ + struct uart_port *port = platform_get_drvdata(op); + + if (port) + uart_remove_one_port(&mpc52xx_uart_driver, port); + + return 0; +} + +#ifdef CONFIG_PM +static int +mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state) +{ + struct uart_port *port = platform_get_drvdata(op); + + if (port) + uart_suspend_port(&mpc52xx_uart_driver, port); + + return 0; +} + +static int +mpc52xx_uart_of_resume(struct platform_device *op) +{ + struct uart_port *port = platform_get_drvdata(op); + + if (port) + uart_resume_port(&mpc52xx_uart_driver, port); + + return 0; +} +#endif + +static void +mpc52xx_uart_of_assign(struct device_node *np) +{ + int i; + + /* Find the first free PSC number */ + for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) { + if (mpc52xx_uart_nodes[i] == NULL) { + of_node_get(np); + mpc52xx_uart_nodes[i] = np; + return; + } + } +} + +static void +mpc52xx_uart_of_enumerate(void) +{ + static int enum_done; + struct device_node *np; + const struct of_device_id *match; + int i; + + if (enum_done) + return; + + /* Assign index to each PSC in device tree */ + for_each_matching_node(np, mpc52xx_uart_of_match) { + match = of_match_node(mpc52xx_uart_of_match, np); + psc_ops = match->data; + mpc52xx_uart_of_assign(np); + } + + enum_done = 1; + + for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) { + if (mpc52xx_uart_nodes[i]) + pr_debug("%pOF assigned to ttyPSC%x\n", + mpc52xx_uart_nodes[i], i); + } +} + +MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match); + +static struct platform_driver mpc52xx_uart_of_driver = { + .probe = mpc52xx_uart_of_probe, + .remove = mpc52xx_uart_of_remove, +#ifdef CONFIG_PM + .suspend = mpc52xx_uart_of_suspend, + .resume = mpc52xx_uart_of_resume, +#endif + .driver = { + .name = "mpc52xx-psc-uart", + .of_match_table = mpc52xx_uart_of_match, + }, +}; + + +/* ======================================================================== */ +/* Module */ +/* ======================================================================== */ + +static int __init +mpc52xx_uart_init(void) +{ + int ret; + + printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n"); + + ret = uart_register_driver(&mpc52xx_uart_driver); + if (ret) { + printk(KERN_ERR "%s: uart_register_driver failed (%i)\n", + __FILE__, ret); + return ret; + } + + mpc52xx_uart_of_enumerate(); + + /* + * Map the PSC FIFO Controller and init if on MPC512x. 
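+	 * psc_ops is still NULL here if enumeration found no PSC node, and
+	 * only the MPC512x-family ops provide a fifoc_init hook, hence the
+	 * two-part check below.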
+ */ + if (psc_ops && psc_ops->fifoc_init) { + ret = psc_ops->fifoc_init(); + if (ret) + goto err_init; + } + + ret = platform_driver_register(&mpc52xx_uart_of_driver); + if (ret) { + printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", + __FILE__, ret); + goto err_reg; + } + + return 0; +err_reg: + if (psc_ops && psc_ops->fifoc_uninit) + psc_ops->fifoc_uninit(); +err_init: + uart_unregister_driver(&mpc52xx_uart_driver); + return ret; +} + +static void __exit +mpc52xx_uart_exit(void) +{ + if (psc_ops->fifoc_uninit) + psc_ops->fifoc_uninit(); + + platform_driver_unregister(&mpc52xx_uart_of_driver); + uart_unregister_driver(&mpc52xx_uart_driver); +} + + +module_init(mpc52xx_uart_init); +module_exit(mpc52xx_uart_exit); + +MODULE_AUTHOR("Sylvain Munaut "); +MODULE_DESCRIPTION("Freescale MPC52xx PSC UART"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c new file mode 100644 index 000000000..2e3e6cf16 --- /dev/null +++ b/drivers/tty/serial/mps2-uart.c @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MPS2 UART driver + * + * Copyright (C) 2015 ARM Limited + * + * Author: Vladimir Murzin + * + * TODO: support for SysRq + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SERIAL_NAME "ttyMPS" +#define DRIVER_NAME "mps2-uart" +#define MAKE_NAME(x) (DRIVER_NAME # x) + +#define UARTn_DATA 0x00 + +#define UARTn_STATE 0x04 +#define UARTn_STATE_TX_FULL BIT(0) +#define UARTn_STATE_RX_FULL BIT(1) +#define UARTn_STATE_TX_OVERRUN BIT(2) +#define UARTn_STATE_RX_OVERRUN BIT(3) + +#define UARTn_CTRL 0x08 +#define UARTn_CTRL_TX_ENABLE BIT(0) +#define UARTn_CTRL_RX_ENABLE BIT(1) +#define UARTn_CTRL_TX_INT_ENABLE BIT(2) +#define UARTn_CTRL_RX_INT_ENABLE BIT(3) +#define UARTn_CTRL_TX_OVERRUN_INT_ENABLE BIT(4) +#define UARTn_CTRL_RX_OVERRUN_INT_ENABLE BIT(5) + +#define UARTn_INT 0x0c +#define UARTn_INT_TX BIT(0) +#define UARTn_INT_RX BIT(1) +#define UARTn_INT_TX_OVERRUN BIT(2) +#define UARTn_INT_RX_OVERRUN BIT(3) + +#define UARTn_BAUDDIV 0x10 +#define UARTn_BAUDDIV_MASK GENMASK(20, 0) + +/* + * Helpers to make typical enable/disable operations more readable. 
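+ * Each group bundles one direction's enable bit with its data and overrun
+ * interrupt enables, so startup/shutdown can switch a whole direction on
+ * or off with a single mask.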
+ */ +#define UARTn_CTRL_TX_GRP (UARTn_CTRL_TX_ENABLE |\ + UARTn_CTRL_TX_INT_ENABLE |\ + UARTn_CTRL_TX_OVERRUN_INT_ENABLE) + +#define UARTn_CTRL_RX_GRP (UARTn_CTRL_RX_ENABLE |\ + UARTn_CTRL_RX_INT_ENABLE |\ + UARTn_CTRL_RX_OVERRUN_INT_ENABLE) + +#define MPS2_MAX_PORTS 3 + +#define UART_PORT_COMBINED_IRQ BIT(0) + +struct mps2_uart_port { + struct uart_port port; + struct clk *clk; + unsigned int tx_irq; + unsigned int rx_irq; + unsigned int flags; +}; + +static inline struct mps2_uart_port *to_mps2_port(struct uart_port *port) +{ + return container_of(port, struct mps2_uart_port, port); +} + +static void mps2_uart_write8(struct uart_port *port, u8 val, unsigned int off) +{ + struct mps2_uart_port *mps_port = to_mps2_port(port); + + writeb(val, mps_port->port.membase + off); +} + +static u8 mps2_uart_read8(struct uart_port *port, unsigned int off) +{ + struct mps2_uart_port *mps_port = to_mps2_port(port); + + return readb(mps_port->port.membase + off); +} + +static void mps2_uart_write32(struct uart_port *port, u32 val, unsigned int off) +{ + struct mps2_uart_port *mps_port = to_mps2_port(port); + + writel_relaxed(val, mps_port->port.membase + off); +} + +static unsigned int mps2_uart_tx_empty(struct uart_port *port) +{ + u8 status = mps2_uart_read8(port, UARTn_STATE); + + return (status & UARTn_STATE_TX_FULL) ? 0 : TIOCSER_TEMT; +} + +static void mps2_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int mps2_uart_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR; +} + +static void mps2_uart_stop_tx(struct uart_port *port) +{ + u8 control = mps2_uart_read8(port, UARTn_CTRL); + + control &= ~UARTn_CTRL_TX_INT_ENABLE; + + mps2_uart_write8(port, control, UARTn_CTRL); +} + +static void mps2_uart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + while (!(mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_TX_FULL)) { + if (port->x_char) { + mps2_uart_write8(port, port->x_char, UARTn_DATA); + port->x_char = 0; + port->icount.tx++; + continue; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + break; + + mps2_uart_write8(port, xmit->buf[xmit->tail], UARTn_DATA); + xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + mps2_uart_stop_tx(port); +} + +static void mps2_uart_start_tx(struct uart_port *port) +{ + u8 control = mps2_uart_read8(port, UARTn_CTRL); + + control |= UARTn_CTRL_TX_INT_ENABLE; + + mps2_uart_write8(port, control, UARTn_CTRL); + + /* + * We've just unmasked the TX IRQ and now slow-starting via + * polling; if there is enough data to fill up the internal + * write buffer in one go, the TX IRQ should assert, at which + * point we switch to fully interrupt-driven TX. 
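+	 * If the FIFO never fills, mps2_uart_tx_chars() below drains the
+	 * circular buffer completely and masks the TX interrupt again via
+	 * mps2_uart_stop_tx().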
+ */ + + mps2_uart_tx_chars(port); +} + +static void mps2_uart_stop_rx(struct uart_port *port) +{ + u8 control = mps2_uart_read8(port, UARTn_CTRL); + + control &= ~UARTn_CTRL_RX_GRP; + + mps2_uart_write8(port, control, UARTn_CTRL); +} + +static void mps2_uart_break_ctl(struct uart_port *port, int ctl) +{ +} + +static void mps2_uart_rx_chars(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + + while (mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_RX_FULL) { + u8 rxdata = mps2_uart_read8(port, UARTn_DATA); + + port->icount.rx++; + tty_insert_flip_char(&port->state->port, rxdata, TTY_NORMAL); + } + + tty_flip_buffer_push(tport); +} + +static irqreturn_t mps2_uart_rxirq(int irq, void *data) +{ + struct uart_port *port = data; + u8 irqflag = mps2_uart_read8(port, UARTn_INT); + + if (unlikely(!(irqflag & UARTn_INT_RX))) + return IRQ_NONE; + + spin_lock(&port->lock); + + mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT); + mps2_uart_rx_chars(port); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mps2_uart_txirq(int irq, void *data) +{ + struct uart_port *port = data; + u8 irqflag = mps2_uart_read8(port, UARTn_INT); + + if (unlikely(!(irqflag & UARTn_INT_TX))) + return IRQ_NONE; + + spin_lock(&port->lock); + + mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT); + mps2_uart_tx_chars(port); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mps2_uart_oerrirq(int irq, void *data) +{ + irqreturn_t handled = IRQ_NONE; + struct uart_port *port = data; + u8 irqflag = mps2_uart_read8(port, UARTn_INT); + + spin_lock(&port->lock); + + if (irqflag & UARTn_INT_RX_OVERRUN) { + struct tty_port *tport = &port->state->port; + + mps2_uart_write8(port, UARTn_INT_RX_OVERRUN, UARTn_INT); + port->icount.overrun++; + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + tty_flip_buffer_push(tport); + handled = IRQ_HANDLED; + } + + /* + * It's never been seen in practice and it never *should* happen since + * we check if there is enough room in TX buffer before sending data. + * So we keep this check in case something suspicious has happened. 
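+	 * If it ever does fire, acknowledge it by writing the bit back to
+	 * UARTn_INT, as for the other interrupt sources, and report the
+	 * IRQ as handled.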
+ */ + if (irqflag & UARTn_INT_TX_OVERRUN) { + mps2_uart_write8(port, UARTn_INT_TX_OVERRUN, UARTn_INT); + handled = IRQ_HANDLED; + } + + spin_unlock(&port->lock); + + return handled; +} + +static irqreturn_t mps2_uart_combinedirq(int irq, void *data) +{ + if (mps2_uart_rxirq(irq, data) == IRQ_HANDLED) + return IRQ_HANDLED; + + if (mps2_uart_txirq(irq, data) == IRQ_HANDLED) + return IRQ_HANDLED; + + if (mps2_uart_oerrirq(irq, data) == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static int mps2_uart_startup(struct uart_port *port) +{ + struct mps2_uart_port *mps_port = to_mps2_port(port); + u8 control = mps2_uart_read8(port, UARTn_CTRL); + int ret; + + control &= ~(UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP); + + mps2_uart_write8(port, control, UARTn_CTRL); + + if (mps_port->flags & UART_PORT_COMBINED_IRQ) { + ret = request_irq(port->irq, mps2_uart_combinedirq, 0, + MAKE_NAME(-combined), mps_port); + + if (ret) { + dev_err(port->dev, "failed to register combinedirq (%d)\n", ret); + return ret; + } + } else { + ret = request_irq(port->irq, mps2_uart_oerrirq, IRQF_SHARED, + MAKE_NAME(-overrun), mps_port); + + if (ret) { + dev_err(port->dev, "failed to register oerrirq (%d)\n", ret); + return ret; + } + + ret = request_irq(mps_port->rx_irq, mps2_uart_rxirq, 0, + MAKE_NAME(-rx), mps_port); + if (ret) { + dev_err(port->dev, "failed to register rxirq (%d)\n", ret); + goto err_free_oerrirq; + } + + ret = request_irq(mps_port->tx_irq, mps2_uart_txirq, 0, + MAKE_NAME(-tx), mps_port); + if (ret) { + dev_err(port->dev, "failed to register txirq (%d)\n", ret); + goto err_free_rxirq; + } + + } + + control |= UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP; + + mps2_uart_write8(port, control, UARTn_CTRL); + + return 0; + +err_free_rxirq: + free_irq(mps_port->rx_irq, mps_port); +err_free_oerrirq: + free_irq(port->irq, mps_port); + + return ret; +} + +static void mps2_uart_shutdown(struct uart_port *port) +{ + struct mps2_uart_port *mps_port = to_mps2_port(port); + u8 control = mps2_uart_read8(port, UARTn_CTRL); + + control &= ~(UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP); + + mps2_uart_write8(port, control, UARTn_CTRL); + + if (!(mps_port->flags & UART_PORT_COMBINED_IRQ)) { + free_irq(mps_port->rx_irq, mps_port); + free_irq(mps_port->tx_irq, mps_port); + } + + free_irq(port->irq, mps_port); +} + +static void +mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + unsigned int baud, bauddiv; + + termios->c_cflag &= ~(CRTSCTS | CMSPAR); + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + termios->c_cflag &= ~PARENB; + termios->c_cflag &= ~CSTOPB; + + baud = uart_get_baud_rate(port, termios, old, + DIV_ROUND_CLOSEST(port->uartclk, UARTn_BAUDDIV_MASK), + DIV_ROUND_CLOSEST(port->uartclk, 16)); + + bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud); + + spin_lock_irqsave(&port->lock, flags); + + uart_update_timeout(port, termios->c_cflag, baud); + mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV); + + spin_unlock_irqrestore(&port->lock, flags); + + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static const char *mps2_uart_type(struct uart_port *port) +{ + return (port->type == PORT_MPS2UART) ? 
DRIVER_NAME : NULL; +} + +static void mps2_uart_release_port(struct uart_port *port) +{ +} + +static int mps2_uart_request_port(struct uart_port *port) +{ + return 0; +} + +static void mps2_uart_config_port(struct uart_port *port, int type) +{ + if (type & UART_CONFIG_TYPE && !mps2_uart_request_port(port)) + port->type = PORT_MPS2UART; +} + +static int mps2_uart_verify_port(struct uart_port *port, struct serial_struct *serinfo) +{ + return -EINVAL; +} + +static const struct uart_ops mps2_uart_pops = { + .tx_empty = mps2_uart_tx_empty, + .set_mctrl = mps2_uart_set_mctrl, + .get_mctrl = mps2_uart_get_mctrl, + .stop_tx = mps2_uart_stop_tx, + .start_tx = mps2_uart_start_tx, + .stop_rx = mps2_uart_stop_rx, + .break_ctl = mps2_uart_break_ctl, + .startup = mps2_uart_startup, + .shutdown = mps2_uart_shutdown, + .set_termios = mps2_uart_set_termios, + .type = mps2_uart_type, + .release_port = mps2_uart_release_port, + .request_port = mps2_uart_request_port, + .config_port = mps2_uart_config_port, + .verify_port = mps2_uart_verify_port, +}; + +static DEFINE_IDR(ports_idr); + +#ifdef CONFIG_SERIAL_MPS2_UART_CONSOLE +static void mps2_uart_console_putchar(struct uart_port *port, unsigned char ch) +{ + while (mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_TX_FULL) + cpu_relax(); + + mps2_uart_write8(port, ch, UARTn_DATA); +} + +static void mps2_uart_console_write(struct console *co, const char *s, unsigned int cnt) +{ + struct mps2_uart_port *mps_port = idr_find(&ports_idr, co->index); + struct uart_port *port = &mps_port->port; + + uart_console_write(port, s, cnt, mps2_uart_console_putchar); +} + +static int mps2_uart_console_setup(struct console *co, char *options) +{ + struct mps2_uart_port *mps_port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= MPS2_MAX_PORTS) + return -ENODEV; + + mps_port = idr_find(&ports_idr, co->index); + + if (!mps_port) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&mps_port->port, co, baud, parity, bits, flow); +} + +static struct uart_driver mps2_uart_driver; + +static struct console mps2_uart_console = { + .name = SERIAL_NAME, + .device = uart_console_device, + .write = mps2_uart_console_write, + .setup = mps2_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &mps2_uart_driver, +}; + +#define MPS2_SERIAL_CONSOLE (&mps2_uart_console) + +static void mps2_early_putchar(struct uart_port *port, unsigned char ch) +{ + while (readb(port->membase + UARTn_STATE) & UARTn_STATE_TX_FULL) + cpu_relax(); + + writeb((unsigned char)ch, port->membase + UARTn_DATA); +} + +static void mps2_early_write(struct console *con, const char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, mps2_early_putchar); +} + +static int __init mps2_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = mps2_early_write; + + return 0; +} + +OF_EARLYCON_DECLARE(mps2, "arm,mps2-uart", mps2_early_console_setup); + +#else +#define MPS2_SERIAL_CONSOLE NULL +#endif + +static struct uart_driver mps2_uart_driver = { + .driver_name = DRIVER_NAME, + .dev_name = SERIAL_NAME, + .nr = MPS2_MAX_PORTS, + .cons = MPS2_SERIAL_CONSOLE, +}; + +static int mps2_of_get_port(struct platform_device *pdev, + struct mps2_uart_port *mps_port) +{ + struct device_node *np = pdev->dev.of_node; + int id; + + if (!np) + return 
-ENODEV; + + id = of_alias_get_id(np, "serial"); + + if (id < 0) + id = idr_alloc_cyclic(&ports_idr, (void *)mps_port, 0, MPS2_MAX_PORTS, GFP_KERNEL); + else + id = idr_alloc(&ports_idr, (void *)mps_port, id, MPS2_MAX_PORTS, GFP_KERNEL); + + if (id < 0) + return id; + + /* Only combined irq is presesnt */ + if (platform_irq_count(pdev) == 1) + mps_port->flags |= UART_PORT_COMBINED_IRQ; + + mps_port->port.line = id; + + return 0; +} + +static int mps2_init_port(struct platform_device *pdev, + struct mps2_uart_port *mps_port) +{ + struct resource *res; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mps_port->port.membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mps_port->port.membase)) + return PTR_ERR(mps_port->port.membase); + + mps_port->port.mapbase = res->start; + mps_port->port.mapsize = resource_size(res); + mps_port->port.iotype = UPIO_MEM; + mps_port->port.flags = UPF_BOOT_AUTOCONF; + mps_port->port.fifosize = 1; + mps_port->port.ops = &mps2_uart_pops; + mps_port->port.dev = &pdev->dev; + + mps_port->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mps_port->clk)) + return PTR_ERR(mps_port->clk); + + ret = clk_prepare_enable(mps_port->clk); + if (ret) + return ret; + + mps_port->port.uartclk = clk_get_rate(mps_port->clk); + + clk_disable_unprepare(mps_port->clk); + + + if (mps_port->flags & UART_PORT_COMBINED_IRQ) { + mps_port->port.irq = platform_get_irq(pdev, 0); + } else { + mps_port->rx_irq = platform_get_irq(pdev, 0); + mps_port->tx_irq = platform_get_irq(pdev, 1); + mps_port->port.irq = platform_get_irq(pdev, 2); + } + + return ret; +} + +static int mps2_serial_probe(struct platform_device *pdev) +{ + struct mps2_uart_port *mps_port; + int ret; + + mps_port = devm_kzalloc(&pdev->dev, sizeof(struct mps2_uart_port), GFP_KERNEL); + + if (!mps_port) + return -ENOMEM; + + ret = mps2_of_get_port(pdev, mps_port); + if (ret) + return ret; + + ret = mps2_init_port(pdev, mps_port); + if (ret) + return ret; + + ret = uart_add_one_port(&mps2_uart_driver, &mps_port->port); + if (ret) + return ret; + + platform_set_drvdata(pdev, mps_port); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id mps2_match[] = { + { .compatible = "arm,mps2-uart", }, + {}, +}; +#endif + +static struct platform_driver mps2_serial_driver = { + .probe = mps2_serial_probe, + + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(mps2_match), + .suppress_bind_attrs = true, + }, +}; + +static int __init mps2_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&mps2_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&mps2_serial_driver); + if (ret) + uart_unregister_driver(&mps2_uart_driver); + + return ret; +} +arch_initcall(mps2_uart_init); diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c new file mode 100644 index 000000000..7dd19a281 --- /dev/null +++ b/drivers/tty/serial/msm_serial.c @@ -0,0 +1,1926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for msm7k serial device and console + * + * Copyright (C) 2007 Google, Inc. + * Author: Robert Love + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MSM_UART_MR1 0x0000 + +#define MSM_UART_MR1_AUTO_RFR_LEVEL0 0x3F +#define MSM_UART_MR1_AUTO_RFR_LEVEL1 0x3FF00 +#define MSM_UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00 +#define MSM_UART_MR1_RX_RDY_CTL BIT(7) +#define MSM_UART_MR1_CTS_CTL BIT(6) + +#define MSM_UART_MR2 0x0004 +#define MSM_UART_MR2_ERROR_MODE BIT(6) +#define MSM_UART_MR2_BITS_PER_CHAR 0x30 +#define MSM_UART_MR2_BITS_PER_CHAR_5 (0x0 << 4) +#define MSM_UART_MR2_BITS_PER_CHAR_6 (0x1 << 4) +#define MSM_UART_MR2_BITS_PER_CHAR_7 (0x2 << 4) +#define MSM_UART_MR2_BITS_PER_CHAR_8 (0x3 << 4) +#define MSM_UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2) +#define MSM_UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2) +#define MSM_UART_MR2_PARITY_MODE_NONE 0x0 +#define MSM_UART_MR2_PARITY_MODE_ODD 0x1 +#define MSM_UART_MR2_PARITY_MODE_EVEN 0x2 +#define MSM_UART_MR2_PARITY_MODE_SPACE 0x3 +#define MSM_UART_MR2_PARITY_MODE 0x3 + +#define MSM_UART_CSR 0x0008 + +#define MSM_UART_TF 0x000C +#define UARTDM_TF 0x0070 + +#define MSM_UART_CR 0x0010 +#define MSM_UART_CR_CMD_NULL (0 << 4) +#define MSM_UART_CR_CMD_RESET_RX (1 << 4) +#define MSM_UART_CR_CMD_RESET_TX (2 << 4) +#define MSM_UART_CR_CMD_RESET_ERR (3 << 4) +#define MSM_UART_CR_CMD_RESET_BREAK_INT (4 << 4) +#define MSM_UART_CR_CMD_START_BREAK (5 << 4) +#define MSM_UART_CR_CMD_STOP_BREAK (6 << 4) +#define MSM_UART_CR_CMD_RESET_CTS (7 << 4) +#define MSM_UART_CR_CMD_RESET_STALE_INT (8 << 4) +#define MSM_UART_CR_CMD_PACKET_MODE (9 << 4) +#define MSM_UART_CR_CMD_MODE_RESET (12 << 4) +#define MSM_UART_CR_CMD_SET_RFR (13 << 4) +#define MSM_UART_CR_CMD_RESET_RFR (14 << 4) +#define MSM_UART_CR_CMD_PROTECTION_EN (16 << 4) +#define MSM_UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8) +#define MSM_UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4) +#define MSM_UART_CR_CMD_FORCE_STALE (4 << 8) +#define MSM_UART_CR_CMD_RESET_TX_READY (3 << 8) +#define MSM_UART_CR_TX_DISABLE BIT(3) +#define MSM_UART_CR_TX_ENABLE BIT(2) +#define MSM_UART_CR_RX_DISABLE BIT(1) +#define MSM_UART_CR_RX_ENABLE BIT(0) +#define MSM_UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4)) + +#define MSM_UART_IMR 0x0014 +#define MSM_UART_IMR_TXLEV BIT(0) +#define MSM_UART_IMR_RXSTALE BIT(3) +#define MSM_UART_IMR_RXLEV BIT(4) +#define MSM_UART_IMR_DELTA_CTS BIT(5) +#define MSM_UART_IMR_CURRENT_CTS BIT(6) +#define MSM_UART_IMR_RXBREAK_START BIT(10) + +#define MSM_UART_IPR_RXSTALE_LAST 0x20 +#define MSM_UART_IPR_STALE_LSB 0x1F +#define MSM_UART_IPR_STALE_TIMEOUT_MSB 0x3FF80 +#define MSM_UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80 + +#define MSM_UART_IPR 0x0018 +#define MSM_UART_TFWR 0x001C +#define MSM_UART_RFWR 0x0020 +#define MSM_UART_HCR 0x0024 + +#define MSM_UART_MREG 0x0028 +#define MSM_UART_NREG 0x002C +#define MSM_UART_DREG 0x0030 +#define MSM_UART_MNDREG 0x0034 +#define MSM_UART_IRDA 0x0038 +#define MSM_UART_MISR_MODE 0x0040 +#define MSM_UART_MISR_RESET 0x0044 +#define MSM_UART_MISR_EXPORT 0x0048 +#define MSM_UART_MISR_VAL 0x004C +#define MSM_UART_TEST_CTRL 0x0050 + +#define MSM_UART_SR 0x0008 +#define MSM_UART_SR_HUNT_CHAR BIT(7) +#define MSM_UART_SR_RX_BREAK BIT(6) +#define MSM_UART_SR_PAR_FRAME_ERR BIT(5) +#define MSM_UART_SR_OVERRUN BIT(4) +#define MSM_UART_SR_TX_EMPTY BIT(3) +#define MSM_UART_SR_TX_READY BIT(2) +#define MSM_UART_SR_RX_FULL BIT(1) +#define MSM_UART_SR_RX_READY BIT(0) + +#define MSM_UART_RF 0x000C +#define UARTDM_RF 0x0070 +#define 
MSM_UART_MISR 0x0010 +#define MSM_UART_ISR 0x0014 +#define MSM_UART_ISR_TX_READY BIT(7) + +#define UARTDM_RXFS 0x50 +#define UARTDM_RXFS_BUF_SHIFT 0x7 +#define UARTDM_RXFS_BUF_MASK 0x7 + +#define UARTDM_DMEN 0x3C +#define UARTDM_DMEN_RX_SC_ENABLE BIT(5) +#define UARTDM_DMEN_TX_SC_ENABLE BIT(4) + +#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2) /* UARTDM_1P4 */ +#define UARTDM_DMEN_TX_DM_ENABLE BIT(0) /* < UARTDM_1P4 */ + +#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3) /* UARTDM_1P4 */ +#define UARTDM_DMEN_RX_DM_ENABLE BIT(1) /* < UARTDM_1P4 */ + +#define UARTDM_DMRX 0x34 +#define UARTDM_NCF_TX 0x40 +#define UARTDM_RX_TOTAL_SNAP 0x38 + +#define UARTDM_BURST_SIZE 16 /* in bytes */ +#define UARTDM_TX_AIGN(x) ((x) & ~0x3) /* valid for > 1p3 */ +#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */ +#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4) + +enum { + UARTDM_1P1 = 1, + UARTDM_1P2, + UARTDM_1P3, + UARTDM_1P4, +}; + +struct msm_dma { + struct dma_chan *chan; + enum dma_data_direction dir; + dma_addr_t phys; + unsigned char *virt; + dma_cookie_t cookie; + u32 enable_bit; + unsigned int count; + struct dma_async_tx_descriptor *desc; +}; + +struct msm_port { + struct uart_port uart; + char name[16]; + struct clk *clk; + struct clk *pclk; + unsigned int imr; + int is_uartdm; + unsigned int old_snap_state; + bool break_detected; + struct msm_dma tx_dma; + struct msm_dma rx_dma; +}; + +static inline struct msm_port *to_msm_port(struct uart_port *up) +{ + return container_of(up, struct msm_port, uart); +} + +static +void msm_write(struct uart_port *port, unsigned int val, unsigned int off) +{ + writel_relaxed(val, port->membase + off); +} + +static +unsigned int msm_read(struct uart_port *port, unsigned int off) +{ + return readl_relaxed(port->membase + off); +} + +/* + * Setup the MND registers to use the TCXO clock. + */ +static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port) +{ + msm_write(port, 0x06, MSM_UART_MREG); + msm_write(port, 0xF1, MSM_UART_NREG); + msm_write(port, 0x0F, MSM_UART_DREG); + msm_write(port, 0x1A, MSM_UART_MNDREG); + port->uartclk = 1843200; +} + +/* + * Setup the MND registers to use the TCXO clock divided by 4. + */ +static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port) +{ + msm_write(port, 0x18, MSM_UART_MREG); + msm_write(port, 0xF6, MSM_UART_NREG); + msm_write(port, 0x0F, MSM_UART_DREG); + msm_write(port, 0x0A, MSM_UART_MNDREG); + port->uartclk = 1843200; +} + +static void msm_serial_set_mnd_regs(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + + /* + * These registers don't exist so we change the clk input rate + * on uartdm hardware instead + */ + if (msm_port->is_uartdm) + return; + + if (port->uartclk == 19200000) + msm_serial_set_mnd_regs_tcxo(port); + else if (port->uartclk == 4800000) + msm_serial_set_mnd_regs_tcxoby4(port); +} + +static void msm_handle_tx(struct uart_port *port); +static void msm_start_rx_dma(struct msm_port *msm_port); + +static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma) +{ + struct device *dev = port->dev; + unsigned int mapped; + u32 val; + + mapped = dma->count; + dma->count = 0; + + dmaengine_terminate_all(dma->chan); + + /* + * DMA Stall happens if enqueue and flush command happens concurrently. + * For example before changing the baud rate/protocol configuration and + * sending flush command to ADM, disable the channel of UARTDM. + * Note: should not reset the receiver here immediately as it is not + * suggested to do disable/reset or reset/disable at the same time. 
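+	 * Hence the order here: the dmaengine transfer was terminated above,
+	 * the channel enable bit is cleared in UARTDM_DMEN next, and the
+	 * buffer is unmapped only if a transfer had actually been mapped.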
+ */ + val = msm_read(port, UARTDM_DMEN); + val &= ~dma->enable_bit; + msm_write(port, val, UARTDM_DMEN); + + if (mapped) + dma_unmap_single(dev, dma->phys, mapped, dma->dir); +} + +static void msm_release_dma(struct msm_port *msm_port) +{ + struct msm_dma *dma; + + dma = &msm_port->tx_dma; + if (dma->chan) { + msm_stop_dma(&msm_port->uart, dma); + dma_release_channel(dma->chan); + } + + memset(dma, 0, sizeof(*dma)); + + dma = &msm_port->rx_dma; + if (dma->chan) { + msm_stop_dma(&msm_port->uart, dma); + dma_release_channel(dma->chan); + kfree(dma->virt); + } + + memset(dma, 0, sizeof(*dma)); +} + +static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base) +{ + struct device *dev = msm_port->uart.dev; + struct dma_slave_config conf; + struct qcom_adm_peripheral_config periph_conf = {}; + struct msm_dma *dma; + u32 crci = 0; + int ret; + + dma = &msm_port->tx_dma; + + /* allocate DMA resources, if available */ + dma->chan = dma_request_chan(dev, "tx"); + if (IS_ERR(dma->chan)) + goto no_tx; + + of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci); + + memset(&conf, 0, sizeof(conf)); + conf.direction = DMA_MEM_TO_DEV; + conf.device_fc = true; + conf.dst_addr = base + UARTDM_TF; + conf.dst_maxburst = UARTDM_BURST_SIZE; + if (crci) { + conf.peripheral_config = &periph_conf; + conf.peripheral_size = sizeof(periph_conf); + periph_conf.crci = crci; + } + + ret = dmaengine_slave_config(dma->chan, &conf); + if (ret) + goto rel_tx; + + dma->dir = DMA_TO_DEVICE; + + if (msm_port->is_uartdm < UARTDM_1P4) + dma->enable_bit = UARTDM_DMEN_TX_DM_ENABLE; + else + dma->enable_bit = UARTDM_DMEN_TX_BAM_ENABLE; + + return; + +rel_tx: + dma_release_channel(dma->chan); +no_tx: + memset(dma, 0, sizeof(*dma)); +} + +static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base) +{ + struct device *dev = msm_port->uart.dev; + struct dma_slave_config conf; + struct qcom_adm_peripheral_config periph_conf = {}; + struct msm_dma *dma; + u32 crci = 0; + int ret; + + dma = &msm_port->rx_dma; + + /* allocate DMA resources, if available */ + dma->chan = dma_request_chan(dev, "rx"); + if (IS_ERR(dma->chan)) + goto no_rx; + + of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci); + + dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL); + if (!dma->virt) + goto rel_rx; + + memset(&conf, 0, sizeof(conf)); + conf.direction = DMA_DEV_TO_MEM; + conf.device_fc = true; + conf.src_addr = base + UARTDM_RF; + conf.src_maxburst = UARTDM_BURST_SIZE; + if (crci) { + conf.peripheral_config = &periph_conf; + conf.peripheral_size = sizeof(periph_conf); + periph_conf.crci = crci; + } + + ret = dmaengine_slave_config(dma->chan, &conf); + if (ret) + goto err; + + dma->dir = DMA_FROM_DEVICE; + + if (msm_port->is_uartdm < UARTDM_1P4) + dma->enable_bit = UARTDM_DMEN_RX_DM_ENABLE; + else + dma->enable_bit = UARTDM_DMEN_RX_BAM_ENABLE; + + return; +err: + kfree(dma->virt); +rel_rx: + dma_release_channel(dma->chan); +no_rx: + memset(dma, 0, sizeof(*dma)); +} + +static inline void msm_wait_for_xmitr(struct uart_port *port) +{ + unsigned int timeout = 500000; + + while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY)) { + if (msm_read(port, MSM_UART_ISR) & MSM_UART_ISR_TX_READY) + break; + udelay(1); + if (!timeout--) + break; + } + msm_write(port, MSM_UART_CR_CMD_RESET_TX_READY, MSM_UART_CR); +} + +static void msm_stop_tx(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + + msm_port->imr &= ~MSM_UART_IMR_TXLEV; + msm_write(port, msm_port->imr, MSM_UART_IMR); +} + +static void 
msm_start_tx(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + struct msm_dma *dma = &msm_port->tx_dma; + + /* Already started in DMA mode */ + if (dma->count) + return; + + msm_port->imr |= MSM_UART_IMR_TXLEV; + msm_write(port, msm_port->imr, MSM_UART_IMR); +} + +static void msm_reset_dm_count(struct uart_port *port, int count) +{ + msm_wait_for_xmitr(port); + msm_write(port, count, UARTDM_NCF_TX); + msm_read(port, UARTDM_NCF_TX); +} + +static void msm_complete_tx_dma(void *args) +{ + struct msm_port *msm_port = args; + struct uart_port *port = &msm_port->uart; + struct circ_buf *xmit = &port->state->xmit; + struct msm_dma *dma = &msm_port->tx_dma; + struct dma_tx_state state; + unsigned long flags; + unsigned int count; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + + /* Already stopped */ + if (!dma->count) + goto done; + + dmaengine_tx_status(dma->chan, dma->cookie, &state); + + dma_unmap_single(port->dev, dma->phys, dma->count, dma->dir); + + val = msm_read(port, UARTDM_DMEN); + val &= ~dma->enable_bit; + msm_write(port, val, UARTDM_DMEN); + + if (msm_port->is_uartdm > UARTDM_1P3) { + msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR); + msm_write(port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR); + } + + count = dma->count - state.residue; + port->icount.tx += count; + dma->count = 0; + + xmit->tail += count; + xmit->tail &= UART_XMIT_SIZE - 1; + + /* Restore "Tx FIFO below watermark" interrupt */ + msm_port->imr |= MSM_UART_IMR_TXLEV; + msm_write(port, msm_port->imr, MSM_UART_IMR); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + msm_handle_tx(port); +done: + spin_unlock_irqrestore(&port->lock, flags); +} + +static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count) +{ + struct circ_buf *xmit = &msm_port->uart.state->xmit; + struct uart_port *port = &msm_port->uart; + struct msm_dma *dma = &msm_port->tx_dma; + void *cpu_addr; + int ret; + u32 val; + + cpu_addr = &xmit->buf[xmit->tail]; + + dma->phys = dma_map_single(port->dev, cpu_addr, count, dma->dir); + ret = dma_mapping_error(port->dev, dma->phys); + if (ret) + return ret; + + dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys, + count, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | + DMA_PREP_FENCE); + if (!dma->desc) { + ret = -EIO; + goto unmap; + } + + dma->desc->callback = msm_complete_tx_dma; + dma->desc->callback_param = msm_port; + + dma->cookie = dmaengine_submit(dma->desc); + ret = dma_submit_error(dma->cookie); + if (ret) + goto unmap; + + /* + * Using DMA complete for Tx FIFO reload, no need for + * "Tx FIFO below watermark" one, disable it + */ + msm_port->imr &= ~MSM_UART_IMR_TXLEV; + msm_write(port, msm_port->imr, MSM_UART_IMR); + + dma->count = count; + + val = msm_read(port, UARTDM_DMEN); + val |= dma->enable_bit; + + if (msm_port->is_uartdm < UARTDM_1P4) + msm_write(port, val, UARTDM_DMEN); + + msm_reset_dm_count(port, count); + + if (msm_port->is_uartdm > UARTDM_1P3) + msm_write(port, val, UARTDM_DMEN); + + dma_async_issue_pending(dma->chan); + return 0; +unmap: + dma_unmap_single(port->dev, dma->phys, count, dma->dir); + return ret; +} + +static void msm_complete_rx_dma(void *args) +{ + struct msm_port *msm_port = args; + struct uart_port *port = &msm_port->uart; + struct tty_port *tport = &port->state->port; + struct msm_dma *dma = &msm_port->rx_dma; + int count = 0, i, sysrq; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + + /* Already stopped */ + if (!dma->count) + goto done; + + val = 
msm_read(port, UARTDM_DMEN); + val &= ~dma->enable_bit; + msm_write(port, val, UARTDM_DMEN); + + if (msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN) { + port->icount.overrun++; + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR); + } + + count = msm_read(port, UARTDM_RX_TOTAL_SNAP); + + port->icount.rx += count; + + dma->count = 0; + + dma_unmap_single(port->dev, dma->phys, UARTDM_RX_SIZE, dma->dir); + + for (i = 0; i < count; i++) { + char flag = TTY_NORMAL; + + if (msm_port->break_detected && dma->virt[i] == 0) { + port->icount.brk++; + flag = TTY_BREAK; + msm_port->break_detected = false; + if (uart_handle_break(port)) + continue; + } + + if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK)) + flag = TTY_NORMAL; + + spin_unlock_irqrestore(&port->lock, flags); + sysrq = uart_handle_sysrq_char(port, dma->virt[i]); + spin_lock_irqsave(&port->lock, flags); + if (!sysrq) + tty_insert_flip_char(tport, dma->virt[i], flag); + } + + msm_start_rx_dma(msm_port); +done: + spin_unlock_irqrestore(&port->lock, flags); + + if (count) + tty_flip_buffer_push(tport); +} + +static void msm_start_rx_dma(struct msm_port *msm_port) +{ + struct msm_dma *dma = &msm_port->rx_dma; + struct uart_port *uart = &msm_port->uart; + u32 val; + int ret; + + if (IS_ENABLED(CONFIG_CONSOLE_POLL)) + return; + + if (!dma->chan) + return; + + dma->phys = dma_map_single(uart->dev, dma->virt, + UARTDM_RX_SIZE, dma->dir); + ret = dma_mapping_error(uart->dev, dma->phys); + if (ret) + goto sw_mode; + + dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys, + UARTDM_RX_SIZE, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!dma->desc) + goto unmap; + + dma->desc->callback = msm_complete_rx_dma; + dma->desc->callback_param = msm_port; + + dma->cookie = dmaengine_submit(dma->desc); + ret = dma_submit_error(dma->cookie); + if (ret) + goto unmap; + /* + * Using DMA for FIFO off-load, no need for "Rx FIFO over + * watermark" or "stale" interrupts, disable them + */ + msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE); + + /* + * Well, when DMA is ADM3 engine(implied by <= UARTDM v1.3), + * we need RXSTALE to flush input DMA fifo to memory + */ + if (msm_port->is_uartdm < UARTDM_1P4) + msm_port->imr |= MSM_UART_IMR_RXSTALE; + + msm_write(uart, msm_port->imr, MSM_UART_IMR); + + dma->count = UARTDM_RX_SIZE; + + dma_async_issue_pending(dma->chan); + + msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR); + msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR); + + val = msm_read(uart, UARTDM_DMEN); + val |= dma->enable_bit; + + if (msm_port->is_uartdm < UARTDM_1P4) + msm_write(uart, val, UARTDM_DMEN); + + msm_write(uart, UARTDM_RX_SIZE, UARTDM_DMRX); + + if (msm_port->is_uartdm > UARTDM_1P3) + msm_write(uart, val, UARTDM_DMEN); + + return; +unmap: + dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir); + +sw_mode: + /* + * Switch from DMA to SW/FIFO mode. After clearing Rx BAM (UARTDM_DMEN), + * receiver must be reset. 
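+	 * The stale event is then re-armed and the RXLEV/RXSTALE interrupts
+	 * re-enabled below so reception continues in PIO mode.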
+ */ + msm_write(uart, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR); + msm_write(uart, MSM_UART_CR_RX_ENABLE, MSM_UART_CR); + + msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR); + msm_write(uart, 0xFFFFFF, UARTDM_DMRX); + msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR); + + /* Re-enable RX interrupts */ + msm_port->imr |= MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE; + msm_write(uart, msm_port->imr, MSM_UART_IMR); +} + +static void msm_stop_rx(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + struct msm_dma *dma = &msm_port->rx_dma; + + msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE); + msm_write(port, msm_port->imr, MSM_UART_IMR); + + if (dma->chan) + msm_stop_dma(port, dma); +} + +static void msm_enable_ms(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + + msm_port->imr |= MSM_UART_IMR_DELTA_CTS; + msm_write(port, msm_port->imr, MSM_UART_IMR); +} + +static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr) + __must_hold(&port->lock) +{ + struct tty_port *tport = &port->state->port; + unsigned int sr; + int count = 0; + struct msm_port *msm_port = to_msm_port(port); + + if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) { + port->icount.overrun++; + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR); + } + + if (misr & MSM_UART_IMR_RXSTALE) { + count = msm_read(port, UARTDM_RX_TOTAL_SNAP) - + msm_port->old_snap_state; + msm_port->old_snap_state = 0; + } else { + count = 4 * (msm_read(port, MSM_UART_RFWR)); + msm_port->old_snap_state += count; + } + + /* TODO: Precise error reporting */ + + port->icount.rx += count; + + while (count > 0) { + unsigned char buf[4]; + int sysrq, r_count, i; + + sr = msm_read(port, MSM_UART_SR); + if ((sr & MSM_UART_SR_RX_READY) == 0) { + msm_port->old_snap_state -= count; + break; + } + + ioread32_rep(port->membase + UARTDM_RF, buf, 1); + r_count = min_t(int, count, sizeof(buf)); + + for (i = 0; i < r_count; i++) { + char flag = TTY_NORMAL; + + if (msm_port->break_detected && buf[i] == 0) { + port->icount.brk++; + flag = TTY_BREAK; + msm_port->break_detected = false; + if (uart_handle_break(port)) + continue; + } + + if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK)) + flag = TTY_NORMAL; + + spin_unlock(&port->lock); + sysrq = uart_handle_sysrq_char(port, buf[i]); + spin_lock(&port->lock); + if (!sysrq) + tty_insert_flip_char(tport, buf[i], flag); + } + count -= r_count; + } + + tty_flip_buffer_push(tport); + + if (misr & (MSM_UART_IMR_RXSTALE)) + msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR); + msm_write(port, 0xFFFFFF, UARTDM_DMRX); + msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR); + + /* Try to use DMA */ + msm_start_rx_dma(msm_port); +} + +static void msm_handle_rx(struct uart_port *port) + __must_hold(&port->lock) +{ + struct tty_port *tport = &port->state->port; + unsigned int sr; + + /* + * Handle overrun. My understanding of the hardware is that overrun + * is not tied to the RX buffer, so we handle the case out of band. 
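+ * A NUL byte flagged TTY_OVERRUN is queued so the line discipline
+ * can account for the lost data, and MSM_UART_CR_CMD_RESET_ERR
+ * clears the error status before the FIFO is drained below.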
+ */ + if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) { + port->icount.overrun++; + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR); + } + + /* and now the main RX loop */ + while ((sr = msm_read(port, MSM_UART_SR)) & MSM_UART_SR_RX_READY) { + unsigned int c; + char flag = TTY_NORMAL; + int sysrq; + + c = msm_read(port, MSM_UART_RF); + + if (sr & MSM_UART_SR_RX_BREAK) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } else if (sr & MSM_UART_SR_PAR_FRAME_ERR) { + port->icount.frame++; + } else { + port->icount.rx++; + } + + /* Mask conditions we're ignorning. */ + sr &= port->read_status_mask; + + if (sr & MSM_UART_SR_RX_BREAK) + flag = TTY_BREAK; + else if (sr & MSM_UART_SR_PAR_FRAME_ERR) + flag = TTY_FRAME; + + spin_unlock(&port->lock); + sysrq = uart_handle_sysrq_char(port, c); + spin_lock(&port->lock); + if (!sysrq) + tty_insert_flip_char(tport, c, flag); + } + + tty_flip_buffer_push(tport); +} + +static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count) +{ + struct circ_buf *xmit = &port->state->xmit; + struct msm_port *msm_port = to_msm_port(port); + unsigned int num_chars; + unsigned int tf_pointer = 0; + void __iomem *tf; + + if (msm_port->is_uartdm) + tf = port->membase + UARTDM_TF; + else + tf = port->membase + MSM_UART_TF; + + if (tx_count && msm_port->is_uartdm) + msm_reset_dm_count(port, tx_count); + + while (tf_pointer < tx_count) { + int i; + char buf[4] = { 0 }; + + if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY)) + break; + + if (msm_port->is_uartdm) + num_chars = min(tx_count - tf_pointer, + (unsigned int)sizeof(buf)); + else + num_chars = 1; + + for (i = 0; i < num_chars; i++) { + buf[i] = xmit->buf[xmit->tail + i]; + port->icount.tx++; + } + + iowrite32_rep(tf, buf, 1); + xmit->tail = (xmit->tail + num_chars) & (UART_XMIT_SIZE - 1); + tf_pointer += num_chars; + } + + /* disable tx interrupts if nothing more to send */ + if (uart_circ_empty(xmit)) + msm_stop_tx(port); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void msm_handle_tx(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + struct circ_buf *xmit = &msm_port->uart.state->xmit; + struct msm_dma *dma = &msm_port->tx_dma; + unsigned int pio_count, dma_count, dma_min; + char buf[4] = { 0 }; + void __iomem *tf; + int err = 0; + + if (port->x_char) { + if (msm_port->is_uartdm) + tf = port->membase + UARTDM_TF; + else + tf = port->membase + MSM_UART_TF; + + buf[0] = port->x_char; + + if (msm_port->is_uartdm) + msm_reset_dm_count(port, 1); + + iowrite32_rep(tf, buf, 1); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + msm_stop_tx(port); + return; + } + + pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + + dma_min = 1; /* Always DMA */ + if (msm_port->is_uartdm > UARTDM_1P3) { + dma_count = UARTDM_TX_AIGN(dma_count); + dma_min = UARTDM_BURST_SIZE; + } else { + if (dma_count > UARTDM_TX_MAX) + dma_count = UARTDM_TX_MAX; + } + + if (pio_count > port->fifosize) + pio_count = port->fifosize; + + if (!dma->chan || dma_count < dma_min) + msm_handle_tx_pio(port, pio_count); + else + err = msm_handle_tx_dma(msm_port, dma_count); + + if (err) /* fall back to PIO mode */ + msm_handle_tx_pio(port, pio_count); +} + +static void msm_handle_delta_cts(struct uart_port *port) +{ + msm_write(port, 
MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR); + port->icount.cts++; + wake_up_interruptible(&port->state->port.delta_msr_wait); +} + +static irqreturn_t msm_uart_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct msm_port *msm_port = to_msm_port(port); + struct msm_dma *dma = &msm_port->rx_dma; + unsigned long flags; + unsigned int misr; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + misr = msm_read(port, MSM_UART_MISR); + msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */ + + if (misr & MSM_UART_IMR_RXBREAK_START) { + msm_port->break_detected = true; + msm_write(port, MSM_UART_CR_CMD_RESET_RXBREAK_START, MSM_UART_CR); + } + + if (misr & (MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE)) { + if (dma->count) { + val = MSM_UART_CR_CMD_STALE_EVENT_DISABLE; + msm_write(port, val, MSM_UART_CR); + val = MSM_UART_CR_CMD_RESET_STALE_INT; + msm_write(port, val, MSM_UART_CR); + /* + * Flush DMA input fifo to memory, this will also + * trigger DMA RX completion + */ + dmaengine_terminate_all(dma->chan); + } else if (msm_port->is_uartdm) { + msm_handle_rx_dm(port, misr); + } else { + msm_handle_rx(port); + } + } + if (misr & MSM_UART_IMR_TXLEV) + msm_handle_tx(port); + if (misr & MSM_UART_IMR_DELTA_CTS) + msm_handle_delta_cts(port); + + msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */ + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_HANDLED; +} + +static unsigned int msm_tx_empty(struct uart_port *port) +{ + return (msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0; +} + +static unsigned int msm_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS; +} + +static void msm_reset(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + unsigned int mr; + + /* reset everything */ + msm_write(port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR); + msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR); + msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR); + msm_write(port, MSM_UART_CR_CMD_RESET_BREAK_INT, MSM_UART_CR); + msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR); + msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR); + mr = msm_read(port, MSM_UART_MR1); + mr &= ~MSM_UART_MR1_RX_RDY_CTL; + msm_write(port, mr, MSM_UART_MR1); + + /* Disable DM modes */ + if (msm_port->is_uartdm) + msm_write(port, 0, UARTDM_DMEN); +} + +static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int mr; + + mr = msm_read(port, MSM_UART_MR1); + + if (!(mctrl & TIOCM_RTS)) { + mr &= ~MSM_UART_MR1_RX_RDY_CTL; + msm_write(port, mr, MSM_UART_MR1); + msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR); + } else { + mr |= MSM_UART_MR1_RX_RDY_CTL; + msm_write(port, mr, MSM_UART_MR1); + } +} + +static void msm_break_ctl(struct uart_port *port, int break_ctl) +{ + if (break_ctl) + msm_write(port, MSM_UART_CR_CMD_START_BREAK, MSM_UART_CR); + else + msm_write(port, MSM_UART_CR_CMD_STOP_BREAK, MSM_UART_CR); +} + +struct msm_baud_map { + u16 divisor; + u8 code; + u8 rxstale; +}; + +static const struct msm_baud_map * +msm_find_best_baud(struct uart_port *port, unsigned int baud, + unsigned long *rate) +{ + struct msm_port *msm_port = to_msm_port(port); + unsigned int divisor, result; + unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX; + const struct msm_baud_map *entry, *end, *best; + static const struct msm_baud_map table[] = { + { 1, 0xff, 31 }, + { 2, 0xee, 16 }, + { 3, 0xdd, 8 }, + { 4, 0xcc, 6 }, + { 6, 0xbb, 6 }, + { 8, 0xaa, 6 }, + { 
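+ /* entries are { clock divisor, MSM_UART_CSR code, RX stale timeout } */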
12, 0x99, 6 }, + { 16, 0x88, 1 }, + { 24, 0x77, 1 }, + { 32, 0x66, 1 }, + { 48, 0x55, 1 }, + { 96, 0x44, 1 }, + { 192, 0x33, 1 }, + { 384, 0x22, 1 }, + { 768, 0x11, 1 }, + { 1536, 0x00, 1 }, + }; + + best = table; /* Default to smallest divider */ + target = clk_round_rate(msm_port->clk, 16 * baud); + divisor = DIV_ROUND_CLOSEST(target, 16 * baud); + + end = table + ARRAY_SIZE(table); + entry = table; + while (entry < end) { + if (entry->divisor <= divisor) { + result = target / entry->divisor / 16; + diff = abs(result - baud); + + /* Keep track of best entry */ + if (diff < best_diff) { + best_diff = diff; + best = entry; + best_rate = target; + } + + if (result == baud) + break; + } else if (entry->divisor > divisor) { + old = target; + target = clk_round_rate(msm_port->clk, old + 1); + /* + * The rate didn't get any faster so we can't do + * better at dividing it down + */ + if (target == old) + break; + + /* Start the divisor search over at this new rate */ + entry = table; + divisor = DIV_ROUND_CLOSEST(target, 16 * baud); + continue; + } + entry++; + } + + *rate = best_rate; + return best; +} + +static int msm_set_baud_rate(struct uart_port *port, unsigned int baud, + unsigned long *saved_flags) +{ + unsigned int rxstale, watermark, mask; + struct msm_port *msm_port = to_msm_port(port); + const struct msm_baud_map *entry; + unsigned long flags, rate; + + flags = *saved_flags; + spin_unlock_irqrestore(&port->lock, flags); + + entry = msm_find_best_baud(port, baud, &rate); + clk_set_rate(msm_port->clk, rate); + baud = rate / 16 / entry->divisor; + + spin_lock_irqsave(&port->lock, flags); + *saved_flags = flags; + port->uartclk = rate; + + msm_write(port, entry->code, MSM_UART_CSR); + + /* RX stale watermark */ + rxstale = entry->rxstale; + watermark = MSM_UART_IPR_STALE_LSB & rxstale; + if (msm_port->is_uartdm) { + mask = MSM_UART_DM_IPR_STALE_TIMEOUT_MSB; + } else { + watermark |= MSM_UART_IPR_RXSTALE_LAST; + mask = MSM_UART_IPR_STALE_TIMEOUT_MSB; + } + + watermark |= mask & (rxstale << 2); + + msm_write(port, watermark, MSM_UART_IPR); + + /* set RX watermark */ + watermark = (port->fifosize * 3) / 4; + msm_write(port, watermark, MSM_UART_RFWR); + + /* set TX watermark */ + msm_write(port, 10, MSM_UART_TFWR); + + msm_write(port, MSM_UART_CR_CMD_PROTECTION_EN, MSM_UART_CR); + msm_reset(port); + + /* Enable RX and TX */ + msm_write(port, MSM_UART_CR_TX_ENABLE | MSM_UART_CR_RX_ENABLE, MSM_UART_CR); + + /* turn on RX and CTS interrupts */ + msm_port->imr = MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE | + MSM_UART_IMR_CURRENT_CTS | MSM_UART_IMR_RXBREAK_START; + + msm_write(port, msm_port->imr, MSM_UART_IMR); + + if (msm_port->is_uartdm) { + msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR); + msm_write(port, 0xFFFFFF, UARTDM_DMRX); + msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR); + } + + return baud; +} + +static void msm_init_clock(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + + clk_prepare_enable(msm_port->clk); + clk_prepare_enable(msm_port->pclk); + msm_serial_set_mnd_regs(port); +} + +static int msm_startup(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + unsigned int data, rfr_level, mask; + int ret; + + snprintf(msm_port->name, sizeof(msm_port->name), + "msm_serial%d", port->line); + + msm_init_clock(port); + + if (likely(port->fifosize > 12)) + rfr_level = port->fifosize - 12; + else + rfr_level = port->fifosize; + + /* set automatic RFR level */ + data = msm_read(port, MSM_UART_MR1); + + if 
(msm_port->is_uartdm) + mask = MSM_UART_DM_MR1_AUTO_RFR_LEVEL1; + else + mask = MSM_UART_MR1_AUTO_RFR_LEVEL1; + + data &= ~mask; + data &= ~MSM_UART_MR1_AUTO_RFR_LEVEL0; + data |= mask & (rfr_level << 2); + data |= MSM_UART_MR1_AUTO_RFR_LEVEL0 & rfr_level; + msm_write(port, data, MSM_UART_MR1); + + if (msm_port->is_uartdm) { + msm_request_tx_dma(msm_port, msm_port->uart.mapbase); + msm_request_rx_dma(msm_port, msm_port->uart.mapbase); + } + + ret = request_irq(port->irq, msm_uart_irq, IRQF_TRIGGER_HIGH, + msm_port->name, port); + if (unlikely(ret)) + goto err_irq; + + return 0; + +err_irq: + if (msm_port->is_uartdm) + msm_release_dma(msm_port); + + clk_disable_unprepare(msm_port->pclk); + clk_disable_unprepare(msm_port->clk); + + return ret; +} + +static void msm_shutdown(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + + msm_port->imr = 0; + msm_write(port, 0, MSM_UART_IMR); /* disable interrupts */ + + if (msm_port->is_uartdm) + msm_release_dma(msm_port); + + clk_disable_unprepare(msm_port->clk); + + free_irq(port->irq, port); +} + +static void msm_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct msm_port *msm_port = to_msm_port(port); + struct msm_dma *dma = &msm_port->rx_dma; + unsigned long flags; + unsigned int baud, mr; + + spin_lock_irqsave(&port->lock, flags); + + if (dma->chan) /* Terminate if any */ + msm_stop_dma(port, dma); + + /* calculate and set baud rate */ + baud = uart_get_baud_rate(port, termios, old, 300, 4000000); + baud = msm_set_baud_rate(port, baud, &flags); + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + /* calculate parity */ + mr = msm_read(port, MSM_UART_MR2); + mr &= ~MSM_UART_MR2_PARITY_MODE; + if (termios->c_cflag & PARENB) { + if (termios->c_cflag & PARODD) + mr |= MSM_UART_MR2_PARITY_MODE_ODD; + else if (termios->c_cflag & CMSPAR) + mr |= MSM_UART_MR2_PARITY_MODE_SPACE; + else + mr |= MSM_UART_MR2_PARITY_MODE_EVEN; + } + + /* calculate bits per char */ + mr &= ~MSM_UART_MR2_BITS_PER_CHAR; + switch (termios->c_cflag & CSIZE) { + case CS5: + mr |= MSM_UART_MR2_BITS_PER_CHAR_5; + break; + case CS6: + mr |= MSM_UART_MR2_BITS_PER_CHAR_6; + break; + case CS7: + mr |= MSM_UART_MR2_BITS_PER_CHAR_7; + break; + case CS8: + default: + mr |= MSM_UART_MR2_BITS_PER_CHAR_8; + break; + } + + /* calculate stop bits */ + mr &= ~(MSM_UART_MR2_STOP_BIT_LEN_ONE | MSM_UART_MR2_STOP_BIT_LEN_TWO); + if (termios->c_cflag & CSTOPB) + mr |= MSM_UART_MR2_STOP_BIT_LEN_TWO; + else + mr |= MSM_UART_MR2_STOP_BIT_LEN_ONE; + + /* set parity, bits per char, and stop bit */ + msm_write(port, mr, MSM_UART_MR2); + + /* calculate and set hardware flow control */ + mr = msm_read(port, MSM_UART_MR1); + mr &= ~(MSM_UART_MR1_CTS_CTL | MSM_UART_MR1_RX_RDY_CTL); + if (termios->c_cflag & CRTSCTS) { + mr |= MSM_UART_MR1_CTS_CTL; + mr |= MSM_UART_MR1_RX_RDY_CTL; + } + msm_write(port, mr, MSM_UART_MR1); + + /* Configure status bits to ignore based on termio flags. 
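+ * Bits left out of read_status_mask cause the RX paths to downgrade
+ * the corresponding flag back to TTY_NORMAL instead of reporting
+ * TTY_BREAK or TTY_FRAME.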
*/ + port->read_status_mask = 0; + if (termios->c_iflag & INPCK) + port->read_status_mask |= MSM_UART_SR_PAR_FRAME_ERR; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= MSM_UART_SR_RX_BREAK; + + uart_update_timeout(port, termios->c_cflag, baud); + + /* Try to use DMA */ + msm_start_rx_dma(msm_port); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *msm_type(struct uart_port *port) +{ + return "MSM"; +} + +static void msm_release_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *uart_resource; + resource_size_t size; + + uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!uart_resource)) + return; + size = resource_size(uart_resource); + + release_mem_region(port->mapbase, size); + iounmap(port->membase); + port->membase = NULL; +} + +static int msm_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *uart_resource; + resource_size_t size; + int ret; + + uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!uart_resource)) + return -ENXIO; + + size = resource_size(uart_resource); + + if (!request_mem_region(port->mapbase, size, "msm_serial")) + return -EBUSY; + + port->membase = ioremap(port->mapbase, size); + if (!port->membase) { + ret = -EBUSY; + goto fail_release_port; + } + + return 0; + +fail_release_port: + release_mem_region(port->mapbase, size); + return ret; +} + +static void msm_config_port(struct uart_port *port, int flags) +{ + int ret; + + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_MSM; + ret = msm_request_port(port); + if (ret) + return; + } +} + +static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM)) + return -EINVAL; + if (unlikely(port->irq != ser->irq)) + return -EINVAL; + return 0; +} + +static void msm_power(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct msm_port *msm_port = to_msm_port(port); + + switch (state) { + case 0: + clk_prepare_enable(msm_port->clk); + clk_prepare_enable(msm_port->pclk); + break; + case 3: + clk_disable_unprepare(msm_port->clk); + clk_disable_unprepare(msm_port->pclk); + break; + default: + pr_err("msm_serial: Unknown PM state %d\n", state); + } +} + +#ifdef CONFIG_CONSOLE_POLL +static int msm_poll_get_char_single(struct uart_port *port) +{ + struct msm_port *msm_port = to_msm_port(port); + unsigned int rf_reg = msm_port->is_uartdm ? 
UARTDM_RF : MSM_UART_RF; + + if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY)) + return NO_POLL_CHAR; + + return msm_read(port, rf_reg) & 0xff; +} + +static int msm_poll_get_char_dm(struct uart_port *port) +{ + int c; + static u32 slop; + static int count; + unsigned char *sp = (unsigned char *)&slop; + + /* Check if a previous read had more than one char */ + if (count) { + c = sp[sizeof(slop) - count]; + count--; + /* Or if FIFO is empty */ + } else if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY)) { + /* + * If RX packing buffer has less than a word, force stale to + * push contents into RX FIFO + */ + count = msm_read(port, UARTDM_RXFS); + count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK; + if (count) { + msm_write(port, MSM_UART_CR_CMD_FORCE_STALE, MSM_UART_CR); + slop = msm_read(port, UARTDM_RF); + c = sp[0]; + count--; + msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR); + msm_write(port, 0xFFFFFF, UARTDM_DMRX); + msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR); + } else { + c = NO_POLL_CHAR; + } + /* FIFO has a word */ + } else { + slop = msm_read(port, UARTDM_RF); + c = sp[0]; + count = sizeof(slop) - 1; + } + + return c; +} + +static int msm_poll_get_char(struct uart_port *port) +{ + u32 imr; + int c; + struct msm_port *msm_port = to_msm_port(port); + + /* Disable all interrupts */ + imr = msm_read(port, MSM_UART_IMR); + msm_write(port, 0, MSM_UART_IMR); + + if (msm_port->is_uartdm) + c = msm_poll_get_char_dm(port); + else + c = msm_poll_get_char_single(port); + + /* Enable interrupts */ + msm_write(port, imr, MSM_UART_IMR); + + return c; +} + +static void msm_poll_put_char(struct uart_port *port, unsigned char c) +{ + u32 imr; + struct msm_port *msm_port = to_msm_port(port); + + /* Disable all interrupts */ + imr = msm_read(port, MSM_UART_IMR); + msm_write(port, 0, MSM_UART_IMR); + + if (msm_port->is_uartdm) + msm_reset_dm_count(port, 1); + + /* Wait until FIFO is empty */ + while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY)) + cpu_relax(); + + /* Write a character */ + msm_write(port, c, msm_port->is_uartdm ? 
UARTDM_TF : MSM_UART_TF); + + /* Wait until FIFO is empty */ + while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY)) + cpu_relax(); + + /* Enable interrupts */ + msm_write(port, imr, MSM_UART_IMR); +} +#endif + +static const struct uart_ops msm_uart_pops = { + .tx_empty = msm_tx_empty, + .set_mctrl = msm_set_mctrl, + .get_mctrl = msm_get_mctrl, + .stop_tx = msm_stop_tx, + .start_tx = msm_start_tx, + .stop_rx = msm_stop_rx, + .enable_ms = msm_enable_ms, + .break_ctl = msm_break_ctl, + .startup = msm_startup, + .shutdown = msm_shutdown, + .set_termios = msm_set_termios, + .type = msm_type, + .release_port = msm_release_port, + .request_port = msm_request_port, + .config_port = msm_config_port, + .verify_port = msm_verify_port, + .pm = msm_power, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = msm_poll_get_char, + .poll_put_char = msm_poll_put_char, +#endif +}; + +static struct msm_port msm_uart_ports[] = { + { + .uart = { + .iotype = UPIO_MEM, + .ops = &msm_uart_pops, + .flags = UPF_BOOT_AUTOCONF, + .fifosize = 64, + .line = 0, + }, + }, + { + .uart = { + .iotype = UPIO_MEM, + .ops = &msm_uart_pops, + .flags = UPF_BOOT_AUTOCONF, + .fifosize = 64, + .line = 1, + }, + }, + { + .uart = { + .iotype = UPIO_MEM, + .ops = &msm_uart_pops, + .flags = UPF_BOOT_AUTOCONF, + .fifosize = 64, + .line = 2, + }, + }, +}; + +#define MSM_UART_NR ARRAY_SIZE(msm_uart_ports) + +static inline struct uart_port *msm_get_port_from_line(unsigned int line) +{ + return &msm_uart_ports[line].uart; +} + +#ifdef CONFIG_SERIAL_MSM_CONSOLE +static void __msm_console_write(struct uart_port *port, const char *s, + unsigned int count, bool is_uartdm) +{ + unsigned long flags; + int i; + int num_newlines = 0; + bool replaced = false; + void __iomem *tf; + int locked = 1; + + if (is_uartdm) + tf = port->membase + UARTDM_TF; + else + tf = port->membase + MSM_UART_TF; + + /* Account for newlines that will get a carriage return added */ + for (i = 0; i < count; i++) + if (s[i] == '\n') + num_newlines++; + count += num_newlines; + + local_irq_save(flags); + + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&port->lock); + else + spin_lock(&port->lock); + + if (is_uartdm) + msm_reset_dm_count(port, count); + + i = 0; + while (i < count) { + int j; + unsigned int num_chars; + char buf[4] = { 0 }; + + if (is_uartdm) + num_chars = min(count - i, (unsigned int)sizeof(buf)); + else + num_chars = 1; + + for (j = 0; j < num_chars; j++) { + char c = *s; + + if (c == '\n' && !replaced) { + buf[j] = '\r'; + j++; + replaced = true; + } + if (j < num_chars) { + buf[j] = c; + s++; + replaced = false; + } + } + + while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY)) + cpu_relax(); + + iowrite32_rep(tf, buf, 1); + i += num_chars; + } + + if (locked) + spin_unlock(&port->lock); + + local_irq_restore(flags); +} + +static void msm_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port; + struct msm_port *msm_port; + + BUG_ON(co->index < 0 || co->index >= MSM_UART_NR); + + port = msm_get_port_from_line(co->index); + msm_port = to_msm_port(port); + + __msm_console_write(port, s, count, msm_port->is_uartdm); +} + +static int msm_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (unlikely(co->index >= MSM_UART_NR || co->index < 0)) + return -ENXIO; + + port = msm_get_port_from_line(co->index); + + if (unlikely(!port->membase)) + return -ENXIO; + + 
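+ /*
+ * The port must already have been probed and mapped (membase set)
+ * before it can act as console. From here on the clocks are enabled
+ * and the line settings are taken from the options string, with
+ * 115200n8 as the default.
+ */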
msm_init_clock(port); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + pr_info("msm_serial: console setup on port #%d\n", port->line); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static void +msm_serial_early_write(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + + __msm_console_write(&dev->port, s, n, false); +} + +static int __init +msm_serial_early_console_setup(struct earlycon_device *device, const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = msm_serial_early_write; + return 0; +} +OF_EARLYCON_DECLARE(msm_serial, "qcom,msm-uart", + msm_serial_early_console_setup); + +static void +msm_serial_early_write_dm(struct console *con, const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + + __msm_console_write(&dev->port, s, n, true); +} + +static int __init +msm_serial_early_console_setup_dm(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = msm_serial_early_write_dm; + return 0; +} +OF_EARLYCON_DECLARE(msm_serial_dm, "qcom,msm-uartdm", + msm_serial_early_console_setup_dm); + +static struct uart_driver msm_uart_driver; + +static struct console msm_console = { + .name = "ttyMSM", + .write = msm_console_write, + .device = uart_console_device, + .setup = msm_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &msm_uart_driver, +}; + +#define MSM_CONSOLE (&msm_console) + +#else +#define MSM_CONSOLE NULL +#endif + +static struct uart_driver msm_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "msm_serial", + .dev_name = "ttyMSM", + .nr = MSM_UART_NR, + .cons = MSM_CONSOLE, +}; + +static atomic_t msm_uart_next_id = ATOMIC_INIT(0); + +static const struct of_device_id msm_uartdm_table[] = { + { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 }, + { .compatible = "qcom,msm-uartdm-v1.2", .data = (void *)UARTDM_1P2 }, + { .compatible = "qcom,msm-uartdm-v1.3", .data = (void *)UARTDM_1P3 }, + { .compatible = "qcom,msm-uartdm-v1.4", .data = (void *)UARTDM_1P4 }, + { } +}; + +static int msm_serial_probe(struct platform_device *pdev) +{ + struct msm_port *msm_port; + struct resource *resource; + struct uart_port *port; + const struct of_device_id *id; + int irq, line; + + if (pdev->dev.of_node) + line = of_alias_get_id(pdev->dev.of_node, "serial"); + else + line = pdev->id; + + if (line < 0) + line = atomic_inc_return(&msm_uart_next_id) - 1; + + if (unlikely(line < 0 || line >= MSM_UART_NR)) + return -ENXIO; + + dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line); + + port = msm_get_port_from_line(line); + port->dev = &pdev->dev; + msm_port = to_msm_port(port); + + id = of_match_device(msm_uartdm_table, &pdev->dev); + if (id) + msm_port->is_uartdm = (unsigned long)id->data; + else + msm_port->is_uartdm = 0; + + msm_port->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(msm_port->clk)) + return PTR_ERR(msm_port->clk); + + if (msm_port->is_uartdm) { + msm_port->pclk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(msm_port->pclk)) + return PTR_ERR(msm_port->pclk); + } + + port->uartclk = clk_get_rate(msm_port->clk); + dev_info(&pdev->dev, "uartclk = %d\n", port->uartclk); + + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!resource)) + return -ENXIO; + port->mapbase = resource->start; + + irq = platform_get_irq(pdev, 0); + if (unlikely(irq < 0)) + return -ENXIO; + port->irq = irq; + 
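+ /* Magic SysRq recognition is tied to the console support option */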
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MSM_CONSOLE); + + platform_set_drvdata(pdev, port); + + return uart_add_one_port(&msm_uart_driver, port); +} + +static int msm_serial_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + + uart_remove_one_port(&msm_uart_driver, port); + + return 0; +} + +static const struct of_device_id msm_match_table[] = { + { .compatible = "qcom,msm-uart" }, + { .compatible = "qcom,msm-uartdm" }, + {} +}; +MODULE_DEVICE_TABLE(of, msm_match_table); + +static int __maybe_unused msm_serial_suspend(struct device *dev) +{ + struct msm_port *port = dev_get_drvdata(dev); + + uart_suspend_port(&msm_uart_driver, &port->uart); + + return 0; +} + +static int __maybe_unused msm_serial_resume(struct device *dev) +{ + struct msm_port *port = dev_get_drvdata(dev); + + uart_resume_port(&msm_uart_driver, &port->uart); + + return 0; +} + +static const struct dev_pm_ops msm_serial_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume) +}; + +static struct platform_driver msm_platform_driver = { + .remove = msm_serial_remove, + .probe = msm_serial_probe, + .driver = { + .name = "msm_serial", + .pm = &msm_serial_dev_pm_ops, + .of_match_table = msm_match_table, + }, +}; + +static int __init msm_serial_init(void) +{ + int ret; + + ret = uart_register_driver(&msm_uart_driver); + if (unlikely(ret)) + return ret; + + ret = platform_driver_register(&msm_platform_driver); + if (unlikely(ret)) + uart_unregister_driver(&msm_uart_driver); + + pr_info("msm_serial: driver initialized\n"); + + return ret; +} + +static void __exit msm_serial_exit(void) +{ + platform_driver_unregister(&msm_platform_driver); + uart_unregister_driver(&msm_uart_driver); +} + +module_init(msm_serial_init); +module_exit(msm_serial_exit); + +MODULE_AUTHOR("Robert Love "); +MODULE_DESCRIPTION("Driver for msm7x serial device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c new file mode 100644 index 000000000..ed0e763f6 --- /dev/null +++ b/drivers/tty/serial/mux.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* +** mux.c: +** serial driver for the Mux console found in some PA-RISC servers. +** +** (c) Copyright 2002 Ryan Bradetich +** (c) Copyright 2002 Hewlett-Packard Company +** +** This Driver currently only supports the console (port 0) on the MUX. +** Additional work will be needed on this driver to enable the full +** functionality of the MUX. 
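+** The driver does not use an interrupt line; every enabled port is
+** serviced from a periodic timer (mux_poll) that both drains the RX
+** FIFO and refills the TX FIFO every MUX_POLL_DELAY jiffies.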
+** +*/ + +#include +#include +#include +#include +#include +#include +#include +#include /* for udelay */ +#include +#include +#include +#include + +#include +#include + +#define MUX_OFFSET 0x800 +#define MUX_LINE_OFFSET 0x80 + +#define MUX_FIFO_SIZE 255 +#define MUX_POLL_DELAY (30 * HZ / 1000) + +#define IO_DATA_REG_OFFSET 0x3c +#define IO_DCOUNT_REG_OFFSET 0x40 + +#define MUX_EOFIFO(status) ((status & 0xF000) == 0xF000) +#define MUX_STATUS(status) ((status & 0xF000) == 0x8000) +#define MUX_BREAK(status) ((status & 0xF000) == 0x2000) + +#define MUX_NR 256 +static unsigned int port_cnt __read_mostly; +struct mux_port { + struct uart_port port; + int enabled; +}; +static struct mux_port mux_ports[MUX_NR]; + +static struct uart_driver mux_driver = { + .owner = THIS_MODULE, + .driver_name = "ttyB", + .dev_name = "ttyB", + .major = MUX_MAJOR, + .minor = 0, + .nr = MUX_NR, +}; + +static struct timer_list mux_timer; + +#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET) +#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET) + +/** + * get_mux_port_count - Get the number of available ports on the Mux. + * @dev: The parisc device. + * + * This function is used to determine the number of ports the Mux + * supports. The IODC data reports the number of ports the Mux + * can support, but there are cases where not all the Mux ports + * are connected. This function can override the IODC and + * return the true port count. + */ +static int __init get_mux_port_count(struct parisc_device *dev) +{ + int status; + u8 iodc_data[32]; + unsigned long bytecnt; + + /* If this is the built-in Mux for the K-Class (Eole CAP/MUX), + * we only need to allocate resources for 1 port since the + * other 7 ports are not connected. + */ + if(dev->id.hversion == 0x15) + return 1; + + status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32); + BUG_ON(status != PDC_OK); + + /* Return the number of ports specified in the iodc data. */ + return ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8; +} + +/** + * mux_tx_empty - Check if the transmitter fifo is empty. + * @port: Ptr to the uart_port. + * + * This function test if the transmitter fifo for the port + * described by 'port' is empty. If it is empty, this function + * should return TIOCSER_TEMT, otherwise return 0. + */ +static unsigned int mux_tx_empty(struct uart_port *port) +{ + return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT; +} + +/** + * mux_set_mctrl - Set the current state of the modem control inputs. + * @ports: Ptr to the uart_port. + * @mctrl: Modem control bits. + * + * The Serial MUX does not support CTS, DCD or DSR so this function + * is ignored. + */ +static void mux_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +/** + * mux_get_mctrl - Returns the current state of modem control inputs. + * @port: Ptr to the uart_port. + * + * The Serial MUX does not support CTS, DCD or DSR so these lines are + * treated as permanently active. + */ +static unsigned int mux_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +/** + * mux_stop_tx - Stop transmitting characters. + * @port: Ptr to the uart_port. + * + * The Serial MUX does not support this function. + */ +static void mux_stop_tx(struct uart_port *port) +{ +} + +/** + * mux_start_tx - Start transmitting characters. + * @port: Ptr to the uart_port. + * + * The Serial Mux does not support this function. 
+ */ +static void mux_start_tx(struct uart_port *port) +{ +} + +/** + * mux_stop_rx - Stop receiving characters. + * @port: Ptr to the uart_port. + * + * The Serial Mux does not support this function. + */ +static void mux_stop_rx(struct uart_port *port) +{ +} + +/** + * mux_break_ctl - Control the transmitssion of a break signal. + * @port: Ptr to the uart_port. + * @break_state: Raise/Lower the break signal. + * + * The Serial Mux does not support this function. + */ +static void mux_break_ctl(struct uart_port *port, int break_state) +{ +} + +/** + * mux_write - Write chars to the mux fifo. + * @port: Ptr to the uart_port. + * + * This function writes all the data from the uart buffer to + * the mux fifo. + */ +static void mux_write(struct uart_port *port) +{ + int count; + struct circ_buf *xmit = &port->state->xmit; + + if(port->x_char) { + UART_PUT_CHAR(port, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if(uart_circ_empty(xmit) || uart_tx_stopped(port)) { + mux_stop_tx(port); + return; + } + + count = (port->fifosize) - UART_GET_FIFO_CNT(port); + do { + UART_PUT_CHAR(port, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if(uart_circ_empty(xmit)) + break; + + } while(--count > 0); + + while(UART_GET_FIFO_CNT(port)) + udelay(1); + + if(uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + mux_stop_tx(port); +} + +/** + * mux_read - Read chars from the mux fifo. + * @port: Ptr to the uart_port. + * + * This reads all available data from the mux's fifo and pushes + * the data to the tty layer. + */ +static void mux_read(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + int data; + __u32 start_count = port->icount.rx; + + while(1) { + data = __raw_readl(port->membase + IO_DATA_REG_OFFSET); + + if (MUX_STATUS(data)) + continue; + + if (MUX_EOFIFO(data)) + break; + + port->icount.rx++; + + if (MUX_BREAK(data)) { + port->icount.brk++; + if(uart_handle_break(port)) + continue; + } + + if (uart_handle_sysrq_char(port, data & 0xffu)) + continue; + + tty_insert_flip_char(tport, data & 0xFF, TTY_NORMAL); + } + + if (start_count != port->icount.rx) + tty_flip_buffer_push(tport); +} + +/** + * mux_startup - Initialize the port. + * @port: Ptr to the uart_port. + * + * Grab any resources needed for this port and start the + * mux timer. + */ +static int mux_startup(struct uart_port *port) +{ + mux_ports[port->line].enabled = 1; + return 0; +} + +/** + * mux_shutdown - Disable the port. + * @port: Ptr to the uart_port. + * + * Release any resources needed for the port. + */ +static void mux_shutdown(struct uart_port *port) +{ + mux_ports[port->line].enabled = 0; +} + +/** + * mux_set_termios - Chane port parameters. + * @port: Ptr to the uart_port. + * @termios: new termios settings. + * @old: old termios settings. + * + * The Serial Mux does not support this function. + */ +static void +mux_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ +} + +/** + * mux_type - Describe the port. + * @port: Ptr to the uart_port. + * + * Return a pointer to a string constant describing the + * specified port. + */ +static const char *mux_type(struct uart_port *port) +{ + return "Mux"; +} + +/** + * mux_release_port - Release memory and IO regions. + * @port: Ptr to the uart_port. + * + * Release any memory and IO region resources currently in use by + * the port. 
+ */ +static void mux_release_port(struct uart_port *port) +{ +} + +/** + * mux_request_port - Request memory and IO regions. + * @port: Ptr to the uart_port. + * + * Request any memory and IO region resources required by the port. + * If any fail, no resources should be registered when this function + * returns, and it should return -EBUSY on failure. + */ +static int mux_request_port(struct uart_port *port) +{ + return 0; +} + +/** + * mux_config_port - Perform port autoconfiguration. + * @port: Ptr to the uart_port. + * @type: Bitmask of required configurations. + * + * Perform any autoconfiguration steps for the port. This function is + * called if the UPF_BOOT_AUTOCONF flag is specified for the port. + * [Note: This is required for now because of a bug in the Serial core. + * rmk has already submitted a patch to linus, should be available for + * 2.5.47.] + */ +static void mux_config_port(struct uart_port *port, int type) +{ + port->type = PORT_MUX; +} + +/** + * mux_verify_port - Verify the port information. + * @port: Ptr to the uart_port. + * @ser: Ptr to the serial information. + * + * Verify the new serial port information contained within serinfo is + * suitable for this port type. + */ +static int mux_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if(port->membase == NULL) + return -EINVAL; + + return 0; +} + +/** + * mux_drv_poll - Mux poll function. + * @unused: Unused variable + * + * This function periodically polls the Serial MUX to check for new data. + */ +static void mux_poll(struct timer_list *unused) +{ + int i; + + for(i = 0; i < port_cnt; ++i) { + if(!mux_ports[i].enabled) + continue; + + mux_read(&mux_ports[i].port); + mux_write(&mux_ports[i].port); + } + + mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY); +} + + +#ifdef CONFIG_SERIAL_MUX_CONSOLE +static void mux_console_write(struct console *co, const char *s, unsigned count) +{ + /* Wait until the FIFO drains. */ + while(UART_GET_FIFO_CNT(&mux_ports[0].port)) + udelay(1); + + while(count--) { + if(*s == '\n') { + UART_PUT_CHAR(&mux_ports[0].port, '\r'); + } + UART_PUT_CHAR(&mux_ports[0].port, *s++); + } + +} + +static int mux_console_setup(struct console *co, char *options) +{ + return 0; +} + +static struct console mux_console = { + .name = "ttyB", + .write = mux_console_write, + .device = uart_console_device, + .setup = mux_console_setup, + .flags = CON_ENABLED | CON_PRINTBUFFER, + .index = 0, + .data = &mux_driver, +}; + +#define MUX_CONSOLE &mux_console +#else +#define MUX_CONSOLE NULL +#endif + +static const struct uart_ops mux_pops = { + .tx_empty = mux_tx_empty, + .set_mctrl = mux_set_mctrl, + .get_mctrl = mux_get_mctrl, + .stop_tx = mux_stop_tx, + .start_tx = mux_start_tx, + .stop_rx = mux_stop_rx, + .break_ctl = mux_break_ctl, + .startup = mux_startup, + .shutdown = mux_shutdown, + .set_termios = mux_set_termios, + .type = mux_type, + .release_port = mux_release_port, + .request_port = mux_request_port, + .config_port = mux_config_port, + .verify_port = mux_verify_port, +}; + +/** + * mux_probe - Determine if the Serial Mux should claim this device. + * @dev: The parisc device. + * + * Deterimine if the Serial Mux should claim this chip (return 0) + * or not (return 1). 
+ */ +static int __init mux_probe(struct parisc_device *dev) +{ + int i, status; + + int port_count = get_mux_port_count(dev); + printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.6\n", port_count); + + dev_set_drvdata(&dev->dev, (void *)(long)port_count); + request_mem_region(dev->hpa.start + MUX_OFFSET, + port_count * MUX_LINE_OFFSET, "Mux"); + + if(!port_cnt) { + mux_driver.cons = MUX_CONSOLE; + + status = uart_register_driver(&mux_driver); + if(status) { + printk(KERN_ERR "Serial mux: Unable to register driver.\n"); + return 1; + } + } + + for(i = 0; i < port_count; ++i, ++port_cnt) { + struct uart_port *port = &mux_ports[port_cnt].port; + port->iobase = 0; + port->mapbase = dev->hpa.start + MUX_OFFSET + + (i * MUX_LINE_OFFSET); + port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET); + port->iotype = UPIO_MEM; + port->type = PORT_MUX; + port->irq = 0; + port->uartclk = 0; + port->fifosize = MUX_FIFO_SIZE; + port->ops = &mux_pops; + port->flags = UPF_BOOT_AUTOCONF; + port->line = port_cnt; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE); + + spin_lock_init(&port->lock); + + status = uart_add_one_port(&mux_driver, port); + BUG_ON(status); + } + + return 0; +} + +static void __exit mux_remove(struct parisc_device *dev) +{ + int i, j; + int port_count = (long)dev_get_drvdata(&dev->dev); + + /* Find Port 0 for this card in the mux_ports list. */ + for(i = 0; i < port_cnt; ++i) { + if(mux_ports[i].port.mapbase == dev->hpa.start + MUX_OFFSET) + break; + } + BUG_ON(i + port_count > port_cnt); + + /* Release the resources associated with each port on the device. */ + for(j = 0; j < port_count; ++j, ++i) { + struct uart_port *port = &mux_ports[i].port; + + uart_remove_one_port(&mux_driver, port); + if(port->membase) + iounmap(port->membase); + } + + release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET); +} + +/* Hack. This idea was taken from the 8250_gsc.c on how to properly order + * the serial port detection in the proper order. The idea is we always + * want the builtin mux to be detected before addin mux cards, so we + * specifically probe for the builtin mux cards first. + * + * This table only contains the parisc_device_id of known builtin mux + * devices. All other mux cards will be detected by the generic mux_tbl. + */ +static const struct parisc_device_id builtin_mux_tbl[] __initconst = { + { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x15, 0x0000D }, /* All K-class */ + { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x44, 0x0000D }, /* E35, E45, and E55 */ + { 0, } +}; + +static const struct parisc_device_id mux_tbl[] __initconst = { + { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D }, + { 0, } +}; + +MODULE_DEVICE_TABLE(parisc, builtin_mux_tbl); +MODULE_DEVICE_TABLE(parisc, mux_tbl); + +static struct parisc_driver builtin_serial_mux_driver __refdata = { + .name = "builtin_serial_mux", + .id_table = builtin_mux_tbl, + .probe = mux_probe, + .remove = __exit_p(mux_remove), +}; + +static struct parisc_driver serial_mux_driver __refdata = { + .name = "serial_mux", + .id_table = mux_tbl, + .probe = mux_probe, + .remove = __exit_p(mux_remove), +}; + +/** + * mux_init - Serial MUX initialization procedure. + * + * Register the Serial MUX driver. 
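+ * Port discovery is expected to happen while register_parisc_driver()
+ * runs, which is why port_cnt can be checked immediately afterwards to
+ * decide whether the polling timer and, optionally, the console need
+ * to be started.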
+ */ +static int __init mux_init(void) +{ + register_parisc_driver(&builtin_serial_mux_driver); + register_parisc_driver(&serial_mux_driver); + + if(port_cnt > 0) { + /* Start the Mux timer */ + timer_setup(&mux_timer, mux_poll, 0); + mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY); + +#ifdef CONFIG_SERIAL_MUX_CONSOLE + register_console(&mux_console); +#endif + } + + return 0; +} + +/** + * mux_exit - Serial MUX cleanup procedure. + * + * Unregister the Serial MUX driver from the tty layer. + */ +static void __exit mux_exit(void) +{ + /* Delete the Mux timer. */ + if(port_cnt > 0) { + del_timer_sync(&mux_timer); +#ifdef CONFIG_SERIAL_MUX_CONSOLE + unregister_console(&mux_console); +#endif + } + + unregister_parisc_driver(&builtin_serial_mux_driver); + unregister_parisc_driver(&serial_mux_driver); + uart_unregister_driver(&mux_driver); +} + +module_init(mux_init); +module_exit(mux_exit); + +MODULE_AUTHOR("Ryan Bradetich"); +MODULE_DESCRIPTION("Serial MUX driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(MUX_MAJOR); diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c new file mode 100644 index 000000000..ba16e1da6 --- /dev/null +++ b/drivers/tty/serial/mvebu-uart.c @@ -0,0 +1,1565 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* +* *************************************************************************** +* Marvell Armada-3700 Serial Driver +* Author: Wilson Ding +* Copyright (C) 2015 Marvell International Ltd. +* *************************************************************************** +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register Map */ +#define UART_STD_RBR 0x00 +#define UART_EXT_RBR 0x18 + +#define UART_STD_TSH 0x04 +#define UART_EXT_TSH 0x1C + +#define UART_STD_CTRL1 0x08 +#define UART_EXT_CTRL1 0x04 +#define CTRL_SOFT_RST BIT(31) +#define CTRL_TXFIFO_RST BIT(15) +#define CTRL_RXFIFO_RST BIT(14) +#define CTRL_SND_BRK_SEQ BIT(11) +#define CTRL_BRK_DET_INT BIT(3) +#define CTRL_FRM_ERR_INT BIT(2) +#define CTRL_PAR_ERR_INT BIT(1) +#define CTRL_OVR_ERR_INT BIT(0) +#define CTRL_BRK_INT (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \ + CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT) + +#define UART_STD_CTRL2 UART_STD_CTRL1 +#define UART_EXT_CTRL2 0x20 +#define CTRL_STD_TX_RDY_INT BIT(5) +#define CTRL_EXT_TX_RDY_INT BIT(6) +#define CTRL_STD_RX_RDY_INT BIT(4) +#define CTRL_EXT_RX_RDY_INT BIT(5) + +#define UART_STAT 0x0C +#define STAT_TX_FIFO_EMP BIT(13) +#define STAT_TX_FIFO_FUL BIT(11) +#define STAT_TX_EMP BIT(6) +#define STAT_STD_TX_RDY BIT(5) +#define STAT_EXT_TX_RDY BIT(15) +#define STAT_STD_RX_RDY BIT(4) +#define STAT_EXT_RX_RDY BIT(14) +#define STAT_BRK_DET BIT(3) +#define STAT_FRM_ERR BIT(2) +#define STAT_PAR_ERR BIT(1) +#define STAT_OVR_ERR BIT(0) +#define STAT_BRK_ERR (STAT_BRK_DET | STAT_FRM_ERR \ + | STAT_PAR_ERR | STAT_OVR_ERR) + +/* + * Marvell Armada 3700 Functional Specifications describes that bit 21 of UART + * Clock Control register controls UART1 and bit 20 controls UART2. But in + * reality bit 21 controls UART2 and bit 20 controls UART1. This seems to be an + * error in Marvell's documentation. Hence following CLK_DIS macros are swapped. 
+ */ + +#define UART_BRDV 0x10 +/* These bits are located in UART1 address space and control UART2 */ +#define UART2_CLK_DIS BIT(21) +/* These bits are located in UART1 address space and control UART1 */ +#define UART1_CLK_DIS BIT(20) +/* These bits are located in UART1 address space and control both UARTs */ +#define CLK_NO_XTAL BIT(19) +#define CLK_TBG_DIV1_SHIFT 15 +#define CLK_TBG_DIV1_MASK 0x7 +#define CLK_TBG_DIV1_MAX 6 +#define CLK_TBG_DIV2_SHIFT 12 +#define CLK_TBG_DIV2_MASK 0x7 +#define CLK_TBG_DIV2_MAX 6 +#define CLK_TBG_SEL_SHIFT 10 +#define CLK_TBG_SEL_MASK 0x3 +/* These bits are located in both UARTs address space */ +#define BRDV_BAUD_MASK 0x3FF +#define BRDV_BAUD_MAX BRDV_BAUD_MASK + +#define UART_OSAMP 0x14 +#define OSAMP_DEFAULT_DIVISOR 16 +#define OSAMP_DIVISORS_MASK 0x3F3F3F3F +#define OSAMP_MAX_DIVISOR 63 + +#define MVEBU_NR_UARTS 2 + +#define MVEBU_UART_TYPE "mvebu-uart" +#define DRIVER_NAME "mvebu_serial" + +enum { + /* Either there is only one summed IRQ... */ + UART_IRQ_SUM = 0, + /* ...or there are two separate IRQ for RX and TX */ + UART_RX_IRQ = 0, + UART_TX_IRQ, + UART_IRQ_COUNT +}; + +/* Diverging register offsets */ +struct uart_regs_layout { + unsigned int rbr; + unsigned int tsh; + unsigned int ctrl; + unsigned int intr; +}; + +/* Diverging flags */ +struct uart_flags { + unsigned int ctrl_tx_rdy_int; + unsigned int ctrl_rx_rdy_int; + unsigned int stat_tx_rdy; + unsigned int stat_rx_rdy; +}; + +/* Driver data, a structure for each UART port */ +struct mvebu_uart_driver_data { + bool is_ext; + struct uart_regs_layout regs; + struct uart_flags flags; +}; + +/* Saved registers during suspend */ +struct mvebu_uart_pm_regs { + unsigned int rbr; + unsigned int tsh; + unsigned int ctrl; + unsigned int intr; + unsigned int stat; + unsigned int brdv; + unsigned int osamp; +}; + +/* MVEBU UART driver structure */ +struct mvebu_uart { + struct uart_port *port; + struct clk *clk; + int irq[UART_IRQ_COUNT]; + struct mvebu_uart_driver_data *data; +#if defined(CONFIG_PM) + struct mvebu_uart_pm_regs pm_regs; +#endif /* CONFIG_PM */ +}; + +static struct mvebu_uart *to_mvuart(struct uart_port *port) +{ + return (struct mvebu_uart *)port->private_data; +} + +#define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext) + +#define UART_RBR(port) (to_mvuart(port)->data->regs.rbr) +#define UART_TSH(port) (to_mvuart(port)->data->regs.tsh) +#define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl) +#define UART_INTR(port) (to_mvuart(port)->data->regs.intr) + +#define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int) +#define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int) +#define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy) +#define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy) + +static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS]; + +static DEFINE_SPINLOCK(mvebu_uart_lock); + +/* Core UART Driver Operations */ +static unsigned int mvebu_uart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int st; + + spin_lock_irqsave(&port->lock, flags); + st = readl(port->membase + UART_STAT); + spin_unlock_irqrestore(&port->lock, flags); + + return (st & STAT_TX_EMP) ? 
TIOCSER_TEMT : 0; +} + +static unsigned int mvebu_uart_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +} + +static void mvebu_uart_set_mctrl(struct uart_port *port, + unsigned int mctrl) +{ +/* + * Even if we do not support configuring the modem control lines, this + * function must be proided to the serial core + */ +} + +static void mvebu_uart_stop_tx(struct uart_port *port) +{ + unsigned int ctl = readl(port->membase + UART_INTR(port)); + + ctl &= ~CTRL_TX_RDY_INT(port); + writel(ctl, port->membase + UART_INTR(port)); +} + +static void mvebu_uart_start_tx(struct uart_port *port) +{ + unsigned int ctl; + struct circ_buf *xmit = &port->state->xmit; + + if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) { + writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port)); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + ctl = readl(port->membase + UART_INTR(port)); + ctl |= CTRL_TX_RDY_INT(port); + writel(ctl, port->membase + UART_INTR(port)); +} + +static void mvebu_uart_stop_rx(struct uart_port *port) +{ + unsigned int ctl; + + ctl = readl(port->membase + UART_CTRL(port)); + ctl &= ~CTRL_BRK_INT; + writel(ctl, port->membase + UART_CTRL(port)); + + ctl = readl(port->membase + UART_INTR(port)); + ctl &= ~CTRL_RX_RDY_INT(port); + writel(ctl, port->membase + UART_INTR(port)); +} + +static void mvebu_uart_break_ctl(struct uart_port *port, int brk) +{ + unsigned int ctl; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + ctl = readl(port->membase + UART_CTRL(port)); + if (brk == -1) + ctl |= CTRL_SND_BRK_SEQ; + else + ctl &= ~CTRL_SND_BRK_SEQ; + writel(ctl, port->membase + UART_CTRL(port)); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status) +{ + struct tty_port *tport = &port->state->port; + unsigned char ch = 0; + char flag = 0; + int ret; + + do { + if (status & STAT_RX_RDY(port)) { + ch = readl(port->membase + UART_RBR(port)); + ch &= 0xff; + flag = TTY_NORMAL; + port->icount.rx++; + + if (status & STAT_PAR_ERR) + port->icount.parity++; + } + + /* + * For UART2, error bits are not cleared on buffer read. + * This causes interrupt loop and system hang. 
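+ * The handler therefore reads UART_STAT, sets the STAT_BRK_ERR bits
+ * and writes the value back, acknowledging the error explicitly on
+ * the extended (UART2) port.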
+ */ + if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) { + ret = readl(port->membase + UART_STAT); + ret |= STAT_BRK_ERR; + writel(ret, port->membase + UART_STAT); + } + + if (status & STAT_BRK_DET) { + port->icount.brk++; + status &= ~(STAT_FRM_ERR | STAT_PAR_ERR); + if (uart_handle_break(port)) + goto ignore_char; + } + + if (status & STAT_OVR_ERR) + port->icount.overrun++; + + if (status & STAT_FRM_ERR) + port->icount.frame++; + + if (uart_handle_sysrq_char(port, ch)) + goto ignore_char; + + if (status & port->ignore_status_mask & STAT_PAR_ERR) + status &= ~STAT_RX_RDY(port); + + status &= port->read_status_mask; + + if (status & STAT_PAR_ERR) + flag = TTY_PARITY; + + status &= ~port->ignore_status_mask; + + if (status & STAT_RX_RDY(port)) + tty_insert_flip_char(tport, ch, flag); + + if (status & STAT_BRK_DET) + tty_insert_flip_char(tport, 0, TTY_BREAK); + + if (status & STAT_FRM_ERR) + tty_insert_flip_char(tport, 0, TTY_FRAME); + + if (status & STAT_OVR_ERR) + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + +ignore_char: + status = readl(port->membase + UART_STAT); + } while (status & (STAT_RX_RDY(port) | STAT_BRK_DET)); + + tty_flip_buffer_push(tport); +} + +static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned int count; + unsigned int st; + + if (port->x_char) { + writel(port->x_char, port->membase + UART_TSH(port)); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + mvebu_uart_stop_tx(port); + return; + } + + for (count = 0; count < port->fifosize; count++) { + writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port)); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + + if (uart_circ_empty(xmit)) + break; + + st = readl(port->membase + UART_STAT); + if (st & STAT_TX_FIFO_FUL) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + mvebu_uart_stop_tx(port); +} + +static irqreturn_t mvebu_uart_isr(int irq, void *dev_id) +{ + struct uart_port *port = (struct uart_port *)dev_id; + unsigned int st = readl(port->membase + UART_STAT); + + if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR | + STAT_BRK_DET)) + mvebu_uart_rx_chars(port, st); + + if (st & STAT_TX_RDY(port)) + mvebu_uart_tx_chars(port, st); + + return IRQ_HANDLED; +} + +static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id) +{ + struct uart_port *port = (struct uart_port *)dev_id; + unsigned int st = readl(port->membase + UART_STAT); + + if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR | + STAT_BRK_DET)) + mvebu_uart_rx_chars(port, st); + + return IRQ_HANDLED; +} + +static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id) +{ + struct uart_port *port = (struct uart_port *)dev_id; + unsigned int st = readl(port->membase + UART_STAT); + + if (st & STAT_TX_RDY(port)) + mvebu_uart_tx_chars(port, st); + + return IRQ_HANDLED; +} + +static int mvebu_uart_startup(struct uart_port *port) +{ + struct mvebu_uart *mvuart = to_mvuart(port); + unsigned int ctl; + int ret; + + writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST, + port->membase + UART_CTRL(port)); + udelay(1); + + /* Clear the error bits of state register before IRQ request */ + ret = readl(port->membase + UART_STAT); + ret |= STAT_BRK_ERR; + writel(ret, port->membase + UART_STAT); + + writel(CTRL_BRK_INT, port->membase + UART_CTRL(port)); + + ctl = readl(port->membase + UART_INTR(port)); + ctl |= 
CTRL_RX_RDY_INT(port); + writel(ctl, port->membase + UART_INTR(port)); + + if (!mvuart->irq[UART_TX_IRQ]) { + /* Old bindings with just one interrupt (UART0 only) */ + ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM], + mvebu_uart_isr, port->irqflags, + dev_name(port->dev), port); + if (ret) { + dev_err(port->dev, "unable to request IRQ %d\n", + mvuart->irq[UART_IRQ_SUM]); + return ret; + } + } else { + /* New bindings with an IRQ for RX and TX (both UART) */ + ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ], + mvebu_uart_rx_isr, port->irqflags, + dev_name(port->dev), port); + if (ret) { + dev_err(port->dev, "unable to request IRQ %d\n", + mvuart->irq[UART_RX_IRQ]); + return ret; + } + + ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ], + mvebu_uart_tx_isr, port->irqflags, + dev_name(port->dev), + port); + if (ret) { + dev_err(port->dev, "unable to request IRQ %d\n", + mvuart->irq[UART_TX_IRQ]); + devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], + port); + return ret; + } + } + + return 0; +} + +static void mvebu_uart_shutdown(struct uart_port *port) +{ + struct mvebu_uart *mvuart = to_mvuart(port); + + writel(0, port->membase + UART_INTR(port)); + + if (!mvuart->irq[UART_TX_IRQ]) { + devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port); + } else { + devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port); + devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port); + } +} + +static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud) +{ + unsigned int d_divisor, m_divisor; + unsigned long flags; + u32 brdv, osamp; + + if (!port->uartclk) + return 0; + + /* + * The baudrate is derived from the UART clock thanks to divisors: + * > d1 * d2 ("TBG divisors"): can divide only TBG clock from 1 to 6 + * > D ("baud generator"): can divide the clock from 1 to 1023 + * > M ("fractional divisor"): allows a better accuracy (from 1 to 63) + * + * Exact formulas for calculating baudrate: + * + * with default x16 scheme: + * baudrate = xtal / (d * 16) + * baudrate = tbg / (d1 * d2 * d * 16) + * + * with fractional divisor: + * baudrate = 10 * xtal / (d * (3 * (m1 + m2) + 2 * (m3 + m4))) + * baudrate = 10 * tbg / (d1*d2 * d * (3 * (m1 + m2) + 2 * (m3 + m4))) + * + * Oversampling value: + * osamp = (m1 << 0) | (m2 << 8) | (m3 << 16) | (m4 << 24); + * + * Where m1 controls number of clock cycles per bit for bits 1,2,3; + * m2 for bits 4,5,6; m3 for bits 7,8 and m4 for bits 9,10. + * + * To simplify baudrate setup set all the M prescalers to the same + * value. For baudrates 9600 Bd and higher, it is enough to use the + * default (x16) divisor or fractional divisor with M = 63, so there + * is no need to use real fractional support (where the M prescalers + * are not equal). + * + * When all the M prescalers are zeroed then default (x16) divisor is + * used. Default x16 scheme is more stable than M (fractional divisor), + * so use M only when D divisor is not enough to derive baudrate. + * + * Member port->uartclk is either xtal clock rate or TBG clock rate + * divided by (d1 * d2). So d1 and d2 are already set by the UART clock + * driver (and UART driver itself cannot change them). Moreover they are + * shared between both UARTs. + */ + + m_divisor = OSAMP_DEFAULT_DIVISOR; + d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor); + + if (d_divisor > BRDV_BAUD_MAX) { + /* + * Experiments show that small M divisors are unstable. + * Use maximal possible M = 63 and calculate D divisor. 
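+ *
+ * As a rough worked example (assuming a hypothetical 25 MHz
+ * uartclk) for a requested 1200 Bd: the default x16 scheme needs
+ * D = 25000000 / (1200 * 16) ~= 1302, which exceeds BRDV_BAUD_MAX,
+ * so the code switches to M = 63 and uses
+ * D = DIV_ROUND_CLOSEST(25000000, 1200 * 63) = 331, i.e. roughly
+ * 1199 Bd.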
+ */ + m_divisor = OSAMP_MAX_DIVISOR; + d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor); + } + + if (d_divisor < 1) + d_divisor = 1; + else if (d_divisor > BRDV_BAUD_MAX) + d_divisor = BRDV_BAUD_MAX; + + spin_lock_irqsave(&mvebu_uart_lock, flags); + brdv = readl(port->membase + UART_BRDV); + brdv &= ~BRDV_BAUD_MASK; + brdv |= d_divisor; + writel(brdv, port->membase + UART_BRDV); + spin_unlock_irqrestore(&mvebu_uart_lock, flags); + + osamp = readl(port->membase + UART_OSAMP); + osamp &= ~OSAMP_DIVISORS_MASK; + if (m_divisor != OSAMP_DEFAULT_DIVISOR) + osamp |= (m_divisor << 0) | (m_divisor << 8) | + (m_divisor << 16) | (m_divisor << 24); + writel(osamp, port->membase + UART_OSAMP); + + return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor); +} + +static void mvebu_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + unsigned int baud, min_baud, max_baud; + + spin_lock_irqsave(&port->lock, flags); + + port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR | + STAT_TX_RDY(port) | STAT_TX_FIFO_FUL; + + if (termios->c_iflag & INPCK) + port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= + STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR; + + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR; + + /* + * Maximal divisor is 1023 and maximal fractional divisor is 63. And + * experiments show that baudrates above 1/80 of parent clock rate are + * not stable. So disallow baudrates above 1/80 of the parent clock + * rate. If port->uartclk is not available, then + * mvebu_uart_baud_rate_set() fails, so values min_baud and max_baud + * in this case do not matter. 
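+	 * As a rough illustration (again assuming a hypothetical 25 MHz
+	 * port->uartclk): min_baud works out to
+	 * DIV_ROUND_UP(25000000, 1023 * 63) = 388 Bd and max_baud to
+	 * 25000000 / 80 = 312500 Bd; requested rates outside that window are
+	 * clamped by uart_get_baud_rate() below.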
+ */ + min_baud = DIV_ROUND_UP(port->uartclk, BRDV_BAUD_MAX * + OSAMP_MAX_DIVISOR); + max_baud = port->uartclk / 80; + + baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud); + baud = mvebu_uart_baud_rate_set(port, baud); + + /* In case baudrate cannot be changed, report previous old value */ + if (baud == 0 && old) + baud = tty_termios_baud_rate(old); + + /* Only the following flag changes are supported */ + if (old) { + termios->c_iflag &= INPCK | IGNPAR; + termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); + termios->c_cflag &= CREAD | CBAUD; + termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); + termios->c_cflag |= CS8; + } + + if (baud != 0) { + tty_termios_encode_baud_rate(termios, baud, baud); + uart_update_timeout(port, termios->c_cflag, baud); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *mvebu_uart_type(struct uart_port *port) +{ + return MVEBU_UART_TYPE; +} + +static void mvebu_uart_release_port(struct uart_port *port) +{ + /* Nothing to do here */ +} + +static int mvebu_uart_request_port(struct uart_port *port) +{ + return 0; +} + +#ifdef CONFIG_CONSOLE_POLL +static int mvebu_uart_get_poll_char(struct uart_port *port) +{ + unsigned int st = readl(port->membase + UART_STAT); + + if (!(st & STAT_RX_RDY(port))) + return NO_POLL_CHAR; + + return readl(port->membase + UART_RBR(port)); +} + +static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c) +{ + unsigned int st; + + for (;;) { + st = readl(port->membase + UART_STAT); + + if (!(st & STAT_TX_FIFO_FUL)) + break; + + udelay(1); + } + + writel(c, port->membase + UART_TSH(port)); +} +#endif + +static const struct uart_ops mvebu_uart_ops = { + .tx_empty = mvebu_uart_tx_empty, + .set_mctrl = mvebu_uart_set_mctrl, + .get_mctrl = mvebu_uart_get_mctrl, + .stop_tx = mvebu_uart_stop_tx, + .start_tx = mvebu_uart_start_tx, + .stop_rx = mvebu_uart_stop_rx, + .break_ctl = mvebu_uart_break_ctl, + .startup = mvebu_uart_startup, + .shutdown = mvebu_uart_shutdown, + .set_termios = mvebu_uart_set_termios, + .type = mvebu_uart_type, + .release_port = mvebu_uart_release_port, + .request_port = mvebu_uart_request_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = mvebu_uart_get_poll_char, + .poll_put_char = mvebu_uart_put_poll_char, +#endif +}; + +/* Console Driver Operations */ + +#ifdef CONFIG_SERIAL_MVEBU_CONSOLE +/* Early Console */ +static void mvebu_uart_putc(struct uart_port *port, unsigned char c) +{ + unsigned int st; + + for (;;) { + st = readl(port->membase + UART_STAT); + if (!(st & STAT_TX_FIFO_FUL)) + break; + } + + /* At early stage, DT is not parsed yet, only use UART0 */ + writel(c, port->membase + UART_STD_TSH); + + for (;;) { + st = readl(port->membase + UART_STAT); + if (st & STAT_TX_FIFO_EMP) + break; + } +} + +static void mvebu_uart_putc_early_write(struct console *con, + const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, mvebu_uart_putc); +} + +static int __init +mvebu_uart_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = mvebu_uart_putc_early_write; + + return 0; +} + +EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup); +OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart", + mvebu_uart_early_console_setup); + +static void wait_for_xmitr(struct uart_port *port) +{ + u32 val; + + readl_poll_timeout_atomic(port->membase + UART_STAT, val, + (val & STAT_TX_RDY(port)), 1, 10000); 
+} + +static void wait_for_xmite(struct uart_port *port) +{ + u32 val; + + readl_poll_timeout_atomic(port->membase + UART_STAT, val, + (val & STAT_TX_EMP), 1, 10000); +} + +static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch) +{ + wait_for_xmitr(port); + writel(ch, port->membase + UART_TSH(port)); +} + +static void mvebu_uart_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = &mvebu_uart_ports[co->index]; + unsigned long flags; + unsigned int ier, intr, ctl; + int locked = 1; + + if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT; + intr = readl(port->membase + UART_INTR(port)) & + (CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port)); + writel(0, port->membase + UART_CTRL(port)); + writel(0, port->membase + UART_INTR(port)); + + uart_console_write(port, s, count, mvebu_uart_console_putchar); + + wait_for_xmite(port); + + if (ier) + writel(ier, port->membase + UART_CTRL(port)); + + if (intr) { + ctl = intr | readl(port->membase + UART_INTR(port)); + writel(ctl, port->membase + UART_INTR(port)); + } + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static int mvebu_uart_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= MVEBU_NR_UARTS) + return -EINVAL; + + port = &mvebu_uart_ports[co->index]; + + if (!port->mapbase || !port->membase) { + pr_debug("console on ttyMV%i not present\n", co->index); + return -ENODEV; + } + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver mvebu_uart_driver; + +static struct console mvebu_uart_console = { + .name = "ttyMV", + .write = mvebu_uart_console_write, + .device = uart_console_device, + .setup = mvebu_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &mvebu_uart_driver, +}; + +static int __init mvebu_uart_console_init(void) +{ + register_console(&mvebu_uart_console); + return 0; +} + +console_initcall(mvebu_uart_console_init); + + +#endif /* CONFIG_SERIAL_MVEBU_CONSOLE */ + +static struct uart_driver mvebu_uart_driver = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = "ttyMV", + .nr = MVEBU_NR_UARTS, +#ifdef CONFIG_SERIAL_MVEBU_CONSOLE + .cons = &mvebu_uart_console, +#endif +}; + +#if defined(CONFIG_PM) +static int mvebu_uart_suspend(struct device *dev) +{ + struct mvebu_uart *mvuart = dev_get_drvdata(dev); + struct uart_port *port = mvuart->port; + unsigned long flags; + + uart_suspend_port(&mvebu_uart_driver, port); + + mvuart->pm_regs.rbr = readl(port->membase + UART_RBR(port)); + mvuart->pm_regs.tsh = readl(port->membase + UART_TSH(port)); + mvuart->pm_regs.ctrl = readl(port->membase + UART_CTRL(port)); + mvuart->pm_regs.intr = readl(port->membase + UART_INTR(port)); + mvuart->pm_regs.stat = readl(port->membase + UART_STAT); + spin_lock_irqsave(&mvebu_uart_lock, flags); + mvuart->pm_regs.brdv = readl(port->membase + UART_BRDV); + spin_unlock_irqrestore(&mvebu_uart_lock, flags); + mvuart->pm_regs.osamp = readl(port->membase + UART_OSAMP); + + device_set_wakeup_enable(dev, true); + + return 0; +} + +static int mvebu_uart_resume(struct device *dev) +{ + struct mvebu_uart *mvuart = dev_get_drvdata(dev); + struct uart_port *port = 
mvuart->port; + unsigned long flags; + + writel(mvuart->pm_regs.rbr, port->membase + UART_RBR(port)); + writel(mvuart->pm_regs.tsh, port->membase + UART_TSH(port)); + writel(mvuart->pm_regs.ctrl, port->membase + UART_CTRL(port)); + writel(mvuart->pm_regs.intr, port->membase + UART_INTR(port)); + writel(mvuart->pm_regs.stat, port->membase + UART_STAT); + spin_lock_irqsave(&mvebu_uart_lock, flags); + writel(mvuart->pm_regs.brdv, port->membase + UART_BRDV); + spin_unlock_irqrestore(&mvebu_uart_lock, flags); + writel(mvuart->pm_regs.osamp, port->membase + UART_OSAMP); + + uart_resume_port(&mvebu_uart_driver, port); + + return 0; +} + +static const struct dev_pm_ops mvebu_uart_pm_ops = { + .suspend = mvebu_uart_suspend, + .resume = mvebu_uart_resume, +}; +#endif /* CONFIG_PM */ + +static const struct of_device_id mvebu_uart_of_match[]; + +/* Counter to keep track of each UART port id when not using CONFIG_OF */ +static int uart_num_counter; + +static int mvebu_uart_probe(struct platform_device *pdev) +{ + struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + const struct of_device_id *match = of_match_device(mvebu_uart_of_match, + &pdev->dev); + struct uart_port *port; + struct mvebu_uart *mvuart; + int id, irq; + + if (!reg) { + dev_err(&pdev->dev, "no registers defined\n"); + return -EINVAL; + } + + /* Assume that all UART ports have a DT alias or none has */ + id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (!pdev->dev.of_node || id < 0) + pdev->id = uart_num_counter++; + else + pdev->id = id; + + if (pdev->id >= MVEBU_NR_UARTS) { + dev_err(&pdev->dev, "cannot have more than %d UART ports\n", + MVEBU_NR_UARTS); + return -EINVAL; + } + + port = &mvebu_uart_ports[pdev->id]; + + spin_lock_init(&port->lock); + + port->dev = &pdev->dev; + port->type = PORT_MVEBU; + port->ops = &mvebu_uart_ops; + port->regshift = 0; + + port->fifosize = 32; + port->iotype = UPIO_MEM32; + port->flags = UPF_FIXED_PORT; + port->line = pdev->id; + + /* + * IRQ number is not stored in this structure because we may have two of + * them per port (RX and TX). Instead, use the driver UART structure + * array so called ->irq[]. + */ + port->irq = 0; + port->irqflags = 0; + port->mapbase = reg->start; + + port->membase = devm_ioremap_resource(&pdev->dev, reg); + if (IS_ERR(port->membase)) + return PTR_ERR(port->membase); + + mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart), + GFP_KERNEL); + if (!mvuart) + return -ENOMEM; + + /* Get controller data depending on the compatible string */ + mvuart->data = (struct mvebu_uart_driver_data *)match->data; + mvuart->port = port; + + port->private_data = mvuart; + platform_set_drvdata(pdev, mvuart); + + /* Get fixed clock frequency */ + mvuart->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mvuart->clk)) { + if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER) + return PTR_ERR(mvuart->clk); + + if (IS_EXTENDED(port)) { + dev_err(&pdev->dev, "unable to get UART clock\n"); + return PTR_ERR(mvuart->clk); + } + } else { + if (!clk_prepare_enable(mvuart->clk)) + port->uartclk = clk_get_rate(mvuart->clk); + } + + /* Manage interrupts */ + if (platform_irq_count(pdev) == 1) { + /* Old bindings: no name on the single unamed UART0 IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mvuart->irq[UART_IRQ_SUM] = irq; + } else { + /* + * New bindings: named interrupts (RX, TX) for both UARTS, + * only make use of uart-rx and uart-tx interrupts, do not use + * uart-sum of UART0 port. 
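+		 * A purely illustrative node using these interrupt names (the
+		 * unit address, IRQ numbers and flags are made-up placeholders,
+		 * not taken from a real board file):
+		 *
+		 *	uart0: serial@12000 {
+		 *		compatible = "marvell,armada-3700-uart";
+		 *		reg = <0x12000 0x200>;
+		 *		interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+		 *			     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+		 *			     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		 *		interrupt-names = "uart-sum", "uart-rx", "uart-tx";
+		 *	};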
+ */ + irq = platform_get_irq_byname(pdev, "uart-rx"); + if (irq < 0) + return irq; + + mvuart->irq[UART_RX_IRQ] = irq; + + irq = platform_get_irq_byname(pdev, "uart-tx"); + if (irq < 0) + return irq; + + mvuart->irq[UART_TX_IRQ] = irq; + } + + /* UART Soft Reset*/ + writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port)); + udelay(1); + writel(0, port->membase + UART_CTRL(port)); + + return uart_add_one_port(&mvebu_uart_driver, port); +} + +static struct mvebu_uart_driver_data uart_std_driver_data = { + .is_ext = false, + .regs.rbr = UART_STD_RBR, + .regs.tsh = UART_STD_TSH, + .regs.ctrl = UART_STD_CTRL1, + .regs.intr = UART_STD_CTRL2, + .flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT, + .flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT, + .flags.stat_tx_rdy = STAT_STD_TX_RDY, + .flags.stat_rx_rdy = STAT_STD_RX_RDY, +}; + +static struct mvebu_uart_driver_data uart_ext_driver_data = { + .is_ext = true, + .regs.rbr = UART_EXT_RBR, + .regs.tsh = UART_EXT_TSH, + .regs.ctrl = UART_EXT_CTRL1, + .regs.intr = UART_EXT_CTRL2, + .flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT, + .flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT, + .flags.stat_tx_rdy = STAT_EXT_TX_RDY, + .flags.stat_rx_rdy = STAT_EXT_RX_RDY, +}; + +/* Match table for of_platform binding */ +static const struct of_device_id mvebu_uart_of_match[] = { + { + .compatible = "marvell,armada-3700-uart", + .data = (void *)&uart_std_driver_data, + }, + { + .compatible = "marvell,armada-3700-uart-ext", + .data = (void *)&uart_ext_driver_data, + }, + {} +}; + +static struct platform_driver mvebu_uart_platform_driver = { + .probe = mvebu_uart_probe, + .driver = { + .name = "mvebu-uart", + .of_match_table = of_match_ptr(mvebu_uart_of_match), + .suppress_bind_attrs = true, +#if defined(CONFIG_PM) + .pm = &mvebu_uart_pm_ops, +#endif /* CONFIG_PM */ + }, +}; + +/* This code is based on clk-fixed-factor.c driver and modified. */ + +struct mvebu_uart_clock { + struct clk_hw clk_hw; + int clock_idx; + u32 pm_context_reg1; + u32 pm_context_reg2; +}; + +struct mvebu_uart_clock_base { + struct mvebu_uart_clock clocks[2]; + unsigned int parent_rates[5]; + int parent_idx; + unsigned int div; + void __iomem *reg1; + void __iomem *reg2; + bool configured; +}; + +#define PARENT_CLOCK_XTAL 4 + +#define to_uart_clock(hw) container_of(hw, struct mvebu_uart_clock, clk_hw) +#define to_uart_clock_base(uart_clock) container_of(uart_clock, \ + struct mvebu_uart_clock_base, clocks[uart_clock->clock_idx]) + +static int mvebu_uart_clock_prepare(struct clk_hw *hw) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + unsigned int prev_clock_idx, prev_clock_rate, prev_d1d2; + unsigned int parent_clock_idx, parent_clock_rate; + unsigned long flags; + unsigned int d1, d2; + u64 divisor; + u32 val; + + /* + * This function just reconfigures UART Clock Control register (located + * in UART1 address space which controls both UART1 and UART2) to + * selected UART base clock and recalculates current UART1/UART2 + * divisors in their address spaces, so that final baudrate will not be + * changed by switching UART parent clock. This is required for + * otherwise kernel's boot log stops working - we need to ensure that + * UART baudrate does not change during this setup. It is a one time + * operation, it will execute only once and set `configured` to true, + * and be skipped on subsequent calls. 
Because this UART Clock Control + * register (UART_BRDV) is shared between UART1 baudrate function, + * UART1 clock selector and UART2 clock selector, every access to + * UART_BRDV (reg1) needs to be protected by a lock. + */ + + spin_lock_irqsave(&mvebu_uart_lock, flags); + + if (uart_clock_base->configured) { + spin_unlock_irqrestore(&mvebu_uart_lock, flags); + return 0; + } + + parent_clock_idx = uart_clock_base->parent_idx; + parent_clock_rate = uart_clock_base->parent_rates[parent_clock_idx]; + + val = readl(uart_clock_base->reg1); + + if (uart_clock_base->div > CLK_TBG_DIV1_MAX) { + d1 = CLK_TBG_DIV1_MAX; + d2 = uart_clock_base->div / CLK_TBG_DIV1_MAX; + } else { + d1 = uart_clock_base->div; + d2 = 1; + } + + if (val & CLK_NO_XTAL) { + prev_clock_idx = (val >> CLK_TBG_SEL_SHIFT) & CLK_TBG_SEL_MASK; + prev_d1d2 = ((val >> CLK_TBG_DIV1_SHIFT) & CLK_TBG_DIV1_MASK) * + ((val >> CLK_TBG_DIV2_SHIFT) & CLK_TBG_DIV2_MASK); + } else { + prev_clock_idx = PARENT_CLOCK_XTAL; + prev_d1d2 = 1; + } + + /* Note that uart_clock_base->parent_rates[i] may not be available */ + prev_clock_rate = uart_clock_base->parent_rates[prev_clock_idx]; + + /* Recalculate UART1 divisor so UART1 baudrate does not change */ + if (prev_clock_rate) { + divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) * + parent_clock_rate * prev_d1d2, + prev_clock_rate * d1 * d2); + if (divisor < 1) + divisor = 1; + else if (divisor > BRDV_BAUD_MAX) + divisor = BRDV_BAUD_MAX; + val = (val & ~BRDV_BAUD_MASK) | divisor; + } + + if (parent_clock_idx != PARENT_CLOCK_XTAL) { + /* Do not use XTAL, select TBG clock and TBG d1 * d2 divisors */ + val |= CLK_NO_XTAL; + val &= ~(CLK_TBG_DIV1_MASK << CLK_TBG_DIV1_SHIFT); + val |= d1 << CLK_TBG_DIV1_SHIFT; + val &= ~(CLK_TBG_DIV2_MASK << CLK_TBG_DIV2_SHIFT); + val |= d2 << CLK_TBG_DIV2_SHIFT; + val &= ~(CLK_TBG_SEL_MASK << CLK_TBG_SEL_SHIFT); + val |= parent_clock_idx << CLK_TBG_SEL_SHIFT; + } else { + /* Use XTAL, TBG bits are then ignored */ + val &= ~CLK_NO_XTAL; + } + + writel(val, uart_clock_base->reg1); + + /* Recalculate UART2 divisor so UART2 baudrate does not change */ + if (prev_clock_rate) { + val = readl(uart_clock_base->reg2); + divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) * + parent_clock_rate * prev_d1d2, + prev_clock_rate * d1 * d2); + if (divisor < 1) + divisor = 1; + else if (divisor > BRDV_BAUD_MAX) + divisor = BRDV_BAUD_MAX; + val = (val & ~BRDV_BAUD_MASK) | divisor; + writel(val, uart_clock_base->reg2); + } + + uart_clock_base->configured = true; + + spin_unlock_irqrestore(&mvebu_uart_lock, flags); + + return 0; +} + +static int mvebu_uart_clock_enable(struct clk_hw *hw) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&mvebu_uart_lock, flags); + + val = readl(uart_clock_base->reg1); + + if (uart_clock->clock_idx == 0) + val &= ~UART1_CLK_DIS; + else + val &= ~UART2_CLK_DIS; + + writel(val, uart_clock_base->reg1); + + spin_unlock_irqrestore(&mvebu_uart_lock, flags); + + return 0; +} + +static void mvebu_uart_clock_disable(struct clk_hw *hw) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&mvebu_uart_lock, flags); + + val = readl(uart_clock_base->reg1); + + if (uart_clock->clock_idx == 0) + val |= UART1_CLK_DIS; + else + val |= UART2_CLK_DIS; + + 
writel(val, uart_clock_base->reg1); + + spin_unlock_irqrestore(&mvebu_uart_lock, flags); +} + +static int mvebu_uart_clock_is_enabled(struct clk_hw *hw) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + u32 val; + + val = readl(uart_clock_base->reg1); + + if (uart_clock->clock_idx == 0) + return !(val & UART1_CLK_DIS); + else + return !(val & UART2_CLK_DIS); +} + +static int mvebu_uart_clock_save_context(struct clk_hw *hw) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + unsigned long flags; + + spin_lock_irqsave(&mvebu_uart_lock, flags); + uart_clock->pm_context_reg1 = readl(uart_clock_base->reg1); + uart_clock->pm_context_reg2 = readl(uart_clock_base->reg2); + spin_unlock_irqrestore(&mvebu_uart_lock, flags); + + return 0; +} + +static void mvebu_uart_clock_restore_context(struct clk_hw *hw) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + unsigned long flags; + + spin_lock_irqsave(&mvebu_uart_lock, flags); + writel(uart_clock->pm_context_reg1, uart_clock_base->reg1); + writel(uart_clock->pm_context_reg2, uart_clock_base->reg2); + spin_unlock_irqrestore(&mvebu_uart_lock, flags); +} + +static unsigned long mvebu_uart_clock_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + + return parent_rate / uart_clock_base->div; +} + +static long mvebu_uart_clock_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct mvebu_uart_clock *uart_clock = to_uart_clock(hw); + struct mvebu_uart_clock_base *uart_clock_base = + to_uart_clock_base(uart_clock); + + return *parent_rate / uart_clock_base->div; +} + +static int mvebu_uart_clock_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + /* + * We must report success but we can do so unconditionally because + * mvebu_uart_clock_round_rate returns values that ensure this call is a + * nop. 
+ */ + + return 0; +} + +static const struct clk_ops mvebu_uart_clock_ops = { + .prepare = mvebu_uart_clock_prepare, + .enable = mvebu_uart_clock_enable, + .disable = mvebu_uart_clock_disable, + .is_enabled = mvebu_uart_clock_is_enabled, + .save_context = mvebu_uart_clock_save_context, + .restore_context = mvebu_uart_clock_restore_context, + .round_rate = mvebu_uart_clock_round_rate, + .set_rate = mvebu_uart_clock_set_rate, + .recalc_rate = mvebu_uart_clock_recalc_rate, +}; + +static int mvebu_uart_clock_register(struct device *dev, + struct mvebu_uart_clock *uart_clock, + const char *name, + const char *parent_name) +{ + struct clk_init_data init = { }; + + uart_clock->clk_hw.init = &init; + + init.name = name; + init.ops = &mvebu_uart_clock_ops; + init.flags = 0; + init.num_parents = 1; + init.parent_names = &parent_name; + + return devm_clk_hw_register(dev, &uart_clock->clk_hw); +} + +static int mvebu_uart_clock_probe(struct platform_device *pdev) +{ + static const char *const uart_clk_names[] = { "uart_1", "uart_2" }; + static const char *const parent_clk_names[] = { "TBG-A-P", "TBG-B-P", + "TBG-A-S", "TBG-B-S", + "xtal" }; + struct clk *parent_clks[ARRAY_SIZE(parent_clk_names)]; + struct mvebu_uart_clock_base *uart_clock_base; + struct clk_hw_onecell_data *hw_clk_data; + struct device *dev = &pdev->dev; + int i, parent_clk_idx, ret; + unsigned long div, rate; + struct resource *res; + unsigned int d1, d2; + + BUILD_BUG_ON(ARRAY_SIZE(uart_clk_names) != + ARRAY_SIZE(uart_clock_base->clocks)); + BUILD_BUG_ON(ARRAY_SIZE(parent_clk_names) != + ARRAY_SIZE(uart_clock_base->parent_rates)); + + uart_clock_base = devm_kzalloc(dev, + sizeof(*uart_clock_base), + GFP_KERNEL); + if (!uart_clock_base) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Couldn't get first register\n"); + return -ENOENT; + } + + /* + * UART Clock Control register (reg1 / UART_BRDV) is in the address + * space of UART1 (standard UART variant), controls parent clock and + * dividers for both UART1 and UART2 and is supplied via DT as the first + * resource. Therefore use ioremap() rather than ioremap_resource() to + * avoid conflicts with UART1 driver. Access to UART_BRDV is protected + * by a lock shared between clock and UART driver. + */ + uart_clock_base->reg1 = devm_ioremap(dev, res->start, + resource_size(res)); + if (!uart_clock_base->reg1) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(dev, "Couldn't get second register\n"); + return -ENOENT; + } + + /* + * UART 2 Baud Rate Divisor register (reg2 / UART_BRDV) is in address + * space of UART2 (extended UART variant), controls only one UART2 + * specific divider and is supplied via DT as second resource. + * Therefore use ioremap() rather than ioremap_resource() to avoid + * conflicts with UART2 driver. Access to UART_BRDV is protected by a + * by lock shared between clock and UART driver. 
+ */ + uart_clock_base->reg2 = devm_ioremap(dev, res->start, + resource_size(res)); + if (!uart_clock_base->reg2) + return -ENOMEM; + + hw_clk_data = devm_kzalloc(dev, + struct_size(hw_clk_data, hws, + ARRAY_SIZE(uart_clk_names)), + GFP_KERNEL); + if (!hw_clk_data) + return -ENOMEM; + + hw_clk_data->num = ARRAY_SIZE(uart_clk_names); + for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) { + hw_clk_data->hws[i] = &uart_clock_base->clocks[i].clk_hw; + uart_clock_base->clocks[i].clock_idx = i; + } + + parent_clk_idx = -1; + + for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) { + parent_clks[i] = devm_clk_get(dev, parent_clk_names[i]); + if (IS_ERR(parent_clks[i])) { + if (PTR_ERR(parent_clks[i]) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_warn(dev, "Couldn't get the parent clock %s: %ld\n", + parent_clk_names[i], PTR_ERR(parent_clks[i])); + continue; + } + + ret = clk_prepare_enable(parent_clks[i]); + if (ret) { + dev_warn(dev, "Couldn't enable parent clock %s: %d\n", + parent_clk_names[i], ret); + continue; + } + rate = clk_get_rate(parent_clks[i]); + uart_clock_base->parent_rates[i] = rate; + + if (i != PARENT_CLOCK_XTAL) { + /* + * Calculate the smallest TBG d1 and d2 divisors that + * still can provide 9600 baudrate. + */ + d1 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR * + BRDV_BAUD_MAX); + if (d1 < 1) + d1 = 1; + else if (d1 > CLK_TBG_DIV1_MAX) + d1 = CLK_TBG_DIV1_MAX; + + d2 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR * + BRDV_BAUD_MAX * d1); + if (d2 < 1) + d2 = 1; + else if (d2 > CLK_TBG_DIV2_MAX) + d2 = CLK_TBG_DIV2_MAX; + } else { + /* + * When UART clock uses XTAL clock as a source then it + * is not possible to use d1 and d2 divisors. + */ + d1 = d2 = 1; + } + + /* Skip clock source which cannot provide 9600 baudrate */ + if (rate > 9600 * OSAMP_MAX_DIVISOR * BRDV_BAUD_MAX * d1 * d2) + continue; + + /* + * Choose TBG clock source with the smallest divisors. Use XTAL + * clock source only in case TBG is not available as XTAL cannot + * be used for baudrates higher than 230400. 
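+		 * For instance, with a hypothetical 800 MHz TBG parent (an
+		 * assumed figure, purely for illustration):
+		 * d1 = DIV_ROUND_UP(800000000, 9600 * 63 * 1023) = 2 and
+		 * d2 = 1, so that source can still reach 9600 Bd and competes
+		 * here with a total divisor of d1 * d2 = 2.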
+ */ + if (parent_clk_idx == -1 || + (i != PARENT_CLOCK_XTAL && div > d1 * d2)) { + parent_clk_idx = i; + div = d1 * d2; + } + } + + for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) { + if (i == parent_clk_idx || IS_ERR(parent_clks[i])) + continue; + clk_disable_unprepare(parent_clks[i]); + devm_clk_put(dev, parent_clks[i]); + } + + if (parent_clk_idx == -1) { + dev_err(dev, "No usable parent clock\n"); + return -ENOENT; + } + + uart_clock_base->parent_idx = parent_clk_idx; + uart_clock_base->div = div; + + dev_notice(dev, "Using parent clock %s as base UART clock\n", + __clk_get_name(parent_clks[parent_clk_idx])); + + for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) { + ret = mvebu_uart_clock_register(dev, + &uart_clock_base->clocks[i], + uart_clk_names[i], + __clk_get_name(parent_clks[parent_clk_idx])); + if (ret) { + dev_err(dev, "Can't register UART clock %d: %d\n", + i, ret); + return ret; + } + } + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + hw_clk_data); +} + +static const struct of_device_id mvebu_uart_clock_of_match[] = { + { .compatible = "marvell,armada-3700-uart-clock", }, + { } +}; + +static struct platform_driver mvebu_uart_clock_platform_driver = { + .probe = mvebu_uart_clock_probe, + .driver = { + .name = "mvebu-uart-clock", + .of_match_table = mvebu_uart_clock_of_match, + }, +}; + +static int __init mvebu_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&mvebu_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&mvebu_uart_clock_platform_driver); + if (ret) { + uart_unregister_driver(&mvebu_uart_driver); + return ret; + } + + ret = platform_driver_register(&mvebu_uart_platform_driver); + if (ret) { + platform_driver_unregister(&mvebu_uart_clock_platform_driver); + uart_unregister_driver(&mvebu_uart_driver); + return ret; + } + + return 0; +} +arch_initcall(mvebu_uart_init); diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c new file mode 100644 index 000000000..d21a4f3ef --- /dev/null +++ b/drivers/tty/serial/mxs-auart.c @@ -0,0 +1,1760 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Application UART driver for: + * Freescale STMP37XX/STMP378X + * Alphascale ASM9260 + * + * Author: dmitry pervushin + * + * Copyright 2014 Oleksij Rempel + * Provide Alphascale ASM9260 support. + * Copyright 2008-2010 Freescale Semiconductor, Inc. + * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "serial_mctrl_gpio.h" + +#define MXS_AUART_PORTS 5 +#define MXS_AUART_FIFO_SIZE 16 + +#define SET_REG 0x4 +#define CLR_REG 0x8 +#define TOG_REG 0xc + +#define AUART_CTRL0 0x00000000 +#define AUART_CTRL1 0x00000010 +#define AUART_CTRL2 0x00000020 +#define AUART_LINECTRL 0x00000030 +#define AUART_LINECTRL2 0x00000040 +#define AUART_INTR 0x00000050 +#define AUART_DATA 0x00000060 +#define AUART_STAT 0x00000070 +#define AUART_DEBUG 0x00000080 +#define AUART_VERSION 0x00000090 +#define AUART_AUTOBAUD 0x000000a0 + +#define AUART_CTRL0_SFTRST (1 << 31) +#define AUART_CTRL0_CLKGATE (1 << 30) +#define AUART_CTRL0_RXTO_ENABLE (1 << 27) +#define AUART_CTRL0_RXTIMEOUT(v) (((v) & 0x7ff) << 16) +#define AUART_CTRL0_XFER_COUNT(v) ((v) & 0xffff) + +#define AUART_CTRL1_XFER_COUNT(v) ((v) & 0xffff) + +#define AUART_CTRL2_DMAONERR (1 << 26) +#define AUART_CTRL2_TXDMAE (1 << 25) +#define AUART_CTRL2_RXDMAE (1 << 24) + +#define AUART_CTRL2_CTSEN (1 << 15) +#define AUART_CTRL2_RTSEN (1 << 14) +#define AUART_CTRL2_RTS (1 << 11) +#define AUART_CTRL2_RXE (1 << 9) +#define AUART_CTRL2_TXE (1 << 8) +#define AUART_CTRL2_UARTEN (1 << 0) + +#define AUART_LINECTRL_BAUD_DIV_MAX 0x003fffc0 +#define AUART_LINECTRL_BAUD_DIV_MIN 0x000000ec +#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16 +#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000 +#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16) +#define AUART_LINECTRL_BAUD_DIVFRAC_SHIFT 8 +#define AUART_LINECTRL_BAUD_DIVFRAC_MASK 0x00003f00 +#define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8) +#define AUART_LINECTRL_SPS (1 << 7) +#define AUART_LINECTRL_WLEN_MASK 0x00000060 +#define AUART_LINECTRL_WLEN(v) ((((v) - 5) & 0x3) << 5) +#define AUART_LINECTRL_FEN (1 << 4) +#define AUART_LINECTRL_STP2 (1 << 3) +#define AUART_LINECTRL_EPS (1 << 2) +#define AUART_LINECTRL_PEN (1 << 1) +#define AUART_LINECTRL_BRK (1 << 0) + +#define AUART_INTR_RTIEN (1 << 22) +#define AUART_INTR_TXIEN (1 << 21) +#define AUART_INTR_RXIEN (1 << 20) +#define AUART_INTR_CTSMIEN (1 << 17) +#define AUART_INTR_RTIS (1 << 6) +#define AUART_INTR_TXIS (1 << 5) +#define AUART_INTR_RXIS (1 << 4) +#define AUART_INTR_CTSMIS (1 << 1) + +#define AUART_STAT_BUSY (1 << 29) +#define AUART_STAT_CTS (1 << 28) +#define AUART_STAT_TXFE (1 << 27) +#define AUART_STAT_TXFF (1 << 25) +#define AUART_STAT_RXFE (1 << 24) +#define AUART_STAT_OERR (1 << 19) +#define AUART_STAT_BERR (1 << 18) +#define AUART_STAT_PERR (1 << 17) +#define AUART_STAT_FERR (1 << 16) +#define AUART_STAT_RXCOUNT_MASK 0xffff + +/* + * Start of Alphascale asm9260 defines + * This list contains only differences of existing bits + * between imx2x and asm9260 + */ +#define ASM9260_HW_CTRL0 0x0000 +/* + * RW. Tell the UART to execute the RX DMA Command. The + * UART will clear this bit at the end of receive execution. + */ +#define ASM9260_BM_CTRL0_RXDMA_RUN BIT(28) +/* RW. 0 use FIFO for status register; 1 use DMA */ +#define ASM9260_BM_CTRL0_RXTO_SOURCE_STATUS BIT(25) +/* + * RW. RX TIMEOUT Enable. Valid for FIFO and DMA. + * Warning: If this bit is set to 0, the RX timeout will not affect receive DMA + * operation. If this bit is set to 1, a receive timeout will cause the receive + * DMA logic to terminate by filling the remaining DMA bytes with garbage data. 
+ */ +#define ASM9260_BM_CTRL0_RXTO_ENABLE BIT(24) +/* + * RW. Receive Timeout Counter Value: number of 8-bit-time to wait before + * asserting timeout on the RX input. If the RXFIFO is not empty and the RX + * input is idle, then the watchdog counter will decrement each bit-time. Note + * 7-bit-time is added to the programmed value, so a value of zero will set + * the counter to 7-bit-time, a value of 0x1 gives 15-bit-time and so on. Also + * note that the counter is reloaded at the end of each frame, so if the frame + * is 10 bits long and the timeout counter value is zero, then timeout will + * occur (when FIFO is not empty) even if the RX input is not idle. The default + * value is 0x3 (31 bit-time). + */ +#define ASM9260_BM_CTRL0_RXTO_MASK (0xff << 16) +/* TIMEOUT = (100*7+1)*(1/BAUD) */ +#define ASM9260_BM_CTRL0_DEFAULT_RXTIMEOUT (20 << 16) + +/* TX ctrl register */ +#define ASM9260_HW_CTRL1 0x0010 +/* + * RW. Tell the UART to execute the TX DMA Command. The + * UART will clear this bit at the end of transmit execution. + */ +#define ASM9260_BM_CTRL1_TXDMA_RUN BIT(28) + +#define ASM9260_HW_CTRL2 0x0020 +/* + * RW. Receive Interrupt FIFO Level Select. + * The trigger points for the receive interrupt are as follows: + * ONE_EIGHTHS = 0x0 Trigger on FIFO full to at least 2 of 16 entries. + * ONE_QUARTER = 0x1 Trigger on FIFO full to at least 4 of 16 entries. + * ONE_HALF = 0x2 Trigger on FIFO full to at least 8 of 16 entries. + * THREE_QUARTERS = 0x3 Trigger on FIFO full to at least 12 of 16 entries. + * SEVEN_EIGHTHS = 0x4 Trigger on FIFO full to at least 14 of 16 entries. + */ +#define ASM9260_BM_CTRL2_RXIFLSEL (7 << 20) +#define ASM9260_BM_CTRL2_DEFAULT_RXIFLSEL (3 << 20) +/* RW. Same as RXIFLSEL */ +#define ASM9260_BM_CTRL2_TXIFLSEL (7 << 16) +#define ASM9260_BM_CTRL2_DEFAULT_TXIFLSEL (2 << 16) +/* RW. Set DTR. When this bit is 1, the output is 0. */ +#define ASM9260_BM_CTRL2_DTR BIT(10) +/* RW. Loop Back Enable */ +#define ASM9260_BM_CTRL2_LBE BIT(7) +#define ASM9260_BM_CTRL2_PORT_ENABLE BIT(0) + +#define ASM9260_HW_LINECTRL 0x0030 +/* + * RW. Stick Parity Select. When bits 1, 2, and 7 of this register are set, the + * parity bit is transmitted and checked as a 0. When bits 1 and 7 are set, + * and bit 2 is 0, the parity bit is transmitted and checked as a 1. When this + * bit is cleared stick parity is disabled. + */ +#define ASM9260_BM_LCTRL_SPS BIT(7) +/* RW. Word length */ +#define ASM9260_BM_LCTRL_WLEN (3 << 5) +#define ASM9260_BM_LCTRL_CHRL_5 (0 << 5) +#define ASM9260_BM_LCTRL_CHRL_6 (1 << 5) +#define ASM9260_BM_LCTRL_CHRL_7 (2 << 5) +#define ASM9260_BM_LCTRL_CHRL_8 (3 << 5) + +/* + * Interrupt register. + * contains the interrupt enables and the interrupt status bits + */ +#define ASM9260_HW_INTR 0x0040 +/* Tx FIFO EMPTY Raw Interrupt enable */ +#define ASM9260_BM_INTR_TFEIEN BIT(27) +/* Overrun Error Interrupt Enable. */ +#define ASM9260_BM_INTR_OEIEN BIT(26) +/* Break Error Interrupt Enable. */ +#define ASM9260_BM_INTR_BEIEN BIT(25) +/* Parity Error Interrupt Enable. */ +#define ASM9260_BM_INTR_PEIEN BIT(24) +/* Framing Error Interrupt Enable. */ +#define ASM9260_BM_INTR_FEIEN BIT(23) + +/* nUARTDSR Modem Interrupt Enable. */ +#define ASM9260_BM_INTR_DSRMIEN BIT(19) +/* nUARTDCD Modem Interrupt Enable. */ +#define ASM9260_BM_INTR_DCDMIEN BIT(18) +/* nUARTRI Modem Interrupt Enable. 
*/ +#define ASM9260_BM_INTR_RIMIEN BIT(16) +/* Auto-Boud Timeout */ +#define ASM9260_BM_INTR_ABTO BIT(13) +#define ASM9260_BM_INTR_ABEO BIT(12) +/* Tx FIFO EMPTY Raw Interrupt state */ +#define ASM9260_BM_INTR_TFEIS BIT(11) +/* Overrun Error */ +#define ASM9260_BM_INTR_OEIS BIT(10) +/* Break Error */ +#define ASM9260_BM_INTR_BEIS BIT(9) +/* Parity Error */ +#define ASM9260_BM_INTR_PEIS BIT(8) +/* Framing Error */ +#define ASM9260_BM_INTR_FEIS BIT(7) +#define ASM9260_BM_INTR_DSRMIS BIT(3) +#define ASM9260_BM_INTR_DCDMIS BIT(2) +#define ASM9260_BM_INTR_RIMIS BIT(0) + +/* + * RW. In DMA mode, up to 4 Received/Transmit characters can be accessed at a + * time. In PIO mode, only one character can be accessed at a time. The status + * register contains the receive data flags and valid bits. + */ +#define ASM9260_HW_DATA 0x0050 + +#define ASM9260_HW_STAT 0x0060 +/* RO. If 1, UARTAPP is present in this product. */ +#define ASM9260_BM_STAT_PRESENT BIT(31) +/* RO. If 1, HISPEED is present in this product. */ +#define ASM9260_BM_STAT_HISPEED BIT(30) +/* RO. Receive FIFO Full. */ +#define ASM9260_BM_STAT_RXFULL BIT(26) + +/* RO. The UART Debug Register contains the state of the DMA signals. */ +#define ASM9260_HW_DEBUG 0x0070 +/* DMA Command Run Status */ +#define ASM9260_BM_DEBUG_TXDMARUN BIT(5) +#define ASM9260_BM_DEBUG_RXDMARUN BIT(4) +/* DMA Command End Status */ +#define ASM9260_BM_DEBUG_TXCMDEND BIT(3) +#define ASM9260_BM_DEBUG_RXCMDEND BIT(2) +/* DMA Request Status */ +#define ASM9260_BM_DEBUG_TXDMARQ BIT(1) +#define ASM9260_BM_DEBUG_RXDMARQ BIT(0) + +#define ASM9260_HW_ILPR 0x0080 + +#define ASM9260_HW_RS485CTRL 0x0090 +/* + * RW. This bit reverses the polarity of the direction control signal on the RTS + * (or DTR) pin. + * If 0, The direction control pin will be driven to logic ‘0’ when the + * transmitter has data to be sent. It will be driven to logic ‘1’ after the + * last bit of data has been transmitted. + */ +#define ASM9260_BM_RS485CTRL_ONIV BIT(5) +/* RW. Enable Auto Direction Control. */ +#define ASM9260_BM_RS485CTRL_DIR_CTRL BIT(4) +/* + * RW. If 0 and DIR_CTRL = 1, pin RTS is used for direction control. + * If 1 and DIR_CTRL = 1, pin DTR is used for direction control. + */ +#define ASM9260_BM_RS485CTRL_PINSEL BIT(3) +/* RW. Enable Auto Address Detect (AAD). */ +#define ASM9260_BM_RS485CTRL_AADEN BIT(2) +/* RW. Disable receiver. */ +#define ASM9260_BM_RS485CTRL_RXDIS BIT(1) +/* RW. Enable RS-485/EIA-485 Normal Multidrop Mode (NMM) */ +#define ASM9260_BM_RS485CTRL_RS485EN BIT(0) + +#define ASM9260_HW_RS485ADRMATCH 0x00a0 +/* Contains the address match value. */ +#define ASM9260_BM_RS485ADRMATCH_MASK (0xff << 0) + +#define ASM9260_HW_RS485DLY 0x00b0 +/* + * RW. Contains the direction control (RTS or DTR) delay value. This delay time + * is in periods of the baud clock. + */ +#define ASM9260_BM_RS485DLY_MASK (0xff << 0) + +#define ASM9260_HW_AUTOBAUD 0x00c0 +/* WO. Auto-baud time-out interrupt clear bit. */ +#define ASM9260_BM_AUTOBAUD_TO_INT_CLR BIT(9) +/* WO. End of auto-baud interrupt clear bit. */ +#define ASM9260_BM_AUTOBAUD_EO_INT_CLR BIT(8) +/* Restart in case of timeout (counter restarts at next UART Rx falling edge) */ +#define ASM9260_BM_AUTOBAUD_AUTORESTART BIT(2) +/* Auto-baud mode select bit. 0 - Mode 0, 1 - Mode 1. */ +#define ASM9260_BM_AUTOBAUD_MODE BIT(1) +/* + * Auto-baud start (auto-baud is running). Auto-baud run bit. This bit is + * automatically cleared after auto-baud completion. 
+ */ +#define ASM9260_BM_AUTOBAUD_START BIT(0) + +#define ASM9260_HW_CTRL3 0x00d0 +#define ASM9260_BM_CTRL3_OUTCLK_DIV_MASK (0xffff << 16) +/* + * RW. Provide clk over OUTCLK pin. In case of asm9260 it can be configured on + * pins 137 and 144. + */ +#define ASM9260_BM_CTRL3_MASTERMODE BIT(6) +/* RW. Baud Rate Mode: 1 - Enable sync mode. 0 - async mode. */ +#define ASM9260_BM_CTRL3_SYNCMODE BIT(4) +/* RW. 1 - MSB bit send frist; 0 - LSB bit frist. */ +#define ASM9260_BM_CTRL3_MSBF BIT(2) +/* RW. 1 - sample rate = 8 x Baudrate; 0 - sample rate = 16 x Baudrate. */ +#define ASM9260_BM_CTRL3_BAUD8 BIT(1) +/* RW. 1 - Set word length to 9bit. 0 - use ASM9260_BM_LCTRL_WLEN */ +#define ASM9260_BM_CTRL3_9BIT BIT(0) + +#define ASM9260_HW_ISO7816_CTRL 0x00e0 +/* RW. Enable High Speed mode. */ +#define ASM9260_BM_ISO7816CTRL_HS BIT(12) +/* Disable Successive Receive NACK */ +#define ASM9260_BM_ISO7816CTRL_DS_NACK BIT(8) +#define ASM9260_BM_ISO7816CTRL_MAX_ITER_MASK (0xff << 4) +/* Receive NACK Inhibit */ +#define ASM9260_BM_ISO7816CTRL_INACK BIT(3) +#define ASM9260_BM_ISO7816CTRL_NEG_DATA BIT(2) +/* RW. 1 - ISO7816 mode; 0 - USART mode */ +#define ASM9260_BM_ISO7816CTRL_ENABLE BIT(0) + +#define ASM9260_HW_ISO7816_ERRCNT 0x00f0 +/* Parity error counter. Will be cleared after reading */ +#define ASM9260_BM_ISO7816_NB_ERRORS_MASK (0xff << 0) + +#define ASM9260_HW_ISO7816_STATUS 0x0100 +/* Max number of Repetitions Reached */ +#define ASM9260_BM_ISO7816_STAT_ITERATION BIT(0) + +/* End of Alphascale asm9260 defines */ + +static struct uart_driver auart_driver; + +enum mxs_auart_type { + IMX23_AUART, + IMX28_AUART, + ASM9260_AUART, +}; + +struct vendor_data { + const u16 *reg_offset; +}; + +enum { + REG_CTRL0, + REG_CTRL1, + REG_CTRL2, + REG_LINECTRL, + REG_LINECTRL2, + REG_INTR, + REG_DATA, + REG_STAT, + REG_DEBUG, + REG_VERSION, + REG_AUTOBAUD, + + /* The size of the array - must be last */ + REG_ARRAY_SIZE, +}; + +static const u16 mxs_asm9260_offsets[REG_ARRAY_SIZE] = { + [REG_CTRL0] = ASM9260_HW_CTRL0, + [REG_CTRL1] = ASM9260_HW_CTRL1, + [REG_CTRL2] = ASM9260_HW_CTRL2, + [REG_LINECTRL] = ASM9260_HW_LINECTRL, + [REG_INTR] = ASM9260_HW_INTR, + [REG_DATA] = ASM9260_HW_DATA, + [REG_STAT] = ASM9260_HW_STAT, + [REG_DEBUG] = ASM9260_HW_DEBUG, + [REG_AUTOBAUD] = ASM9260_HW_AUTOBAUD, +}; + +static const u16 mxs_stmp37xx_offsets[REG_ARRAY_SIZE] = { + [REG_CTRL0] = AUART_CTRL0, + [REG_CTRL1] = AUART_CTRL1, + [REG_CTRL2] = AUART_CTRL2, + [REG_LINECTRL] = AUART_LINECTRL, + [REG_LINECTRL2] = AUART_LINECTRL2, + [REG_INTR] = AUART_INTR, + [REG_DATA] = AUART_DATA, + [REG_STAT] = AUART_STAT, + [REG_DEBUG] = AUART_DEBUG, + [REG_VERSION] = AUART_VERSION, + [REG_AUTOBAUD] = AUART_AUTOBAUD, +}; + +static const struct vendor_data vendor_alphascale_asm9260 = { + .reg_offset = mxs_asm9260_offsets, +}; + +static const struct vendor_data vendor_freescale_stmp37xx = { + .reg_offset = mxs_stmp37xx_offsets, +}; + +struct mxs_auart_port { + struct uart_port port; + +#define MXS_AUART_DMA_ENABLED 0x2 +#define MXS_AUART_DMA_TX_SYNC 2 /* bit 2 */ +#define MXS_AUART_DMA_RX_READY 3 /* bit 3 */ +#define MXS_AUART_RTSCTS 4 /* bit 4 */ + unsigned long flags; + unsigned int mctrl_prev; + enum mxs_auart_type devtype; + const struct vendor_data *vendor; + + struct clk *clk; + struct clk *clk_ahb; + struct device *dev; + + /* for DMA */ + struct scatterlist tx_sgl; + struct dma_chan *tx_dma_chan; + void *tx_dma_buf; + + struct scatterlist rx_sgl; + struct dma_chan *rx_dma_chan; + void *rx_dma_buf; + + struct mctrl_gpios *gpios; + int 
gpio_irq[UART_GPIO_MAX]; + bool ms_irq_enabled; +}; + +static const struct of_device_id mxs_auart_dt_ids[] = { + { + .compatible = "fsl,imx28-auart", + .data = (const void *)IMX28_AUART + }, { + .compatible = "fsl,imx23-auart", + .data = (const void *)IMX23_AUART + }, { + .compatible = "alphascale,asm9260-auart", + .data = (const void *)ASM9260_AUART + }, { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_auart_dt_ids); + +static inline int is_imx28_auart(struct mxs_auart_port *s) +{ + return s->devtype == IMX28_AUART; +} + +static inline int is_asm9260_auart(struct mxs_auart_port *s) +{ + return s->devtype == ASM9260_AUART; +} + +static inline bool auart_dma_enabled(struct mxs_auart_port *s) +{ + return s->flags & MXS_AUART_DMA_ENABLED; +} + +static unsigned int mxs_reg_to_offset(const struct mxs_auart_port *uap, + unsigned int reg) +{ + return uap->vendor->reg_offset[reg]; +} + +static unsigned int mxs_read(const struct mxs_auart_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg); + + return readl_relaxed(addr); +} + +static void mxs_write(unsigned int val, struct mxs_auart_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg); + + writel_relaxed(val, addr); +} + +static void mxs_set(unsigned int val, struct mxs_auart_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg); + + writel_relaxed(val, addr + SET_REG); +} + +static void mxs_clr(unsigned int val, struct mxs_auart_port *uap, + unsigned int reg) +{ + void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg); + + writel_relaxed(val, addr + CLR_REG); +} + +static void mxs_auart_stop_tx(struct uart_port *u); + +#define to_auart_port(u) container_of(u, struct mxs_auart_port, port) + +static void mxs_auart_tx_chars(struct mxs_auart_port *s); + +static void dma_tx_callback(void *param) +{ + struct mxs_auart_port *s = param; + struct circ_buf *xmit = &s->port.state->xmit; + + dma_unmap_sg(s->dev, &s->tx_sgl, 1, DMA_TO_DEVICE); + + /* clear the bit used to serialize the DMA tx. */ + clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); + smp_mb__after_atomic(); + + /* wake up the possible processes. */ + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&s->port); + + mxs_auart_tx_chars(s); +} + +static int mxs_auart_dma_tx(struct mxs_auart_port *s, int size) +{ + struct dma_async_tx_descriptor *desc; + struct scatterlist *sgl = &s->tx_sgl; + struct dma_chan *channel = s->tx_dma_chan; + u32 pio; + + /* [1] : send PIO. Note, the first pio word is CTRL1. */ + pio = AUART_CTRL1_XFER_COUNT(size); + desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)&pio, + 1, DMA_TRANS_NONE, 0); + if (!desc) { + dev_err(s->dev, "step 1 error\n"); + return -EINVAL; + } + + /* [2] : set DMA buffer. 
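+	 * (The DMA_TRANS_NONE descriptor prepared in step [1] follows the
+	 * mxs-dma slave convention: its words are written as PIO values to
+	 * the peripheral registers, here the transfer count into CTRL1,
+	 * while this second descriptor moves the actual payload from
+	 * tx_dma_buf.)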
*/ + sg_init_one(sgl, s->tx_dma_buf, size); + dma_map_sg(s->dev, sgl, 1, DMA_TO_DEVICE); + desc = dmaengine_prep_slave_sg(channel, sgl, + 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(s->dev, "step 2 error\n"); + return -EINVAL; + } + + /* [3] : submit the DMA */ + desc->callback = dma_tx_callback; + desc->callback_param = s; + dmaengine_submit(desc); + dma_async_issue_pending(channel); + return 0; +} + +static void mxs_auart_tx_chars(struct mxs_auart_port *s) +{ + struct circ_buf *xmit = &s->port.state->xmit; + + if (auart_dma_enabled(s)) { + u32 i = 0; + int size; + void *buffer = s->tx_dma_buf; + + if (test_and_set_bit(MXS_AUART_DMA_TX_SYNC, &s->flags)) + return; + + while (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) { + size = min_t(u32, UART_XMIT_SIZE - i, + CIRC_CNT_TO_END(xmit->head, + xmit->tail, + UART_XMIT_SIZE)); + memcpy(buffer + i, xmit->buf + xmit->tail, size); + xmit->tail = (xmit->tail + size) & (UART_XMIT_SIZE - 1); + + i += size; + if (i >= UART_XMIT_SIZE) + break; + } + + if (uart_tx_stopped(&s->port)) + mxs_auart_stop_tx(&s->port); + + if (i) { + mxs_auart_dma_tx(s, i); + } else { + clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); + smp_mb__after_atomic(); + } + return; + } + + + while (!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF)) { + if (s->port.x_char) { + s->port.icount.tx++; + mxs_write(s->port.x_char, s, REG_DATA); + s->port.x_char = 0; + continue; + } + if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) { + s->port.icount.tx++; + mxs_write(xmit->buf[xmit->tail], s, REG_DATA); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + } else + break; + } + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&s->port); + + if (uart_circ_empty(&(s->port.state->xmit))) + mxs_clr(AUART_INTR_TXIEN, s, REG_INTR); + else + mxs_set(AUART_INTR_TXIEN, s, REG_INTR); + + if (uart_tx_stopped(&s->port)) + mxs_auart_stop_tx(&s->port); +} + +static void mxs_auart_rx_char(struct mxs_auart_port *s) +{ + int flag; + u32 stat; + u8 c; + + c = mxs_read(s, REG_DATA); + stat = mxs_read(s, REG_STAT); + + flag = TTY_NORMAL; + s->port.icount.rx++; + + if (stat & AUART_STAT_BERR) { + s->port.icount.brk++; + if (uart_handle_break(&s->port)) + goto out; + } else if (stat & AUART_STAT_PERR) { + s->port.icount.parity++; + } else if (stat & AUART_STAT_FERR) { + s->port.icount.frame++; + } + + /* + * Mask off conditions which should be ingored. 
+ */ + stat &= s->port.read_status_mask; + + if (stat & AUART_STAT_BERR) { + flag = TTY_BREAK; + } else if (stat & AUART_STAT_PERR) + flag = TTY_PARITY; + else if (stat & AUART_STAT_FERR) + flag = TTY_FRAME; + + if (stat & AUART_STAT_OERR) + s->port.icount.overrun++; + + if (uart_handle_sysrq_char(&s->port, c)) + goto out; + + uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag); +out: + mxs_write(stat, s, REG_STAT); +} + +static void mxs_auart_rx_chars(struct mxs_auart_port *s) +{ + u32 stat = 0; + + for (;;) { + stat = mxs_read(s, REG_STAT); + if (stat & AUART_STAT_RXFE) + break; + mxs_auart_rx_char(s); + } + + mxs_write(stat, s, REG_STAT); + tty_flip_buffer_push(&s->port.state->port); +} + +static int mxs_auart_request_port(struct uart_port *u) +{ + return 0; +} + +static int mxs_auart_verify_port(struct uart_port *u, + struct serial_struct *ser) +{ + if (u->type != PORT_UNKNOWN && u->type != PORT_IMX) + return -EINVAL; + return 0; +} + +static void mxs_auart_config_port(struct uart_port *u, int flags) +{ +} + +static const char *mxs_auart_type(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + return dev_name(s->dev); +} + +static void mxs_auart_release_port(struct uart_port *u) +{ +} + +static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) +{ + struct mxs_auart_port *s = to_auart_port(u); + + u32 ctrl = mxs_read(s, REG_CTRL2); + + ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS); + if (mctrl & TIOCM_RTS) { + if (uart_cts_enabled(u)) + ctrl |= AUART_CTRL2_RTSEN; + else + ctrl |= AUART_CTRL2_RTS; + } + + mxs_write(ctrl, s, REG_CTRL2); + + mctrl_gpio_set(s->gpios, mctrl); +} + +#define MCTRL_ANY_DELTA (TIOCM_RI | TIOCM_DSR | TIOCM_CD | TIOCM_CTS) +static u32 mxs_auart_modem_status(struct mxs_auart_port *s, u32 mctrl) +{ + u32 mctrl_diff; + + mctrl_diff = mctrl ^ s->mctrl_prev; + s->mctrl_prev = mctrl; + if (mctrl_diff & MCTRL_ANY_DELTA && s->ms_irq_enabled && + s->port.state != NULL) { + if (mctrl_diff & TIOCM_RI) + s->port.icount.rng++; + if (mctrl_diff & TIOCM_DSR) + s->port.icount.dsr++; + if (mctrl_diff & TIOCM_CD) + uart_handle_dcd_change(&s->port, mctrl & TIOCM_CD); + if (mctrl_diff & TIOCM_CTS) + uart_handle_cts_change(&s->port, mctrl & TIOCM_CTS); + + wake_up_interruptible(&s->port.state->port.delta_msr_wait); + } + return mctrl; +} + +static u32 mxs_auart_get_mctrl(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + u32 stat = mxs_read(s, REG_STAT); + u32 mctrl = 0; + + if (stat & AUART_STAT_CTS) + mctrl |= TIOCM_CTS; + + return mctrl_gpio_get(s->gpios, &mctrl); +} + +/* + * Enable modem status interrupts + */ +static void mxs_auart_enable_ms(struct uart_port *port) +{ + struct mxs_auart_port *s = to_auart_port(port); + + /* + * Interrupt should not be enabled twice + */ + if (s->ms_irq_enabled) + return; + + s->ms_irq_enabled = true; + + if (s->gpio_irq[UART_GPIO_CTS] >= 0) + enable_irq(s->gpio_irq[UART_GPIO_CTS]); + /* TODO: enable AUART_INTR_CTSMIEN otherwise */ + + if (s->gpio_irq[UART_GPIO_DSR] >= 0) + enable_irq(s->gpio_irq[UART_GPIO_DSR]); + + if (s->gpio_irq[UART_GPIO_RI] >= 0) + enable_irq(s->gpio_irq[UART_GPIO_RI]); + + if (s->gpio_irq[UART_GPIO_DCD] >= 0) + enable_irq(s->gpio_irq[UART_GPIO_DCD]); +} + +/* + * Disable modem status interrupts + */ +static void mxs_auart_disable_ms(struct uart_port *port) +{ + struct mxs_auart_port *s = to_auart_port(port); + + /* + * Interrupt should not be disabled twice + */ + if (!s->ms_irq_enabled) + return; + + s->ms_irq_enabled = false; + + if 
(s->gpio_irq[UART_GPIO_CTS] >= 0) + disable_irq(s->gpio_irq[UART_GPIO_CTS]); + /* TODO: disable AUART_INTR_CTSMIEN otherwise */ + + if (s->gpio_irq[UART_GPIO_DSR] >= 0) + disable_irq(s->gpio_irq[UART_GPIO_DSR]); + + if (s->gpio_irq[UART_GPIO_RI] >= 0) + disable_irq(s->gpio_irq[UART_GPIO_RI]); + + if (s->gpio_irq[UART_GPIO_DCD] >= 0) + disable_irq(s->gpio_irq[UART_GPIO_DCD]); +} + +static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s); +static void dma_rx_callback(void *arg) +{ + struct mxs_auart_port *s = (struct mxs_auart_port *) arg; + struct tty_port *port = &s->port.state->port; + int count; + u32 stat; + + dma_unmap_sg(s->dev, &s->rx_sgl, 1, DMA_FROM_DEVICE); + + stat = mxs_read(s, REG_STAT); + stat &= ~(AUART_STAT_OERR | AUART_STAT_BERR | + AUART_STAT_PERR | AUART_STAT_FERR); + + count = stat & AUART_STAT_RXCOUNT_MASK; + tty_insert_flip_string(port, s->rx_dma_buf, count); + + mxs_write(stat, s, REG_STAT); + tty_flip_buffer_push(port); + + /* start the next DMA for RX. */ + mxs_auart_dma_prep_rx(s); +} + +static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s) +{ + struct dma_async_tx_descriptor *desc; + struct scatterlist *sgl = &s->rx_sgl; + struct dma_chan *channel = s->rx_dma_chan; + u32 pio[1]; + + /* [1] : send PIO */ + pio[0] = AUART_CTRL0_RXTO_ENABLE + | AUART_CTRL0_RXTIMEOUT(0x80) + | AUART_CTRL0_XFER_COUNT(UART_XMIT_SIZE); + desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, + 1, DMA_TRANS_NONE, 0); + if (!desc) { + dev_err(s->dev, "step 1 error\n"); + return -EINVAL; + } + + /* [2] : send DMA request */ + sg_init_one(sgl, s->rx_dma_buf, UART_XMIT_SIZE); + dma_map_sg(s->dev, sgl, 1, DMA_FROM_DEVICE); + desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(s->dev, "step 2 error\n"); + return -1; + } + + /* [3] : submit the DMA, but do not issue it. 
*/ + desc->callback = dma_rx_callback; + desc->callback_param = s; + dmaengine_submit(desc); + dma_async_issue_pending(channel); + return 0; +} + +static void mxs_auart_dma_exit_channel(struct mxs_auart_port *s) +{ + if (s->tx_dma_chan) { + dma_release_channel(s->tx_dma_chan); + s->tx_dma_chan = NULL; + } + if (s->rx_dma_chan) { + dma_release_channel(s->rx_dma_chan); + s->rx_dma_chan = NULL; + } + + kfree(s->tx_dma_buf); + kfree(s->rx_dma_buf); + s->tx_dma_buf = NULL; + s->rx_dma_buf = NULL; +} + +static void mxs_auart_dma_exit(struct mxs_auart_port *s) +{ + + mxs_clr(AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE | AUART_CTRL2_DMAONERR, + s, REG_CTRL2); + + mxs_auart_dma_exit_channel(s); + s->flags &= ~MXS_AUART_DMA_ENABLED; + clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); + clear_bit(MXS_AUART_DMA_RX_READY, &s->flags); +} + +static int mxs_auart_dma_init(struct mxs_auart_port *s) +{ + if (auart_dma_enabled(s)) + return 0; + + /* init for RX */ + s->rx_dma_chan = dma_request_slave_channel(s->dev, "rx"); + if (!s->rx_dma_chan) + goto err_out; + s->rx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA); + if (!s->rx_dma_buf) + goto err_out; + + /* init for TX */ + s->tx_dma_chan = dma_request_slave_channel(s->dev, "tx"); + if (!s->tx_dma_chan) + goto err_out; + s->tx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA); + if (!s->tx_dma_buf) + goto err_out; + + /* set the flags */ + s->flags |= MXS_AUART_DMA_ENABLED; + dev_dbg(s->dev, "enabled the DMA support."); + + /* The DMA buffer is now the FIFO the TTY subsystem can use */ + s->port.fifosize = UART_XMIT_SIZE; + + return 0; + +err_out: + mxs_auart_dma_exit_channel(s); + return -EINVAL; + +} + +#define RTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_RTS) +#define CTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_CTS) +static void mxs_auart_settermios(struct uart_port *u, + struct ktermios *termios, + const struct ktermios *old) +{ + struct mxs_auart_port *s = to_auart_port(u); + u32 ctrl, ctrl2, div; + unsigned int cflag, baud, baud_min, baud_max; + + cflag = termios->c_cflag; + + ctrl = AUART_LINECTRL_FEN; + ctrl2 = mxs_read(s, REG_CTRL2); + + ctrl |= AUART_LINECTRL_WLEN(tty_get_char_size(cflag)); + + /* parity */ + if (cflag & PARENB) { + ctrl |= AUART_LINECTRL_PEN; + if ((cflag & PARODD) == 0) + ctrl |= AUART_LINECTRL_EPS; + if (cflag & CMSPAR) + ctrl |= AUART_LINECTRL_SPS; + } + + u->read_status_mask = AUART_STAT_OERR; + + if (termios->c_iflag & INPCK) + u->read_status_mask |= AUART_STAT_PERR; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + u->read_status_mask |= AUART_STAT_BERR; + + /* + * Characters to ignore + */ + u->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + u->ignore_status_mask |= AUART_STAT_PERR; + if (termios->c_iflag & IGNBRK) { + u->ignore_status_mask |= AUART_STAT_BERR; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + u->ignore_status_mask |= AUART_STAT_OERR; + } + + /* + * ignore all characters if CREAD is not set + */ + if (cflag & CREAD) + ctrl2 |= AUART_CTRL2_RXE; + else + ctrl2 &= ~AUART_CTRL2_RXE; + + /* figure out the stop bits requested */ + if (cflag & CSTOPB) + ctrl |= AUART_LINECTRL_STP2; + + /* figure out the hardware flow control settings */ + ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN); + if (cflag & CRTSCTS) { + /* + * The DMA has a bug(see errata:2836) in mx23. 
+ * So we can not implement the DMA for auart in mx23, + * we can only implement the DMA support for auart + * in mx28. + */ + if (is_imx28_auart(s) + && test_bit(MXS_AUART_RTSCTS, &s->flags)) { + if (!mxs_auart_dma_init(s)) + /* enable DMA tranfer */ + ctrl2 |= AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE + | AUART_CTRL2_DMAONERR; + } + /* Even if RTS is GPIO line RTSEN can be enabled because + * the pinctrl configuration decides about RTS pin function */ + ctrl2 |= AUART_CTRL2_RTSEN; + if (CTS_AT_AUART()) + ctrl2 |= AUART_CTRL2_CTSEN; + } + + /* set baud rate */ + if (is_asm9260_auart(s)) { + baud = uart_get_baud_rate(u, termios, old, + u->uartclk * 4 / 0x3FFFFF, + u->uartclk / 16); + div = u->uartclk * 4 / baud; + } else { + baud_min = DIV_ROUND_UP(u->uartclk * 32, + AUART_LINECTRL_BAUD_DIV_MAX); + baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; + baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); + div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud); + } + + ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); + ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6); + mxs_write(ctrl, s, REG_LINECTRL); + + mxs_write(ctrl2, s, REG_CTRL2); + + uart_update_timeout(u, termios->c_cflag, baud); + + /* prepare for the DMA RX. */ + if (auart_dma_enabled(s) && + !test_and_set_bit(MXS_AUART_DMA_RX_READY, &s->flags)) { + if (!mxs_auart_dma_prep_rx(s)) { + /* Disable the normal RX interrupt. */ + mxs_clr(AUART_INTR_RXIEN | AUART_INTR_RTIEN, + s, REG_INTR); + } else { + mxs_auart_dma_exit(s); + dev_err(s->dev, "We can not start up the DMA.\n"); + } + } + + /* CTS flow-control and modem-status interrupts */ + if (UART_ENABLE_MS(u, termios->c_cflag)) + mxs_auart_enable_ms(u); + else + mxs_auart_disable_ms(u); +} + +static void mxs_auart_set_ldisc(struct uart_port *port, + struct ktermios *termios) +{ + if (termios->c_line == N_PPS) { + port->flags |= UPF_HARDPPS_CD; + mxs_auart_enable_ms(port); + } else { + port->flags &= ~UPF_HARDPPS_CD; + } +} + +static irqreturn_t mxs_auart_irq_handle(int irq, void *context) +{ + u32 istat; + struct mxs_auart_port *s = context; + u32 mctrl_temp = s->mctrl_prev; + u32 stat = mxs_read(s, REG_STAT); + + istat = mxs_read(s, REG_INTR); + + /* ack irq */ + mxs_clr(istat & (AUART_INTR_RTIS | AUART_INTR_TXIS | AUART_INTR_RXIS + | AUART_INTR_CTSMIS), s, REG_INTR); + + /* + * Dealing with GPIO interrupt + */ + if (irq == s->gpio_irq[UART_GPIO_CTS] || + irq == s->gpio_irq[UART_GPIO_DCD] || + irq == s->gpio_irq[UART_GPIO_DSR] || + irq == s->gpio_irq[UART_GPIO_RI]) + mxs_auart_modem_status(s, + mctrl_gpio_get(s->gpios, &mctrl_temp)); + + if (istat & AUART_INTR_CTSMIS) { + if (CTS_AT_AUART() && s->ms_irq_enabled) + uart_handle_cts_change(&s->port, + stat & AUART_STAT_CTS); + mxs_clr(AUART_INTR_CTSMIS, s, REG_INTR); + istat &= ~AUART_INTR_CTSMIS; + } + + if (istat & (AUART_INTR_RTIS | AUART_INTR_RXIS)) { + if (!auart_dma_enabled(s)) + mxs_auart_rx_chars(s); + istat &= ~(AUART_INTR_RTIS | AUART_INTR_RXIS); + } + + if (istat & AUART_INTR_TXIS) { + mxs_auart_tx_chars(s); + istat &= ~AUART_INTR_TXIS; + } + + return IRQ_HANDLED; +} + +static void mxs_auart_reset_deassert(struct mxs_auart_port *s) +{ + int i; + unsigned int reg; + + mxs_clr(AUART_CTRL0_SFTRST, s, REG_CTRL0); + + for (i = 0; i < 10000; i++) { + reg = mxs_read(s, REG_CTRL0); + if (!(reg & AUART_CTRL0_SFTRST)) + break; + udelay(3); + } + mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); +} + +static void mxs_auart_reset_assert(struct mxs_auart_port *s) +{ + int i; + u32 reg; + + reg = mxs_read(s, REG_CTRL0); + /* if already in 
reset state, keep it untouched */ + if (reg & AUART_CTRL0_SFTRST) + return; + + mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); + mxs_set(AUART_CTRL0_SFTRST, s, REG_CTRL0); + + for (i = 0; i < 1000; i++) { + reg = mxs_read(s, REG_CTRL0); + /* reset is finished when the clock is gated */ + if (reg & AUART_CTRL0_CLKGATE) + return; + udelay(10); + } + + dev_err(s->dev, "Failed to reset the unit."); +} + +static int mxs_auart_startup(struct uart_port *u) +{ + int ret; + struct mxs_auart_port *s = to_auart_port(u); + + ret = clk_prepare_enable(s->clk); + if (ret) + return ret; + + if (uart_console(u)) { + mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); + } else { + /* reset the unit to a well known state */ + mxs_auart_reset_assert(s); + mxs_auart_reset_deassert(s); + } + + mxs_set(AUART_CTRL2_UARTEN, s, REG_CTRL2); + + mxs_write(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN, + s, REG_INTR); + + /* Reset FIFO size (it could have changed if DMA was enabled) */ + u->fifosize = MXS_AUART_FIFO_SIZE; + + /* + * Enable fifo so all four bytes of a DMA word are written to + * output (otherwise, only the LSB is written, ie. 1 in 4 bytes) + */ + mxs_set(AUART_LINECTRL_FEN, s, REG_LINECTRL); + + /* get initial status of modem lines */ + mctrl_gpio_get(s->gpios, &s->mctrl_prev); + + s->ms_irq_enabled = false; + return 0; +} + +static void mxs_auart_shutdown(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + mxs_auart_disable_ms(u); + + if (auart_dma_enabled(s)) + mxs_auart_dma_exit(s); + + if (uart_console(u)) { + mxs_clr(AUART_CTRL2_UARTEN, s, REG_CTRL2); + + mxs_clr(AUART_INTR_RXIEN | AUART_INTR_RTIEN | + AUART_INTR_CTSMIEN, s, REG_INTR); + mxs_set(AUART_CTRL0_CLKGATE, s, REG_CTRL0); + } else { + mxs_auart_reset_assert(s); + } + + clk_disable_unprepare(s->clk); +} + +static unsigned int mxs_auart_tx_empty(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + if ((mxs_read(s, REG_STAT) & + (AUART_STAT_TXFE | AUART_STAT_BUSY)) == AUART_STAT_TXFE) + return TIOCSER_TEMT; + + return 0; +} + +static void mxs_auart_start_tx(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + /* enable transmitter */ + mxs_set(AUART_CTRL2_TXE, s, REG_CTRL2); + + mxs_auart_tx_chars(s); +} + +static void mxs_auart_stop_tx(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + mxs_clr(AUART_CTRL2_TXE, s, REG_CTRL2); +} + +static void mxs_auart_stop_rx(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + mxs_clr(AUART_CTRL2_RXE, s, REG_CTRL2); +} + +static void mxs_auart_break_ctl(struct uart_port *u, int ctl) +{ + struct mxs_auart_port *s = to_auart_port(u); + + if (ctl) + mxs_set(AUART_LINECTRL_BRK, s, REG_LINECTRL); + else + mxs_clr(AUART_LINECTRL_BRK, s, REG_LINECTRL); +} + +static const struct uart_ops mxs_auart_ops = { + .tx_empty = mxs_auart_tx_empty, + .start_tx = mxs_auart_start_tx, + .stop_tx = mxs_auart_stop_tx, + .stop_rx = mxs_auart_stop_rx, + .enable_ms = mxs_auart_enable_ms, + .break_ctl = mxs_auart_break_ctl, + .set_mctrl = mxs_auart_set_mctrl, + .get_mctrl = mxs_auart_get_mctrl, + .startup = mxs_auart_startup, + .shutdown = mxs_auart_shutdown, + .set_termios = mxs_auart_settermios, + .set_ldisc = mxs_auart_set_ldisc, + .type = mxs_auart_type, + .release_port = mxs_auart_release_port, + .request_port = mxs_auart_request_port, + .config_port = mxs_auart_config_port, + .verify_port = mxs_auart_verify_port, +}; + +static struct mxs_auart_port *auart_port[MXS_AUART_PORTS]; + +#ifdef 
CONFIG_SERIAL_MXS_AUART_CONSOLE +static void mxs_auart_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct mxs_auart_port *s = to_auart_port(port); + unsigned int to = 1000; + + while (mxs_read(s, REG_STAT) & AUART_STAT_TXFF) { + if (!to--) + break; + udelay(1); + } + + mxs_write(ch, s, REG_DATA); +} + +static void +auart_console_write(struct console *co, const char *str, unsigned int count) +{ + struct mxs_auart_port *s; + struct uart_port *port; + unsigned int old_ctrl0, old_ctrl2; + unsigned int to = 20000; + + if (co->index >= MXS_AUART_PORTS || co->index < 0) + return; + + s = auart_port[co->index]; + port = &s->port; + + clk_enable(s->clk); + + /* First save the CR then disable the interrupts */ + old_ctrl2 = mxs_read(s, REG_CTRL2); + old_ctrl0 = mxs_read(s, REG_CTRL0); + + mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); + mxs_set(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE, s, REG_CTRL2); + + uart_console_write(port, str, count, mxs_auart_console_putchar); + + /* Finally, wait for transmitter to become empty ... */ + while (mxs_read(s, REG_STAT) & AUART_STAT_BUSY) { + udelay(1); + if (!to--) + break; + } + + /* + * ... and restore the TCR if we waited long enough for the transmitter + * to be idle. This might keep the transmitter enabled although it is + * unused, but that is better than to disable it while it is still + * transmitting. + */ + if (!(mxs_read(s, REG_STAT) & AUART_STAT_BUSY)) { + mxs_write(old_ctrl0, s, REG_CTRL0); + mxs_write(old_ctrl2, s, REG_CTRL2); + } + + clk_disable(s->clk); +} + +static void __init +auart_console_get_options(struct mxs_auart_port *s, int *baud, + int *parity, int *bits) +{ + struct uart_port *port = &s->port; + unsigned int lcr_h, quot; + + if (!(mxs_read(s, REG_CTRL2) & AUART_CTRL2_UARTEN)) + return; + + lcr_h = mxs_read(s, REG_LINECTRL); + + *parity = 'n'; + if (lcr_h & AUART_LINECTRL_PEN) { + if (lcr_h & AUART_LINECTRL_EPS) + *parity = 'e'; + else + *parity = 'o'; + } + + if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(7)) + *bits = 7; + else + *bits = 8; + + quot = ((mxs_read(s, REG_LINECTRL) & AUART_LINECTRL_BAUD_DIVINT_MASK)) + >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6); + quot |= ((mxs_read(s, REG_LINECTRL) & AUART_LINECTRL_BAUD_DIVFRAC_MASK)) + >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT; + if (quot == 0) + quot = 1; + + *baud = (port->uartclk << 2) / quot; +} + +static int __init +auart_console_setup(struct console *co, char *options) +{ + struct mxs_auart_port *s; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
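
auart_console_write() above saves CTRL0/CTRL2, force-enables the UART and transmitter for the duration of the message, and restores the saved values only after the transmitter has gone idle, so an in-flight character is never cut off. A user-space sketch of that save/force/restore-when-idle pattern (the register accessors and bit values here are illustrative placeholders, not the real AUART ones):

#include <stdio.h>

static unsigned int ctrl2_reg;          /* models REG_CTRL2 */
#define DEMO_UARTEN 0x1                 /* illustrative bits, not the HW values */
#define DEMO_TXE    0x2

static unsigned int read_ctrl2(void)        { return ctrl2_reg; }
static void write_ctrl2(unsigned int v)     { ctrl2_reg = v; }
static int tx_idle(void)                    { return 1; }  /* pretend TX drained */

static void console_write(const char *s)
{
        unsigned int old = read_ctrl2();          /* 1. save current state */

        write_ctrl2(old | DEMO_UARTEN | DEMO_TXE); /* 2. force UART + TX on */
        printf("%s", s);                          /* 3. polled character output */
        if (tx_idle())                            /* 4. restore only once idle, so */
                write_ctrl2(old);                 /*    an active TX isn't cut off */
}

int main(void)
{
        console_write("hello from the console path\n");
        printf("CTRL2 restored to 0x%x\n", ctrl2_reg);
        return 0;
}
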
+ */ + if (co->index == -1 || co->index >= ARRAY_SIZE(auart_port)) + co->index = 0; + s = auart_port[co->index]; + if (!s) + return -ENODEV; + + ret = clk_prepare_enable(s->clk); + if (ret) + return ret; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + auart_console_get_options(s, &baud, &parity, &bits); + + ret = uart_set_options(&s->port, co, baud, parity, bits, flow); + + clk_disable_unprepare(s->clk); + + return ret; +} + +static struct console auart_console = { + .name = "ttyAPP", + .write = auart_console_write, + .device = uart_console_device, + .setup = auart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &auart_driver, +}; +#endif + +static struct uart_driver auart_driver = { + .owner = THIS_MODULE, + .driver_name = "ttyAPP", + .dev_name = "ttyAPP", + .major = 0, + .minor = 0, + .nr = MXS_AUART_PORTS, +#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE + .cons = &auart_console, +#endif +}; + +static void mxs_init_regs(struct mxs_auart_port *s) +{ + if (is_asm9260_auart(s)) + s->vendor = &vendor_alphascale_asm9260; + else + s->vendor = &vendor_freescale_stmp37xx; +} + +static int mxs_get_clks(struct mxs_auart_port *s, + struct platform_device *pdev) +{ + int err; + + if (!is_asm9260_auart(s)) { + s->clk = devm_clk_get(&pdev->dev, NULL); + return PTR_ERR_OR_ZERO(s->clk); + } + + s->clk = devm_clk_get(s->dev, "mod"); + if (IS_ERR(s->clk)) { + dev_err(s->dev, "Failed to get \"mod\" clk\n"); + return PTR_ERR(s->clk); + } + + s->clk_ahb = devm_clk_get(s->dev, "ahb"); + if (IS_ERR(s->clk_ahb)) { + dev_err(s->dev, "Failed to get \"ahb\" clk\n"); + return PTR_ERR(s->clk_ahb); + } + + err = clk_prepare_enable(s->clk_ahb); + if (err) { + dev_err(s->dev, "Failed to enable ahb_clk!\n"); + return err; + } + + err = clk_set_rate(s->clk, clk_get_rate(s->clk_ahb)); + if (err) { + dev_err(s->dev, "Failed to set rate!\n"); + goto disable_clk_ahb; + } + + err = clk_prepare_enable(s->clk); + if (err) { + dev_err(s->dev, "Failed to enable clk!\n"); + goto disable_clk_ahb; + } + + return 0; + +disable_clk_ahb: + clk_disable_unprepare(s->clk_ahb); + return err; +} + +static int mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev) +{ + enum mctrl_gpio_idx i; + struct gpio_desc *gpiod; + + s->gpios = mctrl_gpio_init_noauto(dev, 0); + if (IS_ERR(s->gpios)) + return PTR_ERR(s->gpios); + + /* Block (enabled before) DMA option if RTS or CTS is GPIO line */ + if (!RTS_AT_AUART() || !CTS_AT_AUART()) { + if (test_bit(MXS_AUART_RTSCTS, &s->flags)) + dev_warn(dev, + "DMA and flow control via gpio may cause some problems. 
DMA disabled!\n"); + clear_bit(MXS_AUART_RTSCTS, &s->flags); + } + + for (i = 0; i < UART_GPIO_MAX; i++) { + gpiod = mctrl_gpio_to_gpiod(s->gpios, i); + if (gpiod && (gpiod_get_direction(gpiod) == 1)) + s->gpio_irq[i] = gpiod_to_irq(gpiod); + else + s->gpio_irq[i] = -EINVAL; + } + + return 0; +} + +static void mxs_auart_free_gpio_irq(struct mxs_auart_port *s) +{ + enum mctrl_gpio_idx i; + + for (i = 0; i < UART_GPIO_MAX; i++) + if (s->gpio_irq[i] >= 0) + free_irq(s->gpio_irq[i], s); +} + +static int mxs_auart_request_gpio_irq(struct mxs_auart_port *s) +{ + int *irq = s->gpio_irq; + enum mctrl_gpio_idx i; + int err = 0; + + for (i = 0; (i < UART_GPIO_MAX) && !err; i++) { + if (irq[i] < 0) + continue; + + irq_set_status_flags(irq[i], IRQ_NOAUTOEN); + err = request_irq(irq[i], mxs_auart_irq_handle, + IRQ_TYPE_EDGE_BOTH, dev_name(s->dev), s); + if (err) + dev_err(s->dev, "%s - Can't get %d irq\n", + __func__, irq[i]); + } + + /* + * If something went wrong, rollback. + * Be careful: i may be unsigned. + */ + while (err && (i-- > 0)) + if (irq[i] >= 0) + free_irq(irq[i], s); + + return err; +} + +static int mxs_auart_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mxs_auart_port *s; + u32 version; + int ret, irq; + struct resource *r; + + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->port.dev = &pdev->dev; + s->dev = &pdev->dev; + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias id: %d\n", ret); + return ret; + } + s->port.line = ret; + + if (of_get_property(np, "uart-has-rtscts", NULL) || + of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */) + set_bit(MXS_AUART_RTSCTS, &s->flags); + + if (s->port.line >= ARRAY_SIZE(auart_port)) { + dev_err(&pdev->dev, "serial%d out of range\n", s->port.line); + return -EINVAL; + } + + s->devtype = (enum mxs_auart_type)of_device_get_match_data(&pdev->dev); + + ret = mxs_get_clks(s, pdev); + if (ret) + return ret; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + ret = -ENXIO; + goto out_disable_clks; + } + + s->port.mapbase = r->start; + s->port.membase = ioremap(r->start, resource_size(r)); + if (!s->port.membase) { + ret = -ENOMEM; + goto out_disable_clks; + } + s->port.ops = &mxs_auart_ops; + s->port.iotype = UPIO_MEM; + s->port.fifosize = MXS_AUART_FIFO_SIZE; + s->port.uartclk = clk_get_rate(s->clk); + s->port.type = PORT_IMX; + s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_MXS_AUART_CONSOLE); + + mxs_init_regs(s); + + s->mctrl_prev = 0; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out_iounmap; + } + + s->port.irq = irq; + ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0, + dev_name(&pdev->dev), s); + if (ret) + goto out_iounmap; + + platform_set_drvdata(pdev, s); + + ret = mxs_auart_init_gpios(s, &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); + goto out_iounmap; + } + + /* + * Get the GPIO lines IRQ + */ + ret = mxs_auart_request_gpio_irq(s); + if (ret) + goto out_iounmap; + + auart_port[s->port.line] = s; + + mxs_auart_reset_deassert(s); + + ret = uart_add_one_port(&auart_driver, &s->port); + if (ret) + goto out_free_qpio_irq; + + /* ASM9260 don't have version reg */ + if (is_asm9260_auart(s)) { + dev_info(&pdev->dev, "Found APPUART ASM9260\n"); + } else { + version = mxs_read(s, REG_VERSION); + dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n", + (version >> 24) & 0xff, + (version >> 16) & 0xff, version & 
0xffff); + } + + return 0; + +out_free_qpio_irq: + mxs_auart_free_gpio_irq(s); + auart_port[pdev->id] = NULL; + +out_iounmap: + iounmap(s->port.membase); + +out_disable_clks: + if (is_asm9260_auart(s)) { + clk_disable_unprepare(s->clk); + clk_disable_unprepare(s->clk_ahb); + } + return ret; +} + +static int mxs_auart_remove(struct platform_device *pdev) +{ + struct mxs_auart_port *s = platform_get_drvdata(pdev); + + uart_remove_one_port(&auart_driver, &s->port); + auart_port[pdev->id] = NULL; + mxs_auart_free_gpio_irq(s); + iounmap(s->port.membase); + if (is_asm9260_auart(s)) { + clk_disable_unprepare(s->clk); + clk_disable_unprepare(s->clk_ahb); + } + + return 0; +} + +static struct platform_driver mxs_auart_driver = { + .probe = mxs_auart_probe, + .remove = mxs_auart_remove, + .driver = { + .name = "mxs-auart", + .of_match_table = mxs_auart_dt_ids, + }, +}; + +static int __init mxs_auart_init(void) +{ + int r; + + r = uart_register_driver(&auart_driver); + if (r) + goto out; + + r = platform_driver_register(&mxs_auart_driver); + if (r) + goto out_err; + + return 0; +out_err: + uart_unregister_driver(&auart_driver); +out: + return r; +} + +static void __exit mxs_auart_exit(void) +{ + platform_driver_unregister(&mxs_auart_driver); + uart_unregister_driver(&auart_driver); +} + +module_init(mxs_auart_init); +module_exit(mxs_auart_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Freescale MXS application uart driver"); +MODULE_ALIAS("platform:mxs-auart"); diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c new file mode 100644 index 000000000..beb7896eb --- /dev/null +++ b/drivers/tty/serial/omap-serial.c @@ -0,0 +1,1874 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for OMAP-UART controller. + * Based on drivers/serial/8250.c + * + * Copyright (C) 2010 Texas Instruments. + * + * Authors: + * Govindraj R + * Thara Gopinath + * + * Note: This driver is made separate from 8250 driver as we cannot + * over load 8250 driver with omap platform specific configuration for + * features like DMA, it makes easier to implement features like DMA and + * hardware flow control and software flow control configuration with + * this driver as required for the omap-platform. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OMAP_MAX_HSUART_PORTS 10 + +#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y)) + +#define OMAP_UART_REV_42 0x0402 +#define OMAP_UART_REV_46 0x0406 +#define OMAP_UART_REV_52 0x0502 +#define OMAP_UART_REV_63 0x0603 + +#define OMAP_UART_TX_WAKEUP_EN BIT(7) + +/* Feature flags */ +#define OMAP_UART_WER_HAS_TX_WAKEUP BIT(0) + +#define UART_ERRATA_i202_MDR1_ACCESS BIT(0) +#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1) + +#define DEFAULT_CLK_SPEED 48000000 /* 48Mhz */ + +/* SCR register bitmasks */ +#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7) +#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6) +#define OMAP_UART_SCR_TX_EMPTY (1 << 3) + +/* FCR register bitmasks */ +#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK (0x3 << 6) +#define OMAP_UART_FCR_TX_FIFO_TRIG_MASK (0x3 << 4) + +/* MVR register bitmasks */ +#define OMAP_UART_MVR_SCHEME_SHIFT 30 + +#define OMAP_UART_LEGACY_MVR_MAJ_MASK 0xf0 +#define OMAP_UART_LEGACY_MVR_MAJ_SHIFT 4 +#define OMAP_UART_LEGACY_MVR_MIN_MASK 0x0f + +#define OMAP_UART_MVR_MAJ_MASK 0x700 +#define OMAP_UART_MVR_MAJ_SHIFT 8 +#define OMAP_UART_MVR_MIN_MASK 0x3f + +#define OMAP_UART_DMA_CH_FREE -1 + +#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA +#define OMAP_MODE13X_SPEED 230400 + +/* WER = 0x7F + * Enable module level wakeup in WER reg + */ +#define OMAP_UART_WER_MOD_WKUP 0x7F + +/* Enable XON/XOFF flow control on output */ +#define OMAP_UART_SW_TX 0x08 + +/* Enable XON/XOFF flow control on input */ +#define OMAP_UART_SW_RX 0x02 + +#define OMAP_UART_SW_CLR 0xF0 + +#define OMAP_UART_TCR_TRIG 0x0F + +struct uart_omap_dma { + u8 uart_dma_tx; + u8 uart_dma_rx; + int rx_dma_channel; + int tx_dma_channel; + dma_addr_t rx_buf_dma_phys; + dma_addr_t tx_buf_dma_phys; + unsigned int uart_base; + /* + * Buffer for rx dma. It is not required for tx because the buffer + * comes from port structure. + */ + unsigned char *rx_buf; + unsigned int prev_rx_dma_pos; + int tx_buf_size; + int tx_dma_used; + int rx_dma_used; + spinlock_t tx_lock; + spinlock_t rx_lock; + /* timer to poll activity on rx dma */ + struct timer_list rx_timer; + unsigned int rx_buf_size; + unsigned int rx_poll_rate; + unsigned int rx_timeout; +}; + +struct uart_omap_port { + struct uart_port port; + struct uart_omap_dma uart_dma; + struct device *dev; + int wakeirq; + + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char fcr; + unsigned char efr; + unsigned char dll; + unsigned char dlh; + unsigned char mdr1; + unsigned char scr; + unsigned char wer; + + int use_dma; + /* + * Some bits in registers are cleared on a read, so they must + * be saved whenever the register is read, but the bits will not + * be immediately processed. 
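
The OMAP_UART_MVR_* masks and UART_BUILD_REVISION() defined above are what omap_serial_fill_features_erratas() later uses to turn the raw module version register into the driver's revision code. A small stand-alone decode of the "new scheme" case, using a made-up raw MVR value (the constants are copied from the defines above; the register value itself is hypothetical):

#include <stdio.h>

/* Constants copied from the defines above. */
#define UART_BUILD_REVISION(x, y)   (((x) << 8) | (y))
#define OMAP_UART_MVR_SCHEME_SHIFT  30
#define OMAP_UART_MVR_MAJ_MASK      0x700
#define OMAP_UART_MVR_MAJ_SHIFT     8
#define OMAP_UART_MVR_MIN_MASK      0x3f

int main(void)
{
        /* Hypothetical raw register value: scheme 1, major 6, minor 3. */
        unsigned int mvr = (1u << 30) | (6u << 8) | 3u;
        unsigned int scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
        unsigned int major = (mvr & OMAP_UART_MVR_MAJ_MASK) >> OMAP_UART_MVR_MAJ_SHIFT;
        unsigned int minor = mvr & OMAP_UART_MVR_MIN_MASK;

        /* Prints 0x0603, i.e. OMAP_UART_REV_63, which enables TX wakeup. */
        printf("scheme %u -> revision 0x%04x\n", scheme,
               UART_BUILD_REVISION(major, minor));
        return 0;
}
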
+ */ + unsigned int lsr_break_flag; + unsigned char msr_saved_flags; + char name[20]; + unsigned long port_activity; + int context_loss_cnt; + u32 errata; + u32 features; + + struct gpio_desc *rts_gpiod; + + struct pm_qos_request pm_qos_request; + u32 latency; + u32 calc_latency; + struct work_struct qos_work; + bool is_suspending; + + unsigned int rs485_tx_filter_count; +}; + +#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port))) + +static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS]; + +/* Forward declaration of functions */ +static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1); + +static inline unsigned int serial_in(struct uart_omap_port *up, int offset) +{ + offset <<= up->port.regshift; + return readw(up->port.membase + offset); +} + +static inline void serial_out(struct uart_omap_port *up, int offset, int value) +{ + offset <<= up->port.regshift; + writew(value, up->port.membase + offset); +} + +static inline void serial_omap_clear_fifos(struct uart_omap_port *up) +{ + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + serial_out(up, UART_FCR, 0); +} + +#ifdef CONFIG_PM +static int serial_omap_get_context_loss_count(struct uart_omap_port *up) +{ + struct omap_uart_port_info *pdata = dev_get_platdata(up->dev); + + if (!pdata || !pdata->get_context_loss_count) + return -EINVAL; + + return pdata->get_context_loss_count(up->dev); +} + +/* REVISIT: Remove this when omap3 boots in device tree only mode */ +static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) +{ + struct omap_uart_port_info *pdata = dev_get_platdata(up->dev); + + if (!pdata || !pdata->enable_wakeup) + return; + + pdata->enable_wakeup(up->dev, enable); +} +#endif /* CONFIG_PM */ + +/* + * Calculate the absolute difference between the desired and actual baud + * rate for the given mode. + */ +static inline int calculate_baud_abs_diff(struct uart_port *port, + unsigned int baud, unsigned int mode) +{ + unsigned int n = port->uartclk / (mode * baud); + int abs_diff; + + if (n == 0) + n = 1; + + abs_diff = baud - (port->uartclk / (mode * n)); + if (abs_diff < 0) + abs_diff = -abs_diff; + + return abs_diff; +} + +/* + * serial_omap_baud_is_mode16 - check if baud rate is MODE16X + * @port: uart port info + * @baud: baudrate for which mode needs to be determined + * + * Returns true if baud rate is MODE16X and false if MODE13X + * Original table in OMAP TRM named "UART Mode Baud Rates, Divisor Values, + * and Error Rates" determines modes not for all common baud rates. + * E.g. for 1000000 baud rate mode must be 16x, but according to that + * table it's determined as 13x. + */ +static bool +serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud) +{ + int abs_diff_13 = calculate_baud_abs_diff(port, baud, 13); + int abs_diff_16 = calculate_baud_abs_diff(port, baud, 16); + + return (abs_diff_13 >= abs_diff_16); +} + +/* + * serial_omap_get_divisor - calculate divisor value + * @port: uart port info + * @baud: baudrate for which divisor needs to be calculated. 
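
serial_omap_baud_is_mode16() above picks whichever oversampling mode (13x or 16x) yields the smaller baud-rate error, and the divisor helper documented here then divides the UART clock by mode * baud. A stand-alone check of that comparison with the default 48 MHz clock; the clock and baud rate are just example numbers:

#include <stdio.h>
#include <stdlib.h>

/* Same arithmetic as calculate_baud_abs_diff(), outside the kernel. */
static int baud_abs_diff(unsigned int uartclk, unsigned int baud, unsigned int mode)
{
        unsigned int n = uartclk / (mode * baud);

        if (n == 0)
                n = 1;
        return abs((int)baud - (int)(uartclk / (mode * n)));
}

int main(void)
{
        unsigned int uartclk = 48000000;        /* DEFAULT_CLK_SPEED */
        unsigned int baud = 3686400;            /* illustrative high baud rate */
        int d13 = baud_abs_diff(uartclk, baud, 13);
        int d16 = baud_abs_diff(uartclk, baud, 16);
        unsigned int mode = (d13 >= d16) ? 16 : 13;

        /* For 3.6864 MBd a 13x divisor of 1 gives ~3.69 MBd (small error) while
         * 16x can only reach 3.0 MBd, so MODE13X wins and the divisor is 1. */
        printf("mode %ux, divisor %u (13x err %d, 16x err %d)\n",
               mode, uartclk / (mode * baud), d13, d16);
        return 0;
}
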
+ */ +static unsigned int +serial_omap_get_divisor(struct uart_port *port, unsigned int baud) +{ + unsigned int mode; + + if (!serial_omap_baud_is_mode16(port, baud)) + mode = 13; + else + mode = 16; + return port->uartclk/(mode * baud); +} + +static void serial_omap_enable_ms(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + + dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line); + + up->ier |= UART_IER_MSI; + serial_out(up, UART_IER, up->ier); +} + +static void serial_omap_stop_tx(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + int res; + + /* Handle RS-485 */ + if (port->rs485.flags & SER_RS485_ENABLED) { + if (up->scr & OMAP_UART_SCR_TX_EMPTY) { + /* THR interrupt is fired when both TX FIFO and TX + * shift register are empty. This means there's nothing + * left to transmit now, so make sure the THR interrupt + * is fired when TX FIFO is below the trigger level, + * disable THR interrupts and toggle the RS-485 GPIO + * data direction pin if needed. + */ + up->scr &= ~OMAP_UART_SCR_TX_EMPTY; + serial_out(up, UART_OMAP_SCR, up->scr); + res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ? + 1 : 0; + if (gpiod_get_value(up->rts_gpiod) != res) { + if (port->rs485.delay_rts_after_send > 0) + mdelay( + port->rs485.delay_rts_after_send); + gpiod_set_value(up->rts_gpiod, res); + } + } else { + /* We're asked to stop, but there's still stuff in the + * UART FIFO, so make sure the THR interrupt is fired + * when both TX FIFO and TX shift register are empty. + * The next THR interrupt (if no transmission is started + * in the meantime) will indicate the end of a + * transmission. Therefore we _don't_ disable THR + * interrupts in this situation. + */ + up->scr |= OMAP_UART_SCR_TX_EMPTY; + serial_out(up, UART_OMAP_SCR, up->scr); + return; + } + } + + if (up->ier & UART_IER_THRI) { + up->ier &= ~UART_IER_THRI; + serial_out(up, UART_IER, up->ier); + } +} + +static void serial_omap_stop_rx(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + + up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); + up->port.read_status_mask &= ~UART_LSR_DR; + serial_out(up, UART_IER, up->ier); +} + +static void serial_omap_put_char(struct uart_omap_port *up, unsigned char ch) +{ + serial_out(up, UART_TX, ch); + + if ((up->port.rs485.flags & SER_RS485_ENABLED) && + !(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) + up->rs485_tx_filter_count++; +} + +static void transmit_chars(struct uart_omap_port *up, unsigned int lsr) +{ + struct circ_buf *xmit = &up->port.state->xmit; + int count; + + if (up->port.x_char) { + serial_omap_put_char(up, up->port.x_char); + up->port.icount.tx++; + up->port.x_char = 0; + return; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { + serial_omap_stop_tx(&up->port); + return; + } + count = up->port.fifosize / 4; + do { + serial_omap_put_char(up, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + if (uart_circ_empty(xmit)) + serial_omap_stop_tx(&up->port); +} + +static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up) +{ + if (!(up->ier & UART_IER_THRI)) { + up->ier |= UART_IER_THRI; + serial_out(up, UART_IER, up->ier); + } +} + +static void serial_omap_start_tx(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + int 
res; + + /* Handle RS-485 */ + if (port->rs485.flags & SER_RS485_ENABLED) { + /* Fire THR interrupts when FIFO is below trigger level */ + up->scr &= ~OMAP_UART_SCR_TX_EMPTY; + serial_out(up, UART_OMAP_SCR, up->scr); + + /* if rts not already enabled */ + res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0; + if (gpiod_get_value(up->rts_gpiod) != res) { + gpiod_set_value(up->rts_gpiod, res); + if (port->rs485.delay_rts_before_send > 0) + mdelay(port->rs485.delay_rts_before_send); + } + } + + if ((port->rs485.flags & SER_RS485_ENABLED) && + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) + up->rs485_tx_filter_count = 0; + + serial_omap_enable_ier_thri(up); +} + +static void serial_omap_throttle(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); + serial_out(up, UART_IER, up->ier); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static void serial_omap_unthrottle(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + up->ier |= UART_IER_RLSI | UART_IER_RDI; + serial_out(up, UART_IER, up->ier); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static unsigned int check_modem_status(struct uart_omap_port *up) +{ + unsigned int status; + + status = serial_in(up, UART_MSR); + status |= up->msr_saved_flags; + up->msr_saved_flags = 0; + if ((status & UART_MSR_ANY_DELTA) == 0) + return status; + + if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI && + up->port.state != NULL) { + if (status & UART_MSR_TERI) + up->port.icount.rng++; + if (status & UART_MSR_DDSR) + up->port.icount.dsr++; + if (status & UART_MSR_DDCD) + uart_handle_dcd_change + (&up->port, status & UART_MSR_DCD); + if (status & UART_MSR_DCTS) + uart_handle_cts_change + (&up->port, status & UART_MSR_CTS); + wake_up_interruptible(&up->port.state->port.delta_msr_wait); + } + + return status; +} + +static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr) +{ + unsigned int flag; + + /* + * Read one data character out to avoid stalling the receiver according + * to the table 23-246 of the omap4 TRM. + */ + if (likely(lsr & UART_LSR_DR)) { + serial_in(up, UART_RX); + if ((up->port.rs485.flags & SER_RS485_ENABLED) && + !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) && + up->rs485_tx_filter_count) + up->rs485_tx_filter_count--; + } + + up->port.icount.rx++; + flag = TTY_NORMAL; + + if (lsr & UART_LSR_BI) { + flag = TTY_BREAK; + lsr &= ~(UART_LSR_FE | UART_LSR_PE); + up->port.icount.brk++; + /* + * We do the SysRQ and SAK checking + * here because otherwise the break + * may get masked by ignore_status_mask + * or read_status_mask. 
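
serial_omap_start_tx()/serial_omap_stop_tx() above implement RS-485 half-duplex direction control with a plain GPIO: drive RTS to the "on send" level, optionally delay, transmit, and return RTS to the "after send" level once the transmitter has drained. A user-space sketch of that sequence (set_rts()/delay_ms() are placeholders for gpiod_set_value()/mdelay(), and the struct here is illustrative, not the kernel's serial_rs485):

#include <stdio.h>
#include <stdbool.h>

struct rs485_demo {
        bool rts_on_send;               /* level to drive while transmitting */
        unsigned int delay_before_ms;
        unsigned int delay_after_ms;
};

static void set_rts(bool level)         /* placeholder for gpiod_set_value() */
{
        printf("RTS -> %d\n", level);
}

static void delay_ms(unsigned int ms)   /* placeholder for mdelay() */
{
        if (ms)
                printf("delay %u ms\n", ms);
}

static void rs485_send(const struct rs485_demo *c, const char *data)
{
        set_rts(c->rts_on_send);        /* claim the bus */
        delay_ms(c->delay_before_ms);
        printf("tx: %s\n", data);       /* driver: enable THR irq, drain FIFO */
        /* driver: wait for the TX shift register to empty before releasing */
        delay_ms(c->delay_after_ms);
        set_rts(!c->rts_on_send);       /* release the bus for the receiver */
}

int main(void)
{
        struct rs485_demo c = { .rts_on_send = true,
                                .delay_before_ms = 1, .delay_after_ms = 1 };
        rs485_send(&c, "hello");
        return 0;
}
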
+ */ + if (uart_handle_break(&up->port)) + return; + + } + + if (lsr & UART_LSR_PE) { + flag = TTY_PARITY; + up->port.icount.parity++; + } + + if (lsr & UART_LSR_FE) { + flag = TTY_FRAME; + up->port.icount.frame++; + } + + if (lsr & UART_LSR_OE) + up->port.icount.overrun++; + +#ifdef CONFIG_SERIAL_OMAP_CONSOLE + if (up->port.line == up->port.cons->index) { + /* Recover the break flag from console xmit */ + lsr |= up->lsr_break_flag; + } +#endif + uart_insert_char(&up->port, lsr, UART_LSR_OE, 0, flag); +} + +static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr) +{ + unsigned char ch = 0; + unsigned int flag; + + if (!(lsr & UART_LSR_DR)) + return; + + ch = serial_in(up, UART_RX); + if ((up->port.rs485.flags & SER_RS485_ENABLED) && + !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) && + up->rs485_tx_filter_count) { + up->rs485_tx_filter_count--; + return; + } + + flag = TTY_NORMAL; + up->port.icount.rx++; + + if (uart_handle_sysrq_char(&up->port, ch)) + return; + + uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag); +} + +/** + * serial_omap_irq() - This handles the interrupt from one port + * @irq: uart port irq number + * @dev_id: uart port info + */ +static irqreturn_t serial_omap_irq(int irq, void *dev_id) +{ + struct uart_omap_port *up = dev_id; + unsigned int iir, lsr; + unsigned int type; + irqreturn_t ret = IRQ_NONE; + int max_count = 256; + + spin_lock(&up->port.lock); + + do { + iir = serial_in(up, UART_IIR); + if (iir & UART_IIR_NO_INT) + break; + + ret = IRQ_HANDLED; + lsr = serial_in(up, UART_LSR); + + /* extract IRQ type from IIR register */ + type = iir & 0x3e; + + switch (type) { + case UART_IIR_MSI: + check_modem_status(up); + break; + case UART_IIR_THRI: + transmit_chars(up, lsr); + break; + case UART_IIR_RX_TIMEOUT: + case UART_IIR_RDI: + serial_omap_rdi(up, lsr); + break; + case UART_IIR_RLSI: + serial_omap_rlsi(up, lsr); + break; + case UART_IIR_CTS_RTS_DSR: + /* simply try again */ + break; + case UART_IIR_XOFF: + default: + break; + } + } while (max_count--); + + spin_unlock(&up->port.lock); + + tty_flip_buffer_push(&up->port.state->port); + + up->port_activity = jiffies; + + return ret; +} + +static unsigned int serial_omap_tx_empty(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned long flags; + unsigned int ret = 0; + + dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line); + spin_lock_irqsave(&up->port.lock, flags); + ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; + spin_unlock_irqrestore(&up->port.lock, flags); + + return ret; +} + +static unsigned int serial_omap_get_mctrl(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned int status; + unsigned int ret = 0; + + status = check_modem_status(up); + + dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line); + + if (status & UART_MSR_DCD) + ret |= TIOCM_CAR; + if (status & UART_MSR_RI) + ret |= TIOCM_RNG; + if (status & UART_MSR_DSR) + ret |= TIOCM_DSR; + if (status & UART_MSR_CTS) + ret |= TIOCM_CTS; + return ret; +} + +static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned char mcr = 0, old_mcr, lcr; + + dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line); + if (mctrl & TIOCM_RTS) + mcr |= UART_MCR_RTS; + if (mctrl & TIOCM_DTR) + mcr |= UART_MCR_DTR; + if (mctrl & TIOCM_OUT1) + mcr |= UART_MCR_OUT1; + if (mctrl & TIOCM_OUT2) + mcr |= UART_MCR_OUT2; + if (mctrl & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + + old_mcr = serial_in(up, UART_MCR); + old_mcr &= ~(UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_OUT1 | + UART_MCR_DTR | UART_MCR_RTS); + up->mcr = old_mcr | mcr; + serial_out(up, UART_MCR, up->mcr); + + /* Turn off autoRTS if RTS is lowered; restore autoRTS if RTS raised */ + lcr = serial_in(up, UART_LCR); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) + up->efr |= UART_EFR_RTS; + else + up->efr &= ~UART_EFR_RTS; + serial_out(up, UART_EFR, up->efr); + serial_out(up, UART_LCR, lcr); +} + +static void serial_omap_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned long flags; + + dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line); + spin_lock_irqsave(&up->port.lock, flags); + if (break_state == -1) + up->lcr |= UART_LCR_SBC; + else + up->lcr &= ~UART_LCR_SBC; + serial_out(up, UART_LCR, up->lcr); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static int serial_omap_startup(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned long flags; + int retval; + + /* + * Allocate the IRQ + */ + retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags, + up->name, up); + if (retval) + return retval; + + /* Optional wake-up IRQ */ + if (up->wakeirq) { + retval = dev_pm_set_dedicated_wake_irq(up->dev, up->wakeirq); + if (retval) { + free_irq(up->port.irq, up); + return retval; + } + } + + dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); + + pm_runtime_get_sync(up->dev); + /* + * Clear the FIFO buffers and disable them. + * (they will be reenabled in set_termios()) + */ + serial_omap_clear_fifos(up); + + /* + * Clear the interrupt registers. + */ + (void) serial_in(up, UART_LSR); + if (serial_in(up, UART_LSR) & UART_LSR_DR) + (void) serial_in(up, UART_RX); + (void) serial_in(up, UART_IIR); + (void) serial_in(up, UART_MSR); + + /* + * Now, initialize the UART + */ + serial_out(up, UART_LCR, UART_LCR_WLEN8); + spin_lock_irqsave(&up->port.lock, flags); + /* + * Most PC uarts need OUT2 raised to enable interrupts. + */ + up->port.mctrl |= TIOCM_OUT2; + serial_omap_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + + up->msr_saved_flags = 0; + /* + * Finally, enable interrupts. 
Note: Modem status interrupts + * are set via set_termios(), which will be occurring imminently + * anyway, so we don't enable them here. + */ + up->ier = UART_IER_RLSI | UART_IER_RDI; + serial_out(up, UART_IER, up->ier); + + /* Enable module level wake up */ + up->wer = OMAP_UART_WER_MOD_WKUP; + if (up->features & OMAP_UART_WER_HAS_TX_WAKEUP) + up->wer |= OMAP_UART_TX_WAKEUP_EN; + + serial_out(up, UART_OMAP_WER, up->wer); + + up->port_activity = jiffies; + return 0; +} + +static void serial_omap_shutdown(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned long flags; + + dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line); + + /* + * Disable interrupts from this port + */ + up->ier = 0; + serial_out(up, UART_IER, 0); + + spin_lock_irqsave(&up->port.lock, flags); + up->port.mctrl &= ~TIOCM_OUT2; + serial_omap_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + + /* + * Disable break condition and FIFOs + */ + serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); + serial_omap_clear_fifos(up); + + /* + * Read data port to reset things, and then free the irq + */ + if (serial_in(up, UART_LSR) & UART_LSR_DR) + (void) serial_in(up, UART_RX); + + pm_runtime_put_sync(up->dev); + free_irq(up->port.irq, up); + dev_pm_clear_wake_irq(up->dev); +} + +static void serial_omap_uart_qos_work(struct work_struct *work) +{ + struct uart_omap_port *up = container_of(work, struct uart_omap_port, + qos_work); + + cpu_latency_qos_update_request(&up->pm_qos_request, up->latency); +} + +static void +serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned char cval = 0; + unsigned long flags; + unsigned int baud, quot; + + cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag)); + + if (termios->c_cflag & CSTOPB) + cval |= UART_LCR_STOP; + if (termios->c_cflag & PARENB) + cval |= UART_LCR_PARITY; + if (!(termios->c_cflag & PARODD)) + cval |= UART_LCR_EPAR; + if (termios->c_cflag & CMSPAR) + cval |= UART_LCR_SPAR; + + /* + * Ask the core to calculate the divisor for us. + */ + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13); + quot = serial_omap_get_divisor(port, baud); + + /* calculate wakeup latency constraint */ + up->calc_latency = (USEC_PER_SEC * up->port.fifosize) / (baud / 8); + up->latency = up->calc_latency; + schedule_work(&up->qos_work); + + up->dll = quot & 0xff; + up->dlh = quot >> 8; + up->mdr1 = UART_OMAP_MDR1_DISABLE; + + up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 | + UART_FCR_ENABLE_FIFO; + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + spin_lock_irqsave(&up->port.lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + if (termios->c_iflag & INPCK) + up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (termios->c_iflag & (BRKINT | PARMRK)) + up->port.read_status_mask |= UART_LSR_BI; + + /* + * Characters to ignore + */ + up->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; + if (termios->c_iflag & IGNBRK) { + up->port.ignore_status_mask |= UART_LSR_BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
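
The wakeup-latency constraint computed in set_termios() above bounds how long the CPU may sit in a deep idle state: baud / 8 approximates the incoming character rate, so fifosize divided by that rate is roughly how long the RX FIFO can absorb data before it would overrun. A quick stand-alone check of the numbers for the 64-byte FIFO at an example rate of 115200 Bd:

#include <stdio.h>

int main(void)
{
        unsigned int usec_per_sec = 1000000;
        unsigned int fifosize = 64;             /* up->port.fifosize on this IP */
        unsigned int baud = 115200;             /* example rate */

        /* Same expression as in serial_omap_set_termios(). */
        unsigned int latency_us = (usec_per_sec * fifosize) / (baud / 8);

        /* 115200 / 8 = 14400 chars/s; 64 * 1000000 / 14400 ~= 4444 us, so the
         * cpu_latency_qos request asks for resume-from-idle well under ~4.4 ms. */
        printf("wakeup latency bound: %u us\n", latency_us);
        return 0;
}
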
+ */ + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_OE; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + up->port.ignore_status_mask |= UART_LSR_DR; + + /* + * Modem status interrupts + */ + up->ier &= ~UART_IER_MSI; + if (UART_ENABLE_MS(&up->port, termios->c_cflag)) + up->ier |= UART_IER_MSI; + serial_out(up, UART_IER, up->ier); + serial_out(up, UART_LCR, cval); /* reset DLAB */ + up->lcr = cval; + up->scr = 0; + + /* FIFOs and DMA Settings */ + + /* FCR can be changed only when the + * baud clock is not running + * DLL_REG and DLH_REG set to 0. + */ + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + serial_out(up, UART_DLL, 0); + serial_out(up, UART_DLM, 0); + serial_out(up, UART_LCR, 0); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + up->efr = serial_in(up, UART_EFR) & ~UART_EFR_ECB; + up->efr &= ~UART_EFR_SCD; + serial_out(up, UART_EFR, up->efr | UART_EFR_ECB); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + up->mcr = serial_in(up, UART_MCR) & ~UART_MCR_TCRTLR; + serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); + /* FIFO ENABLE, DMA MODE */ + + up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK; + /* + * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK + * sets Enables the granularity of 1 for TRIGGER RX + * level. Along with setting RX FIFO trigger level + * to 1 (as noted below, 16 characters) and TLR[3:0] + * to zero this will result RX FIFO threshold level + * to 1 character, instead of 16 as noted in comment + * below. + */ + + /* Set receive FIFO threshold to 16 characters and + * transmit FIFO threshold to 32 spaces + */ + up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK; + up->fcr &= ~OMAP_UART_FCR_TX_FIFO_TRIG_MASK; + up->fcr |= UART_FCR6_R_TRIGGER_16 | UART_FCR6_T_TRIGGER_24 | + UART_FCR_ENABLE_FIFO; + + serial_out(up, UART_FCR, up->fcr); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + serial_out(up, UART_OMAP_SCR, up->scr); + + /* Reset UART_MCR_TCRTLR: this must be done with the EFR_ECB bit set */ + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + serial_out(up, UART_MCR, up->mcr); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, up->efr); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + + /* Protocol, Baud Rate, and Interrupt Settings */ + + if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) + serial_omap_mdr1_errataset(up, up->mdr1); + else + serial_out(up, UART_OMAP_MDR1, up->mdr1); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, up->efr | UART_EFR_ECB); + + serial_out(up, UART_LCR, 0); + serial_out(up, UART_IER, 0); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + serial_out(up, UART_DLL, up->dll); /* LS of divisor */ + serial_out(up, UART_DLM, up->dlh); /* MS of divisor */ + + serial_out(up, UART_LCR, 0); + serial_out(up, UART_IER, up->ier); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + serial_out(up, UART_EFR, up->efr); + serial_out(up, UART_LCR, cval); + + if (!serial_omap_baud_is_mode16(port, baud)) + up->mdr1 = UART_OMAP_MDR1_13X_MODE; + else + up->mdr1 = UART_OMAP_MDR1_16X_MODE; + + if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) + serial_omap_mdr1_errataset(up, up->mdr1); + else + serial_out(up, UART_OMAP_MDR1, up->mdr1); + + /* Configure flow control */ + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + + /* XON1/XOFF1 accessible mode B, TCRTLR=0, ECB=0 */ + serial_out(up, UART_XON1, termios->c_cc[VSTART]); + serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]); + + /* Enable access to TCR/TLR */ + 
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); + + serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG); + + up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); + + if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) { + /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */ + up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + up->efr |= UART_EFR_CTS; + } else { + /* Disable AUTORTS and AUTOCTS */ + up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS); + } + + if (up->port.flags & UPF_SOFT_FLOW) { + /* clear SW control mode bits */ + up->efr &= OMAP_UART_SW_CLR; + + /* + * IXON Flag: + * Enable XON/XOFF flow control on input. + * Receiver compares XON1, XOFF1. + */ + if (termios->c_iflag & IXON) + up->efr |= OMAP_UART_SW_RX; + + /* + * IXOFF Flag: + * Enable XON/XOFF flow control on output. + * Transmit XON1, XOFF1 + */ + if (termios->c_iflag & IXOFF) { + up->port.status |= UPSTAT_AUTOXOFF; + up->efr |= OMAP_UART_SW_TX; + } + + /* + * IXANY Flag: + * Enable any character to restart output. + * Operation resumes after receiving any + * character after recognition of the XOFF character + */ + if (termios->c_iflag & IXANY) + up->mcr |= UART_MCR_XONANY; + else + up->mcr &= ~UART_MCR_XONANY; + } + serial_out(up, UART_MCR, up->mcr); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, up->efr); + serial_out(up, UART_LCR, up->lcr); + + serial_omap_set_mctrl(&up->port, up->port.mctrl); + + spin_unlock_irqrestore(&up->port.lock, flags); + dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line); +} + +static void +serial_omap_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned char efr; + + dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + efr = serial_in(up, UART_EFR); + serial_out(up, UART_EFR, efr | UART_EFR_ECB); + serial_out(up, UART_LCR, 0); + + serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, efr); + serial_out(up, UART_LCR, 0); +} + +static void serial_omap_release_port(struct uart_port *port) +{ + dev_dbg(port->dev, "serial_omap_release_port+\n"); +} + +static int serial_omap_request_port(struct uart_port *port) +{ + dev_dbg(port->dev, "serial_omap_request_port+\n"); + return 0; +} + +static void serial_omap_config_port(struct uart_port *port, int flags) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + + dev_dbg(up->port.dev, "serial_omap_config_port+%d\n", + up->port.line); + up->port.type = PORT_OMAP; + up->port.flags |= UPF_SOFT_FLOW | UPF_HARD_FLOW; +} + +static int +serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + /* we don't want the core code to modify any port params */ + dev_dbg(port->dev, "serial_omap_verify_port+\n"); + return -EINVAL; +} + +static const char * +serial_omap_type(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + + dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line); + return up->name; +} + +static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up) +{ + unsigned int status, tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. 
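
The UPF_SOFT_FLOW block above maps the termios software flow-control bits onto the EFR field: IXON selects OMAP_UART_SW_RX (the receiver compares incoming XON1/XOFF1), IXOFF selects OMAP_UART_SW_TX (the UART transmits XON1/XOFF1 itself). A small sketch of that mapping; the EFR values are copied from the defines above, while the DEMO_IXON/DEMO_IXOFF flag values are placeholders, not the kernel's termios bits:

#include <stdio.h>

/* EFR software flow-control values copied from the defines above. */
#define OMAP_UART_SW_TX   0x08  /* UART transmits XON1/XOFF1 itself */
#define OMAP_UART_SW_RX   0x02  /* UART reacts to received XON1/XOFF1 */
#define OMAP_UART_SW_CLR  0xF0  /* mask that clears both fields */

/* Stand-ins for the termios c_iflag bits; values are illustrative only. */
#define DEMO_IXON   0x1
#define DEMO_IXOFF  0x2

static unsigned char efr_for_iflag(unsigned char efr, unsigned int iflag)
{
        efr &= OMAP_UART_SW_CLR;        /* clear the previous SW flow mode */
        if (iflag & DEMO_IXON)
                efr |= OMAP_UART_SW_RX;
        if (iflag & DEMO_IXOFF)
                efr |= OMAP_UART_SW_TX;
        return efr;
}

int main(void)
{
        printf("EFR with IXON|IXOFF: 0x%02x\n",
               efr_for_iflag(0x10, DEMO_IXON | DEMO_IXOFF));
        return 0;
}
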
*/ + do { + status = serial_in(up, UART_LSR); + + if (status & UART_LSR_BI) + up->lsr_break_flag = UART_LSR_BI; + + if (--tmout == 0) + break; + udelay(1); + } while (!uart_lsr_tx_empty(status)); + + /* Wait up to 1s for flow control if necessary */ + if (up->port.flags & UPF_CONS_FLOW) { + tmout = 1000000; + for (tmout = 1000000; tmout; tmout--) { + unsigned int msr = serial_in(up, UART_MSR); + + up->msr_saved_flags |= msr & MSR_SAVE_FLAGS; + if (msr & UART_MSR_CTS) + break; + + udelay(1); + } + } +} + +#ifdef CONFIG_CONSOLE_POLL + +static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + + wait_for_xmitr(up); + serial_out(up, UART_TX, ch); +} + +static int serial_omap_poll_get_char(struct uart_port *port) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned int status; + + status = serial_in(up, UART_LSR); + if (!(status & UART_LSR_DR)) { + status = NO_POLL_CHAR; + goto out; + } + + status = serial_in(up, UART_RX); + +out: + return status; +} + +#endif /* CONFIG_CONSOLE_POLL */ + +#ifdef CONFIG_SERIAL_OMAP_CONSOLE + +#ifdef CONFIG_SERIAL_EARLYCON +static unsigned int omap_serial_early_in(struct uart_port *port, int offset) +{ + offset <<= port->regshift; + return readw(port->membase + offset); +} + +static void omap_serial_early_out(struct uart_port *port, int offset, + int value) +{ + offset <<= port->regshift; + writew(value, port->membase + offset); +} + +static void omap_serial_early_putc(struct uart_port *port, unsigned char c) +{ + unsigned int status; + + for (;;) { + status = omap_serial_early_in(port, UART_LSR); + if (uart_lsr_tx_empty(status)) + break; + cpu_relax(); + } + omap_serial_early_out(port, UART_TX, c); +} + +static void early_omap_serial_write(struct console *console, const char *s, + unsigned int count) +{ + struct earlycon_device *device = console->data; + struct uart_port *port = &device->port; + + uart_console_write(port, s, count, omap_serial_early_putc); +} + +static int __init early_omap_serial_setup(struct earlycon_device *device, + const char *options) +{ + struct uart_port *port = &device->port; + + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + + port->regshift = 2; + device->con->write = early_omap_serial_write; + return 0; +} + +OF_EARLYCON_DECLARE(omapserial, "ti,omap2-uart", early_omap_serial_setup); +OF_EARLYCON_DECLARE(omapserial, "ti,omap3-uart", early_omap_serial_setup); +OF_EARLYCON_DECLARE(omapserial, "ti,omap4-uart", early_omap_serial_setup); +#endif /* CONFIG_SERIAL_EARLYCON */ + +static struct uart_omap_port *serial_omap_console_ports[OMAP_MAX_HSUART_PORTS]; + +static struct uart_driver serial_omap_reg; + +static void serial_omap_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + + wait_for_xmitr(up); + serial_out(up, UART_TX, ch); +} + +static void +serial_omap_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_omap_port *up = serial_omap_console_ports[co->index]; + unsigned long flags; + unsigned int ier; + int locked = 1; + + local_irq_save(flags); + if (up->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&up->port.lock); + else + spin_lock(&up->port.lock); + + /* + * First save the IER then disable the interrupts + */ + ier = serial_in(up, UART_IER); + serial_out(up, UART_IER, 0); + + uart_console_write(&up->port, s, count, serial_omap_console_putchar); + + /* + * Finally, wait for 
transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up); + serial_out(up, UART_IER, ier); + /* + * The receive handling will happen properly because the + * receive ready bit will still be set; it is not cleared + * on read. However, modem control will not, we must + * call it if we have saved something in the saved flags + * while processing with interrupts off. + */ + if (up->msr_saved_flags) + check_modem_status(up); + + if (locked) + spin_unlock(&up->port.lock); + local_irq_restore(flags); +} + +static int __init +serial_omap_console_setup(struct console *co, char *options) +{ + struct uart_omap_port *up; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (serial_omap_console_ports[co->index] == NULL) + return -ENODEV; + up = serial_omap_console_ports[co->index]; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&up->port, co, baud, parity, bits, flow); +} + +static struct console serial_omap_console = { + .name = OMAP_SERIAL_NAME, + .write = serial_omap_console_write, + .device = uart_console_device, + .setup = serial_omap_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &serial_omap_reg, +}; + +static void serial_omap_add_console_port(struct uart_omap_port *up) +{ + serial_omap_console_ports[up->port.line] = up; +} + +#define OMAP_CONSOLE (&serial_omap_console) + +#else + +#define OMAP_CONSOLE NULL + +static inline void serial_omap_add_console_port(struct uart_omap_port *up) +{} + +#endif + +/* Enable or disable the rs485 support */ +static int +serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct uart_omap_port *up = to_uart_omap_port(port); + unsigned int mode; + int val; + + /* Disable interrupts from this port */ + mode = up->ier; + up->ier = 0; + serial_out(up, UART_IER, 0); + + /* enable / disable rts */ + val = (rs485->flags & SER_RS485_ENABLED) ? + SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND; + val = (rs485->flags & val) ? 1 : 0; + gpiod_set_value(up->rts_gpiod, val); + + /* Enable interrupts */ + up->ier = mode; + serial_out(up, UART_IER, up->ier); + + /* If RS-485 is disabled, make sure the THR interrupt is fired when + * TX FIFO is below the trigger level. 
+ */ + if (!(rs485->flags & SER_RS485_ENABLED) && + (up->scr & OMAP_UART_SCR_TX_EMPTY)) { + up->scr &= ~OMAP_UART_SCR_TX_EMPTY; + serial_out(up, UART_OMAP_SCR, up->scr); + } + + return 0; +} + +static const struct uart_ops serial_omap_pops = { + .tx_empty = serial_omap_tx_empty, + .set_mctrl = serial_omap_set_mctrl, + .get_mctrl = serial_omap_get_mctrl, + .stop_tx = serial_omap_stop_tx, + .start_tx = serial_omap_start_tx, + .throttle = serial_omap_throttle, + .unthrottle = serial_omap_unthrottle, + .stop_rx = serial_omap_stop_rx, + .enable_ms = serial_omap_enable_ms, + .break_ctl = serial_omap_break_ctl, + .startup = serial_omap_startup, + .shutdown = serial_omap_shutdown, + .set_termios = serial_omap_set_termios, + .pm = serial_omap_pm, + .type = serial_omap_type, + .release_port = serial_omap_release_port, + .request_port = serial_omap_request_port, + .config_port = serial_omap_config_port, + .verify_port = serial_omap_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_put_char = serial_omap_poll_put_char, + .poll_get_char = serial_omap_poll_get_char, +#endif +}; + +static struct uart_driver serial_omap_reg = { + .owner = THIS_MODULE, + .driver_name = "OMAP-SERIAL", + .dev_name = OMAP_SERIAL_NAME, + .nr = OMAP_MAX_HSUART_PORTS, + .cons = OMAP_CONSOLE, +}; + +#ifdef CONFIG_PM_SLEEP +static int serial_omap_prepare(struct device *dev) +{ + struct uart_omap_port *up = dev_get_drvdata(dev); + + up->is_suspending = true; + + return 0; +} + +static void serial_omap_complete(struct device *dev) +{ + struct uart_omap_port *up = dev_get_drvdata(dev); + + up->is_suspending = false; +} + +static int serial_omap_suspend(struct device *dev) +{ + struct uart_omap_port *up = dev_get_drvdata(dev); + + uart_suspend_port(&serial_omap_reg, &up->port); + flush_work(&up->qos_work); + + if (device_may_wakeup(dev)) + serial_omap_enable_wakeup(up, true); + else + serial_omap_enable_wakeup(up, false); + + return 0; +} + +static int serial_omap_resume(struct device *dev) +{ + struct uart_omap_port *up = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + serial_omap_enable_wakeup(up, false); + + uart_resume_port(&serial_omap_reg, &up->port); + + return 0; +} +#else +#define serial_omap_prepare NULL +#define serial_omap_complete NULL +#endif /* CONFIG_PM_SLEEP */ + +static void omap_serial_fill_features_erratas(struct uart_omap_port *up) +{ + u32 mvr, scheme; + u16 revision, major, minor; + + mvr = readl(up->port.membase + (UART_OMAP_MVER << up->port.regshift)); + + /* Check revision register scheme */ + scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT; + + switch (scheme) { + case 0: /* Legacy Scheme: OMAP2/3 */ + /* MINOR_REV[0:4], MAJOR_REV[4:7] */ + major = (mvr & OMAP_UART_LEGACY_MVR_MAJ_MASK) >> + OMAP_UART_LEGACY_MVR_MAJ_SHIFT; + minor = (mvr & OMAP_UART_LEGACY_MVR_MIN_MASK); + break; + case 1: + /* New Scheme: OMAP4+ */ + /* MINOR_REV[0:5], MAJOR_REV[8:10] */ + major = (mvr & OMAP_UART_MVR_MAJ_MASK) >> + OMAP_UART_MVR_MAJ_SHIFT; + minor = (mvr & OMAP_UART_MVR_MIN_MASK); + break; + default: + dev_warn(up->dev, + "Unknown %s revision, defaulting to highest\n", + up->name); + /* highest possible revision */ + major = 0xff; + minor = 0xff; + } + + /* normalize revision for the driver */ + revision = UART_BUILD_REVISION(major, minor); + + switch (revision) { + case OMAP_UART_REV_46: + up->errata |= (UART_ERRATA_i202_MDR1_ACCESS | + UART_ERRATA_i291_DMA_FORCEIDLE); + break; + case OMAP_UART_REV_52: + up->errata |= (UART_ERRATA_i202_MDR1_ACCESS | + UART_ERRATA_i291_DMA_FORCEIDLE); + up->features |= 
OMAP_UART_WER_HAS_TX_WAKEUP; + break; + case OMAP_UART_REV_63: + up->errata |= UART_ERRATA_i202_MDR1_ACCESS; + up->features |= OMAP_UART_WER_HAS_TX_WAKEUP; + break; + default: + break; + } +} + +static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev) +{ + struct omap_uart_port_info *omap_up_info; + + omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL); + if (!omap_up_info) + return NULL; /* out of memory */ + + of_property_read_u32(dev->of_node, "clock-frequency", + &omap_up_info->uartclk); + + omap_up_info->flags = UPF_BOOT_AUTOCONF; + + return omap_up_info; +} + +static const struct serial_rs485 serial_omap_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | + SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + +static int serial_omap_probe_rs485(struct uart_omap_port *up, + struct device *dev) +{ + struct serial_rs485 *rs485conf = &up->port.rs485; + struct device_node *np = dev->of_node; + enum gpiod_flags gflags; + int ret; + + rs485conf->flags = 0; + up->rts_gpiod = NULL; + + if (!np) + return 0; + + up->port.rs485_config = serial_omap_config_rs485; + up->port.rs485_supported = serial_omap_rs485_supported; + + ret = uart_get_rs485_mode(&up->port); + if (ret) + return ret; + + if (of_property_read_bool(np, "rs485-rts-active-high")) { + rs485conf->flags |= SER_RS485_RTS_ON_SEND; + rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; + } else { + rs485conf->flags &= ~SER_RS485_RTS_ON_SEND; + rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; + } + + /* check for tx enable gpio */ + gflags = rs485conf->flags & SER_RS485_RTS_AFTER_SEND ? + GPIOD_OUT_HIGH : GPIOD_OUT_LOW; + up->rts_gpiod = devm_gpiod_get_optional(dev, "rts", gflags); + if (IS_ERR(up->rts_gpiod)) { + ret = PTR_ERR(up->rts_gpiod); + if (ret == -EPROBE_DEFER) + return ret; + + up->rts_gpiod = NULL; + up->port.rs485_supported = (const struct serial_rs485) { }; + if (rs485conf->flags & SER_RS485_ENABLED) { + dev_err(dev, "disabling RS-485 (rts-gpio missing in device tree)\n"); + memset(rs485conf, 0, sizeof(*rs485conf)); + } + } else { + gpiod_set_consumer_name(up->rts_gpiod, "omap-serial"); + } + + return 0; +} + +static int serial_omap_probe(struct platform_device *pdev) +{ + struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev); + struct uart_omap_port *up; + struct resource *mem; + void __iomem *base; + int uartirq = 0; + int wakeirq = 0; + int ret; + + /* The optional wakeirq may be specified in the board dts file */ + if (pdev->dev.of_node) { + uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!uartirq) + return -EPROBE_DEFER; + wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1); + omap_up_info = of_get_uart_port_info(&pdev->dev); + pdev->dev.platform_data = omap_up_info; + } else { + uartirq = platform_get_irq(pdev, 0); + if (uartirq < 0) + return -EPROBE_DEFER; + } + + up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL); + if (!up) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(base)) + return PTR_ERR(base); + + up->dev = &pdev->dev; + up->port.dev = &pdev->dev; + up->port.type = PORT_OMAP; + up->port.iotype = UPIO_MEM; + up->port.irq = uartirq; + up->port.regshift = 2; + up->port.fifosize = 64; + up->port.ops = &serial_omap_pops; + up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_OMAP_CONSOLE); + + if (pdev->dev.of_node) + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + else + ret = 
pdev->id; + + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", + ret); + goto err_port_line; + } + up->port.line = ret; + + if (up->port.line >= OMAP_MAX_HSUART_PORTS) { + dev_err(&pdev->dev, "uart ID %d > MAX %d.\n", up->port.line, + OMAP_MAX_HSUART_PORTS); + ret = -ENXIO; + goto err_port_line; + } + + up->wakeirq = wakeirq; + if (!up->wakeirq) + dev_info(up->port.dev, "no wakeirq for uart%d\n", + up->port.line); + + sprintf(up->name, "OMAP UART%d", up->port.line); + up->port.mapbase = mem->start; + up->port.membase = base; + up->port.flags = omap_up_info->flags; + up->port.uartclk = omap_up_info->uartclk; + if (!up->port.uartclk) { + up->port.uartclk = DEFAULT_CLK_SPEED; + dev_warn(&pdev->dev, + "No clock speed specified: using default: %d\n", + DEFAULT_CLK_SPEED); + } + + ret = serial_omap_probe_rs485(up, &pdev->dev); + if (ret < 0) + goto err_rs485; + + up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + cpu_latency_qos_add_request(&up->pm_qos_request, up->latency); + INIT_WORK(&up->qos_work, serial_omap_uart_qos_work); + + platform_set_drvdata(pdev, up); + if (omap_up_info->autosuspend_timeout == 0) + omap_up_info->autosuspend_timeout = -1; + + device_init_wakeup(up->dev, true); + + pm_runtime_enable(&pdev->dev); + + pm_runtime_get_sync(&pdev->dev); + + omap_serial_fill_features_erratas(up); + + ui[up->port.line] = up; + serial_omap_add_console_port(up); + + ret = uart_add_one_port(&serial_omap_reg, &up->port); + if (ret != 0) + goto err_add_port; + + return 0; + +err_add_port: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + cpu_latency_qos_remove_request(&up->pm_qos_request); + device_init_wakeup(up->dev, false); +err_rs485: +err_port_line: + return ret; +} + +static int serial_omap_remove(struct platform_device *dev) +{ + struct uart_omap_port *up = platform_get_drvdata(dev); + + pm_runtime_get_sync(up->dev); + + uart_remove_one_port(&serial_omap_reg, &up->port); + + pm_runtime_put_sync(up->dev); + pm_runtime_disable(up->dev); + cpu_latency_qos_remove_request(&up->pm_qos_request); + device_init_wakeup(&dev->dev, false); + + return 0; +} + +/* + * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460) + * The access to uart register after MDR1 Access + * causes UART to corrupt data. + * + * Need a delay = + * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS) + * give 10 times as much + */ +static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1) +{ + u8 timeout = 255; + + serial_out(up, UART_OMAP_MDR1, mdr1); + udelay(2); + serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT | + UART_FCR_CLEAR_RCVR); + /* + * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and + * TX_FIFO_E bit is 1. + */ + while (UART_LSR_THRE != (serial_in(up, UART_LSR) & + (UART_LSR_THRE | UART_LSR_DR))) { + timeout--; + if (!timeout) { + /* Should *never* happen. 
we warn and carry on */ + dev_crit(up->dev, "Errata i202: timedout %x\n", + serial_in(up, UART_LSR)); + break; + } + udelay(1); + } +} + +#ifdef CONFIG_PM +static void serial_omap_restore_context(struct uart_omap_port *up) +{ + if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) + serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE); + else + serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE); + + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */ + serial_out(up, UART_EFR, UART_EFR_ECB); + serial_out(up, UART_LCR, 0x0); /* Operational mode */ + serial_out(up, UART_IER, 0x0); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */ + serial_out(up, UART_DLL, up->dll); + serial_out(up, UART_DLM, up->dlh); + serial_out(up, UART_LCR, 0x0); /* Operational mode */ + serial_out(up, UART_IER, up->ier); + serial_out(up, UART_FCR, up->fcr); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); + serial_out(up, UART_MCR, up->mcr); + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */ + serial_out(up, UART_OMAP_SCR, up->scr); + serial_out(up, UART_EFR, up->efr); + serial_out(up, UART_LCR, up->lcr); + if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) + serial_omap_mdr1_errataset(up, up->mdr1); + else + serial_out(up, UART_OMAP_MDR1, up->mdr1); + serial_out(up, UART_OMAP_WER, up->wer); +} + +static int serial_omap_runtime_suspend(struct device *dev) +{ + struct uart_omap_port *up = dev_get_drvdata(dev); + + if (!up) + return -EINVAL; + + /* + * When using 'no_console_suspend', the console UART must not be + * suspended. Since driver suspend is managed by runtime suspend, + * preventing runtime suspend (by returning error) will keep device + * active during suspend. + */ + if (up->is_suspending && !console_suspend_enabled && + uart_console(&up->port)) + return -EBUSY; + + up->context_loss_cnt = serial_omap_get_context_loss_count(up); + + serial_omap_enable_wakeup(up, true); + + up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + schedule_work(&up->qos_work); + + return 0; +} + +static int serial_omap_runtime_resume(struct device *dev) +{ + struct uart_omap_port *up = dev_get_drvdata(dev); + + int loss_cnt = serial_omap_get_context_loss_count(up); + + serial_omap_enable_wakeup(up, false); + + if (loss_cnt < 0) { + dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n", + loss_cnt); + serial_omap_restore_context(up); + } else if (up->context_loss_cnt != loss_cnt) { + serial_omap_restore_context(up); + } + up->latency = up->calc_latency; + schedule_work(&up->qos_work); + + return 0; +} +#endif + +static const struct dev_pm_ops serial_omap_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume) + SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend, + serial_omap_runtime_resume, NULL) + .prepare = serial_omap_prepare, + .complete = serial_omap_complete, +}; + +#if defined(CONFIG_OF) +static const struct of_device_id omap_serial_of_match[] = { + { .compatible = "ti,omap2-uart" }, + { .compatible = "ti,omap3-uart" }, + { .compatible = "ti,omap4-uart" }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_serial_of_match); +#endif + +static struct platform_driver serial_omap_driver = { + .probe = serial_omap_probe, + .remove = serial_omap_remove, + .driver = { + .name = OMAP_SERIAL_DRIVER_NAME, + .pm = &serial_omap_dev_pm_ops, + .of_match_table = of_match_ptr(omap_serial_of_match), + }, +}; + +static int __init serial_omap_init(void) +{ + int ret; + + ret = uart_register_driver(&serial_omap_reg); + if (ret != 0) + return ret; + ret = 
platform_driver_register(&serial_omap_driver);
+        if (ret != 0)
+                uart_unregister_driver(&serial_omap_reg);
+        return ret;
+}
+
+static void __exit serial_omap_exit(void)
+{
+        platform_driver_unregister(&serial_omap_driver);
+        uart_unregister_driver(&serial_omap_reg);
+}
+
+module_init(serial_omap_init);
+module_exit(serial_omap_exit);
+
+MODULE_DESCRIPTION("OMAP High Speed UART driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
new file mode 100644
index 000000000..fde39cc11
--- /dev/null
+++ b/drivers/tty/serial/owl-uart.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi Owl family serial console
+ *
+ * Copyright 2013 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2016-2017 Andreas Färber
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define OWL_UART_PORT_NUM 7
+#define OWL_UART_DEV_NAME "ttyOWL"
+
+#define OWL_UART_CTL 0x000
+#define OWL_UART_RXDAT 0x004
+#define OWL_UART_TXDAT 0x008
+#define OWL_UART_STAT 0x00c
+
+#define OWL_UART_CTL_DWLS_MASK GENMASK(1, 0)
+#define OWL_UART_CTL_DWLS_5BITS (0x0 << 0)
+#define OWL_UART_CTL_DWLS_6BITS (0x1 << 0)
+#define OWL_UART_CTL_DWLS_7BITS (0x2 << 0)
+#define OWL_UART_CTL_DWLS_8BITS (0x3 << 0)
+#define OWL_UART_CTL_STPS_2BITS BIT(2)
+#define OWL_UART_CTL_PRS_MASK GENMASK(6, 4)
+#define OWL_UART_CTL_PRS_NONE (0x0 << 4)
+#define OWL_UART_CTL_PRS_ODD (0x4 << 4)
+#define OWL_UART_CTL_PRS_MARK (0x5 << 4)
+#define OWL_UART_CTL_PRS_EVEN (0x6 << 4)
+#define OWL_UART_CTL_PRS_SPACE (0x7 << 4)
+#define OWL_UART_CTL_AFE BIT(12)
+#define OWL_UART_CTL_TRFS_TX BIT(14)
+#define OWL_UART_CTL_EN BIT(15)
+#define OWL_UART_CTL_RXDE BIT(16)
+#define OWL_UART_CTL_TXDE BIT(17)
+#define OWL_UART_CTL_RXIE BIT(18)
+#define OWL_UART_CTL_TXIE BIT(19)
+#define OWL_UART_CTL_LBEN BIT(20)
+
+#define OWL_UART_STAT_RIP BIT(0)
+#define OWL_UART_STAT_TIP BIT(1)
+#define OWL_UART_STAT_RXER BIT(2)
+#define OWL_UART_STAT_TFER BIT(3)
+#define OWL_UART_STAT_RXST BIT(4)
+#define OWL_UART_STAT_RFEM BIT(5)
+#define OWL_UART_STAT_TFFU BIT(6)
+#define OWL_UART_STAT_CTSS BIT(7)
+#define OWL_UART_STAT_RTSS BIT(8)
+#define OWL_UART_STAT_TFES BIT(10)
+#define OWL_UART_STAT_TRFL_MASK GENMASK(16, 11)
+#define OWL_UART_STAT_UTBB BIT(17)
+
+#define OWL_UART_POLL_USEC 5
+#define OWL_UART_TIMEOUT_USEC 10000
+
+static struct uart_driver owl_uart_driver;
+
+struct owl_uart_info {
+        unsigned int tx_fifosize;
+};
+
+struct owl_uart_port {
+        struct uart_port port;
+        struct clk *clk;
+};
+
+#define to_owl_uart_port(prt) container_of(prt, struct owl_uart_port, prt)
+
+static struct owl_uart_port *owl_uart_ports[OWL_UART_PORT_NUM];
+
+static inline void owl_uart_write(struct uart_port *port, u32 val, unsigned int off)
+{
+        writel(val, port->membase + off);
+}
+
+static inline u32 owl_uart_read(struct uart_port *port, unsigned int off)
+{
+        return readl(port->membase + off);
+}
+
+static void owl_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+        u32 ctl;
+
+        ctl = owl_uart_read(port, OWL_UART_CTL);
+
+        if (mctrl & TIOCM_LOOP)
+                ctl |= OWL_UART_CTL_LBEN;
+        else
+                ctl &= ~OWL_UART_CTL_LBEN;
+
+        owl_uart_write(port, ctl, OWL_UART_CTL);
+}
+
+static unsigned int owl_uart_get_mctrl(struct uart_port *port)
+{
+        unsigned int mctrl = TIOCM_CAR | TIOCM_DSR;
+        u32 stat, ctl;
+
+        ctl = owl_uart_read(port, OWL_UART_CTL);
+        stat = owl_uart_read(port, OWL_UART_STAT);
+        if (stat &
OWL_UART_STAT_RTSS) + mctrl |= TIOCM_RTS; + if ((stat & OWL_UART_STAT_CTSS) || !(ctl & OWL_UART_CTL_AFE)) + mctrl |= TIOCM_CTS; + return mctrl; +} + +static unsigned int owl_uart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + u32 val; + unsigned int ret; + + spin_lock_irqsave(&port->lock, flags); + + val = owl_uart_read(port, OWL_UART_STAT); + ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0; + + spin_unlock_irqrestore(&port->lock, flags); + + return ret; +} + +static void owl_uart_stop_rx(struct uart_port *port) +{ + u32 val; + + val = owl_uart_read(port, OWL_UART_CTL); + val &= ~(OWL_UART_CTL_RXIE | OWL_UART_CTL_RXDE); + owl_uart_write(port, val, OWL_UART_CTL); + + val = owl_uart_read(port, OWL_UART_STAT); + val |= OWL_UART_STAT_RIP; + owl_uart_write(port, val, OWL_UART_STAT); +} + +static void owl_uart_stop_tx(struct uart_port *port) +{ + u32 val; + + val = owl_uart_read(port, OWL_UART_CTL); + val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_TXDE); + owl_uart_write(port, val, OWL_UART_CTL); + + val = owl_uart_read(port, OWL_UART_STAT); + val |= OWL_UART_STAT_TIP; + owl_uart_write(port, val, OWL_UART_STAT); +} + +static void owl_uart_start_tx(struct uart_port *port) +{ + u32 val; + + if (uart_tx_stopped(port)) { + owl_uart_stop_tx(port); + return; + } + + val = owl_uart_read(port, OWL_UART_STAT); + val |= OWL_UART_STAT_TIP; + owl_uart_write(port, val, OWL_UART_STAT); + + val = owl_uart_read(port, OWL_UART_CTL); + val |= OWL_UART_CTL_TXIE; + owl_uart_write(port, val, OWL_UART_CTL); +} + +static void owl_uart_send_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned int ch; + + if (port->x_char) { + while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)) + cpu_relax(); + owl_uart_write(port, port->x_char, OWL_UART_TXDAT); + port->icount.tx++; + port->x_char = 0; + } + + if (uart_tx_stopped(port)) + return; + + while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)) { + if (uart_circ_empty(xmit)) + break; + + ch = xmit->buf[xmit->tail]; + owl_uart_write(port, ch, OWL_UART_TXDAT); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + owl_uart_stop_tx(port); +} + +static void owl_uart_receive_chars(struct uart_port *port) +{ + u32 stat, val; + + val = owl_uart_read(port, OWL_UART_CTL); + val &= ~OWL_UART_CTL_TRFS_TX; + owl_uart_write(port, val, OWL_UART_CTL); + + stat = owl_uart_read(port, OWL_UART_STAT); + while (!(stat & OWL_UART_STAT_RFEM)) { + char flag = TTY_NORMAL; + + if (stat & OWL_UART_STAT_RXER) + port->icount.overrun++; + + if (stat & OWL_UART_STAT_RXST) { + /* We are not able to distinguish the error type. 
*/ + port->icount.brk++; + port->icount.frame++; + + stat &= port->read_status_mask; + if (stat & OWL_UART_STAT_RXST) + flag = TTY_PARITY; + } else + port->icount.rx++; + + val = owl_uart_read(port, OWL_UART_RXDAT); + val &= 0xff; + + if ((stat & port->ignore_status_mask) == 0) + tty_insert_flip_char(&port->state->port, val, flag); + + stat = owl_uart_read(port, OWL_UART_STAT); + } + + tty_flip_buffer_push(&port->state->port); +} + +static irqreturn_t owl_uart_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned long flags; + u32 stat; + + spin_lock_irqsave(&port->lock, flags); + + stat = owl_uart_read(port, OWL_UART_STAT); + + if (stat & OWL_UART_STAT_RIP) + owl_uart_receive_chars(port); + + if (stat & OWL_UART_STAT_TIP) + owl_uart_send_chars(port); + + stat = owl_uart_read(port, OWL_UART_STAT); + stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP; + owl_uart_write(port, stat, OWL_UART_STAT); + + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_HANDLED; +} + +static void owl_uart_shutdown(struct uart_port *port) +{ + u32 val; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + val = owl_uart_read(port, OWL_UART_CTL); + val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE + | OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN); + owl_uart_write(port, val, OWL_UART_CTL); + + spin_unlock_irqrestore(&port->lock, flags); + + free_irq(port->irq, port); +} + +static int owl_uart_startup(struct uart_port *port) +{ + u32 val; + unsigned long flags; + int ret; + + ret = request_irq(port->irq, owl_uart_irq, IRQF_TRIGGER_HIGH, + "owl-uart", port); + if (ret) + return ret; + + spin_lock_irqsave(&port->lock, flags); + + val = owl_uart_read(port, OWL_UART_STAT); + val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP + | OWL_UART_STAT_RXER | OWL_UART_STAT_TFER | OWL_UART_STAT_RXST; + owl_uart_write(port, val, OWL_UART_STAT); + + val = owl_uart_read(port, OWL_UART_CTL); + val |= OWL_UART_CTL_RXIE | OWL_UART_CTL_TXIE; + val |= OWL_UART_CTL_EN; + owl_uart_write(port, val, OWL_UART_CTL); + + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void owl_uart_change_baudrate(struct owl_uart_port *owl_port, + unsigned long baud) +{ + clk_set_rate(owl_port->clk, baud * 8); +} + +static void owl_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct owl_uart_port *owl_port = to_owl_uart_port(port); + unsigned int baud; + u32 ctl; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + ctl = owl_uart_read(port, OWL_UART_CTL); + + ctl &= ~OWL_UART_CTL_DWLS_MASK; + switch (termios->c_cflag & CSIZE) { + case CS5: + ctl |= OWL_UART_CTL_DWLS_5BITS; + break; + case CS6: + ctl |= OWL_UART_CTL_DWLS_6BITS; + break; + case CS7: + ctl |= OWL_UART_CTL_DWLS_7BITS; + break; + case CS8: + default: + ctl |= OWL_UART_CTL_DWLS_8BITS; + break; + } + + if (termios->c_cflag & CSTOPB) + ctl |= OWL_UART_CTL_STPS_2BITS; + else + ctl &= ~OWL_UART_CTL_STPS_2BITS; + + ctl &= ~OWL_UART_CTL_PRS_MASK; + if (termios->c_cflag & PARENB) { + if (termios->c_cflag & CMSPAR) { + if (termios->c_cflag & PARODD) + ctl |= OWL_UART_CTL_PRS_MARK; + else + ctl |= OWL_UART_CTL_PRS_SPACE; + } else if (termios->c_cflag & PARODD) + ctl |= OWL_UART_CTL_PRS_ODD; + else + ctl |= OWL_UART_CTL_PRS_EVEN; + } else + ctl |= OWL_UART_CTL_PRS_NONE; + + if (termios->c_cflag & CRTSCTS) + ctl |= OWL_UART_CTL_AFE; + else + ctl &= ~OWL_UART_CTL_AFE; + + owl_uart_write(port, ctl, OWL_UART_CTL); + + baud = uart_get_baud_rate(port, termios, old, 9600, 
3200000); + owl_uart_change_baudrate(owl_port, baud); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + port->read_status_mask |= OWL_UART_STAT_RXER; + if (termios->c_iflag & INPCK) + port->read_status_mask |= OWL_UART_STAT_RXST; + + uart_update_timeout(port, termios->c_cflag, baud); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void owl_uart_release_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return; + + if (port->flags & UPF_IOREMAP) { + devm_release_mem_region(port->dev, port->mapbase, + resource_size(res)); + devm_iounmap(port->dev, port->membase); + port->membase = NULL; + } +} + +static int owl_uart_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + if (!devm_request_mem_region(port->dev, port->mapbase, + resource_size(res), dev_name(port->dev))) + return -EBUSY; + + if (port->flags & UPF_IOREMAP) { + port->membase = devm_ioremap(port->dev, port->mapbase, + resource_size(res)); + if (!port->membase) + return -EBUSY; + } + + return 0; +} + +static const char *owl_uart_type(struct uart_port *port) +{ + return (port->type == PORT_OWL) ? "owl-uart" : NULL; +} + +static int owl_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (port->type != PORT_OWL) + return -EINVAL; + + if (port->irq != ser->irq) + return -EINVAL; + + return 0; +} + +static void owl_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_OWL; + owl_uart_request_port(port); + } +} + +#ifdef CONFIG_CONSOLE_POLL + +static int owl_uart_poll_get_char(struct uart_port *port) +{ + if (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_RFEM) + return NO_POLL_CHAR; + + return owl_uart_read(port, OWL_UART_RXDAT); +} + +static void owl_uart_poll_put_char(struct uart_port *port, unsigned char ch) +{ + u32 reg; + int ret; + + /* Wait while FIFO is full or timeout */ + ret = readl_poll_timeout_atomic(port->membase + OWL_UART_STAT, reg, + !(reg & OWL_UART_STAT_TFFU), + OWL_UART_POLL_USEC, + OWL_UART_TIMEOUT_USEC); + if (ret == -ETIMEDOUT) { + dev_err(port->dev, "Timeout waiting while UART TX FULL\n"); + return; + } + + owl_uart_write(port, ch, OWL_UART_TXDAT); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +static const struct uart_ops owl_uart_ops = { + .set_mctrl = owl_uart_set_mctrl, + .get_mctrl = owl_uart_get_mctrl, + .tx_empty = owl_uart_tx_empty, + .start_tx = owl_uart_start_tx, + .stop_rx = owl_uart_stop_rx, + .stop_tx = owl_uart_stop_tx, + .startup = owl_uart_startup, + .shutdown = owl_uart_shutdown, + .set_termios = owl_uart_set_termios, + .type = owl_uart_type, + .config_port = owl_uart_config_port, + .request_port = owl_uart_request_port, + .release_port = owl_uart_release_port, + .verify_port = owl_uart_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = owl_uart_poll_get_char, + .poll_put_char = owl_uart_poll_put_char, +#endif +}; + +#ifdef CONFIG_SERIAL_OWL_CONSOLE + +static void owl_console_putchar(struct uart_port *port, unsigned char ch) +{ + if (!port->membase) + return; + + while (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU) + cpu_relax(); + + owl_uart_write(port, ch, OWL_UART_TXDAT); +} + +static void 
owl_uart_port_write(struct uart_port *port, const char *s, + u_int count) +{ + u32 old_ctl, val; + unsigned long flags; + int locked; + + local_irq_save(flags); + + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&port->lock); + else { + spin_lock(&port->lock); + locked = 1; + } + + old_ctl = owl_uart_read(port, OWL_UART_CTL); + val = old_ctl | OWL_UART_CTL_TRFS_TX; + /* disable IRQ */ + val &= ~(OWL_UART_CTL_RXIE | OWL_UART_CTL_TXIE); + owl_uart_write(port, val, OWL_UART_CTL); + + uart_console_write(port, s, count, owl_console_putchar); + + /* wait until all contents have been sent out */ + while (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TRFL_MASK) + cpu_relax(); + + /* clear IRQ pending */ + val = owl_uart_read(port, OWL_UART_STAT); + val |= OWL_UART_STAT_TIP | OWL_UART_STAT_RIP; + owl_uart_write(port, val, OWL_UART_STAT); + + owl_uart_write(port, old_ctl, OWL_UART_CTL); + + if (locked) + spin_unlock(&port->lock); + + local_irq_restore(flags); +} + +static void owl_uart_console_write(struct console *co, const char *s, + u_int count) +{ + struct owl_uart_port *owl_port; + + owl_port = owl_uart_ports[co->index]; + if (!owl_port) + return; + + owl_uart_port_write(&owl_port->port, s, count); +} + +static int owl_uart_console_setup(struct console *co, char *options) +{ + struct owl_uart_port *owl_port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= OWL_UART_PORT_NUM) + return -EINVAL; + + owl_port = owl_uart_ports[co->index]; + if (!owl_port || !owl_port->port.membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&owl_port->port, co, baud, parity, bits, flow); +} + +static struct console owl_uart_console = { + .name = OWL_UART_DEV_NAME, + .write = owl_uart_console_write, + .device = uart_console_device, + .setup = owl_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &owl_uart_driver, +}; + +static int __init owl_uart_console_init(void) +{ + register_console(&owl_uart_console); + + return 0; +} +console_initcall(owl_uart_console_init); + +static void owl_uart_early_console_write(struct console *co, + const char *s, + u_int count) +{ + struct earlycon_device *dev = co->data; + + owl_uart_port_write(&dev->port, s, count); +} + +static int __init +owl_uart_early_console_setup(struct earlycon_device *device, const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = owl_uart_early_console_write; + + return 0; +} +OF_EARLYCON_DECLARE(owl, "actions,owl-uart", + owl_uart_early_console_setup); + +#define OWL_UART_CONSOLE (&owl_uart_console) +#else +#define OWL_UART_CONSOLE NULL +#endif + +static struct uart_driver owl_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "owl-uart", + .dev_name = OWL_UART_DEV_NAME, + .nr = OWL_UART_PORT_NUM, + .cons = OWL_UART_CONSOLE, +}; + +static const struct owl_uart_info owl_s500_info = { + .tx_fifosize = 16, +}; + +static const struct owl_uart_info owl_s900_info = { + .tx_fifosize = 32, +}; + +static const struct of_device_id owl_uart_dt_matches[] = { + { .compatible = "actions,s500-uart", .data = &owl_s500_info }, + { .compatible = "actions,s900-uart", .data = &owl_s900_info }, + { } +}; +MODULE_DEVICE_TABLE(of, owl_uart_dt_matches); + +static int owl_uart_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct owl_uart_info *info = NULL; + struct resource *res_mem; + struct 
owl_uart_port *owl_port;
+        int ret, irq;
+
+        if (pdev->dev.of_node) {
+                pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+                match = of_match_node(owl_uart_dt_matches, pdev->dev.of_node);
+                if (match)
+                        info = match->data;
+        }
+
+        if (pdev->id < 0 || pdev->id >= OWL_UART_PORT_NUM) {
+                dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
+                return -EINVAL;
+        }
+
+        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+        if (!res_mem) {
+                dev_err(&pdev->dev, "could not get mem\n");
+                return -ENODEV;
+        }
+
+        irq = platform_get_irq(pdev, 0);
+        if (irq < 0)
+                return irq;
+
+        if (owl_uart_ports[pdev->id]) {
+                dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
+                return -EBUSY;
+        }
+
+        owl_port = devm_kzalloc(&pdev->dev, sizeof(*owl_port), GFP_KERNEL);
+        if (!owl_port)
+                return -ENOMEM;
+
+        owl_port->clk = devm_clk_get(&pdev->dev, NULL);
+        if (IS_ERR(owl_port->clk)) {
+                dev_err(&pdev->dev, "could not get clk\n");
+                return PTR_ERR(owl_port->clk);
+        }
+
+        ret = clk_prepare_enable(owl_port->clk);
+        if (ret) {
+                dev_err(&pdev->dev, "could not enable clk\n");
+                return ret;
+        }
+
+        owl_port->port.dev = &pdev->dev;
+        owl_port->port.line = pdev->id;
+        owl_port->port.type = PORT_OWL;
+        owl_port->port.iotype = UPIO_MEM;
+        owl_port->port.mapbase = res_mem->start;
+        owl_port->port.irq = irq;
+        owl_port->port.uartclk = clk_get_rate(owl_port->clk);
+        if (owl_port->port.uartclk == 0) {
+                dev_err(&pdev->dev, "clock rate is zero\n");
+                clk_disable_unprepare(owl_port->clk);
+                return -EINVAL;
+        }
+        owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
+        owl_port->port.x_char = 0;
+        owl_port->port.fifosize = (info) ? info->tx_fifosize : 16;
+        owl_port->port.ops = &owl_uart_ops;
+
+        owl_uart_ports[pdev->id] = owl_port;
+        platform_set_drvdata(pdev, owl_port);
+
+        ret = uart_add_one_port(&owl_uart_driver, &owl_port->port);
+        if (ret)
+                owl_uart_ports[pdev->id] = NULL;
+
+        return ret;
+}
+
+static int owl_uart_remove(struct platform_device *pdev)
+{
+        struct owl_uart_port *owl_port = platform_get_drvdata(pdev);
+
+        uart_remove_one_port(&owl_uart_driver, &owl_port->port);
+        owl_uart_ports[pdev->id] = NULL;
+        clk_disable_unprepare(owl_port->clk);
+
+        return 0;
+}
+
+static struct platform_driver owl_uart_platform_driver = {
+        .probe = owl_uart_probe,
+        .remove = owl_uart_remove,
+        .driver = {
+                .name = "owl-uart",
+                .of_match_table = owl_uart_dt_matches,
+        },
+};
+
+static int __init owl_uart_init(void)
+{
+        int ret;
+
+        ret = uart_register_driver(&owl_uart_driver);
+        if (ret)
+                return ret;
+
+        ret = platform_driver_register(&owl_uart_platform_driver);
+        if (ret)
+                uart_unregister_driver(&owl_uart_driver);
+
+        return ret;
+}
+
+static void __exit owl_uart_exit(void)
+{
+        platform_driver_unregister(&owl_uart_platform_driver);
+        uart_unregister_driver(&owl_uart_driver);
+}
+
+module_init(owl_uart_init);
+module_exit(owl_uart_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
new file mode 100644
index 000000000..abff1c647
--- /dev/null
+++ b/drivers/tty/serial/pch_uart.c
@@ -0,0 +1,1918 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +enum { + PCH_UART_HANDLED_RX_INT_SHIFT, + PCH_UART_HANDLED_TX_INT_SHIFT, + PCH_UART_HANDLED_RX_ERR_INT_SHIFT, + PCH_UART_HANDLED_RX_TRG_INT_SHIFT, + PCH_UART_HANDLED_MS_INT_SHIFT, + PCH_UART_HANDLED_LS_INT_SHIFT, +}; + +#define PCH_UART_DRIVER_DEVICE "ttyPCH" + +/* Set the max number of UART port + * Intel EG20T PCH: 4 port + * LAPIS Semiconductor ML7213 IOH: 3 port + * LAPIS Semiconductor ML7223 IOH: 2 port +*/ +#define PCH_UART_NR 4 + +#define PCH_UART_HANDLED_RX_INT (1<<((PCH_UART_HANDLED_RX_INT_SHIFT)<<1)) +#define PCH_UART_HANDLED_TX_INT (1<<((PCH_UART_HANDLED_TX_INT_SHIFT)<<1)) +#define PCH_UART_HANDLED_RX_ERR_INT (1<<((\ + PCH_UART_HANDLED_RX_ERR_INT_SHIFT)<<1)) +#define PCH_UART_HANDLED_RX_TRG_INT (1<<((\ + PCH_UART_HANDLED_RX_TRG_INT_SHIFT)<<1)) +#define PCH_UART_HANDLED_MS_INT (1<<((PCH_UART_HANDLED_MS_INT_SHIFT)<<1)) + +#define PCH_UART_HANDLED_LS_INT (1<<((PCH_UART_HANDLED_LS_INT_SHIFT)<<1)) + +#define PCH_UART_RBR 0x00 +#define PCH_UART_THR 0x00 + +#define PCH_UART_IER_MASK (PCH_UART_IER_ERBFI|PCH_UART_IER_ETBEI|\ + PCH_UART_IER_ELSI|PCH_UART_IER_EDSSI) +#define PCH_UART_IER_ERBFI 0x00000001 +#define PCH_UART_IER_ETBEI 0x00000002 +#define PCH_UART_IER_ELSI 0x00000004 +#define PCH_UART_IER_EDSSI 0x00000008 + +#define PCH_UART_IIR_IP 0x00000001 +#define PCH_UART_IIR_IID 0x00000006 +#define PCH_UART_IIR_MSI 0x00000000 +#define PCH_UART_IIR_TRI 0x00000002 +#define PCH_UART_IIR_RRI 0x00000004 +#define PCH_UART_IIR_REI 0x00000006 +#define PCH_UART_IIR_TOI 0x00000008 +#define PCH_UART_IIR_FIFO256 0x00000020 +#define PCH_UART_IIR_FIFO64 PCH_UART_IIR_FIFO256 +#define PCH_UART_IIR_FE 0x000000C0 + +#define PCH_UART_FCR_FIFOE 0x00000001 +#define PCH_UART_FCR_RFR 0x00000002 +#define PCH_UART_FCR_TFR 0x00000004 +#define PCH_UART_FCR_DMS 0x00000008 +#define PCH_UART_FCR_FIFO256 0x00000020 +#define PCH_UART_FCR_RFTL 0x000000C0 + +#define PCH_UART_FCR_RFTL1 0x00000000 +#define PCH_UART_FCR_RFTL64 0x00000040 +#define PCH_UART_FCR_RFTL128 0x00000080 +#define PCH_UART_FCR_RFTL224 0x000000C0 +#define PCH_UART_FCR_RFTL16 PCH_UART_FCR_RFTL64 +#define PCH_UART_FCR_RFTL32 PCH_UART_FCR_RFTL128 +#define PCH_UART_FCR_RFTL56 PCH_UART_FCR_RFTL224 +#define PCH_UART_FCR_RFTL4 PCH_UART_FCR_RFTL64 +#define PCH_UART_FCR_RFTL8 PCH_UART_FCR_RFTL128 +#define PCH_UART_FCR_RFTL14 PCH_UART_FCR_RFTL224 +#define PCH_UART_FCR_RFTL_SHIFT 6 + +#define PCH_UART_LCR_WLS 0x00000003 +#define PCH_UART_LCR_STB 0x00000004 +#define PCH_UART_LCR_PEN 0x00000008 +#define PCH_UART_LCR_EPS 0x00000010 +#define PCH_UART_LCR_SP 0x00000020 +#define PCH_UART_LCR_SB 0x00000040 +#define PCH_UART_LCR_DLAB 0x00000080 +#define PCH_UART_LCR_NP 0x00000000 +#define PCH_UART_LCR_OP PCH_UART_LCR_PEN +#define PCH_UART_LCR_EP (PCH_UART_LCR_PEN | PCH_UART_LCR_EPS) +#define PCH_UART_LCR_1P (PCH_UART_LCR_PEN | PCH_UART_LCR_SP) +#define PCH_UART_LCR_0P (PCH_UART_LCR_PEN | PCH_UART_LCR_EPS |\ + PCH_UART_LCR_SP) + +#define PCH_UART_LCR_5BIT 0x00000000 +#define PCH_UART_LCR_6BIT 0x00000001 +#define PCH_UART_LCR_7BIT 0x00000002 +#define PCH_UART_LCR_8BIT 0x00000003 + +#define PCH_UART_MCR_DTR 0x00000001 +#define PCH_UART_MCR_RTS 0x00000002 +#define PCH_UART_MCR_OUT 0x0000000C +#define PCH_UART_MCR_LOOP 0x00000010 +#define PCH_UART_MCR_AFE 0x00000020 + +#define PCH_UART_LSR_DR 0x00000001 +#define PCH_UART_LSR_ERR (1<<7) + +#define PCH_UART_MSR_DCTS 0x00000001 
+#define PCH_UART_MSR_DDSR 0x00000002 +#define PCH_UART_MSR_TERI 0x00000004 +#define PCH_UART_MSR_DDCD 0x00000008 +#define PCH_UART_MSR_CTS 0x00000010 +#define PCH_UART_MSR_DSR 0x00000020 +#define PCH_UART_MSR_RI 0x00000040 +#define PCH_UART_MSR_DCD 0x00000080 +#define PCH_UART_MSR_DELTA (PCH_UART_MSR_DCTS | PCH_UART_MSR_DDSR |\ + PCH_UART_MSR_TERI | PCH_UART_MSR_DDCD) + +#define PCH_UART_DLL 0x00 +#define PCH_UART_DLM 0x01 + +#define PCH_UART_BRCSR 0x0E + +#define PCH_UART_IID_RLS (PCH_UART_IIR_REI) +#define PCH_UART_IID_RDR (PCH_UART_IIR_RRI) +#define PCH_UART_IID_RDR_TO (PCH_UART_IIR_RRI | PCH_UART_IIR_TOI) +#define PCH_UART_IID_THRE (PCH_UART_IIR_TRI) +#define PCH_UART_IID_MS (PCH_UART_IIR_MSI) + +#define PCH_UART_HAL_PARITY_NONE (PCH_UART_LCR_NP) +#define PCH_UART_HAL_PARITY_ODD (PCH_UART_LCR_OP) +#define PCH_UART_HAL_PARITY_EVEN (PCH_UART_LCR_EP) +#define PCH_UART_HAL_PARITY_FIX1 (PCH_UART_LCR_1P) +#define PCH_UART_HAL_PARITY_FIX0 (PCH_UART_LCR_0P) +#define PCH_UART_HAL_5BIT (PCH_UART_LCR_5BIT) +#define PCH_UART_HAL_6BIT (PCH_UART_LCR_6BIT) +#define PCH_UART_HAL_7BIT (PCH_UART_LCR_7BIT) +#define PCH_UART_HAL_8BIT (PCH_UART_LCR_8BIT) +#define PCH_UART_HAL_STB1 0 +#define PCH_UART_HAL_STB2 (PCH_UART_LCR_STB) + +#define PCH_UART_HAL_CLR_TX_FIFO (PCH_UART_FCR_TFR) +#define PCH_UART_HAL_CLR_RX_FIFO (PCH_UART_FCR_RFR) +#define PCH_UART_HAL_CLR_ALL_FIFO (PCH_UART_HAL_CLR_TX_FIFO | \ + PCH_UART_HAL_CLR_RX_FIFO) + +#define PCH_UART_HAL_DMA_MODE0 0 +#define PCH_UART_HAL_FIFO_DIS 0 +#define PCH_UART_HAL_FIFO16 (PCH_UART_FCR_FIFOE) +#define PCH_UART_HAL_FIFO256 (PCH_UART_FCR_FIFOE | \ + PCH_UART_FCR_FIFO256) +#define PCH_UART_HAL_FIFO64 (PCH_UART_HAL_FIFO256) +#define PCH_UART_HAL_TRIGGER1 (PCH_UART_FCR_RFTL1) +#define PCH_UART_HAL_TRIGGER64 (PCH_UART_FCR_RFTL64) +#define PCH_UART_HAL_TRIGGER128 (PCH_UART_FCR_RFTL128) +#define PCH_UART_HAL_TRIGGER224 (PCH_UART_FCR_RFTL224) +#define PCH_UART_HAL_TRIGGER16 (PCH_UART_FCR_RFTL16) +#define PCH_UART_HAL_TRIGGER32 (PCH_UART_FCR_RFTL32) +#define PCH_UART_HAL_TRIGGER56 (PCH_UART_FCR_RFTL56) +#define PCH_UART_HAL_TRIGGER4 (PCH_UART_FCR_RFTL4) +#define PCH_UART_HAL_TRIGGER8 (PCH_UART_FCR_RFTL8) +#define PCH_UART_HAL_TRIGGER14 (PCH_UART_FCR_RFTL14) +#define PCH_UART_HAL_TRIGGER_L (PCH_UART_FCR_RFTL64) +#define PCH_UART_HAL_TRIGGER_M (PCH_UART_FCR_RFTL128) +#define PCH_UART_HAL_TRIGGER_H (PCH_UART_FCR_RFTL224) + +#define PCH_UART_HAL_RX_INT (PCH_UART_IER_ERBFI) +#define PCH_UART_HAL_TX_INT (PCH_UART_IER_ETBEI) +#define PCH_UART_HAL_RX_ERR_INT (PCH_UART_IER_ELSI) +#define PCH_UART_HAL_MS_INT (PCH_UART_IER_EDSSI) +#define PCH_UART_HAL_ALL_INT (PCH_UART_IER_MASK) + +#define PCH_UART_HAL_DTR (PCH_UART_MCR_DTR) +#define PCH_UART_HAL_RTS (PCH_UART_MCR_RTS) +#define PCH_UART_HAL_OUT (PCH_UART_MCR_OUT) +#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP) +#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE) + +#define DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */ +#define CMITC_UARTCLK 192000000 /* 192.0000 MHz */ +#define FRI2_64_UARTCLK 64000000 /* 64.0000 MHz */ +#define FRI2_48_UARTCLK 48000000 /* 48.0000 MHz */ +#define NTC1_UARTCLK 64000000 /* 64.0000 MHz */ +#define MINNOW_UARTCLK 50000000 /* 50.0000 MHz */ + +struct pch_uart_buffer { + unsigned char *buf; + int size; +}; + +struct eg20t_port { + struct uart_port port; + int port_type; + void __iomem *membase; + resource_size_t mapbase; + unsigned int iobase; + struct pci_dev *pdev; + int fifo_size; + unsigned int uartclk; + int start_tx; + int start_rx; + int tx_empty; + int trigger; + int trigger_level; + struct 
pch_uart_buffer rxbuf; + unsigned int dmsr; + unsigned int fcr; + unsigned int mcr; + unsigned int use_dma; + struct dma_async_tx_descriptor *desc_tx; + struct dma_async_tx_descriptor *desc_rx; + struct pch_dma_slave param_tx; + struct pch_dma_slave param_rx; + struct dma_chan *chan_tx; + struct dma_chan *chan_rx; + struct scatterlist *sg_tx_p; + int nent; + int orig_nent; + struct scatterlist sg_rx; + int tx_dma_use; + void *rx_buf_virt; + dma_addr_t rx_buf_dma; + +#define IRQ_NAME_SIZE 17 + char irq_name[IRQ_NAME_SIZE]; + + /* protect the eg20t_port private structure and io access to membase */ + spinlock_t lock; +}; + +/** + * struct pch_uart_driver_data - private data structure for UART-DMA + * @port_type: The type of UART port + * @line_no: UART port line number (0, 1, 2...) + */ +struct pch_uart_driver_data { + int port_type; + int line_no; +}; + +enum pch_uart_num_t { + pch_et20t_uart0 = 0, + pch_et20t_uart1, + pch_et20t_uart2, + pch_et20t_uart3, + pch_ml7213_uart0, + pch_ml7213_uart1, + pch_ml7213_uart2, + pch_ml7223_uart0, + pch_ml7223_uart1, + pch_ml7831_uart0, + pch_ml7831_uart1, +}; + +static struct pch_uart_driver_data drv_dat[] = { + [pch_et20t_uart0] = {PORT_PCH_8LINE, 0}, + [pch_et20t_uart1] = {PORT_PCH_2LINE, 1}, + [pch_et20t_uart2] = {PORT_PCH_2LINE, 2}, + [pch_et20t_uart3] = {PORT_PCH_2LINE, 3}, + [pch_ml7213_uart0] = {PORT_PCH_8LINE, 0}, + [pch_ml7213_uart1] = {PORT_PCH_2LINE, 1}, + [pch_ml7213_uart2] = {PORT_PCH_2LINE, 2}, + [pch_ml7223_uart0] = {PORT_PCH_8LINE, 0}, + [pch_ml7223_uart1] = {PORT_PCH_2LINE, 1}, + [pch_ml7831_uart0] = {PORT_PCH_8LINE, 0}, + [pch_ml7831_uart1] = {PORT_PCH_2LINE, 1}, +}; + +#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE +static struct eg20t_port *pch_uart_ports[PCH_UART_NR]; +#endif +static unsigned int default_baud = 9600; +static unsigned int user_uartclk = 0; +static const int trigger_level_256[4] = { 1, 64, 128, 224 }; +static const int trigger_level_64[4] = { 1, 16, 32, 56 }; +static const int trigger_level_16[4] = { 1, 4, 8, 14 }; +static const int trigger_level_1[4] = { 1, 1, 1, 1 }; + +#define PCH_REGS_BUFSIZE 1024 + + +static ssize_t port_show_regs(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct eg20t_port *priv = file->private_data; + char *buf; + u32 len = 0; + ssize_t ret; + unsigned char lcr; + + buf = kzalloc(PCH_REGS_BUFSIZE, GFP_KERNEL); + if (!buf) + return 0; + + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "PCH EG20T port[%d] regs:\n", priv->port.line); + + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "=================================\n"); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "IER: \t0x%02x\n", ioread8(priv->membase + UART_IER)); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "IIR: \t0x%02x\n", ioread8(priv->membase + UART_IIR)); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "LCR: \t0x%02x\n", ioread8(priv->membase + UART_LCR)); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "MCR: \t0x%02x\n", ioread8(priv->membase + UART_MCR)); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "LSR: \t0x%02x\n", ioread8(priv->membase + UART_LSR)); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "MSR: \t0x%02x\n", ioread8(priv->membase + UART_MSR)); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "BRCSR: \t0x%02x\n", + ioread8(priv->membase + PCH_UART_BRCSR)); + + lcr = ioread8(priv->membase + UART_LCR); + iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "DLL: 
\t0x%02x\n", ioread8(priv->membase + UART_DLL)); + len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len, + "DLM: \t0x%02x\n", ioread8(priv->membase + UART_DLM)); + iowrite8(lcr, priv->membase + UART_LCR); + + if (len > PCH_REGS_BUFSIZE) + len = PCH_REGS_BUFSIZE; + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret; +} + +static const struct file_operations port_regs_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = port_show_regs, + .llseek = default_llseek, +}; + +static const struct dmi_system_id pch_uart_dmi_table[] = { + { + .ident = "CM-iTC", + { + DMI_MATCH(DMI_BOARD_NAME, "CM-iTC"), + }, + (void *)CMITC_UARTCLK, + }, + { + .ident = "FRI2", + { + DMI_MATCH(DMI_BIOS_VERSION, "FRI2"), + }, + (void *)FRI2_64_UARTCLK, + }, + { + .ident = "Fish River Island II", + { + DMI_MATCH(DMI_PRODUCT_NAME, "Fish River Island II"), + }, + (void *)FRI2_48_UARTCLK, + }, + { + .ident = "COMe-mTT", + { + DMI_MATCH(DMI_BOARD_NAME, "COMe-mTT"), + }, + (void *)NTC1_UARTCLK, + }, + { + .ident = "nanoETXexpress-TT", + { + DMI_MATCH(DMI_BOARD_NAME, "nanoETXexpress-TT"), + }, + (void *)NTC1_UARTCLK, + }, + { + .ident = "MinnowBoard", + { + DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"), + }, + (void *)MINNOW_UARTCLK, + }, + { } +}; + +/* Return UART clock, checking for board specific clocks. */ +static unsigned int pch_uart_get_uartclk(void) +{ + const struct dmi_system_id *d; + + if (user_uartclk) + return user_uartclk; + + d = dmi_first_match(pch_uart_dmi_table); + if (d) + return (unsigned long)d->driver_data; + + return DEFAULT_UARTCLK; +} + +static void pch_uart_hal_enable_interrupt(struct eg20t_port *priv, + unsigned int flag) +{ + u8 ier = ioread8(priv->membase + UART_IER); + ier |= flag & PCH_UART_IER_MASK; + iowrite8(ier, priv->membase + UART_IER); +} + +static void pch_uart_hal_disable_interrupt(struct eg20t_port *priv, + unsigned int flag) +{ + u8 ier = ioread8(priv->membase + UART_IER); + ier &= ~(flag & PCH_UART_IER_MASK); + iowrite8(ier, priv->membase + UART_IER); +} + +static int pch_uart_hal_set_line(struct eg20t_port *priv, unsigned int baud, + unsigned int parity, unsigned int bits, + unsigned int stb) +{ + unsigned int dll, dlm, lcr; + int div; + + div = DIV_ROUND_CLOSEST(priv->uartclk / 16, baud); + if (div < 0 || USHRT_MAX <= div) { + dev_err(priv->port.dev, "Invalid Baud(div=0x%x)\n", div); + return -EINVAL; + } + + dll = (unsigned int)div & 0x00FFU; + dlm = ((unsigned int)div >> 8) & 0x00FFU; + + if (parity & ~(PCH_UART_LCR_PEN | PCH_UART_LCR_EPS | PCH_UART_LCR_SP)) { + dev_err(priv->port.dev, "Invalid parity(0x%x)\n", parity); + return -EINVAL; + } + + if (bits & ~PCH_UART_LCR_WLS) { + dev_err(priv->port.dev, "Invalid bits(0x%x)\n", bits); + return -EINVAL; + } + + if (stb & ~PCH_UART_LCR_STB) { + dev_err(priv->port.dev, "Invalid STB(0x%x)\n", stb); + return -EINVAL; + } + + lcr = parity; + lcr |= bits; + lcr |= stb; + + dev_dbg(priv->port.dev, "%s:baud = %u, div = %04x, lcr = %02x (%lu)\n", + __func__, baud, div, lcr, jiffies); + iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR); + iowrite8(dll, priv->membase + PCH_UART_DLL); + iowrite8(dlm, priv->membase + PCH_UART_DLM); + iowrite8(lcr, priv->membase + UART_LCR); + + return 0; +} + +static int pch_uart_hal_fifo_reset(struct eg20t_port *priv, + unsigned int flag) +{ + if (flag & ~(PCH_UART_FCR_TFR | PCH_UART_FCR_RFR)) { + dev_err(priv->port.dev, "%s:Invalid flag(0x%x)\n", + __func__, flag); + return -EINVAL; + } + + iowrite8(PCH_UART_FCR_FIFOE | priv->fcr, priv->membase + UART_FCR); + 
iowrite8(PCH_UART_FCR_FIFOE | priv->fcr | flag, + priv->membase + UART_FCR); + iowrite8(priv->fcr, priv->membase + UART_FCR); + + return 0; +} + +static int pch_uart_hal_set_fifo(struct eg20t_port *priv, + unsigned int dmamode, + unsigned int fifo_size, unsigned int trigger) +{ + u8 fcr; + + if (dmamode & ~PCH_UART_FCR_DMS) { + dev_err(priv->port.dev, "%s:Invalid DMA Mode(0x%x)\n", + __func__, dmamode); + return -EINVAL; + } + + if (fifo_size & ~(PCH_UART_FCR_FIFOE | PCH_UART_FCR_FIFO256)) { + dev_err(priv->port.dev, "%s:Invalid FIFO SIZE(0x%x)\n", + __func__, fifo_size); + return -EINVAL; + } + + if (trigger & ~PCH_UART_FCR_RFTL) { + dev_err(priv->port.dev, "%s:Invalid TRIGGER(0x%x)\n", + __func__, trigger); + return -EINVAL; + } + + switch (priv->fifo_size) { + case 256: + priv->trigger_level = + trigger_level_256[trigger >> PCH_UART_FCR_RFTL_SHIFT]; + break; + case 64: + priv->trigger_level = + trigger_level_64[trigger >> PCH_UART_FCR_RFTL_SHIFT]; + break; + case 16: + priv->trigger_level = + trigger_level_16[trigger >> PCH_UART_FCR_RFTL_SHIFT]; + break; + default: + priv->trigger_level = + trigger_level_1[trigger >> PCH_UART_FCR_RFTL_SHIFT]; + break; + } + fcr = + dmamode | fifo_size | trigger | PCH_UART_FCR_RFR | PCH_UART_FCR_TFR; + iowrite8(PCH_UART_FCR_FIFOE, priv->membase + UART_FCR); + iowrite8(PCH_UART_FCR_FIFOE | PCH_UART_FCR_RFR | PCH_UART_FCR_TFR, + priv->membase + UART_FCR); + iowrite8(fcr, priv->membase + UART_FCR); + priv->fcr = fcr; + + return 0; +} + +static u8 pch_uart_hal_get_modem(struct eg20t_port *priv) +{ + unsigned int msr = ioread8(priv->membase + UART_MSR); + priv->dmsr = msr & PCH_UART_MSR_DELTA; + return (u8)msr; +} + +static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf, + int rx_size) +{ + int i; + u8 rbr, lsr; + struct uart_port *port = &priv->port; + + lsr = ioread8(priv->membase + UART_LSR); + for (i = 0, lsr = ioread8(priv->membase + UART_LSR); + i < rx_size && lsr & (UART_LSR_DR | UART_LSR_BI); + lsr = ioread8(priv->membase + UART_LSR)) { + rbr = ioread8(priv->membase + PCH_UART_RBR); + + if (lsr & UART_LSR_BI) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } + if (uart_handle_sysrq_char(port, rbr)) + continue; + + buf[i++] = rbr; + } + return i; +} + +static unsigned char pch_uart_hal_get_iid(struct eg20t_port *priv) +{ + return ioread8(priv->membase + UART_IIR) &\ + (PCH_UART_IIR_IID | PCH_UART_IIR_TOI | PCH_UART_IIR_IP); +} + +static u8 pch_uart_hal_get_line_status(struct eg20t_port *priv) +{ + return ioread8(priv->membase + UART_LSR); +} + +static void pch_uart_hal_set_break(struct eg20t_port *priv, int on) +{ + unsigned int lcr; + + lcr = ioread8(priv->membase + UART_LCR); + if (on) + lcr |= PCH_UART_LCR_SB; + else + lcr &= ~PCH_UART_LCR_SB; + + iowrite8(lcr, priv->membase + UART_LCR); +} + +static int push_rx(struct eg20t_port *priv, const unsigned char *buf, + int size) +{ + struct uart_port *port = &priv->port; + struct tty_port *tport = &port->state->port; + + tty_insert_flip_string(tport, buf, size); + tty_flip_buffer_push(tport); + + return 0; +} + +static int dma_push_rx(struct eg20t_port *priv, int size) +{ + int room; + struct uart_port *port = &priv->port; + struct tty_port *tport = &port->state->port; + + room = tty_buffer_request_room(tport, size); + + if (room < size) + dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", + size - room); + if (!room) + return 0; + + tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); + + port->icount.rx += room; + + return room; +} + +static void 
pch_free_dma(struct uart_port *port) +{ + struct eg20t_port *priv; + priv = container_of(port, struct eg20t_port, port); + + if (priv->chan_tx) { + dma_release_channel(priv->chan_tx); + priv->chan_tx = NULL; + } + if (priv->chan_rx) { + dma_release_channel(priv->chan_rx); + priv->chan_rx = NULL; + } + + if (priv->rx_buf_dma) { + dma_free_coherent(port->dev, port->fifosize, priv->rx_buf_virt, + priv->rx_buf_dma); + priv->rx_buf_virt = NULL; + priv->rx_buf_dma = 0; + } + + return; +} + +static bool filter(struct dma_chan *chan, void *slave) +{ + struct pch_dma_slave *param = slave; + + if ((chan->chan_id == param->chan_id) && (param->dma_dev == + chan->device->dev)) { + chan->private = param; + return true; + } else { + return false; + } +} + +static void pch_request_dma(struct uart_port *port) +{ + dma_cap_mask_t mask; + struct dma_chan *chan; + struct pci_dev *dma_dev; + struct pch_dma_slave *param; + struct eg20t_port *priv = + container_of(port, struct eg20t_port, port); + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Get DMA's dev information */ + dma_dev = pci_get_slot(priv->pdev->bus, + PCI_DEVFN(PCI_SLOT(priv->pdev->devfn), 0)); + + /* Set Tx DMA */ + param = &priv->param_tx; + param->dma_dev = &dma_dev->dev; + param->chan_id = priv->port.line * 2; /* Tx = 0, 2, 4, ... */ + + param->tx_reg = port->mapbase + UART_TX; + chan = dma_request_channel(mask, filter, param); + if (!chan) { + dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n", + __func__); + pci_dev_put(dma_dev); + return; + } + priv->chan_tx = chan; + + /* Set Rx DMA */ + param = &priv->param_rx; + param->dma_dev = &dma_dev->dev; + param->chan_id = priv->port.line * 2 + 1; /* Rx = Tx + 1 */ + + param->rx_reg = port->mapbase + UART_RX; + chan = dma_request_channel(mask, filter, param); + if (!chan) { + dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", + __func__); + dma_release_channel(priv->chan_tx); + priv->chan_tx = NULL; + pci_dev_put(dma_dev); + return; + } + + /* Get Consistent memory for DMA */ + priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize, + &priv->rx_buf_dma, GFP_KERNEL); + priv->chan_rx = chan; + + pci_dev_put(dma_dev); +} + +static void pch_dma_rx_complete(void *arg) +{ + struct eg20t_port *priv = arg; + struct uart_port *port = &priv->port; + int count; + + dma_sync_sg_for_cpu(port->dev, &priv->sg_rx, 1, DMA_FROM_DEVICE); + count = dma_push_rx(priv, priv->trigger_level); + if (count) + tty_flip_buffer_push(&port->state->port); + async_tx_ack(priv->desc_rx); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); +} + +static void pch_dma_tx_complete(void *arg) +{ + struct eg20t_port *priv = arg; + struct uart_port *port = &priv->port; + struct circ_buf *xmit = &port->state->xmit; + struct scatterlist *sg = priv->sg_tx_p; + int i; + + for (i = 0; i < priv->nent; i++, sg++) { + xmit->tail += sg_dma_len(sg); + port->icount.tx += sg_dma_len(sg); + } + xmit->tail &= UART_XMIT_SIZE - 1; + async_tx_ack(priv->desc_tx); + dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE); + priv->tx_dma_use = 0; + priv->nent = 0; + priv->orig_nent = 0; + kfree(priv->sg_tx_p); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT); +} + +static int handle_rx_to(struct eg20t_port *priv) +{ + struct pch_uart_buffer *buf; + int rx_size; + int ret; + if (!priv->start_rx) { + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); + return 0; + } + buf = &priv->rxbuf; + do { + rx_size = 
pch_uart_hal_read(priv, buf->buf, buf->size); + ret = push_rx(priv, buf->buf, rx_size); + if (ret) + return 0; + } while (rx_size == buf->size); + + return PCH_UART_HANDLED_RX_INT; +} + +static int handle_rx(struct eg20t_port *priv) +{ + return handle_rx_to(priv); +} + +static int dma_handle_rx(struct eg20t_port *priv) +{ + struct uart_port *port = &priv->port; + struct dma_async_tx_descriptor *desc; + struct scatterlist *sg; + + priv = container_of(port, struct eg20t_port, port); + sg = &priv->sg_rx; + + sg_init_table(&priv->sg_rx, 1); /* Initialize SG table */ + + sg_dma_len(sg) = priv->trigger_level; + + sg_set_page(&priv->sg_rx, virt_to_page(priv->rx_buf_virt), + sg_dma_len(sg), offset_in_page(priv->rx_buf_virt)); + + sg_dma_address(sg) = priv->rx_buf_dma; + + desc = dmaengine_prep_slave_sg(priv->chan_rx, + sg, 1, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + + if (!desc) + return 0; + + priv->desc_rx = desc; + desc->callback = pch_dma_rx_complete; + desc->callback_param = priv; + desc->tx_submit(desc); + dma_async_issue_pending(priv->chan_rx); + + return PCH_UART_HANDLED_RX_INT; +} + +static unsigned int handle_tx(struct eg20t_port *priv) +{ + struct uart_port *port = &priv->port; + struct circ_buf *xmit = &port->state->xmit; + int fifo_size; + int tx_empty; + + if (!priv->start_tx) { + dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n", + __func__, jiffies); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + priv->tx_empty = 1; + return 0; + } + + fifo_size = max(priv->fifo_size, 1); + tx_empty = 1; + if (port->x_char) { + iowrite8(port->x_char, priv->membase + PCH_UART_THR); + port->icount.tx++; + port->x_char = 0; + tx_empty = 0; + fifo_size--; + } + + while (!uart_tx_stopped(port) && !uart_circ_empty(xmit) && fifo_size) { + iowrite8(xmit->buf[xmit->tail], priv->membase + PCH_UART_THR); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + fifo_size--; + tx_empty = 0; + } + + priv->tx_empty = tx_empty; + + if (tx_empty) { + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + uart_write_wakeup(port); + } + + return PCH_UART_HANDLED_TX_INT; +} + +static unsigned int dma_handle_tx(struct eg20t_port *priv) +{ + struct uart_port *port = &priv->port; + struct circ_buf *xmit = &port->state->xmit; + struct scatterlist *sg; + int nent; + int fifo_size; + struct dma_async_tx_descriptor *desc; + int num; + int i; + int bytes; + int size; + int rem; + + if (!priv->start_tx) { + dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n", + __func__, jiffies); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + priv->tx_empty = 1; + return 0; + } + + if (priv->tx_dma_use) { + dev_dbg(priv->port.dev, "%s:Tx is not completed. 
(%lu)\n", + __func__, jiffies); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + priv->tx_empty = 1; + return 0; + } + + fifo_size = max(priv->fifo_size, 1); + + if (port->x_char) { + iowrite8(port->x_char, priv->membase + PCH_UART_THR); + port->icount.tx++; + port->x_char = 0; + fifo_size--; + } + + bytes = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + if (!bytes) { + dev_dbg(priv->port.dev, "%s 0 bytes return\n", __func__); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + uart_write_wakeup(port); + return 0; + } + + if (bytes > fifo_size) { + num = bytes / fifo_size + 1; + size = fifo_size; + rem = bytes % fifo_size; + } else { + num = 1; + size = bytes; + rem = bytes; + } + + dev_dbg(priv->port.dev, "%s num=%d size=%d rem=%d\n", + __func__, num, size, rem); + + priv->tx_dma_use = 1; + + priv->sg_tx_p = kmalloc_array(num, sizeof(struct scatterlist), GFP_ATOMIC); + if (!priv->sg_tx_p) { + dev_err(priv->port.dev, "%s:kzalloc Failed\n", __func__); + return 0; + } + + sg_init_table(priv->sg_tx_p, num); /* Initialize SG table */ + sg = priv->sg_tx_p; + + for (i = 0; i < num; i++, sg++) { + if (i == (num - 1)) + sg_set_page(sg, virt_to_page(xmit->buf), + rem, fifo_size * i); + else + sg_set_page(sg, virt_to_page(xmit->buf), + size, fifo_size * i); + } + + sg = priv->sg_tx_p; + nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE); + if (!nent) { + dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__); + return 0; + } + priv->orig_nent = num; + priv->nent = nent; + + for (i = 0; i < nent; i++, sg++) { + sg->offset = (xmit->tail & (UART_XMIT_SIZE - 1)) + + fifo_size * i; + sg_dma_address(sg) = (sg_dma_address(sg) & + ~(UART_XMIT_SIZE - 1)) + sg->offset; + if (i == (nent - 1)) + sg_dma_len(sg) = rem; + else + sg_dma_len(sg) = size; + } + + desc = dmaengine_prep_slave_sg(priv->chan_tx, + priv->sg_tx_p, nent, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(priv->port.dev, "%s:dmaengine_prep_slave_sg Failed\n", + __func__); + return 0; + } + dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE); + priv->desc_tx = desc; + desc->callback = pch_dma_tx_complete; + desc->callback_param = priv; + + desc->tx_submit(desc); + + dma_async_issue_pending(priv->chan_tx); + + return PCH_UART_HANDLED_TX_INT; +} + +static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr) +{ + struct uart_port *port = &priv->port; + struct tty_struct *tty = tty_port_tty_get(&port->state->port); + char *error_msg[5] = {}; + int i = 0; + + if (lsr & PCH_UART_LSR_ERR) + error_msg[i++] = "Error data in FIFO\n"; + + if (lsr & UART_LSR_FE) { + port->icount.frame++; + error_msg[i++] = " Framing Error\n"; + } + + if (lsr & UART_LSR_PE) { + port->icount.parity++; + error_msg[i++] = " Parity Error\n"; + } + + if (lsr & UART_LSR_OE) { + port->icount.overrun++; + error_msg[i++] = " Overrun Error\n"; + } + + if (tty == NULL) { + for (i = 0; error_msg[i] != NULL; i++) + dev_err(&priv->pdev->dev, error_msg[i]); + } else { + tty_kref_put(tty); + } +} + +static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) +{ + struct eg20t_port *priv = dev_id; + unsigned int handled; + u8 lsr; + int ret = 0; + unsigned char iid; + unsigned long flags; + int next = 1; + u8 msr; + + spin_lock_irqsave(&priv->lock, flags); + handled = 0; + while (next) { + iid = pch_uart_hal_get_iid(priv); + if (iid & PCH_UART_IIR_IP) /* No Interrupt */ + break; + switch (iid) { + case PCH_UART_IID_RLS: /* Receiver Line Status */ + lsr = pch_uart_hal_get_line_status(priv); 
+ if (lsr & (PCH_UART_LSR_ERR | UART_LSR_FE | + UART_LSR_PE | UART_LSR_OE)) { + pch_uart_err_ir(priv, lsr); + ret = PCH_UART_HANDLED_RX_ERR_INT; + } else { + ret = PCH_UART_HANDLED_LS_INT; + } + break; + case PCH_UART_IID_RDR: /* Received Data Ready */ + if (priv->use_dma) { + pch_uart_hal_disable_interrupt(priv, + PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); + ret = dma_handle_rx(priv); + if (!ret) + pch_uart_hal_enable_interrupt(priv, + PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); + } else { + ret = handle_rx(priv); + } + break; + case PCH_UART_IID_RDR_TO: /* Received Data Ready + (FIFO Timeout) */ + ret = handle_rx_to(priv); + break; + case PCH_UART_IID_THRE: /* Transmitter Holding Register + Empty */ + if (priv->use_dma) + ret = dma_handle_tx(priv); + else + ret = handle_tx(priv); + break; + case PCH_UART_IID_MS: /* Modem Status */ + msr = pch_uart_hal_get_modem(priv); + next = 0; /* MS ir prioirty is the lowest. So, MS ir + means final interrupt */ + if ((msr & UART_MSR_ANY_DELTA) == 0) + break; + ret |= PCH_UART_HANDLED_MS_INT; + break; + default: /* Never junp to this label */ + dev_err(priv->port.dev, "%s:iid=%02x (%lu)\n", __func__, + iid, jiffies); + ret = -1; + next = 0; + break; + } + handled |= (unsigned int)ret; + } + + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_RETVAL(handled); +} + +/* This function tests whether the transmitter fifo and shifter for the port + described by 'port' is empty. */ +static unsigned int pch_uart_tx_empty(struct uart_port *port) +{ + struct eg20t_port *priv; + + priv = container_of(port, struct eg20t_port, port); + if (priv->tx_empty) + return TIOCSER_TEMT; + else + return 0; +} + +/* Returns the current state of modem control inputs. */ +static unsigned int pch_uart_get_mctrl(struct uart_port *port) +{ + struct eg20t_port *priv; + u8 modem; + unsigned int ret = 0; + + priv = container_of(port, struct eg20t_port, port); + modem = pch_uart_hal_get_modem(priv); + + if (modem & UART_MSR_DCD) + ret |= TIOCM_CAR; + + if (modem & UART_MSR_RI) + ret |= TIOCM_RNG; + + if (modem & UART_MSR_DSR) + ret |= TIOCM_DSR; + + if (modem & UART_MSR_CTS) + ret |= TIOCM_CTS; + + return ret; +} + +static void pch_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 mcr = 0; + struct eg20t_port *priv = container_of(port, struct eg20t_port, port); + + if (mctrl & TIOCM_DTR) + mcr |= UART_MCR_DTR; + if (mctrl & TIOCM_RTS) + mcr |= UART_MCR_RTS; + if (mctrl & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + + if (priv->mcr & UART_MCR_AFE) + mcr |= UART_MCR_AFE; + + if (mctrl) + iowrite8(mcr, priv->membase + UART_MCR); +} + +static void pch_uart_stop_tx(struct uart_port *port) +{ + struct eg20t_port *priv; + priv = container_of(port, struct eg20t_port, port); + priv->start_tx = 0; + priv->tx_dma_use = 0; +} + +static void pch_uart_start_tx(struct uart_port *port) +{ + struct eg20t_port *priv; + + priv = container_of(port, struct eg20t_port, port); + + if (priv->use_dma) { + if (priv->tx_dma_use) { + dev_dbg(priv->port.dev, "%s : Tx DMA is NOT empty.\n", + __func__); + return; + } + } + + priv->start_tx = 1; + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT); +} + +static void pch_uart_stop_rx(struct uart_port *port) +{ + struct eg20t_port *priv; + priv = container_of(port, struct eg20t_port, port); + priv->start_rx = 0; + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); +} + +/* Enable the modem status interrupts. 
*/ +static void pch_uart_enable_ms(struct uart_port *port) +{ + struct eg20t_port *priv; + priv = container_of(port, struct eg20t_port, port); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_MS_INT); +} + +/* Control the transmission of a break signal. */ +static void pch_uart_break_ctl(struct uart_port *port, int ctl) +{ + struct eg20t_port *priv; + unsigned long flags; + + priv = container_of(port, struct eg20t_port, port); + spin_lock_irqsave(&priv->lock, flags); + pch_uart_hal_set_break(priv, ctl); + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Grab any interrupt resources and initialise any low level driver state. */ +static int pch_uart_startup(struct uart_port *port) +{ + struct eg20t_port *priv; + int ret; + int fifo_size; + int trigger_level; + + priv = container_of(port, struct eg20t_port, port); + priv->tx_empty = 1; + + if (port->uartclk) + priv->uartclk = port->uartclk; + else + port->uartclk = priv->uartclk; + + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT); + ret = pch_uart_hal_set_line(priv, default_baud, + PCH_UART_HAL_PARITY_NONE, PCH_UART_HAL_8BIT, + PCH_UART_HAL_STB1); + if (ret) + return ret; + + switch (priv->fifo_size) { + case 256: + fifo_size = PCH_UART_HAL_FIFO256; + break; + case 64: + fifo_size = PCH_UART_HAL_FIFO64; + break; + case 16: + fifo_size = PCH_UART_HAL_FIFO16; + break; + case 1: + default: + fifo_size = PCH_UART_HAL_FIFO_DIS; + break; + } + + switch (priv->trigger) { + case PCH_UART_HAL_TRIGGER1: + trigger_level = 1; + break; + case PCH_UART_HAL_TRIGGER_L: + trigger_level = priv->fifo_size / 4; + break; + case PCH_UART_HAL_TRIGGER_M: + trigger_level = priv->fifo_size / 2; + break; + case PCH_UART_HAL_TRIGGER_H: + default: + trigger_level = priv->fifo_size - (priv->fifo_size / 8); + break; + } + + priv->trigger_level = trigger_level; + ret = pch_uart_hal_set_fifo(priv, PCH_UART_HAL_DMA_MODE0, + fifo_size, priv->trigger); + if (ret < 0) + return ret; + + ret = request_irq(priv->port.irq, pch_uart_interrupt, IRQF_SHARED, + priv->irq_name, priv); + if (ret < 0) + return ret; + + if (priv->use_dma) + pch_request_dma(port); + + priv->start_rx = 1; + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); + uart_update_timeout(port, CS8, default_baud); + + return 0; +} + +static void pch_uart_shutdown(struct uart_port *port) +{ + struct eg20t_port *priv; + int ret; + + priv = container_of(port, struct eg20t_port, port); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT); + pch_uart_hal_fifo_reset(priv, PCH_UART_HAL_CLR_ALL_FIFO); + ret = pch_uart_hal_set_fifo(priv, PCH_UART_HAL_DMA_MODE0, + PCH_UART_HAL_FIFO_DIS, PCH_UART_HAL_TRIGGER1); + if (ret) + dev_err(priv->port.dev, + "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); + + pch_free_dma(port); + + free_irq(priv->port.irq, priv); +} + +/* Change the port parameters, including word length, parity, stop + *bits. Update read_status_mask and ignore_status_mask to indicate + *the types of events we are interested in receiving. 
*/ +static void pch_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + int rtn; + unsigned int baud, parity, bits, stb; + struct eg20t_port *priv; + unsigned long flags; + + priv = container_of(port, struct eg20t_port, port); + switch (termios->c_cflag & CSIZE) { + case CS5: + bits = PCH_UART_HAL_5BIT; + break; + case CS6: + bits = PCH_UART_HAL_6BIT; + break; + case CS7: + bits = PCH_UART_HAL_7BIT; + break; + default: /* CS8 */ + bits = PCH_UART_HAL_8BIT; + break; + } + if (termios->c_cflag & CSTOPB) + stb = PCH_UART_HAL_STB2; + else + stb = PCH_UART_HAL_STB1; + + if (termios->c_cflag & PARENB) { + if (termios->c_cflag & PARODD) + parity = PCH_UART_HAL_PARITY_ODD; + else + parity = PCH_UART_HAL_PARITY_EVEN; + + } else + parity = PCH_UART_HAL_PARITY_NONE; + + /* Only UART0 has auto hardware flow function */ + if ((termios->c_cflag & CRTSCTS) && (priv->fifo_size == 256)) + priv->mcr |= UART_MCR_AFE; + else + priv->mcr &= ~UART_MCR_AFE; + + termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + + spin_lock_irqsave(&priv->lock, flags); + spin_lock(&port->lock); + + uart_update_timeout(port, termios->c_cflag, baud); + rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb); + if (rtn) + goto out; + + pch_uart_set_mctrl(&priv->port, priv->port.mctrl); + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + +out: + spin_unlock(&port->lock); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static const char *pch_uart_type(struct uart_port *port) +{ + return KBUILD_MODNAME; +} + +static void pch_uart_release_port(struct uart_port *port) +{ + struct eg20t_port *priv; + + priv = container_of(port, struct eg20t_port, port); + pci_iounmap(priv->pdev, priv->membase); + pci_release_regions(priv->pdev); +} + +static int pch_uart_request_port(struct uart_port *port) +{ + struct eg20t_port *priv; + int ret; + void __iomem *membase; + + priv = container_of(port, struct eg20t_port, port); + ret = pci_request_regions(priv->pdev, KBUILD_MODNAME); + if (ret < 0) + return -EBUSY; + + membase = pci_iomap(priv->pdev, 1, 0); + if (!membase) { + pci_release_regions(priv->pdev); + return -EBUSY; + } + priv->membase = port->membase = membase; + + return 0; +} + +static void pch_uart_config_port(struct uart_port *port, int type) +{ + struct eg20t_port *priv; + + priv = container_of(port, struct eg20t_port, port); + if (type & UART_CONFIG_TYPE) { + port->type = priv->port_type; + pch_uart_request_port(port); + } +} + +static int pch_uart_verify_port(struct uart_port *port, + struct serial_struct *serinfo) +{ + struct eg20t_port *priv; + + priv = container_of(port, struct eg20t_port, port); + if (serinfo->flags & UPF_LOW_LATENCY) { + dev_info(priv->port.dev, + "PCH UART : Use PIO Mode (without DMA)\n"); + priv->use_dma = 0; + serinfo->flags &= ~UPF_LOW_LATENCY; + } else { +#ifndef CONFIG_PCH_DMA + dev_err(priv->port.dev, "%s : PCH DMA is not Loaded.\n", + __func__); + return -EOPNOTSUPP; +#endif + if (!priv->use_dma) { + pch_request_dma(port); + if (priv->chan_rx) + priv->use_dma = 1; + } + dev_info(priv->port.dev, "PCH UART: %s\n", + priv->use_dma ? 
+ "Use DMA Mode" : "No DMA"); + } + + return 0; +} + +#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_PCH_UART_CONSOLE) +/* + * Wait for transmitter & holding register to empty + */ +static void wait_for_xmitr(struct eg20t_port *up, int bits) +{ + unsigned int status, tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. */ + for (;;) { + status = ioread8(up->membase + UART_LSR); + + if ((status & bits) == bits) + break; + if (--tmout == 0) + break; + udelay(1); + } + + /* Wait up to 1s for flow control if necessary */ + if (up->port.flags & UPF_CONS_FLOW) { + unsigned int tmout; + for (tmout = 1000000; tmout; tmout--) { + unsigned int msr = ioread8(up->membase + UART_MSR); + if (msr & UART_MSR_CTS) + break; + udelay(1); + touch_nmi_watchdog(); + } + } +} +#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_PCH_UART_CONSOLE */ + +#ifdef CONFIG_CONSOLE_POLL +/* + * Console polling routines for communicate via uart while + * in an interrupt or debug context. + */ +static int pch_uart_get_poll_char(struct uart_port *port) +{ + struct eg20t_port *priv = + container_of(port, struct eg20t_port, port); + u8 lsr = ioread8(priv->membase + UART_LSR); + + if (!(lsr & UART_LSR_DR)) + return NO_POLL_CHAR; + + return ioread8(priv->membase + PCH_UART_RBR); +} + + +static void pch_uart_put_poll_char(struct uart_port *port, + unsigned char c) +{ + unsigned int ier; + struct eg20t_port *priv = + container_of(port, struct eg20t_port, port); + + /* + * First save the IER then disable the interrupts + */ + ier = ioread8(priv->membase + UART_IER); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT); + + wait_for_xmitr(priv, UART_LSR_THRE); + /* + * Send the character out. + */ + iowrite8(c, priv->membase + PCH_UART_THR); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY); + iowrite8(ier, priv->membase + UART_IER); +} +#endif /* CONFIG_CONSOLE_POLL */ + +static const struct uart_ops pch_uart_ops = { + .tx_empty = pch_uart_tx_empty, + .set_mctrl = pch_uart_set_mctrl, + .get_mctrl = pch_uart_get_mctrl, + .stop_tx = pch_uart_stop_tx, + .start_tx = pch_uart_start_tx, + .stop_rx = pch_uart_stop_rx, + .enable_ms = pch_uart_enable_ms, + .break_ctl = pch_uart_break_ctl, + .startup = pch_uart_startup, + .shutdown = pch_uart_shutdown, + .set_termios = pch_uart_set_termios, +/* .pm = pch_uart_pm, Not supported yet */ + .type = pch_uart_type, + .release_port = pch_uart_release_port, + .request_port = pch_uart_request_port, + .config_port = pch_uart_config_port, + .verify_port = pch_uart_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = pch_uart_get_poll_char, + .poll_put_char = pch_uart_put_poll_char, +#endif +}; + +#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE + +static void pch_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct eg20t_port *priv = + container_of(port, struct eg20t_port, port); + + wait_for_xmitr(priv, UART_LSR_THRE); + iowrite8(ch, priv->membase + PCH_UART_THR); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * The console_lock must be held when we get here. 
+ */ +static void +pch_console_write(struct console *co, const char *s, unsigned int count) +{ + struct eg20t_port *priv; + unsigned long flags; + int priv_locked = 1; + int port_locked = 1; + u8 ier; + + priv = pch_uart_ports[co->index]; + + touch_nmi_watchdog(); + + local_irq_save(flags); + if (priv->port.sysrq) { + /* call to uart_handle_sysrq_char already took the priv lock */ + priv_locked = 0; + /* serial8250_handle_port() already took the port lock */ + port_locked = 0; + } else if (oops_in_progress) { + priv_locked = spin_trylock(&priv->lock); + port_locked = spin_trylock(&priv->port.lock); + } else { + spin_lock(&priv->lock); + spin_lock(&priv->port.lock); + } + + /* + * First save the IER then disable the interrupts + */ + ier = ioread8(priv->membase + UART_IER); + + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT); + + uart_console_write(&priv->port, s, count, pch_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY); + iowrite8(ier, priv->membase + UART_IER); + + if (port_locked) + spin_unlock(&priv->port.lock); + if (priv_locked) + spin_unlock(&priv->lock); + local_irq_restore(flags); +} + +static int __init pch_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = default_baud; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index >= PCH_UART_NR) + co->index = 0; + port = &pch_uart_ports[co->index]->port; + + if (!port || (!port->iobase && !port->membase)) + return -ENODEV; + + port->uartclk = pch_uart_get_uartclk(); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver pch_uart_driver; + +static struct console pch_console = { + .name = PCH_UART_DRIVER_DEVICE, + .write = pch_console_write, + .device = uart_console_device, + .setup = pch_console_setup, + .flags = CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, + .data = &pch_uart_driver, +}; + +#define PCH_CONSOLE (&pch_console) +#else +#define PCH_CONSOLE NULL +#endif /* CONFIG_SERIAL_PCH_UART_CONSOLE */ + +static struct uart_driver pch_uart_driver = { + .owner = THIS_MODULE, + .driver_name = KBUILD_MODNAME, + .dev_name = PCH_UART_DRIVER_DEVICE, + .major = 0, + .minor = 0, + .nr = PCH_UART_NR, + .cons = PCH_CONSOLE, +}; + +static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct eg20t_port *priv; + int ret; + unsigned int iobase; + unsigned int mapbase; + unsigned char *rxbuf; + int fifosize; + int port_type; + struct pch_uart_driver_data *board; + char name[32]; + + board = &drv_dat[id->driver_data]; + port_type = board->port_type; + + priv = kzalloc(sizeof(struct eg20t_port), GFP_KERNEL); + if (priv == NULL) + goto init_port_alloc_err; + + rxbuf = (unsigned char *)__get_free_page(GFP_KERNEL); + if (!rxbuf) + goto init_port_free_txbuf; + + switch (port_type) { + case PORT_PCH_8LINE: + fifosize = 256; /* EG20T/ML7213: UART0 */ + break; + case PORT_PCH_2LINE: + fifosize = 64; /* EG20T:UART1~3 ML7213: UART1~2*/ + break; + default: + dev_err(&pdev->dev, "Invalid Port Type(=%d)\n", port_type); + goto init_port_hal_free; + } + + pci_enable_msi(pdev); + pci_set_master(pdev); + + spin_lock_init(&priv->lock); + + iobase = pci_resource_start(pdev, 0); 
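+ /* BAR 0 holds the legacy I/O ports and BAR 1 the memory-mapped registers that pch_uart_request_port() maps via pci_iomap() */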
+ mapbase = pci_resource_start(pdev, 1); + priv->mapbase = mapbase; + priv->iobase = iobase; + priv->pdev = pdev; + priv->tx_empty = 1; + priv->rxbuf.buf = rxbuf; + priv->rxbuf.size = PAGE_SIZE; + + priv->fifo_size = fifosize; + priv->uartclk = pch_uart_get_uartclk(); + priv->port_type = port_type; + priv->port.dev = &pdev->dev; + priv->port.iobase = iobase; + priv->port.membase = NULL; + priv->port.mapbase = mapbase; + priv->port.irq = pdev->irq; + priv->port.iotype = UPIO_PORT; + priv->port.ops = &pch_uart_ops; + priv->port.flags = UPF_BOOT_AUTOCONF; + priv->port.fifosize = fifosize; + priv->port.line = board->line_no; + priv->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PCH_UART_CONSOLE); + priv->trigger = PCH_UART_HAL_TRIGGER_M; + + snprintf(priv->irq_name, IRQ_NAME_SIZE, + KBUILD_MODNAME ":" PCH_UART_DRIVER_DEVICE "%d", + priv->port.line); + + spin_lock_init(&priv->port.lock); + + pci_set_drvdata(pdev, priv); + priv->trigger_level = 1; + priv->fcr = 0; + + if (pdev->dev.of_node) + of_property_read_u32(pdev->dev.of_node, "clock-frequency" + , &user_uartclk); + +#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE + pch_uart_ports[board->line_no] = priv; +#endif + ret = uart_add_one_port(&pch_uart_driver, &priv->port); + if (ret < 0) + goto init_port_hal_free; + + snprintf(name, sizeof(name), "uart%d_regs", priv->port.line); + debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, priv, + &port_regs_ops); + + return priv; + +init_port_hal_free: +#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE + pch_uart_ports[board->line_no] = NULL; +#endif + free_page((unsigned long)rxbuf); +init_port_free_txbuf: + kfree(priv); +init_port_alloc_err: + + return NULL; +} + +static void pch_uart_exit_port(struct eg20t_port *priv) +{ + char name[32]; + + snprintf(name, sizeof(name), "uart%d_regs", priv->port.line); + debugfs_lookup_and_remove(name, NULL); + uart_remove_one_port(&pch_uart_driver, &priv->port); + free_page((unsigned long)priv->rxbuf.buf); +} + +static void pch_uart_pci_remove(struct pci_dev *pdev) +{ + struct eg20t_port *priv = pci_get_drvdata(pdev); + + pci_disable_msi(pdev); + +#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE + pch_uart_ports[priv->port.line] = NULL; +#endif + pch_uart_exit_port(priv); + pci_disable_device(pdev); + kfree(priv); + return; +} + +static int __maybe_unused pch_uart_pci_suspend(struct device *dev) +{ + struct eg20t_port *priv = dev_get_drvdata(dev); + + uart_suspend_port(&pch_uart_driver, &priv->port); + + return 0; +} + +static int __maybe_unused pch_uart_pci_resume(struct device *dev) +{ + struct eg20t_port *priv = dev_get_drvdata(dev); + + uart_resume_port(&pch_uart_driver, &priv->port); + + return 0; +} + +static const struct pci_device_id pch_uart_pci_id[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8811), + .driver_data = pch_et20t_uart0}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8812), + .driver_data = pch_et20t_uart1}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8813), + .driver_data = pch_et20t_uart2}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8814), + .driver_data = pch_et20t_uart3}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8027), + .driver_data = pch_ml7213_uart0}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8028), + .driver_data = pch_ml7213_uart1}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8029), + .driver_data = pch_ml7213_uart2}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800C), + .driver_data = pch_ml7223_uart0}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D), + .driver_data = pch_ml7223_uart1}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811), + .driver_data = pch_ml7831_uart0}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812), + .driver_data = 
pch_ml7831_uart1}, + {0,}, +}; + +static int pch_uart_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int ret; + struct eg20t_port *priv; + + ret = pci_enable_device(pdev); + if (ret < 0) + goto probe_error; + + priv = pch_uart_init_port(pdev, id); + if (!priv) { + ret = -EBUSY; + goto probe_disable_device; + } + pci_set_drvdata(pdev, priv); + + return ret; + +probe_disable_device: + pci_disable_msi(pdev); + pci_disable_device(pdev); +probe_error: + return ret; +} + +static SIMPLE_DEV_PM_OPS(pch_uart_pci_pm_ops, + pch_uart_pci_suspend, + pch_uart_pci_resume); + +static struct pci_driver pch_uart_pci_driver = { + .name = "pch_uart", + .id_table = pch_uart_pci_id, + .probe = pch_uart_pci_probe, + .remove = pch_uart_pci_remove, + .driver.pm = &pch_uart_pci_pm_ops, +}; + +static int __init pch_uart_module_init(void) +{ + int ret; + + /* register as UART driver */ + ret = uart_register_driver(&pch_uart_driver); + if (ret < 0) + return ret; + + /* register as PCI driver */ + ret = pci_register_driver(&pch_uart_pci_driver); + if (ret < 0) + uart_unregister_driver(&pch_uart_driver); + + return ret; +} +module_init(pch_uart_module_init); + +static void __exit pch_uart_module_exit(void) +{ + pci_unregister_driver(&pch_uart_pci_driver); + uart_unregister_driver(&pch_uart_driver); +} +module_exit(pch_uart_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel EG20T PCH UART PCI Driver"); +MODULE_DEVICE_TABLE(pci, pch_uart_pci_id); + +module_param(default_baud, uint, S_IRUGO); +MODULE_PARM_DESC(default_baud, + "Default BAUD for initial driver state and console (default 9600)"); +module_param(user_uartclk, uint, S_IRUGO); +MODULE_PARM_DESC(user_uartclk, + "Override UART default or board specific UART clock"); diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c new file mode 100644 index 000000000..2beada66c --- /dev/null +++ b/drivers/tty/serial/pic32_uart.c @@ -0,0 +1,1004 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PIC32 Integrated Serial Driver. + * + * Copyright (C) 2015 Microchip Technology, Inc. 
+ * + * Authors: + * Sorin-Andrei Pistirica + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* UART name and device definitions */ +#define PIC32_DEV_NAME "pic32-uart" +#define PIC32_MAX_UARTS 6 +#define PIC32_SDEV_NAME "ttyPIC" + +#define PIC32_UART_DFLT_BRATE 9600 +#define PIC32_UART_TX_FIFO_DEPTH 8 +#define PIC32_UART_RX_FIFO_DEPTH 8 + +#define PIC32_UART_MODE 0x00 +#define PIC32_UART_STA 0x10 +#define PIC32_UART_TX 0x20 +#define PIC32_UART_RX 0x30 +#define PIC32_UART_BRG 0x40 + +/* struct pic32_sport - pic32 serial port descriptor + * @port: uart port descriptor + * @idx: port index + * @irq_fault: virtual fault interrupt number + * @irq_fault_name: irq fault name + * @irq_rx: virtual rx interrupt number + * @irq_rx_name: irq rx name + * @irq_tx: virtual tx interrupt number + * @irq_tx_name: irq tx name + * @cts_gpiod: clear to send GPIO + * @dev: device descriptor + **/ +struct pic32_sport { + struct uart_port port; + int idx; + + int irq_fault; + const char *irq_fault_name; + int irq_rx; + const char *irq_rx_name; + int irq_tx; + const char *irq_tx_name; + bool enable_tx_irq; + + struct gpio_desc *cts_gpiod; + + struct clk *clk; + + struct device *dev; +}; + +static inline struct pic32_sport *to_pic32_sport(struct uart_port *port) +{ + return container_of(port, struct pic32_sport, port); +} + +static inline void pic32_uart_writel(struct pic32_sport *sport, + u32 reg, u32 val) +{ + __raw_writel(val, sport->port.membase + reg); +} + +static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg) +{ + return __raw_readl(sport->port.membase + reg); +} + +/* pic32 uart mode register bits */ +#define PIC32_UART_MODE_ON BIT(15) +#define PIC32_UART_MODE_FRZ BIT(14) +#define PIC32_UART_MODE_SIDL BIT(13) +#define PIC32_UART_MODE_IREN BIT(12) +#define PIC32_UART_MODE_RTSMD BIT(11) +#define PIC32_UART_MODE_RESV1 BIT(10) +#define PIC32_UART_MODE_UEN1 BIT(9) +#define PIC32_UART_MODE_UEN0 BIT(8) +#define PIC32_UART_MODE_WAKE BIT(7) +#define PIC32_UART_MODE_LPBK BIT(6) +#define PIC32_UART_MODE_ABAUD BIT(5) +#define PIC32_UART_MODE_RXINV BIT(4) +#define PIC32_UART_MODE_BRGH BIT(3) +#define PIC32_UART_MODE_PDSEL1 BIT(2) +#define PIC32_UART_MODE_PDSEL0 BIT(1) +#define PIC32_UART_MODE_STSEL BIT(0) + +/* pic32 uart status register bits */ +#define PIC32_UART_STA_UTXISEL1 BIT(15) +#define PIC32_UART_STA_UTXISEL0 BIT(14) +#define PIC32_UART_STA_UTXINV BIT(13) +#define PIC32_UART_STA_URXEN BIT(12) +#define PIC32_UART_STA_UTXBRK BIT(11) +#define PIC32_UART_STA_UTXEN BIT(10) +#define PIC32_UART_STA_UTXBF BIT(9) +#define PIC32_UART_STA_TRMT BIT(8) +#define PIC32_UART_STA_URXISEL1 BIT(7) +#define PIC32_UART_STA_URXISEL0 BIT(6) +#define PIC32_UART_STA_ADDEN BIT(5) +#define PIC32_UART_STA_RIDLE BIT(4) +#define PIC32_UART_STA_PERR BIT(3) +#define PIC32_UART_STA_FERR BIT(2) +#define PIC32_UART_STA_OERR BIT(1) +#define PIC32_UART_STA_URXDA BIT(0) + +/* pic32_sport pointer for console use */ +static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS]; + +static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport) +{ + /* wait for tx empty, otherwise chars will be lost or corrupted */ + while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT)) + udelay(1); +} + +/* serial core request to check if uart tx buffer is empty */ +static unsigned int pic32_uart_tx_empty(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + u32 val = 
pic32_uart_readl(sport, PIC32_UART_STA); + + return (val & PIC32_UART_STA_TRMT) ? 1 : 0; +} + +/* serial core request to set UART outputs */ +static void pic32_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct pic32_sport *sport = to_pic32_sport(port); + + /* set loopback mode */ + if (mctrl & TIOCM_LOOP) + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE), + PIC32_UART_MODE_LPBK); + else + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_LPBK); +} + +/* serial core request to return the state of misc UART input pins */ +static unsigned int pic32_uart_get_mctrl(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + unsigned int mctrl = 0; + + /* get the state of CTS input pin for this port */ + if (!sport->cts_gpiod) + mctrl |= TIOCM_CTS; + else if (gpiod_get_value(sport->cts_gpiod)) + mctrl |= TIOCM_CTS; + + /* DSR and CD are not supported in PIC32, so return 1 + * RI is not supported in PIC32, so return 0 + */ + mctrl |= TIOCM_CD; + mctrl |= TIOCM_DSR; + + return mctrl; +} + +/* stop tx and start tx are not called in pairs, therefore a flag indicates + * the status of irq to control the irq-depth. + */ +static inline void pic32_uart_irqtxen(struct pic32_sport *sport, u8 en) +{ + if (en && !sport->enable_tx_irq) { + enable_irq(sport->irq_tx); + sport->enable_tx_irq = true; + } else if (!en && sport->enable_tx_irq) { + /* use disable_irq_nosync() and not disable_irq() to avoid self + * imposed deadlock by not waiting for irq handler to end, + * since this callback is called from interrupt context. + */ + disable_irq_nosync(sport->irq_tx); + sport->enable_tx_irq = false; + } +} + +/* serial core request to disable tx ASAP (used for flow control) */ +static void pic32_uart_stop_tx(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + + if (!(pic32_uart_readl(sport, PIC32_UART_MODE) & PIC32_UART_MODE_ON)) + return; + + if (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_UTXEN)) + return; + + /* wait for tx empty */ + pic32_wait_deplete_txbuf(sport); + + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA), + PIC32_UART_STA_UTXEN); + pic32_uart_irqtxen(sport, 0); +} + +/* serial core request to (re)enable tx */ +static void pic32_uart_start_tx(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + + pic32_uart_irqtxen(sport, 1); + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA), + PIC32_UART_STA_UTXEN); +} + +/* serial core request to stop rx, called before port shutdown */ +static void pic32_uart_stop_rx(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + + /* disable rx interrupts */ + disable_irq(sport->irq_rx); + + /* receiver Enable bit OFF */ + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA), + PIC32_UART_STA_URXEN); +} + +/* serial core request to start/stop emitting break char */ +static void pic32_uart_break_ctl(struct uart_port *port, int ctl) +{ + struct pic32_sport *sport = to_pic32_sport(port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + if (ctl) + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA), + PIC32_UART_STA_UTXBRK); + else + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA), + PIC32_UART_STA_UTXBRK); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* get port type in string format */ +static const char *pic32_uart_type(struct uart_port *port) +{ + return (port->type == PORT_PIC32) ? 
PIC32_DEV_NAME : NULL; +} + +/* read all chars in rx fifo and send them to core */ +static void pic32_uart_do_rx(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + struct tty_port *tty; + unsigned int max_count; + + /* limit number of chars read in interrupt, should not be + * higher than fifo size anyway since we're much faster than + * serial port + */ + max_count = PIC32_UART_RX_FIFO_DEPTH; + + spin_lock(&port->lock); + + tty = &port->state->port; + + do { + u32 sta_reg, c; + char flag; + + /* get overrun/fifo empty information from status register */ + sta_reg = pic32_uart_readl(sport, PIC32_UART_STA); + if (unlikely(sta_reg & PIC32_UART_STA_OERR)) { + + /* fifo reset is required to clear interrupt */ + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA), + PIC32_UART_STA_OERR); + + port->icount.overrun++; + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + } + + /* Can at least one more character be read? */ + if (!(sta_reg & PIC32_UART_STA_URXDA)) + break; + + /* read the character and increment the rx counter */ + c = pic32_uart_readl(sport, PIC32_UART_RX); + + port->icount.rx++; + flag = TTY_NORMAL; + c &= 0xff; + + if (unlikely((sta_reg & PIC32_UART_STA_PERR) || + (sta_reg & PIC32_UART_STA_FERR))) { + + /* do stats first */ + if (sta_reg & PIC32_UART_STA_PERR) + port->icount.parity++; + if (sta_reg & PIC32_UART_STA_FERR) + port->icount.frame++; + + /* update flag wrt read_status_mask */ + sta_reg &= port->read_status_mask; + + if (sta_reg & PIC32_UART_STA_FERR) + flag = TTY_FRAME; + if (sta_reg & PIC32_UART_STA_PERR) + flag = TTY_PARITY; + } + + if (uart_handle_sysrq_char(port, c)) + continue; + + if ((sta_reg & port->ignore_status_mask) == 0) + tty_insert_flip_char(tty, c, flag); + + } while (--max_count); + + spin_unlock(&port->lock); + + tty_flip_buffer_push(tty); +} + +/* fill tx fifo with chars to send, stop when fifo is about to be full + * or when all chars have been sent.
+ */ +static void pic32_uart_do_tx(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + struct circ_buf *xmit = &port->state->xmit; + unsigned int max_count = PIC32_UART_TX_FIFO_DEPTH; + + if (port->x_char) { + pic32_uart_writel(sport, PIC32_UART_TX, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_tx_stopped(port)) { + pic32_uart_stop_tx(port); + return; + } + + if (uart_circ_empty(xmit)) + goto txq_empty; + + /* keep stuffing chars into uart tx buffer + * 1) until uart fifo is full + * or + * 2) until the circ buffer is empty + * (all chars have been sent) + * or + * 3) until the max count is reached + * (prevents lingering here for too long in certain cases) + */ + while (!(PIC32_UART_STA_UTXBF & + pic32_uart_readl(sport, PIC32_UART_STA))) { + unsigned int c = xmit->buf[xmit->tail]; + + pic32_uart_writel(sport, PIC32_UART_TX, c); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + if (--max_count == 0) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + goto txq_empty; + + return; + +txq_empty: + pic32_uart_irqtxen(sport, 0); +} + +/* RX interrupt handler */ +static irqreturn_t pic32_uart_rx_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + + pic32_uart_do_rx(port); + + return IRQ_HANDLED; +} + +/* TX interrupt handler */ +static irqreturn_t pic32_uart_tx_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + pic32_uart_do_tx(port); + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_HANDLED; +} + +/* FAULT interrupt handler */ +static irqreturn_t pic32_uart_fault_interrupt(int irq, void *dev_id) +{ + /* do nothing: pic32_uart_do_rx() handles faults. 
*/ + return IRQ_HANDLED; +} + +/* enable rx & tx operation on uart */ +static void pic32_uart_en_and_unmask(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA), + PIC32_UART_STA_UTXEN | PIC32_UART_STA_URXEN); + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE), + PIC32_UART_MODE_ON); +} + +/* disable rx & tx operation on uart */ +static void pic32_uart_dsbl_and_mask(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + + /* wait for tx empty, otherwise chars will be lost or corrupted */ + pic32_wait_deplete_txbuf(sport); + + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA), + PIC32_UART_STA_UTXEN | PIC32_UART_STA_URXEN); + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_ON); +} + +/* serial core request to initialize uart and start rx operation */ +static int pic32_uart_startup(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + u32 dflt_baud = (port->uartclk / PIC32_UART_DFLT_BRATE / 16) - 1; + unsigned long flags; + int ret; + + local_irq_save(flags); + + ret = clk_prepare_enable(sport->clk); + if (ret) { + local_irq_restore(flags); + goto out_done; + } + + /* clear status and mode registers */ + pic32_uart_writel(sport, PIC32_UART_MODE, 0); + pic32_uart_writel(sport, PIC32_UART_STA, 0); + + /* disable uart and mask all interrupts */ + pic32_uart_dsbl_and_mask(port); + + /* set default baud */ + pic32_uart_writel(sport, PIC32_UART_BRG, dflt_baud); + + local_irq_restore(flags); + + /* Each UART of a PIC32 has three interrupts therefore, + * we setup driver to register the 3 irqs for the device. + * + * For each irq request_irq() is called with interrupt disabled. + * And the irq is enabled as soon as we are ready to handle them. + */ + sport->enable_tx_irq = false; + + sport->irq_fault_name = kasprintf(GFP_KERNEL, "%s%d-fault", + pic32_uart_type(port), + sport->idx); + if (!sport->irq_fault_name) { + dev_err(port->dev, "%s: kasprintf err!", __func__); + ret = -ENOMEM; + goto out_disable_clk; + } + irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN); + ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt, + IRQF_NO_THREAD, sport->irq_fault_name, port); + if (ret) { + dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n", + __func__, sport->irq_fault, ret, + pic32_uart_type(port)); + goto out_f; + } + + sport->irq_rx_name = kasprintf(GFP_KERNEL, "%s%d-rx", + pic32_uart_type(port), + sport->idx); + if (!sport->irq_rx_name) { + dev_err(port->dev, "%s: kasprintf err!", __func__); + ret = -ENOMEM; + goto out_f; + } + irq_set_status_flags(sport->irq_rx, IRQ_NOAUTOEN); + ret = request_irq(sport->irq_rx, pic32_uart_rx_interrupt, + IRQF_NO_THREAD, sport->irq_rx_name, port); + if (ret) { + dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n", + __func__, sport->irq_rx, ret, + pic32_uart_type(port)); + goto out_r; + } + + sport->irq_tx_name = kasprintf(GFP_KERNEL, "%s%d-tx", + pic32_uart_type(port), + sport->idx); + if (!sport->irq_tx_name) { + dev_err(port->dev, "%s: kasprintf err!", __func__); + ret = -ENOMEM; + goto out_r; + } + irq_set_status_flags(sport->irq_tx, IRQ_NOAUTOEN); + ret = request_irq(sport->irq_tx, pic32_uart_tx_interrupt, + IRQF_NO_THREAD, sport->irq_tx_name, port); + if (ret) { + dev_err(port->dev, "%s: request irq(%d) err! 
ret:%d name:%s\n", + __func__, sport->irq_tx, ret, + pic32_uart_type(port)); + goto out_t; + } + + local_irq_save(flags); + + /* set rx interrupt on first receive */ + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA), + PIC32_UART_STA_URXISEL1 | PIC32_UART_STA_URXISEL0); + + /* set interrupt on empty */ + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA), + PIC32_UART_STA_UTXISEL1); + + /* enable all interrupts and enable uart */ + pic32_uart_en_and_unmask(port); + + local_irq_restore(flags); + + enable_irq(sport->irq_rx); + + return 0; + +out_t: + free_irq(sport->irq_tx, port); + kfree(sport->irq_tx_name); +out_r: + free_irq(sport->irq_rx, port); + kfree(sport->irq_rx_name); +out_f: + free_irq(sport->irq_fault, port); + kfree(sport->irq_fault_name); +out_disable_clk: + clk_disable_unprepare(sport->clk); +out_done: + return ret; +} + +/* serial core request to flush & disable uart */ +static void pic32_uart_shutdown(struct uart_port *port) +{ + struct pic32_sport *sport = to_pic32_sport(port); + unsigned long flags; + + /* disable uart */ + spin_lock_irqsave(&port->lock, flags); + pic32_uart_dsbl_and_mask(port); + spin_unlock_irqrestore(&port->lock, flags); + clk_disable_unprepare(sport->clk); + + /* free all 3 interrupts for this UART */ + free_irq(sport->irq_fault, port); + kfree(sport->irq_fault_name); + free_irq(sport->irq_tx, port); + kfree(sport->irq_tx_name); + free_irq(sport->irq_rx, port); + kfree(sport->irq_rx_name); +} + +/* serial core request to change current uart setting */ +static void pic32_uart_set_termios(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old) +{ + struct pic32_sport *sport = to_pic32_sport(port); + unsigned int baud; + unsigned int quot; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* disable uart and mask all interrupts while changing speed */ + pic32_uart_dsbl_and_mask(port); + + /* stop bit options */ + if (new->c_cflag & CSTOPB) + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE), + PIC32_UART_MODE_STSEL); + else + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_STSEL); + + /* parity options */ + if (new->c_cflag & PARENB) { + if (new->c_cflag & PARODD) { + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE), + PIC32_UART_MODE_PDSEL1); + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_PDSEL0); + } else { + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE), + PIC32_UART_MODE_PDSEL0); + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_PDSEL1); + } + } else { + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_PDSEL1 | + PIC32_UART_MODE_PDSEL0); + } + /* if hw flow ctrl, then the pins must be specified in device tree */ + if ((new->c_cflag & CRTSCTS) && sport->cts_gpiod) { + /* enable hardware flow control */ + pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE), + PIC32_UART_MODE_UEN1); + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_UEN0); + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_RTSMD); + } else { + /* disable hardware flow control */ + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_UEN1); + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_UEN0); + pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE), + PIC32_UART_MODE_RTSMD); + } + + /* Always 8-bit */ + new->c_cflag |= CS8; + + /* Mark/Space parity is not supported */ + new->c_cflag &= ~CMSPAR; + + /* update baud */ + baud = uart_get_baud_rate(port, new,
old, 0, port->uartclk / 16); + quot = uart_get_divisor(port, baud) - 1; + pic32_uart_writel(sport, PIC32_UART_BRG, quot); + uart_update_timeout(port, new->c_cflag, baud); + + if (tty_termios_baud_rate(new)) + tty_termios_encode_baud_rate(new, baud, baud); + + /* enable uart */ + pic32_uart_en_and_unmask(port); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* serial core request to claim uart iomem */ +static int pic32_uart_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res_mem; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!res_mem)) + return -EINVAL; + + if (!request_mem_region(port->mapbase, resource_size(res_mem), + "pic32_uart_mem")) + return -EBUSY; + + port->membase = devm_ioremap(port->dev, port->mapbase, + resource_size(res_mem)); + if (!port->membase) { + dev_err(port->dev, "Unable to map registers\n"); + release_mem_region(port->mapbase, resource_size(res_mem)); + return -ENOMEM; + } + + return 0; +} + +/* serial core request to release uart iomem */ +static void pic32_uart_release_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res_mem; + unsigned int res_size; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!res_mem)) + return; + res_size = resource_size(res_mem); + + release_mem_region(port->mapbase, res_size); +} + +/* serial core request to do any port required auto-configuration */ +static void pic32_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + if (pic32_uart_request_port(port)) + return; + port->type = PORT_PIC32; + } +} + +/* serial core request to check that port information in serinfo are suitable */ +static int pic32_uart_verify_port(struct uart_port *port, + struct serial_struct *serinfo) +{ + if (port->type != PORT_PIC32) + return -EINVAL; + if (port->irq != serinfo->irq) + return -EINVAL; + if (port->iotype != serinfo->io_type) + return -EINVAL; + if (port->mapbase != (unsigned long)serinfo->iomem_base) + return -EINVAL; + + return 0; +} + +/* serial core callbacks */ +static const struct uart_ops pic32_uart_ops = { + .tx_empty = pic32_uart_tx_empty, + .get_mctrl = pic32_uart_get_mctrl, + .set_mctrl = pic32_uart_set_mctrl, + .start_tx = pic32_uart_start_tx, + .stop_tx = pic32_uart_stop_tx, + .stop_rx = pic32_uart_stop_rx, + .break_ctl = pic32_uart_break_ctl, + .startup = pic32_uart_startup, + .shutdown = pic32_uart_shutdown, + .set_termios = pic32_uart_set_termios, + .type = pic32_uart_type, + .release_port = pic32_uart_release_port, + .request_port = pic32_uart_request_port, + .config_port = pic32_uart_config_port, + .verify_port = pic32_uart_verify_port, +}; + +#ifdef CONFIG_SERIAL_PIC32_CONSOLE +/* output given char */ +static void pic32_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct pic32_sport *sport = to_pic32_sport(port); + + if (!(pic32_uart_readl(sport, PIC32_UART_MODE) & PIC32_UART_MODE_ON)) + return; + + if (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_UTXEN)) + return; + + /* wait for tx empty */ + pic32_wait_deplete_txbuf(sport); + + pic32_uart_writel(sport, PIC32_UART_TX, ch & 0xff); +} + +/* console core request to output given string */ +static void pic32_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct pic32_sport *sport = pic32_sports[co->index]; + + /* call uart helper to deal with \r\n */ + uart_console_write(&sport->port, 
s, count, pic32_console_putchar); +} + +/* console core request to setup given console, find matching uart + * port and setup it. + */ +static int pic32_console_setup(struct console *co, char *options) +{ + struct pic32_sport *sport; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret = 0; + + if (unlikely(co->index < 0 || co->index >= PIC32_MAX_UARTS)) + return -ENODEV; + + sport = pic32_sports[co->index]; + if (!sport) + return -ENODEV; + + ret = clk_prepare_enable(sport->clk); + if (ret) + return ret; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&sport->port, co, baud, parity, bits, flow); +} + +static struct uart_driver pic32_uart_driver; +static struct console pic32_console = { + .name = PIC32_SDEV_NAME, + .write = pic32_console_write, + .device = uart_console_device, + .setup = pic32_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &pic32_uart_driver, +}; +#define PIC32_SCONSOLE (&pic32_console) + +static int __init pic32_console_init(void) +{ + register_console(&pic32_console); + return 0; +} +console_initcall(pic32_console_init); + +/* + * Late console initialization. + */ +static int __init pic32_late_console_init(void) +{ + if (!(pic32_console.flags & CON_ENABLED)) + register_console(&pic32_console); + + return 0; +} + +core_initcall(pic32_late_console_init); + +#else +#define PIC32_SCONSOLE NULL +#endif + +static struct uart_driver pic32_uart_driver = { + .owner = THIS_MODULE, + .driver_name = PIC32_DEV_NAME, + .dev_name = PIC32_SDEV_NAME, + .nr = PIC32_MAX_UARTS, + .cons = PIC32_SCONSOLE, +}; + +static int pic32_uart_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct pic32_sport *sport; + int uart_idx = 0; + struct resource *res_mem; + struct uart_port *port; + int ret; + + uart_idx = of_alias_get_id(np, "serial"); + if (uart_idx < 0 || uart_idx >= PIC32_MAX_UARTS) + return -EINVAL; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) + return -EINVAL; + + sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); + if (!sport) + return -ENOMEM; + + sport->idx = uart_idx; + sport->irq_fault = irq_of_parse_and_map(np, 0); + sport->irq_rx = irq_of_parse_and_map(np, 1); + sport->irq_tx = irq_of_parse_and_map(np, 2); + sport->clk = devm_clk_get(&pdev->dev, NULL); + sport->dev = &pdev->dev; + + /* Hardware flow control: gpios + * !Note: Basically, CTS is needed for reading the status. + */ + sport->cts_gpiod = devm_gpiod_get_optional(dev, "cts", GPIOD_IN); + if (IS_ERR(sport->cts_gpiod)) + return dev_err_probe(dev, PTR_ERR(sport->cts_gpiod), "error requesting CTS GPIO\n"); + gpiod_set_consumer_name(sport->cts_gpiod, "CTS"); + + pic32_sports[uart_idx] = sport; + port = &sport->port; + port->iotype = UPIO_MEM; + port->mapbase = res_mem->start; + port->ops = &pic32_uart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->dev = &pdev->dev; + port->fifosize = PIC32_UART_TX_FIFO_DEPTH; + port->uartclk = clk_get_rate(sport->clk); + port->line = uart_idx; + + ret = uart_add_one_port(&pic32_uart_driver, port); + if (ret) { + port->membase = NULL; + dev_err(port->dev, "%s: uart add port error!\n", __func__); + goto err; + } + +#ifdef CONFIG_SERIAL_PIC32_CONSOLE + if (uart_console_enabled(port)) { + /* The peripheral clock has been enabled by console_setup, + * so disable it till the port is used. 
+ */ + clk_disable_unprepare(sport->clk); + } +#endif + + platform_set_drvdata(pdev, port); + + dev_info(&pdev->dev, "%s: uart(%d) driver initialized.\n", + __func__, uart_idx); + + return 0; +err: + /* automatic unroll of sport and gpios */ + return ret; +} + +static int pic32_uart_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + struct pic32_sport *sport = to_pic32_sport(port); + + uart_remove_one_port(&pic32_uart_driver, port); + clk_disable_unprepare(sport->clk); + platform_set_drvdata(pdev, NULL); + pic32_sports[sport->idx] = NULL; + + /* automatic unroll of sport and gpios */ + return 0; +} + +static const struct of_device_id pic32_serial_dt_ids[] = { + { .compatible = "microchip,pic32mzda-uart" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, pic32_serial_dt_ids); + +static struct platform_driver pic32_uart_platform_driver = { + .probe = pic32_uart_probe, + .remove = pic32_uart_remove, + .driver = { + .name = PIC32_DEV_NAME, + .of_match_table = of_match_ptr(pic32_serial_dt_ids), + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32), + }, +}; + +static int __init pic32_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&pic32_uart_driver); + if (ret) { + pr_err("failed to register %s:%d\n", + pic32_uart_driver.driver_name, ret); + return ret; + } + + ret = platform_driver_register(&pic32_uart_platform_driver); + if (ret) { + pr_err("fail to register pic32 uart\n"); + uart_unregister_driver(&pic32_uart_driver); + } + + return ret; +} +arch_initcall(pic32_uart_init); + +static void __exit pic32_uart_exit(void) +{ +#ifdef CONFIG_SERIAL_PIC32_CONSOLE + unregister_console(&pic32_console); +#endif + platform_driver_unregister(&pic32_uart_platform_driver); + uart_unregister_driver(&pic32_uart_driver); +} +module_exit(pic32_uart_exit); + +MODULE_AUTHOR("Sorin-Andrei Pistirica "); +MODULE_DESCRIPTION("Microchip PIC32 integrated serial port driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c new file mode 100644 index 000000000..fe2e4ec42 --- /dev/null +++ b/drivers/tty/serial/pmac_zilog.c @@ -0,0 +1,1996 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for PowerMac Z85c30 based ESCC cell found in the + * "macio" ASICs of various PowerMac models + * + * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org) + * + * Derived from drivers/macintosh/macserial.c by Paul Mackerras + * and drivers/serial/sunzilog.c by David S. Miller + * + * Hrm... actually, I ripped most of sunzilog (Thanks David !) and + * adapted special tweaks needed for us. I don't think it's worth + * merging back those though. The DMA code still has to get in + * and once done, I expect that driver to remain fairly stable in + * the long term, unless we change the driver model again... 
+ * + * 2004-08-06 Harald Welte + * - Enable BREAK interrupt + * - Add support for sysreq + * + * TODO: - Add DMA support + * - Defer port shutdown to a few seconds after close + * - maybe put something right into uap->clk_divisor + */ + +#undef DEBUG +#undef USE_CTRL_O_SYSRQ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PPC_PMAC +#include +#include +#include +#else +#include +#define of_machine_is_compatible(x) (0) +#endif + +#include +#include + +#include "pmac_zilog.h" + +MODULE_AUTHOR("Benjamin Herrenschmidt "); +MODULE_DESCRIPTION("Driver for the Mac and PowerMac serial ports."); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_SERIAL_PMACZILOG_TTYS +#define PMACZILOG_MAJOR TTY_MAJOR +#define PMACZILOG_MINOR 64 +#define PMACZILOG_NAME "ttyS" +#else +#define PMACZILOG_MAJOR 204 +#define PMACZILOG_MINOR 192 +#define PMACZILOG_NAME "ttyPZ" +#endif + +#define pmz_debug(fmt, arg...) pr_debug("ttyPZ%d: " fmt, uap->port.line, ## arg) +#define pmz_error(fmt, arg...) pr_err("ttyPZ%d: " fmt, uap->port.line, ## arg) +#define pmz_info(fmt, arg...) pr_info("ttyPZ%d: " fmt, uap->port.line, ## arg) + +/* + * For the sake of early serial console, we can do a pre-probe + * (optional) of the ports at rather early boot time. + */ +static struct uart_pmac_port pmz_ports[MAX_ZS_PORTS]; +static int pmz_ports_count; + +static struct uart_driver pmz_uart_reg = { + .owner = THIS_MODULE, + .driver_name = PMACZILOG_NAME, + .dev_name = PMACZILOG_NAME, + .major = PMACZILOG_MAJOR, + .minor = PMACZILOG_MINOR, +}; + + +/* + * Load all registers to reprogram the port + * This function must only be called when the TX is not busy. The UART + * port lock must be held and local interrupts disabled. + */ +static void pmz_load_zsregs(struct uart_pmac_port *uap, u8 *regs) +{ + int i; + + /* Let pending transmits finish. */ + for (i = 0; i < 1000; i++) { + unsigned char stat = read_zsreg(uap, R1); + if (stat & ALL_SNT) + break; + udelay(100); + } + + ZS_CLEARERR(uap); + zssync(uap); + ZS_CLEARFIFO(uap); + zssync(uap); + ZS_CLEARERR(uap); + + /* Disable all interrupts. */ + write_zsreg(uap, R1, + regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB)); + + /* Set parity, sync config, stop bits, and clock divisor. */ + write_zsreg(uap, R4, regs[R4]); + + /* Set misc. TX/RX control bits. */ + write_zsreg(uap, R10, regs[R10]); + + /* Set TX/RX controls sans the enable bits. */ + write_zsreg(uap, R3, regs[R3] & ~RxENABLE); + write_zsreg(uap, R5, regs[R5] & ~TxENABLE); + + /* now set R7 "prime" on ESCC */ + write_zsreg(uap, R15, regs[R15] | EN85C30); + write_zsreg(uap, R7, regs[R7P]); + + /* make sure we use R7 "non-prime" on ESCC */ + write_zsreg(uap, R15, regs[R15] & ~EN85C30); + + /* Synchronous mode config. */ + write_zsreg(uap, R6, regs[R6]); + write_zsreg(uap, R7, regs[R7]); + + /* Disable baud generator. */ + write_zsreg(uap, R14, regs[R14] & ~BRENAB); + + /* Clock mode control. */ + write_zsreg(uap, R11, regs[R11]); + + /* Lower and upper byte of baud rate generator divisor. */ + write_zsreg(uap, R12, regs[R12]); + write_zsreg(uap, R13, regs[R13]); + + /* Now rewrite R14, with BRENAB (if set). */ + write_zsreg(uap, R14, regs[R14]); + + /* Reset external status interrupts. */ + write_zsreg(uap, R0, RES_EXT_INT); + write_zsreg(uap, R0, RES_EXT_INT); + + /* Rewrite R3/R5, this time without enables masked. 
*/ + write_zsreg(uap, R3, regs[R3]); + write_zsreg(uap, R5, regs[R5]); + + /* Rewrite R1, this time without IRQ enabled masked. */ + write_zsreg(uap, R1, regs[R1]); + + /* Enable interrupts */ + write_zsreg(uap, R9, regs[R9]); +} + +/* + * We do like sunzilog to avoid disrupting pending Tx + * Reprogram the Zilog channel HW registers with the copies found in the + * software state struct. If the transmitter is busy, we defer this update + * until the next TX complete interrupt. Else, we do it right now. + * + * The UART port lock must be held and local interrupts disabled. + */ +static void pmz_maybe_update_regs(struct uart_pmac_port *uap) +{ + if (!ZS_REGS_HELD(uap)) { + if (ZS_TX_ACTIVE(uap)) { + uap->flags |= PMACZILOG_FLAG_REGS_HELD; + } else { + pmz_debug("pmz: maybe_update_regs: updating\n"); + pmz_load_zsregs(uap, uap->curregs); + } + } +} + +static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable) +{ + if (enable) { + uap->curregs[1] |= INT_ALL_Rx | TxINT_ENAB; + if (!ZS_IS_EXTCLK(uap)) + uap->curregs[1] |= EXT_INT_ENAB; + } else { + uap->curregs[1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); + } + write_zsreg(uap, R1, uap->curregs[1]); +} + +static bool pmz_receive_chars(struct uart_pmac_port *uap) + __must_hold(&uap->port.lock) +{ + struct tty_port *port; + unsigned char ch, r1, drop, flag; + int loops = 0; + + /* Sanity check, make sure the old bug is no longer happening */ + if (uap->port.state == NULL) { + WARN_ON(1); + (void)read_zsdata(uap); + return false; + } + port = &uap->port.state->port; + + while (1) { + drop = 0; + + r1 = read_zsreg(uap, R1); + ch = read_zsdata(uap); + + if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) { + write_zsreg(uap, R0, ERR_RES); + zssync(uap); + } + + ch &= uap->parity_mask; + if (ch == 0 && uap->flags & PMACZILOG_FLAG_BREAK) { + uap->flags &= ~PMACZILOG_FLAG_BREAK; + } + +#if defined(CONFIG_MAGIC_SYSRQ) && defined(CONFIG_SERIAL_CORE_CONSOLE) +#ifdef USE_CTRL_O_SYSRQ + /* Handle the SysRq ^O Hack */ + if (ch == '\x0f') { + uap->port.sysrq = jiffies + HZ*5; + goto next_char; + } +#endif /* USE_CTRL_O_SYSRQ */ + if (uap->port.sysrq) { + int swallow; + spin_unlock(&uap->port.lock); + swallow = uart_handle_sysrq_char(&uap->port, ch); + spin_lock(&uap->port.lock); + if (swallow) + goto next_char; + } +#endif /* CONFIG_MAGIC_SYSRQ && CONFIG_SERIAL_CORE_CONSOLE */ + + /* A real serial line, record the character and status. */ + if (drop) + goto next_char; + + flag = TTY_NORMAL; + uap->port.icount.rx++; + + if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR | BRK_ABRT)) { + if (r1 & BRK_ABRT) { + pmz_debug("pmz: got break !\n"); + r1 &= ~(PAR_ERR | CRC_ERR); + uap->port.icount.brk++; + if (uart_handle_break(&uap->port)) + goto next_char; + } + else if (r1 & PAR_ERR) + uap->port.icount.parity++; + else if (r1 & CRC_ERR) + uap->port.icount.frame++; + if (r1 & Rx_OVR) + uap->port.icount.overrun++; + r1 &= uap->port.read_status_mask; + if (r1 & BRK_ABRT) + flag = TTY_BREAK; + else if (r1 & PAR_ERR) + flag = TTY_PARITY; + else if (r1 & CRC_ERR) + flag = TTY_FRAME; + } + + if (uap->port.ignore_status_mask == 0xff || + (r1 & uap->port.ignore_status_mask) == 0) { + tty_insert_flip_char(port, ch, flag); + } + if (r1 & Rx_OVR) + tty_insert_flip_char(port, 0, TTY_OVERRUN); + next_char: + /* We can get stuck in an infinite loop getting char 0 when the + * line is in a wrong HW state, we break that here. + * When that happens, I disable the receive side of the driver. 
+ * Note that what I've been experiencing is a real irq loop where + * I'm getting flooded regardless of the actual port speed. + * Something strange is going on with the HW + */ + if ((++loops) > 1000) + goto flood; + ch = read_zsreg(uap, R0); + if (!(ch & Rx_CH_AV)) + break; + } + + return true; + flood: + pmz_interrupt_control(uap, 0); + pmz_error("pmz: rx irq flood !\n"); + return true; +} + +static void pmz_status_handle(struct uart_pmac_port *uap) +{ + unsigned char status; + + status = read_zsreg(uap, R0); + write_zsreg(uap, R0, RES_EXT_INT); + zssync(uap); + + if (ZS_IS_OPEN(uap) && ZS_WANTS_MODEM_STATUS(uap)) { + if (status & SYNC_HUNT) + uap->port.icount.dsr++; + + /* The Zilog just gives us an interrupt when DCD/CTS/etc. change. + * But it does not tell us which bit has changed, we have to keep + * track of this ourselves. + * The CTS input is inverted for some reason. -- paulus + */ + if ((status ^ uap->prev_status) & DCD) + uart_handle_dcd_change(&uap->port, + (status & DCD)); + if ((status ^ uap->prev_status) & CTS) + uart_handle_cts_change(&uap->port, + !(status & CTS)); + + wake_up_interruptible(&uap->port.state->port.delta_msr_wait); + } + + if (status & BRK_ABRT) + uap->flags |= PMACZILOG_FLAG_BREAK; + + uap->prev_status = status; +} + +static void pmz_transmit_chars(struct uart_pmac_port *uap) +{ + struct circ_buf *xmit; + + if (ZS_IS_CONS(uap)) { + unsigned char status = read_zsreg(uap, R0); + + /* TX still busy? Just wait for the next TX done interrupt. + * + * It can occur because of how we do serial console writes. It would + * be nice to transmit console writes just like we normally would for + * a TTY line. (ie. buffered and TX interrupt driven). That is not + * easy because console writes cannot sleep. One solution might be + * to poll on enough port->xmit space becoming free. -DaveM + */ + if (!(status & Tx_BUF_EMP)) + return; + } + + uap->flags &= ~PMACZILOG_FLAG_TX_ACTIVE; + + if (ZS_REGS_HELD(uap)) { + pmz_load_zsregs(uap, uap->curregs); + uap->flags &= ~PMACZILOG_FLAG_REGS_HELD; + } + + if (ZS_TX_STOPPED(uap)) { + uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED; + goto ack_tx_int; + } + + /* Under some circumstances, we see interrupts reported for + * a closed channel. The interrupt mask in R1 is clear, but + * R3 still signals the interrupts and we see them when taking + * an interrupt for the other channel (this could be a qemu + * bug but since the ESCC doc doesn't specify precisely whether + * R3 interrupt status bits are masked by R1 interrupt enable + * bits, better safe than sorry). --BenH. + */ + if (!ZS_IS_OPEN(uap)) + goto ack_tx_int; + + if (uap->port.x_char) { + uap->flags |= PMACZILOG_FLAG_TX_ACTIVE; + write_zsdata(uap, uap->port.x_char); + zssync(uap); + uap->port.icount.tx++; + uap->port.x_char = 0; + return; + } + + if (uap->port.state == NULL) + goto ack_tx_int; + xmit = &uap->port.state->xmit; + if (uart_circ_empty(xmit)) { + uart_write_wakeup(&uap->port); + goto ack_tx_int; + } + if (uart_tx_stopped(&uap->port)) + goto ack_tx_int; + + uap->flags |= PMACZILOG_FLAG_TX_ACTIVE; + write_zsdata(uap, xmit->buf[xmit->tail]); + zssync(uap); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + uap->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uap->port); + + return; + +ack_tx_int: + write_zsreg(uap, R0, RES_Tx_P); + zssync(uap); +} + +/* Hrm... we register that twice, fixme later....
*/ +static irqreturn_t pmz_interrupt(int irq, void *dev_id) +{ + struct uart_pmac_port *uap = dev_id; + struct uart_pmac_port *uap_a; + struct uart_pmac_port *uap_b; + int rc = IRQ_NONE; + bool push; + u8 r3; + + uap_a = pmz_get_port_A(uap); + uap_b = uap_a->mate; + + spin_lock(&uap_a->port.lock); + r3 = read_zsreg(uap_a, R3); + + /* Channel A */ + push = false; + if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { + if (!ZS_IS_OPEN(uap_a)) { + pmz_debug("ChanA interrupt while not open !\n"); + goto skip_a; + } + write_zsreg(uap_a, R0, RES_H_IUS); + zssync(uap_a); + if (r3 & CHAEXT) + pmz_status_handle(uap_a); + if (r3 & CHARxIP) + push = pmz_receive_chars(uap_a); + if (r3 & CHATxIP) + pmz_transmit_chars(uap_a); + rc = IRQ_HANDLED; + } + skip_a: + spin_unlock(&uap_a->port.lock); + if (push) + tty_flip_buffer_push(&uap->port.state->port); + + if (!uap_b) + goto out; + + spin_lock(&uap_b->port.lock); + push = false; + if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { + if (!ZS_IS_OPEN(uap_b)) { + pmz_debug("ChanB interrupt while not open !\n"); + goto skip_b; + } + write_zsreg(uap_b, R0, RES_H_IUS); + zssync(uap_b); + if (r3 & CHBEXT) + pmz_status_handle(uap_b); + if (r3 & CHBRxIP) + push = pmz_receive_chars(uap_b); + if (r3 & CHBTxIP) + pmz_transmit_chars(uap_b); + rc = IRQ_HANDLED; + } + skip_b: + spin_unlock(&uap_b->port.lock); + if (push) + tty_flip_buffer_push(&uap->port.state->port); + + out: + return rc; +} + +/* + * Peek the status register, lock not held by caller + */ +static inline u8 pmz_peek_status(struct uart_pmac_port *uap) +{ + unsigned long flags; + u8 status; + + spin_lock_irqsave(&uap->port.lock, flags); + status = read_zsreg(uap, R0); + spin_unlock_irqrestore(&uap->port.lock, flags); + + return status; +} + +/* + * Check if transmitter is empty + * The port lock is not held. + */ +static unsigned int pmz_tx_empty(struct uart_port *port) +{ + unsigned char status; + + status = pmz_peek_status(to_pmz(port)); + if (status & Tx_BUF_EMP) + return TIOCSER_TEMT; + return 0; +} + +/* + * Set Modem Control (RTS & DTR) bits + * The port lock is held and interrupts are disabled. + * Note: Shall we really filter out RTS on external ports or + * should that be dealt at higher level only ? + */ +static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned char set_bits, clear_bits; + + /* Do nothing for irda for now... */ + if (ZS_IS_IRDA(uap)) + return; + /* We get called during boot with a port not up yet */ + if (!(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap))) + return; + + set_bits = clear_bits = 0; + + if (ZS_IS_INTMODEM(uap)) { + if (mctrl & TIOCM_RTS) + set_bits |= RTS; + else + clear_bits |= RTS; + } + if (mctrl & TIOCM_DTR) + set_bits |= DTR; + else + clear_bits |= DTR; + + /* NOTE: Not subject to 'transmitter active' rule. */ + uap->curregs[R5] |= set_bits; + uap->curregs[R5] &= ~clear_bits; + + write_zsreg(uap, R5, uap->curregs[R5]); + pmz_debug("pmz_set_mctrl: set bits: %x, clear bits: %x -> %x\n", + set_bits, clear_bits, uap->curregs[R5]); + zssync(uap); +} + +/* + * Get Modem Control bits (only the input ones, the core will + * or that with a cached value of the control ones) + * The port lock is held and interrupts are disabled. 
+ */ +static unsigned int pmz_get_mctrl(struct uart_port *port) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned char status; + unsigned int ret; + + status = read_zsreg(uap, R0); + + ret = 0; + if (status & DCD) + ret |= TIOCM_CAR; + if (status & SYNC_HUNT) + ret |= TIOCM_DSR; + if (!(status & CTS)) + ret |= TIOCM_CTS; + + return ret; +} + +/* + * Stop TX side. Dealt like sunzilog at next Tx interrupt, + * though for DMA, we will have to do a bit more. + * The port lock is held and interrupts are disabled. + */ +static void pmz_stop_tx(struct uart_port *port) +{ + to_pmz(port)->flags |= PMACZILOG_FLAG_TX_STOPPED; +} + +/* + * Kick the Tx side. + * The port lock is held and interrupts are disabled. + */ +static void pmz_start_tx(struct uart_port *port) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned char status; + + uap->flags |= PMACZILOG_FLAG_TX_ACTIVE; + uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED; + + status = read_zsreg(uap, R0); + + /* TX busy? Just wait for the TX done interrupt. */ + if (!(status & Tx_BUF_EMP)) + return; + + /* Send the first character to jump-start the TX done + * IRQ sending engine. + */ + if (port->x_char) { + write_zsdata(uap, port->x_char); + zssync(uap); + port->icount.tx++; + port->x_char = 0; + } else { + struct circ_buf *xmit = &port->state->xmit; + + if (uart_circ_empty(xmit)) + return; + write_zsdata(uap, xmit->buf[xmit->tail]); + zssync(uap); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uap->port); + } +} + +/* + * Stop Rx side, basically disable emitting of + * Rx interrupts on the port. We don't disable the rx + * side of the chip proper though + * The port lock is held. + */ +static void pmz_stop_rx(struct uart_port *port) +{ + struct uart_pmac_port *uap = to_pmz(port); + + /* Disable all RX interrupts. */ + uap->curregs[R1] &= ~RxINT_MASK; + pmz_maybe_update_regs(uap); +} + +/* + * Enable modem status change interrupts + * The port lock is held. + */ +static void pmz_enable_ms(struct uart_port *port) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned char new_reg; + + if (ZS_IS_IRDA(uap)) + return; + new_reg = uap->curregs[R15] | (DCDIE | SYNCIE | CTSIE); + if (new_reg != uap->curregs[R15]) { + uap->curregs[R15] = new_reg; + + /* NOTE: Not subject to 'transmitter active' rule. */ + write_zsreg(uap, R15, uap->curregs[R15]); + } +} + +/* + * Control break state emission + * The port lock is not held. + */ +static void pmz_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned char set_bits, clear_bits, new_reg; + unsigned long flags; + + set_bits = clear_bits = 0; + + if (break_state) + set_bits |= SND_BRK; + else + clear_bits |= SND_BRK; + + spin_lock_irqsave(&port->lock, flags); + + new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits; + if (new_reg != uap->curregs[R5]) { + uap->curregs[R5] = new_reg; + write_zsreg(uap, R5, uap->curregs[R5]); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +#ifdef CONFIG_PPC_PMAC + +/* + * Turn power on or off to the SCC and associated stuff + * (port drivers, modem, IR port, etc.) + * Returns the number of milliseconds we should wait before + * trying to use the port. 
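+ *
+ * In practice only the internal modem needs a settle time: powering it
+ * up below asks for a 2500ms delay, a plain SCC enable returns 0.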
+ */ +static int pmz_set_scc_power(struct uart_pmac_port *uap, int state) +{ + int delay = 0; + int rc; + + if (state) { + rc = pmac_call_feature( + PMAC_FTR_SCC_ENABLE, uap->node, uap->port_type, 1); + pmz_debug("port power on result: %d\n", rc); + if (ZS_IS_INTMODEM(uap)) { + rc = pmac_call_feature( + PMAC_FTR_MODEM_ENABLE, uap->node, 0, 1); + delay = 2500; /* wait for 2.5s before using */ + pmz_debug("modem power result: %d\n", rc); + } + } else { + /* TODO: Make that depend on a timer, don't power down + * immediately + */ + if (ZS_IS_INTMODEM(uap)) { + rc = pmac_call_feature( + PMAC_FTR_MODEM_ENABLE, uap->node, 0, 0); + pmz_debug("port power off result: %d\n", rc); + } + pmac_call_feature(PMAC_FTR_SCC_ENABLE, uap->node, uap->port_type, 0); + } + return delay; +} + +#else + +static int pmz_set_scc_power(struct uart_pmac_port *uap, int state) +{ + return 0; +} + +#endif /* !CONFIG_PPC_PMAC */ + +/* + * FixZeroBug....Works around a bug in the SCC receiving channel. + * Inspired from Darwin code, 15 Sept. 2000 -DanM + * + * The following sequence prevents a problem that is seen with O'Hare ASICs + * (most versions -- also with some Heathrow and Hydra ASICs) where a zero + * at the input to the receiver becomes 'stuck' and locks up the receiver. + * This problem can occur as a result of a zero bit at the receiver input + * coincident with any of the following events: + * + * The SCC is initialized (hardware or software). + * A framing error is detected. + * The clocking option changes from synchronous or X1 asynchronous + * clocking to X16, X32, or X64 asynchronous clocking. + * The decoding mode is changed among NRZ, NRZI, FM0, or FM1. + * + * This workaround attempts to recover from the lockup condition by placing + * the SCC in synchronous loopback mode with a fast clock before programming + * any of the asynchronous modes. + */ +static void pmz_fix_zero_bug_scc(struct uart_pmac_port *uap) +{ + write_zsreg(uap, 9, ZS_IS_CHANNEL_A(uap) ? CHRA : CHRB); + zssync(uap); + udelay(10); + write_zsreg(uap, 9, (ZS_IS_CHANNEL_A(uap) ? CHRA : CHRB) | NV); + zssync(uap); + + write_zsreg(uap, 4, X1CLK | MONSYNC); + write_zsreg(uap, 3, Rx8); + write_zsreg(uap, 5, Tx8 | RTS); + write_zsreg(uap, 9, NV); /* Didn't we already do this? */ + write_zsreg(uap, 11, RCBR | TCBR); + write_zsreg(uap, 12, 0); + write_zsreg(uap, 13, 0); + write_zsreg(uap, 14, (LOOPBAK | BRSRC)); + write_zsreg(uap, 14, (LOOPBAK | BRSRC | BRENAB)); + write_zsreg(uap, 3, Rx8 | RxENABLE); + write_zsreg(uap, 0, RES_EXT_INT); + write_zsreg(uap, 0, RES_EXT_INT); + write_zsreg(uap, 0, RES_EXT_INT); /* to kill some time */ + + /* The channel should be OK now, but it is probably receiving + * loopback garbage. + * Switch to asynchronous mode, disable the receiver, + * and discard everything in the receive buffer. + */ + write_zsreg(uap, 9, NV); + write_zsreg(uap, 4, X16CLK | SB_MASK); + write_zsreg(uap, 3, Rx8); + + while (read_zsreg(uap, 0) & Rx_CH_AV) { + (void)read_zsreg(uap, 8); + write_zsreg(uap, 0, RES_EXT_INT); + write_zsreg(uap, 0, ERR_RES); + } +} + +/* + * Real startup routine, powers up the hardware and sets up + * the SCC. Returns a delay in ms where you need to wait before + * actually using the port, this is typically the internal modem + * powerup delay. This routine expect the lock to be taken. 
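+ *
+ * Rough sequence: power up the cell, apply the zero-bug workaround,
+ * reset the channel through R9, clear pending error/IUS state, load sane
+ * async defaults (x16 clock, 8 bits, 1 stop bit, BRG enabled, break
+ * interrupts only) and finally enable the receiver and transmitter.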
+ */ +static int __pmz_startup(struct uart_pmac_port *uap) +{ + int pwr_delay = 0; + + memset(&uap->curregs, 0, sizeof(uap->curregs)); + + /* Power up the SCC & underlying hardware (modem/irda) */ + pwr_delay = pmz_set_scc_power(uap, 1); + + /* Nice buggy HW ... */ + pmz_fix_zero_bug_scc(uap); + + /* Reset the channel */ + uap->curregs[R9] = 0; + write_zsreg(uap, 9, ZS_IS_CHANNEL_A(uap) ? CHRA : CHRB); + zssync(uap); + udelay(10); + write_zsreg(uap, 9, 0); + zssync(uap); + + /* Clear the interrupt registers */ + write_zsreg(uap, R1, 0); + write_zsreg(uap, R0, ERR_RES); + write_zsreg(uap, R0, ERR_RES); + write_zsreg(uap, R0, RES_H_IUS); + write_zsreg(uap, R0, RES_H_IUS); + + /* Setup some valid baud rate */ + uap->curregs[R4] = X16CLK | SB1; + uap->curregs[R3] = Rx8; + uap->curregs[R5] = Tx8 | RTS; + if (!ZS_IS_IRDA(uap)) + uap->curregs[R5] |= DTR; + uap->curregs[R12] = 0; + uap->curregs[R13] = 0; + uap->curregs[R14] = BRENAB; + + /* Clear handshaking, enable BREAK interrupts */ + uap->curregs[R15] = BRKIE; + + /* Master interrupt enable */ + uap->curregs[R9] |= NV | MIE; + + pmz_load_zsregs(uap, uap->curregs); + + /* Enable receiver and transmitter. */ + write_zsreg(uap, R3, uap->curregs[R3] |= RxENABLE); + write_zsreg(uap, R5, uap->curregs[R5] |= TxENABLE); + + /* Remember status for DCD/CTS changes */ + uap->prev_status = read_zsreg(uap, R0); + + return pwr_delay; +} + +static void pmz_irda_reset(struct uart_pmac_port *uap) +{ + unsigned long flags; + + spin_lock_irqsave(&uap->port.lock, flags); + uap->curregs[R5] |= DTR; + write_zsreg(uap, R5, uap->curregs[R5]); + zssync(uap); + spin_unlock_irqrestore(&uap->port.lock, flags); + msleep(110); + + spin_lock_irqsave(&uap->port.lock, flags); + uap->curregs[R5] &= ~DTR; + write_zsreg(uap, R5, uap->curregs[R5]); + zssync(uap); + spin_unlock_irqrestore(&uap->port.lock, flags); + msleep(10); +} + +/* + * This is the "normal" startup routine, using the above one + * wrapped with the lock and doing a schedule delay + */ +static int pmz_startup(struct uart_port *port) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned long flags; + int pwr_delay = 0; + + uap->flags |= PMACZILOG_FLAG_IS_OPEN; + + /* A console is never powered down. 
Else, power up and + * initialize the chip + */ + if (!ZS_IS_CONS(uap)) { + spin_lock_irqsave(&port->lock, flags); + pwr_delay = __pmz_startup(uap); + spin_unlock_irqrestore(&port->lock, flags); + } + sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line); + if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED, + uap->irq_name, uap)) { + pmz_error("Unable to register zs interrupt handler.\n"); + pmz_set_scc_power(uap, 0); + return -ENXIO; + } + + /* Right now, we deal with delay by blocking here, I'll be + * smarter later on + */ + if (pwr_delay != 0) { + pmz_debug("pmz: delaying %d ms\n", pwr_delay); + msleep(pwr_delay); + } + + /* IrDA reset is done now */ + if (ZS_IS_IRDA(uap)) + pmz_irda_reset(uap); + + /* Enable interrupt requests for the channel */ + spin_lock_irqsave(&port->lock, flags); + pmz_interrupt_control(uap, 1); + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void pmz_shutdown(struct uart_port *port) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable interrupt requests for the channel */ + pmz_interrupt_control(uap, 0); + + if (!ZS_IS_CONS(uap)) { + /* Disable receiver and transmitter */ + uap->curregs[R3] &= ~RxENABLE; + uap->curregs[R5] &= ~TxENABLE; + + /* Disable break assertion */ + uap->curregs[R5] &= ~SND_BRK; + pmz_maybe_update_regs(uap); + } + + spin_unlock_irqrestore(&port->lock, flags); + + /* Release interrupt handler */ + free_irq(uap->port.irq, uap); + + spin_lock_irqsave(&port->lock, flags); + + uap->flags &= ~PMACZILOG_FLAG_IS_OPEN; + + if (!ZS_IS_CONS(uap)) + pmz_set_scc_power(uap, 0); /* Shut the chip down */ + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* Shared by TTY driver and serial console setup. The port lock is held + * and local interrupts are disabled. + */ +static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag, + unsigned int iflag, unsigned long baud) +{ + int brg; + + /* Switch to external clocking for IrDA high clock rates. That + * code could be re-used for Midi interfaces with different + * multipliers + */ + if (baud >= 115200 && ZS_IS_IRDA(uap)) { + uap->curregs[R4] = X1CLK; + uap->curregs[R11] = RCTRxCP | TCTRxCP; + uap->curregs[R14] = 0; /* BRG off */ + uap->curregs[R12] = 0; + uap->curregs[R13] = 0; + uap->flags |= PMACZILOG_FLAG_IS_EXTCLK; + } else { + switch (baud) { + case ZS_CLOCK/16: /* 230400 */ + uap->curregs[R4] = X16CLK; + uap->curregs[R11] = 0; + uap->curregs[R14] = 0; + break; + case ZS_CLOCK/32: /* 115200 */ + uap->curregs[R4] = X32CLK; + uap->curregs[R11] = 0; + uap->curregs[R14] = 0; + break; + default: + uap->curregs[R4] = X16CLK; + uap->curregs[R11] = TCBR | RCBR; + brg = BPS_TO_BRG(baud, ZS_CLOCK / 16); + uap->curregs[R12] = (brg & 255); + uap->curregs[R13] = ((brg >> 8) & 255); + uap->curregs[R14] = BRENAB; + } + uap->flags &= ~PMACZILOG_FLAG_IS_EXTCLK; + } + + /* Character size, stop bits, and parity. 
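+ * For example CS8 selects Rx8/Tx8 below with a parity_mask of 0xff,
+ * while CS5 selects Rx5/Tx5 with a mask of 0x1f.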
*/ + uap->curregs[3] &= ~RxN_MASK; + uap->curregs[5] &= ~TxN_MASK; + + switch (cflag & CSIZE) { + case CS5: + uap->curregs[3] |= Rx5; + uap->curregs[5] |= Tx5; + uap->parity_mask = 0x1f; + break; + case CS6: + uap->curregs[3] |= Rx6; + uap->curregs[5] |= Tx6; + uap->parity_mask = 0x3f; + break; + case CS7: + uap->curregs[3] |= Rx7; + uap->curregs[5] |= Tx7; + uap->parity_mask = 0x7f; + break; + case CS8: + default: + uap->curregs[3] |= Rx8; + uap->curregs[5] |= Tx8; + uap->parity_mask = 0xff; + break; + } + uap->curregs[4] &= ~(SB_MASK); + if (cflag & CSTOPB) + uap->curregs[4] |= SB2; + else + uap->curregs[4] |= SB1; + if (cflag & PARENB) + uap->curregs[4] |= PAR_ENAB; + else + uap->curregs[4] &= ~PAR_ENAB; + if (!(cflag & PARODD)) + uap->curregs[4] |= PAR_EVEN; + else + uap->curregs[4] &= ~PAR_EVEN; + + uap->port.read_status_mask = Rx_OVR; + if (iflag & INPCK) + uap->port.read_status_mask |= CRC_ERR | PAR_ERR; + if (iflag & (IGNBRK | BRKINT | PARMRK)) + uap->port.read_status_mask |= BRK_ABRT; + + uap->port.ignore_status_mask = 0; + if (iflag & IGNPAR) + uap->port.ignore_status_mask |= CRC_ERR | PAR_ERR; + if (iflag & IGNBRK) { + uap->port.ignore_status_mask |= BRK_ABRT; + if (iflag & IGNPAR) + uap->port.ignore_status_mask |= Rx_OVR; + } + + if ((cflag & CREAD) == 0) + uap->port.ignore_status_mask = 0xff; +} + + +/* + * Set the irda codec on the imac to the specified baud rate. + */ +static void pmz_irda_setup(struct uart_pmac_port *uap, unsigned long *baud) +{ + u8 cmdbyte; + int t, version; + + switch (*baud) { + /* SIR modes */ + case 2400: + cmdbyte = 0x53; + break; + case 4800: + cmdbyte = 0x52; + break; + case 9600: + cmdbyte = 0x51; + break; + case 19200: + cmdbyte = 0x50; + break; + case 38400: + cmdbyte = 0x4f; + break; + case 57600: + cmdbyte = 0x4e; + break; + case 115200: + cmdbyte = 0x4d; + break; + /* The FIR modes aren't really supported at this point, how + * do we select the speed ? via the FCR on KeyLargo ? 
+ */ + case 1152000: + cmdbyte = 0; + break; + case 4000000: + cmdbyte = 0; + break; + default: /* 9600 */ + cmdbyte = 0x51; + *baud = 9600; + break; + } + + /* Wait for transmitter to drain */ + t = 10000; + while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0 + || (read_zsreg(uap, R1) & ALL_SNT) == 0) { + if (--t <= 0) { + pmz_error("transmitter didn't drain\n"); + return; + } + udelay(10); + } + + /* Drain the receiver too */ + t = 100; + (void)read_zsdata(uap); + (void)read_zsdata(uap); + (void)read_zsdata(uap); + mdelay(10); + while (read_zsreg(uap, R0) & Rx_CH_AV) { + read_zsdata(uap); + mdelay(10); + if (--t <= 0) { + pmz_error("receiver didn't drain\n"); + return; + } + } + + /* Switch to command mode */ + uap->curregs[R5] |= DTR; + write_zsreg(uap, R5, uap->curregs[R5]); + zssync(uap); + mdelay(1); + + /* Switch SCC to 19200 */ + pmz_convert_to_zs(uap, CS8, 0, 19200); + pmz_load_zsregs(uap, uap->curregs); + mdelay(1); + + /* Write get_version command byte */ + write_zsdata(uap, 1); + t = 5000; + while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) { + if (--t <= 0) { + pmz_error("irda_setup timed out on get_version byte\n"); + goto out; + } + udelay(10); + } + version = read_zsdata(uap); + + if (version < 4) { + pmz_info("IrDA: dongle version %d not supported\n", version); + goto out; + } + + /* Send speed mode */ + write_zsdata(uap, cmdbyte); + t = 5000; + while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) { + if (--t <= 0) { + pmz_error("irda_setup timed out on speed mode byte\n"); + goto out; + } + udelay(10); + } + t = read_zsdata(uap); + if (t != cmdbyte) + pmz_error("irda_setup speed mode byte = %x (%x)\n", t, cmdbyte); + + pmz_info("IrDA setup for %ld bps, dongle version: %d\n", + *baud, version); + + (void)read_zsdata(uap); + (void)read_zsdata(uap); + (void)read_zsdata(uap); + + out: + /* Switch back to data mode */ + uap->curregs[R5] &= ~DTR; + write_zsreg(uap, R5, uap->curregs[R5]); + zssync(uap); + + (void)read_zsdata(uap); + (void)read_zsdata(uap); + (void)read_zsdata(uap); +} + + +static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned long baud; + + /* XXX Check which revs of machines actually allow 1 and 4Mb speeds + * on the IR dongle. Note that the IRTTY driver currently doesn't know + * about the FIR mode and high speed modes. So these are unused. For + * implementing proper support for these, we should probably add some + * DMA as well, at least on the Rx side, which isn't a simple thing + * at this point. 
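+ *
+ * For the regular (non IrDA) path below, the requested rate is clamped
+ * to 1200..230400 and handed to pmz_convert_to_zs(), which programs the
+ * BRG: for example 9600 baud with the 3.6864MHz chip clock in x16 mode
+ * gives a time constant of 3686400 / (2 * 16 * 9600) - 2 = 10 in R12/R13.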
+ */ + if (ZS_IS_IRDA(uap)) { + /* Calc baud rate */ + baud = uart_get_baud_rate(port, termios, old, 1200, 4000000); + pmz_debug("pmz: switch IRDA to %ld bauds\n", baud); + /* Cet the irda codec to the right rate */ + pmz_irda_setup(uap, &baud); + /* Set final baud rate */ + pmz_convert_to_zs(uap, termios->c_cflag, termios->c_iflag, baud); + pmz_load_zsregs(uap, uap->curregs); + zssync(uap); + } else { + baud = uart_get_baud_rate(port, termios, old, 1200, 230400); + pmz_convert_to_zs(uap, termios->c_cflag, termios->c_iflag, baud); + /* Make sure modem status interrupts are correctly configured */ + if (UART_ENABLE_MS(&uap->port, termios->c_cflag)) { + uap->curregs[R15] |= DCDIE | SYNCIE | CTSIE; + uap->flags |= PMACZILOG_FLAG_MODEM_STATUS; + } else { + uap->curregs[R15] &= ~(DCDIE | SYNCIE | CTSIE); + uap->flags &= ~PMACZILOG_FLAG_MODEM_STATUS; + } + + /* Load registers to the chip */ + pmz_maybe_update_regs(uap); + } + uart_update_timeout(port, termios->c_cflag, baud); +} + +/* The port lock is not held. */ +static void pmz_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_pmac_port *uap = to_pmz(port); + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable IRQs on the port */ + pmz_interrupt_control(uap, 0); + + /* Setup new port configuration */ + __pmz_set_termios(port, termios, old); + + /* Re-enable IRQs on the port */ + if (ZS_IS_OPEN(uap)) + pmz_interrupt_control(uap, 1); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *pmz_type(struct uart_port *port) +{ + struct uart_pmac_port *uap = to_pmz(port); + + if (ZS_IS_IRDA(uap)) + return "Z85c30 ESCC - Infrared port"; + else if (ZS_IS_INTMODEM(uap)) + return "Z85c30 ESCC - Internal modem"; + return "Z85c30 ESCC - Serial port"; +} + +/* We do not request/release mappings of the registers here, this + * happens at early serial probe time. + */ +static void pmz_release_port(struct uart_port *port) +{ +} + +static int pmz_request_port(struct uart_port *port) +{ + return 0; +} + +/* These do not need to do anything interesting either. */ +static void pmz_config_port(struct uart_port *port, int flags) +{ +} + +/* We do not support letting the user mess with the divisor, IRQ, etc. */ +static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return -EINVAL; +} + +#ifdef CONFIG_CONSOLE_POLL + +static int pmz_poll_get_char(struct uart_port *port) +{ + struct uart_pmac_port *uap = + container_of(port, struct uart_pmac_port, port); + int tries = 2; + + while (tries) { + if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0) + return read_zsdata(uap); + if (tries--) + udelay(5); + } + + return NO_POLL_CHAR; +} + +static void pmz_poll_put_char(struct uart_port *port, unsigned char c) +{ + struct uart_pmac_port *uap = + container_of(port, struct uart_pmac_port, port); + + /* Wait for the transmit buffer to empty. 
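+ * This is the polled (kgdboc) path, so we cannot rely on the TX
+ * interrupt: just spin on Tx_BUF_EMP in R0 before handing over the byte.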
*/ + while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0) + udelay(5); + write_zsdata(uap, c); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +static const struct uart_ops pmz_pops = { + .tx_empty = pmz_tx_empty, + .set_mctrl = pmz_set_mctrl, + .get_mctrl = pmz_get_mctrl, + .stop_tx = pmz_stop_tx, + .start_tx = pmz_start_tx, + .stop_rx = pmz_stop_rx, + .enable_ms = pmz_enable_ms, + .break_ctl = pmz_break_ctl, + .startup = pmz_startup, + .shutdown = pmz_shutdown, + .set_termios = pmz_set_termios, + .type = pmz_type, + .release_port = pmz_release_port, + .request_port = pmz_request_port, + .config_port = pmz_config_port, + .verify_port = pmz_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = pmz_poll_get_char, + .poll_put_char = pmz_poll_put_char, +#endif +}; + +#ifdef CONFIG_PPC_PMAC + +/* + * Setup one port structure after probing, HW is down at this point, + * Unlike sunzilog, we don't need to pre-init the spinlock as we don't + * register our console before uart_add_one_port() is called + */ +static int __init pmz_init_port(struct uart_pmac_port *uap) +{ + struct device_node *np = uap->node; + const char *conn; + const struct slot_names_prop { + int count; + char name[1]; + } *slots; + int len; + struct resource r_ports; + + /* + * Request & map chip registers + */ + if (of_address_to_resource(np, 0, &r_ports)) + return -ENODEV; + uap->port.mapbase = r_ports.start; + uap->port.membase = ioremap(uap->port.mapbase, 0x1000); + + uap->control_reg = uap->port.membase; + uap->data_reg = uap->control_reg + 0x10; + + /* + * Detect port type + */ + if (of_device_is_compatible(np, "cobalt")) + uap->flags |= PMACZILOG_FLAG_IS_INTMODEM; + conn = of_get_property(np, "AAPL,connector", &len); + if (conn && (strcmp(conn, "infrared") == 0)) + uap->flags |= PMACZILOG_FLAG_IS_IRDA; + uap->port_type = PMAC_SCC_ASYNC; + /* 1999 Powerbook G3 has slot-names property instead */ + slots = of_get_property(np, "slot-names", &len); + if (slots && slots->count > 0) { + if (strcmp(slots->name, "IrDA") == 0) + uap->flags |= PMACZILOG_FLAG_IS_IRDA; + else if (strcmp(slots->name, "Modem") == 0) + uap->flags |= PMACZILOG_FLAG_IS_INTMODEM; + } + if (ZS_IS_IRDA(uap)) + uap->port_type = PMAC_SCC_IRDA; + if (ZS_IS_INTMODEM(uap)) { + struct device_node* i2c_modem = + of_find_node_by_name(NULL, "i2c-modem"); + if (i2c_modem) { + const char* mid = + of_get_property(i2c_modem, "modem-id", NULL); + if (mid) switch(*mid) { + case 0x04 : + case 0x05 : + case 0x07 : + case 0x08 : + case 0x0b : + case 0x0c : + uap->port_type = PMAC_SCC_I2S1; + } + printk(KERN_INFO "pmac_zilog: i2c-modem detected, id: %d\n", + mid ? (*mid) : 0); + of_node_put(i2c_modem); + } else { + printk(KERN_INFO "pmac_zilog: serial modem detected\n"); + } + } + + /* + * Init remaining bits of "port" structure + */ + uap->port.iotype = UPIO_MEM; + uap->port.irq = irq_of_parse_and_map(np, 0); + uap->port.uartclk = ZS_CLOCK; + uap->port.fifosize = 1; + uap->port.ops = &pmz_pops; + uap->port.type = PORT_PMAC_ZILOG; + uap->port.flags = 0; + + /* + * Fixup for the port on Gatwick for which the device-tree has + * missing interrupts. Normally, the macio_dev would contain + * fixed up interrupt info, but we use the device-tree directly + * here due to early probing so we need the fixup too. 
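+ * The fixup below hardwires the mapping to hwirq 64 + 15 so the port
+ * still gets a usable interrupt despite the missing device-tree entry.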
+ */ + if (uap->port.irq == 0 && + np->parent && np->parent->parent && + of_device_is_compatible(np->parent->parent, "gatwick")) { + /* IRQs on gatwick are offset by 64 */ + uap->port.irq = irq_create_mapping(NULL, 64 + 15); + } + + /* Setup some valid baud rate information in the register + * shadows so we don't write crap there before baud rate is + * first initialized. + */ + pmz_convert_to_zs(uap, CS8, 0, 9600); + + return 0; +} + +/* + * Get rid of a port on module removal + */ +static void pmz_dispose_port(struct uart_pmac_port *uap) +{ + struct device_node *np; + + np = uap->node; + iounmap(uap->control_reg); + uap->node = NULL; + of_node_put(np); + memset(uap, 0, sizeof(struct uart_pmac_port)); +} + +/* + * Called upon match with an escc node in the device-tree. + */ +static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match) +{ + struct uart_pmac_port *uap; + int i; + + /* Iterate the pmz_ports array to find a matching entry + */ + for (i = 0; i < MAX_ZS_PORTS; i++) + if (pmz_ports[i].node == mdev->ofdev.dev.of_node) + break; + if (i >= MAX_ZS_PORTS) + return -ENODEV; + + + uap = &pmz_ports[i]; + uap->dev = mdev; + uap->port.dev = &mdev->ofdev.dev; + dev_set_drvdata(&mdev->ofdev.dev, uap); + + /* We still activate the port even when failing to request resources + * to work around bugs in ancient Apple device-trees + */ + if (macio_request_resources(uap->dev, "pmac_zilog")) + printk(KERN_WARNING "%pOFn: Failed to request resource" + ", port still active\n", + uap->node); + else + uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED; + + return uart_add_one_port(&pmz_uart_reg, &uap->port); +} + +/* + * That one should not be called, macio isn't really a hotswap device, + * we don't expect one of those serial ports to go away... + */ +static int pmz_detach(struct macio_dev *mdev) +{ + struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev); + + if (!uap) + return -ENODEV; + + uart_remove_one_port(&pmz_uart_reg, &uap->port); + + if (uap->flags & PMACZILOG_FLAG_RSRC_REQUESTED) { + macio_release_resources(uap->dev); + uap->flags &= ~PMACZILOG_FLAG_RSRC_REQUESTED; + } + dev_set_drvdata(&mdev->ofdev.dev, NULL); + uap->dev = NULL; + uap->port.dev = NULL; + + return 0; +} + + +static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state) +{ + struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev); + + if (uap == NULL) { + printk("HRM... pmz_suspend with NULL uap\n"); + return 0; + } + + uart_suspend_port(&pmz_uart_reg, &uap->port); + + return 0; +} + + +static int pmz_resume(struct macio_dev *mdev) +{ + struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev); + + if (uap == NULL) + return 0; + + uart_resume_port(&pmz_uart_reg, &uap->port); + + return 0; +} + +/* + * Probe all ports in the system and build the ports array, we register + * with the serial layer later, so we get a proper struct device which + * allows the tty to attach properly. This is later than it used to be + * but the tty layer really wants it that way. + */ +static int __init pmz_probe(void) +{ + struct device_node *node_p, *node_a, *node_b, *np; + int count = 0; + int rc; + + /* + * Find all escc chips in the system + */ + for_each_node_by_name(node_p, "escc") { + /* + * First get channel A/B node pointers + * + * TODO: Add routines with proper locking to do that... 
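+ *
+ * Ports end up in pmz_ports[] in pairs: channel A at an even index and
+ * its mate (channel B, when present) right after it. If either channel
+ * fails to init, the whole pair is discarded and we move on.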
+ */ + node_a = node_b = NULL; + for_each_child_of_node(node_p, np) { + if (of_node_name_prefix(np, "ch-a")) + node_a = of_node_get(np); + else if (of_node_name_prefix(np, "ch-b")) + node_b = of_node_get(np); + } + if (!node_a && !node_b) { + of_node_put(node_a); + of_node_put(node_b); + printk(KERN_ERR "pmac_zilog: missing node %c for escc %pOF\n", + (!node_a) ? 'a' : 'b', node_p); + continue; + } + + /* + * Fill basic fields in the port structures + */ + if (node_b != NULL) { + pmz_ports[count].mate = &pmz_ports[count+1]; + pmz_ports[count+1].mate = &pmz_ports[count]; + } + pmz_ports[count].flags = PMACZILOG_FLAG_IS_CHANNEL_A; + pmz_ports[count].node = node_a; + pmz_ports[count+1].node = node_b; + pmz_ports[count].port.line = count; + pmz_ports[count+1].port.line = count+1; + + /* + * Setup the ports for real + */ + rc = pmz_init_port(&pmz_ports[count]); + if (rc == 0 && node_b != NULL) + rc = pmz_init_port(&pmz_ports[count+1]); + if (rc != 0) { + of_node_put(node_a); + of_node_put(node_b); + memset(&pmz_ports[count], 0, sizeof(struct uart_pmac_port)); + memset(&pmz_ports[count+1], 0, sizeof(struct uart_pmac_port)); + continue; + } + count += 2; + } + pmz_ports_count = count; + + return 0; +} + +#else + +/* On PCI PowerMacs, pmz_probe() does an explicit search of the OpenFirmware + * tree to obtain the device_nodes needed to start the console before the + * macio driver. On Macs without OpenFirmware, global platform_devices take + * the place of those device_nodes. + */ +extern struct platform_device scc_a_pdev, scc_b_pdev; + +static int __init pmz_init_port(struct uart_pmac_port *uap) +{ + struct resource *r_ports; + int irq; + + r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0); + if (!r_ports) + return -ENODEV; + + irq = platform_get_irq(uap->pdev, 0); + if (irq < 0) + return irq; + + uap->port.mapbase = r_ports->start; + uap->port.membase = (unsigned char __iomem *) r_ports->start; + uap->port.iotype = UPIO_MEM; + uap->port.irq = irq; + uap->port.uartclk = ZS_CLOCK; + uap->port.fifosize = 1; + uap->port.ops = &pmz_pops; + uap->port.type = PORT_PMAC_ZILOG; + uap->port.flags = 0; + + uap->control_reg = uap->port.membase; + uap->data_reg = uap->control_reg + 4; + uap->port_type = 0; + uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PMACZILOG_CONSOLE); + + pmz_convert_to_zs(uap, CS8, 0, 9600); + + return 0; +} + +static int __init pmz_probe(void) +{ + int err; + + pmz_ports_count = 0; + + pmz_ports[0].port.line = 0; + pmz_ports[0].flags = PMACZILOG_FLAG_IS_CHANNEL_A; + pmz_ports[0].pdev = &scc_a_pdev; + err = pmz_init_port(&pmz_ports[0]); + if (err) + return err; + pmz_ports_count++; + + pmz_ports[0].mate = &pmz_ports[1]; + pmz_ports[1].mate = &pmz_ports[0]; + pmz_ports[1].port.line = 1; + pmz_ports[1].flags = 0; + pmz_ports[1].pdev = &scc_b_pdev; + err = pmz_init_port(&pmz_ports[1]); + if (err) + return err; + pmz_ports_count++; + + return 0; +} + +static void pmz_dispose_port(struct uart_pmac_port *uap) +{ + memset(uap, 0, sizeof(struct uart_pmac_port)); +} + +static int __init pmz_attach(struct platform_device *pdev) +{ + struct uart_pmac_port *uap; + int i; + + /* Iterate the pmz_ports array to find a matching entry */ + for (i = 0; i < pmz_ports_count; i++) + if (pmz_ports[i].pdev == pdev) + break; + if (i >= pmz_ports_count) + return -ENODEV; + + uap = &pmz_ports[i]; + uap->port.dev = &pdev->dev; + platform_set_drvdata(pdev, uap); + + return uart_add_one_port(&pmz_uart_reg, &uap->port); +} + +static int __exit pmz_detach(struct platform_device *pdev) +{ + struct 
uart_pmac_port *uap = platform_get_drvdata(pdev); + + if (!uap) + return -ENODEV; + + uart_remove_one_port(&pmz_uart_reg, &uap->port); + + uap->port.dev = NULL; + + return 0; +} + +#endif /* !CONFIG_PPC_PMAC */ + +#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE + +static void pmz_console_write(struct console *con, const char *s, unsigned int count); +static int __init pmz_console_setup(struct console *co, char *options); + +static struct console pmz_console = { + .name = PMACZILOG_NAME, + .write = pmz_console_write, + .device = uart_console_device, + .setup = pmz_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &pmz_uart_reg, +}; + +#define PMACZILOG_CONSOLE &pmz_console +#else /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ +#define PMACZILOG_CONSOLE (NULL) +#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ + +/* + * Register the driver, console driver and ports with the serial + * core + */ +static int __init pmz_register(void) +{ + pmz_uart_reg.nr = pmz_ports_count; + pmz_uart_reg.cons = PMACZILOG_CONSOLE; + + /* + * Register this driver with the serial core + */ + return uart_register_driver(&pmz_uart_reg); +} + +#ifdef CONFIG_PPC_PMAC + +static const struct of_device_id pmz_match[] = +{ + { + .name = "ch-a", + }, + { + .name = "ch-b", + }, + {}, +}; +MODULE_DEVICE_TABLE (of, pmz_match); + +static struct macio_driver pmz_driver = { + .driver = { + .name = "pmac_zilog", + .owner = THIS_MODULE, + .of_match_table = pmz_match, + }, + .probe = pmz_attach, + .remove = pmz_detach, + .suspend = pmz_suspend, + .resume = pmz_resume, +}; + +#else + +static struct platform_driver pmz_driver = { + .remove = __exit_p(pmz_detach), + .driver = { + .name = "scc", + }, +}; + +#endif /* !CONFIG_PPC_PMAC */ + +static int __init init_pmz(void) +{ + int rc, i; + + /* + * First, we need to do a direct OF-based probe pass. We + * do that because we want serial console up before the + * macio stuffs calls us back, and since that makes it + * easier to pass the proper number of channels to + * uart_register_driver() + */ + if (pmz_ports_count == 0) + pmz_probe(); + + /* + * Bail early if no port found + */ + if (pmz_ports_count == 0) + return -ENODEV; + + /* + * Now we register with the serial layer + */ + rc = pmz_register(); + if (rc) { + printk(KERN_ERR + "pmac_zilog: Error registering serial device, disabling pmac_zilog.\n" + "pmac_zilog: Did another serial driver already claim the minors?\n"); + /* effectively "pmz_unprobe()" */ + for (i=0; i < pmz_ports_count; i++) + pmz_dispose_port(&pmz_ports[i]); + return rc; + } + + /* + * Then we register the macio driver itself + */ +#ifdef CONFIG_PPC_PMAC + return macio_register_driver(&pmz_driver); +#else + return platform_driver_probe(&pmz_driver, pmz_attach); +#endif +} + +static void __exit exit_pmz(void) +{ + int i; + +#ifdef CONFIG_PPC_PMAC + /* Get rid of macio-driver (detach from macio) */ + macio_unregister_driver(&pmz_driver); +#else + platform_driver_unregister(&pmz_driver); +#endif + + for (i = 0; i < pmz_ports_count; i++) { + struct uart_pmac_port *uport = &pmz_ports[i]; +#ifdef CONFIG_PPC_PMAC + if (uport->node != NULL) + pmz_dispose_port(uport); +#else + if (uport->pdev != NULL) + pmz_dispose_port(uport); +#endif + } + /* Unregister UART driver */ + uart_unregister_driver(&pmz_uart_reg); +} + +#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE + +static void pmz_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct uart_pmac_port *uap = + container_of(port, struct uart_pmac_port, port); + + /* Wait for the transmit buffer to empty. 
*/ + while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0) + udelay(5); + write_zsdata(uap, ch); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + */ +static void pmz_console_write(struct console *con, const char *s, unsigned int count) +{ + struct uart_pmac_port *uap = &pmz_ports[con->index]; + unsigned long flags; + + spin_lock_irqsave(&uap->port.lock, flags); + + /* Turn of interrupts and enable the transmitter. */ + write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB); + write_zsreg(uap, R5, uap->curregs[5] | TxENABLE | RTS | DTR); + + uart_console_write(&uap->port, s, count, pmz_console_putchar); + + /* Restore the values in the registers. */ + write_zsreg(uap, R1, uap->curregs[1]); + /* Don't disable the transmitter. */ + + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +/* + * Setup the serial console + */ +static int __init pmz_console_setup(struct console *co, char *options) +{ + struct uart_pmac_port *uap; + struct uart_port *port; + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + unsigned long pwr_delay; + + /* + * XServe's default to 57600 bps + */ + if (of_machine_is_compatible("RackMac1,1") + || of_machine_is_compatible("RackMac1,2") + || of_machine_is_compatible("MacRISC4")) + baud = 57600; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index >= pmz_ports_count) + co->index = 0; + uap = &pmz_ports[co->index]; +#ifdef CONFIG_PPC_PMAC + if (uap->node == NULL) + return -ENODEV; +#else + if (uap->pdev == NULL) + return -ENODEV; +#endif + port = &uap->port; + + /* + * Mark port as beeing a console + */ + uap->flags |= PMACZILOG_FLAG_IS_CONS; + + /* + * Temporary fix for uart layer who didn't setup the spinlock yet + */ + spin_lock_init(&port->lock); + + /* + * Enable the hardware + */ + pwr_delay = __pmz_startup(uap); + if (pwr_delay) + mdelay(pwr_delay); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static int __init pmz_console_init(void) +{ + /* Probe ports */ + pmz_probe(); + + if (pmz_ports_count == 0) + return -ENODEV; + + /* TODO: Autoprobe console based on OF */ + /* pmz_console.index = i; */ + register_console(&pmz_console); + + return 0; + +} +console_initcall(pmz_console_init); +#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ + +module_init(init_pmz); +module_exit(exit_pmz); diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h new file mode 100644 index 000000000..837b97ca0 --- /dev/null +++ b/drivers/tty/serial/pmac_zilog.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PMAC_ZILOG_H__ +#define __PMAC_ZILOG_H__ + +/* + * At most 2 ESCCs with 2 ports each + */ +#define MAX_ZS_PORTS 4 + +/* + * We wrap our port structure around the generic uart_port. + */ +#define NUM_ZSREGS 17 + +struct uart_pmac_port { + struct uart_port port; + struct uart_pmac_port *mate; + +#ifdef CONFIG_PPC_PMAC + /* macio_dev for the escc holding this port (maybe be null on + * early inited port) + */ + struct macio_dev *dev; + /* device node to this port, this points to one of 2 childs + * of "escc" node (ie. ch-a or ch-b) + */ + struct device_node *node; +#else + struct platform_device *pdev; +#endif + + /* Port type as obtained from device tree (IRDA, modem, ...) 
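+ * (one of the PMAC_SCC_* values handed to the platform feature calls)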
*/ + int port_type; + u8 curregs[NUM_ZSREGS]; + + unsigned int flags; +#define PMACZILOG_FLAG_IS_CONS 0x00000001 +#define PMACZILOG_FLAG_IS_KGDB 0x00000002 +#define PMACZILOG_FLAG_MODEM_STATUS 0x00000004 +#define PMACZILOG_FLAG_IS_CHANNEL_A 0x00000008 +#define PMACZILOG_FLAG_REGS_HELD 0x00000010 +#define PMACZILOG_FLAG_TX_STOPPED 0x00000020 +#define PMACZILOG_FLAG_TX_ACTIVE 0x00000040 +#define PMACZILOG_FLAG_IS_IRDA 0x00000100 +#define PMACZILOG_FLAG_IS_INTMODEM 0x00000200 +#define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800 +#define PMACZILOG_FLAG_IS_OPEN 0x00002000 +#define PMACZILOG_FLAG_IS_EXTCLK 0x00008000 +#define PMACZILOG_FLAG_BREAK 0x00010000 + + unsigned char parity_mask; + unsigned char prev_status; + + volatile u8 __iomem *control_reg; + volatile u8 __iomem *data_reg; + + unsigned char irq_name[8]; +}; + +#define to_pmz(p) ((struct uart_pmac_port *)(p)) + +static inline struct uart_pmac_port *pmz_get_port_A(struct uart_pmac_port *uap) +{ + if (uap->flags & PMACZILOG_FLAG_IS_CHANNEL_A) + return uap; + return uap->mate; +} + +/* + * Register accessors. Note that we don't need to enforce a recovery + * delay on PCI PowerMac hardware, it's dealt in HW by the MacIO chip, + * though if we try to use this driver on older machines, we might have + * to add it back + */ +static inline u8 read_zsreg(struct uart_pmac_port *port, u8 reg) +{ + if (reg != 0) + writeb(reg, port->control_reg); + return readb(port->control_reg); +} + +static inline void write_zsreg(struct uart_pmac_port *port, u8 reg, u8 value) +{ + if (reg != 0) + writeb(reg, port->control_reg); + writeb(value, port->control_reg); +} + +static inline u8 read_zsdata(struct uart_pmac_port *port) +{ + return readb(port->data_reg); +} + +static inline void write_zsdata(struct uart_pmac_port *port, u8 data) +{ + writeb(data, port->data_reg); +} + +static inline void zssync(struct uart_pmac_port *port) +{ + (void)readb(port->control_reg); +} + +/* Conversion routines to/from brg time constants from/to bits + * per second. + */ +#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) +#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) + +#define ZS_CLOCK 3686400 /* Z8530 RTxC input clock rate */ + +/* The Zilog register set */ + +#define FLAG 0x7e + +/* Write Register 0 */ +#define R0 0 /* Register selects */ +#define R1 1 +#define R2 2 +#define R3 3 +#define R4 4 +#define R5 5 +#define R6 6 +#define R7 7 +#define R8 8 +#define R9 9 +#define R10 10 +#define R11 11 +#define R12 12 +#define R13 13 +#define R14 14 +#define R15 15 +#define R7P 16 + +#define NULLCODE 0 /* Null Code */ +#define POINT_HIGH 0x8 /* Select upper half of registers */ +#define RES_EXT_INT 0x10 /* Reset Ext. 
Status Interrupts */ +#define SEND_ABORT 0x18 /* HDLC Abort */ +#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ +#define RES_Tx_P 0x28 /* Reset TxINT Pending */ +#define ERR_RES 0x30 /* Error Reset */ +#define RES_H_IUS 0x38 /* Reset highest IUS */ + +#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ +#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ +#define RES_EOM_L 0xC0 /* Reset EOM latch */ + +/* Write Register 1 */ + +#define EXT_INT_ENAB 0x1 /* Ext Int Enable */ +#define TxINT_ENAB 0x2 /* Tx Int Enable */ +#define PAR_SPEC 0x4 /* Parity is special condition */ + +#define RxINT_DISAB 0 /* Rx Int Disable */ +#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ +#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */ +#define INT_ERR_Rx 0x18 /* Int on error only */ +#define RxINT_MASK 0x18 + +#define WT_RDY_RT 0x20 /* W/Req reflects recv if 1, xmit if 0 */ +#define WT_FN_RDYFN 0x40 /* W/Req pin is DMA request if 1, wait if 0 */ +#define WT_RDY_ENAB 0x80 /* Enable W/Req pin */ + +/* Write Register #2 (Interrupt Vector) */ + +/* Write Register 3 */ + +#define RxENABLE 0x1 /* Rx Enable */ +#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ +#define ADD_SM 0x4 /* Address Search Mode (SDLC) */ +#define RxCRC_ENAB 0x8 /* Rx CRC Enable */ +#define ENT_HM 0x10 /* Enter Hunt Mode */ +#define AUTO_ENAB 0x20 /* Auto Enables */ +#define Rx5 0x0 /* Rx 5 Bits/Character */ +#define Rx7 0x40 /* Rx 7 Bits/Character */ +#define Rx6 0x80 /* Rx 6 Bits/Character */ +#define Rx8 0xc0 /* Rx 8 Bits/Character */ +#define RxN_MASK 0xc0 + +/* Write Register 4 */ + +#define PAR_ENAB 0x1 /* Parity Enable */ +#define PAR_EVEN 0x2 /* Parity Even/Odd* */ + +#define SYNC_ENAB 0 /* Sync Modes Enable */ +#define SB1 0x4 /* 1 stop bit/char */ +#define SB15 0x8 /* 1.5 stop bits/char */ +#define SB2 0xc /* 2 stop bits/char */ +#define SB_MASK 0xc + +#define MONSYNC 0 /* 8 Bit Sync character */ +#define BISYNC 0x10 /* 16 bit sync character */ +#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */ +#define EXTSYNC 0x30 /* External Sync Mode */ + +#define X1CLK 0x0 /* x1 clock mode */ +#define X16CLK 0x40 /* x16 clock mode */ +#define X32CLK 0x80 /* x32 clock mode */ +#define X64CLK 0xC0 /* x64 clock mode */ +#define XCLK_MASK 0xC0 + +/* Write Register 5 */ + +#define TxCRC_ENAB 0x1 /* Tx CRC Enable */ +#define RTS 0x2 /* RTS */ +#define SDLC_CRC 0x4 /* SDLC/CRC-16 */ +#define TxENABLE 0x8 /* Tx Enable */ +#define SND_BRK 0x10 /* Send Break */ +#define Tx5 0x0 /* Tx 5 bits (or less)/character */ +#define Tx7 0x20 /* Tx 7 bits/character */ +#define Tx6 0x40 /* Tx 6 bits/character */ +#define Tx8 0x60 /* Tx 8 bits/character */ +#define TxN_MASK 0x60 +#define DTR 0x80 /* DTR */ + +/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ + +/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ + +/* Write Register 7' (Some enhanced feature control) */ +#define ENEXREAD 0x40 /* Enable read of some write registers */ + +/* Write Register 8 (transmit buffer) */ + +/* Write Register 9 (Master interrupt control) */ +#define VIS 1 /* Vector Includes Status */ +#define NV 2 /* No Vector */ +#define DLC 4 /* Disable Lower Chain */ +#define MIE 8 /* Master Interrupt Enable */ +#define STATHI 0x10 /* Status high */ +#define NORESET 0 /* No reset on write to R9 */ +#define CHRB 0x40 /* Reset channel B */ +#define CHRA 0x80 /* Reset channel A */ +#define FHWRES 0xc0 /* Force hardware reset */ + +/* Write Register 10 (misc control bits) */ +#define BIT6 1 /* 6 bit/8bit sync */ +#define 
LOOPMODE 2 /* SDLC Loop mode */ +#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ +#define MARKIDLE 8 /* Mark/flag on idle */ +#define GAOP 0x10 /* Go active on poll */ +#define NRZ 0 /* NRZ mode */ +#define NRZI 0x20 /* NRZI mode */ +#define FM1 0x40 /* FM1 (transition = 1) */ +#define FM0 0x60 /* FM0 (transition = 0) */ +#define CRCPS 0x80 /* CRC Preset I/O */ + +/* Write Register 11 (Clock Mode control) */ +#define TRxCXT 0 /* TRxC = Xtal output */ +#define TRxCTC 1 /* TRxC = Transmit clock */ +#define TRxCBR 2 /* TRxC = BR Generator Output */ +#define TRxCDP 3 /* TRxC = DPLL output */ +#define TRxCOI 4 /* TRxC O/I */ +#define TCRTxCP 0 /* Transmit clock = RTxC pin */ +#define TCTRxCP 8 /* Transmit clock = TRxC pin */ +#define TCBR 0x10 /* Transmit clock = BR Generator output */ +#define TCDPLL 0x18 /* Transmit clock = DPLL output */ +#define RCRTxCP 0 /* Receive clock = RTxC pin */ +#define RCTRxCP 0x20 /* Receive clock = TRxC pin */ +#define RCBR 0x40 /* Receive clock = BR Generator output */ +#define RCDPLL 0x60 /* Receive clock = DPLL output */ +#define RTxCX 0x80 /* RTxC Xtal/No Xtal */ + +/* Write Register 12 (lower byte of baud rate generator time constant) */ + +/* Write Register 13 (upper byte of baud rate generator time constant) */ + +/* Write Register 14 (Misc control bits) */ +#define BRENAB 1 /* Baud rate generator enable */ +#define BRSRC 2 /* Baud rate generator source */ +#define DTRREQ 4 /* DTR/Request function */ +#define AUTOECHO 8 /* Auto Echo */ +#define LOOPBAK 0x10 /* Local loopback */ +#define SEARCH 0x20 /* Enter search mode */ +#define RMC 0x40 /* Reset missing clock */ +#define DISDPLL 0x60 /* Disable DPLL */ +#define SSBR 0x80 /* Set DPLL source = BR generator */ +#define SSRTxC 0xa0 /* Set DPLL source = RTxC */ +#define SFMM 0xc0 /* Set FM mode */ +#define SNRZI 0xe0 /* Set NRZI mode */ + +/* Write Register 15 (external/status interrupt control) */ +#define EN85C30 1 /* Enable some 85c30-enhanced registers */ +#define ZCIE 2 /* Zero count IE */ +#define ENSTFIFO 4 /* Enable status FIFO (SDLC) */ +#define DCDIE 8 /* DCD IE */ +#define SYNCIE 0x10 /* Sync/hunt IE */ +#define CTSIE 0x20 /* CTS IE */ +#define TxUIE 0x40 /* Tx Underrun/EOM IE */ +#define BRKIE 0x80 /* Break/Abort IE */ + + +/* Read Register 0 */ +#define Rx_CH_AV 0x1 /* Rx Character Available */ +#define ZCOUNT 0x2 /* Zero count */ +#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ +#define DCD 0x8 /* DCD */ +#define SYNC_HUNT 0x10 /* Sync/hunt */ +#define CTS 0x20 /* CTS */ +#define TxEOM 0x40 /* Tx underrun */ +#define BRK_ABRT 0x80 /* Break/Abort */ + +/* Read Register 1 */ +#define ALL_SNT 0x1 /* All sent */ +/* Residue Data for 8 Rx bits/char programmed */ +#define RES3 0x8 /* 0/3 */ +#define RES4 0x4 /* 0/4 */ +#define RES5 0xc /* 0/5 */ +#define RES6 0x2 /* 0/6 */ +#define RES7 0xa /* 0/7 */ +#define RES8 0x6 /* 0/8 */ +#define RES18 0xe /* 1/8 */ +#define RES28 0x0 /* 2/8 */ +/* Special Rx Condition Interrupts */ +#define PAR_ERR 0x10 /* Parity error */ +#define Rx_OVR 0x20 /* Rx Overrun Error */ +#define CRC_ERR 0x40 /* CRC/Framing Error */ +#define END_FR 0x80 /* End of Frame (SDLC) */ + +/* Read Register 2 (channel b only) - Interrupt vector */ +#define CHB_Tx_EMPTY 0x00 +#define CHB_EXT_STAT 0x02 +#define CHB_Rx_AVAIL 0x04 +#define CHB_SPECIAL 0x06 +#define CHA_Tx_EMPTY 0x08 +#define CHA_EXT_STAT 0x0a +#define CHA_Rx_AVAIL 0x0c +#define CHA_SPECIAL 0x0e +#define STATUS_MASK 0x06 + +/* Read Register 3 (interrupt pending register) ch a only */ +#define CHBEXT 0x1 /* Channel B 
Ext/Stat IP */ +#define CHBTxIP 0x2 /* Channel B Tx IP */ +#define CHBRxIP 0x4 /* Channel B Rx IP */ +#define CHAEXT 0x8 /* Channel A Ext/Stat IP */ +#define CHATxIP 0x10 /* Channel A Tx IP */ +#define CHARxIP 0x20 /* Channel A Rx IP */ + +/* Read Register 8 (receive data register) */ + +/* Read Register 10 (misc status bits) */ +#define ONLOOP 2 /* On loop */ +#define LOOPSEND 0x10 /* Loop sending */ +#define CLK2MIS 0x40 /* Two clocks missing */ +#define CLK1MIS 0x80 /* One clock missing */ + +/* Read Register 12 (lower byte of baud rate generator constant) */ + +/* Read Register 13 (upper byte of baud rate generator constant) */ + +/* Read Register 15 (value of WR 15) */ + +/* Misc macros */ +#define ZS_CLEARERR(port) (write_zsreg(port, 0, ERR_RES)) +#define ZS_CLEARFIFO(port) do { \ + read_zsdata(port); \ + read_zsdata(port); \ + read_zsdata(port); \ + } while(0) + +#define ZS_IS_CONS(UP) ((UP)->flags & PMACZILOG_FLAG_IS_CONS) +#define ZS_IS_KGDB(UP) ((UP)->flags & PMACZILOG_FLAG_IS_KGDB) +#define ZS_IS_CHANNEL_A(UP) ((UP)->flags & PMACZILOG_FLAG_IS_CHANNEL_A) +#define ZS_REGS_HELD(UP) ((UP)->flags & PMACZILOG_FLAG_REGS_HELD) +#define ZS_TX_STOPPED(UP) ((UP)->flags & PMACZILOG_FLAG_TX_STOPPED) +#define ZS_TX_ACTIVE(UP) ((UP)->flags & PMACZILOG_FLAG_TX_ACTIVE) +#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & PMACZILOG_FLAG_MODEM_STATUS) +#define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA) +#define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM) +#define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN) +#define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK) + +#endif /* __PMAC_ZILOG_H__ */ diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c new file mode 100644 index 000000000..2d25231fa --- /dev/null +++ b/drivers/tty/serial/pxa.c @@ -0,0 +1,931 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Based on drivers/serial/8250.c by Russell King. + * + * Author: Nicolas Pitre + * Created: Feb 20, 2003 + * Copyright: (C) 2003 Monta Vista Software, Inc. + * + * Note 1: This driver is made separate from the already too overloaded + * 8250.c because it needs some kirks of its own and that'll make it + * easier to add DMA support. + * + * Note 2: I'm too sick of device allocation policies for serial ports. + * If someone else wants to request an "official" allocation of major/minor + * for this driver please be my guest. And don't forget that new hardware + * to come from Intel might have more than 3 or 4 of those UARTs. Let's + * hope for a better port registration and dynamic device allocation scheme + * with the serial core maintainer satisfaction to appear soon. 
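+ *
+ * Note 3: the UART registers on PXA are 32 bits wide and spaced 4 bytes
+ * apart, hence the "offset <<= 2" and readl()/writel() in serial_in()
+ * and serial_out() below.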
+ */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PXA_NAME_LEN 8 + +struct uart_pxa_port { + struct uart_port port; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned int lsr_break_flag; + struct clk *clk; + char name[PXA_NAME_LEN]; +}; + +static inline unsigned int serial_in(struct uart_pxa_port *up, int offset) +{ + offset <<= 2; + return readl(up->port.membase + offset); +} + +static inline void serial_out(struct uart_pxa_port *up, int offset, int value) +{ + offset <<= 2; + writel(value, up->port.membase + offset); +} + +static void serial_pxa_enable_ms(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + + up->ier |= UART_IER_MSI; + serial_out(up, UART_IER, up->ier); +} + +static void serial_pxa_stop_tx(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + + if (up->ier & UART_IER_THRI) { + up->ier &= ~UART_IER_THRI; + serial_out(up, UART_IER, up->ier); + } +} + +static void serial_pxa_stop_rx(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + + up->ier &= ~UART_IER_RLSI; + up->port.read_status_mask &= ~UART_LSR_DR; + serial_out(up, UART_IER, up->ier); +} + +static inline void receive_chars(struct uart_pxa_port *up, int *status) +{ + unsigned int ch, flag; + int max_count = 256; + + do { + /* work around Errata #20 according to + * Intel(R) PXA27x Processor Family + * Specification Update (May 2005) + * + * Step 2 + * Disable the Reciever Time Out Interrupt via IER[RTOEI] + */ + up->ier &= ~UART_IER_RTOIE; + serial_out(up, UART_IER, up->ier); + + ch = serial_in(up, UART_RX); + flag = TTY_NORMAL; + up->port.icount.rx++; + + if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | + UART_LSR_FE | UART_LSR_OE))) { + /* + * For statistics only + */ + if (*status & UART_LSR_BI) { + *status &= ~(UART_LSR_FE | UART_LSR_PE); + up->port.icount.brk++; + /* + * We do the SysRQ and SAK checking + * here because otherwise the break + * may get masked by ignore_status_mask + * or read_status_mask. + */ + if (uart_handle_break(&up->port)) + goto ignore_char; + } else if (*status & UART_LSR_PE) + up->port.icount.parity++; + else if (*status & UART_LSR_FE) + up->port.icount.frame++; + if (*status & UART_LSR_OE) + up->port.icount.overrun++; + + /* + * Mask off conditions which should be ignored. 
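+ * (read_status_mask was set up in serial_pxa_set_termios() from the
+ * termios input flags)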
+ */ + *status &= up->port.read_status_mask; + +#ifdef CONFIG_SERIAL_PXA_CONSOLE + if (up->port.line == up->port.cons->index) { + /* Recover the break flag from console xmit */ + *status |= up->lsr_break_flag; + up->lsr_break_flag = 0; + } +#endif + if (*status & UART_LSR_BI) { + flag = TTY_BREAK; + } else if (*status & UART_LSR_PE) + flag = TTY_PARITY; + else if (*status & UART_LSR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&up->port, ch)) + goto ignore_char; + + uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag); + + ignore_char: + *status = serial_in(up, UART_LSR); + } while ((*status & UART_LSR_DR) && (max_count-- > 0)); + tty_flip_buffer_push(&up->port.state->port); + + /* work around Errata #20 according to + * Intel(R) PXA27x Processor Family + * Specification Update (May 2005) + * + * Step 6: + * No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE] + */ + up->ier |= UART_IER_RTOIE; + serial_out(up, UART_IER, up->ier); +} + +static void transmit_chars(struct uart_pxa_port *up) +{ + struct circ_buf *xmit = &up->port.state->xmit; + int count; + + if (up->port.x_char) { + serial_out(up, UART_TX, up->port.x_char); + up->port.icount.tx++; + up->port.x_char = 0; + return; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { + serial_pxa_stop_tx(&up->port); + return; + } + + count = up->port.fifosize / 2; + do { + serial_out(up, UART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + + if (uart_circ_empty(xmit)) + serial_pxa_stop_tx(&up->port); +} + +static void serial_pxa_start_tx(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + + if (!(up->ier & UART_IER_THRI)) { + up->ier |= UART_IER_THRI; + serial_out(up, UART_IER, up->ier); + } +} + +/* should hold up->port.lock */ +static inline void check_modem_status(struct uart_pxa_port *up) +{ + int status; + + status = serial_in(up, UART_MSR); + + if ((status & UART_MSR_ANY_DELTA) == 0) + return; + + if (status & UART_MSR_TERI) + up->port.icount.rng++; + if (status & UART_MSR_DDSR) + up->port.icount.dsr++; + if (status & UART_MSR_DDCD) + uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); + if (status & UART_MSR_DCTS) + uart_handle_cts_change(&up->port, status & UART_MSR_CTS); + + wake_up_interruptible(&up->port.state->port.delta_msr_wait); +} + +/* + * This handles the interrupt from one port. + */ +static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id) +{ + struct uart_pxa_port *up = dev_id; + unsigned int iir, lsr; + + iir = serial_in(up, UART_IIR); + if (iir & UART_IIR_NO_INT) + return IRQ_NONE; + spin_lock(&up->port.lock); + lsr = serial_in(up, UART_LSR); + if (lsr & UART_LSR_DR) + receive_chars(up, &lsr); + check_modem_status(up); + if (lsr & UART_LSR_THRE) + transmit_chars(up); + spin_unlock(&up->port.lock); + return IRQ_HANDLED; +} + +static unsigned int serial_pxa_tx_empty(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(&up->port.lock, flags); + ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; + spin_unlock_irqrestore(&up->port.lock, flags); + + return ret; +} + +static unsigned int serial_pxa_get_mctrl(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned char status; + unsigned int ret; + + status = serial_in(up, UART_MSR); + + ret = 0; + if (status & UART_MSR_DCD) + ret |= TIOCM_CAR; + if (status & UART_MSR_RI) + ret |= TIOCM_RNG; + if (status & UART_MSR_DSR) + ret |= TIOCM_DSR; + if (status & UART_MSR_CTS) + ret |= TIOCM_CTS; + return ret; +} + +static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned char mcr = 0; + + if (mctrl & TIOCM_RTS) + mcr |= UART_MCR_RTS; + if (mctrl & TIOCM_DTR) + mcr |= UART_MCR_DTR; + if (mctrl & TIOCM_OUT1) + mcr |= UART_MCR_OUT1; + if (mctrl & TIOCM_OUT2) + mcr |= UART_MCR_OUT2; + if (mctrl & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + + mcr |= up->mcr; + + serial_out(up, UART_MCR, mcr); +} + +static void serial_pxa_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + if (break_state == -1) + up->lcr |= UART_LCR_SBC; + else + up->lcr &= ~UART_LCR_SBC; + serial_out(up, UART_LCR, up->lcr); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static int serial_pxa_startup(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned long flags; + int retval; + + if (port->line == 3) /* HWUART */ + up->mcr |= UART_MCR_AFE; + else + up->mcr = 0; + + up->port.uartclk = clk_get_rate(up->clk); + + /* + * Allocate the IRQ + */ + retval = request_irq(up->port.irq, serial_pxa_irq, 0, up->name, up); + if (retval) + return retval; + + /* + * Clear the FIFO buffers and disable them. + * (they will be reenabled in set_termios()) + */ + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + serial_out(up, UART_FCR, 0); + + /* + * Clear the interrupt registers. + */ + (void) serial_in(up, UART_LSR); + (void) serial_in(up, UART_RX); + (void) serial_in(up, UART_IIR); + (void) serial_in(up, UART_MSR); + + /* + * Now, initialize the UART + */ + serial_out(up, UART_LCR, UART_LCR_WLEN8); + + spin_lock_irqsave(&up->port.lock, flags); + up->port.mctrl |= TIOCM_OUT2; + serial_pxa_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + + /* + * Finally, enable interrupts. Note: Modem status interrupts + * are set via set_termios(), which will be occurring imminently + * anyway, so we don't enable them here. + */ + up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE; + serial_out(up, UART_IER, up->ier); + + /* + * And clear the interrupt registers again for luck. 
+ */ + (void) serial_in(up, UART_LSR); + (void) serial_in(up, UART_RX); + (void) serial_in(up, UART_IIR); + (void) serial_in(up, UART_MSR); + + return 0; +} + +static void serial_pxa_shutdown(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned long flags; + + free_irq(up->port.irq, up); + + /* + * Disable interrupts from this port + */ + up->ier = 0; + serial_out(up, UART_IER, 0); + + spin_lock_irqsave(&up->port.lock, flags); + up->port.mctrl &= ~TIOCM_OUT2; + serial_pxa_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + + /* + * Disable break condition and FIFOs + */ + serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | + UART_FCR_CLEAR_XMIT); + serial_out(up, UART_FCR, 0); +} + +static void +serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned char cval, fcr = 0; + unsigned long flags; + unsigned int baud, quot; + unsigned int dll; + + cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag)); + + if (termios->c_cflag & CSTOPB) + cval |= UART_LCR_STOP; + if (termios->c_cflag & PARENB) + cval |= UART_LCR_PARITY; + if (!(termios->c_cflag & PARODD)) + cval |= UART_LCR_EPAR; + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = uart_get_divisor(port, baud); + + if ((up->port.uartclk / quot) < (2400 * 16)) + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1; + else if ((up->port.uartclk / quot) < (230400 * 16)) + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8; + else + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32; + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + spin_lock_irqsave(&up->port.lock, flags); + + /* + * Ensure the port will be enabled. + * This is required especially for serial console. + */ + up->ier |= UART_IER_UUE; + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + if (termios->c_iflag & INPCK) + up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + up->port.read_status_mask |= UART_LSR_BI; + + /* + * Characters to ignore + */ + up->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; + if (termios->c_iflag & IGNBRK) { + up->port.ignore_status_mask |= UART_LSR_BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
+ */ + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_OE; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + up->port.ignore_status_mask |= UART_LSR_DR; + + /* + * CTS flow control flag and modem status interrupts + */ + up->ier &= ~UART_IER_MSI; + if (UART_ENABLE_MS(&up->port, termios->c_cflag)) + up->ier |= UART_IER_MSI; + + serial_out(up, UART_IER, up->ier); + + if (termios->c_cflag & CRTSCTS) + up->mcr |= UART_MCR_AFE; + else + up->mcr &= ~UART_MCR_AFE; + + serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */ + serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */ + + /* + * work around Errata #75 according to Intel(R) PXA27x Processor Family + * Specification Update (Nov 2005) + */ + dll = serial_in(up, UART_DLL); + WARN_ON(dll != (quot & 0xff)); + + serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */ + serial_out(up, UART_LCR, cval); /* reset DLAB */ + up->lcr = cval; /* Save LCR */ + serial_pxa_set_mctrl(&up->port, up->port.mctrl); + serial_out(up, UART_FCR, fcr); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static void +serial_pxa_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + + if (!state) + clk_prepare_enable(up->clk); + else + clk_disable_unprepare(up->clk); +} + +static void serial_pxa_release_port(struct uart_port *port) +{ +} + +static int serial_pxa_request_port(struct uart_port *port) +{ + return 0; +} + +static void serial_pxa_config_port(struct uart_port *port, int flags) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + up->port.type = PORT_PXA; +} + +static int +serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + /* we don't want the core code to modify any port params */ + return -EINVAL; +} + +static const char * +serial_pxa_type(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + return up->name; +} + +static struct uart_pxa_port *serial_pxa_ports[4]; +static struct uart_driver serial_pxa_reg; + +#ifdef CONFIG_SERIAL_PXA_CONSOLE + +/* + * Wait for transmitter & holding register to empty + */ +static void wait_for_xmitr(struct uart_pxa_port *up) +{ + unsigned int status, tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. */ + do { + status = serial_in(up, UART_LSR); + + if (status & UART_LSR_BI) + up->lsr_break_flag = UART_LSR_BI; + + if (--tmout == 0) + break; + udelay(1); + } while (!uart_lsr_tx_empty(status)); + + /* Wait up to 1s for flow control if necessary */ + if (up->port.flags & UPF_CONS_FLOW) { + tmout = 1000000; + while (--tmout && + ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) + udelay(1); + } +} + +static void serial_pxa_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + + wait_for_xmitr(up); + serial_out(up, UART_TX, ch); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * The console_lock must be held when we get here. 
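+ *
+ * The port lock is taken below unless the sysrq handler already holds it;
+ * when an oops is in progress only a trylock is attempted.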
+ */ +static void +serial_pxa_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_pxa_port *up = serial_pxa_ports[co->index]; + unsigned int ier; + unsigned long flags; + int locked = 1; + + clk_enable(up->clk); + local_irq_save(flags); + if (up->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&up->port.lock); + else + spin_lock(&up->port.lock); + + /* + * First save the IER then disable the interrupts + */ + ier = serial_in(up, UART_IER); + serial_out(up, UART_IER, UART_IER_UUE); + + uart_console_write(&up->port, s, count, serial_pxa_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up); + serial_out(up, UART_IER, ier); + + if (locked) + spin_unlock(&up->port.lock); + local_irq_restore(flags); + clk_disable(up->clk); + +} + +#ifdef CONFIG_CONSOLE_POLL +/* + * Console polling routines for writing and reading from the uart while + * in an interrupt or debug context. + */ + +static int serial_pxa_get_poll_char(struct uart_port *port) +{ + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + unsigned char lsr = serial_in(up, UART_LSR); + + while (!(lsr & UART_LSR_DR)) + lsr = serial_in(up, UART_LSR); + + return serial_in(up, UART_RX); +} + + +static void serial_pxa_put_poll_char(struct uart_port *port, + unsigned char c) +{ + unsigned int ier; + struct uart_pxa_port *up = (struct uart_pxa_port *)port; + + /* + * First save the IER then disable the interrupts + */ + ier = serial_in(up, UART_IER); + serial_out(up, UART_IER, UART_IER_UUE); + + wait_for_xmitr(up); + /* + * Send the character out. + */ + serial_out(up, UART_TX, c); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up); + serial_out(up, UART_IER, ier); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +static int __init +serial_pxa_console_setup(struct console *co, char *options) +{ + struct uart_pxa_port *up; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index == -1 || co->index >= serial_pxa_reg.nr) + co->index = 0; + up = serial_pxa_ports[co->index]; + if (!up) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&up->port, co, baud, parity, bits, flow); +} + +static struct console serial_pxa_console = { + .name = "ttyS", + .write = serial_pxa_console_write, + .device = uart_console_device, + .setup = serial_pxa_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &serial_pxa_reg, +}; + +#define PXA_CONSOLE &serial_pxa_console +#else +#define PXA_CONSOLE NULL +#endif + +static const struct uart_ops serial_pxa_pops = { + .tx_empty = serial_pxa_tx_empty, + .set_mctrl = serial_pxa_set_mctrl, + .get_mctrl = serial_pxa_get_mctrl, + .stop_tx = serial_pxa_stop_tx, + .start_tx = serial_pxa_start_tx, + .stop_rx = serial_pxa_stop_rx, + .enable_ms = serial_pxa_enable_ms, + .break_ctl = serial_pxa_break_ctl, + .startup = serial_pxa_startup, + .shutdown = serial_pxa_shutdown, + .set_termios = serial_pxa_set_termios, + .pm = serial_pxa_pm, + .type = serial_pxa_type, + .release_port = serial_pxa_release_port, + .request_port = serial_pxa_request_port, + .config_port = serial_pxa_config_port, + .verify_port = serial_pxa_verify_port, +#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_PXA_CONSOLE) + .poll_get_char = serial_pxa_get_poll_char, + .poll_put_char = serial_pxa_put_poll_char, +#endif +}; + +static struct uart_driver 
serial_pxa_reg = { + .owner = THIS_MODULE, + .driver_name = "PXA serial", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = 4, + .cons = PXA_CONSOLE, +}; + +#ifdef CONFIG_PM +static int serial_pxa_suspend(struct device *dev) +{ + struct uart_pxa_port *sport = dev_get_drvdata(dev); + + if (sport) + uart_suspend_port(&serial_pxa_reg, &sport->port); + + return 0; +} + +static int serial_pxa_resume(struct device *dev) +{ + struct uart_pxa_port *sport = dev_get_drvdata(dev); + + if (sport) + uart_resume_port(&serial_pxa_reg, &sport->port); + + return 0; +} + +static const struct dev_pm_ops serial_pxa_pm_ops = { + .suspend = serial_pxa_suspend, + .resume = serial_pxa_resume, +}; +#endif + +static const struct of_device_id serial_pxa_dt_ids[] = { + { .compatible = "mrvl,pxa-uart", }, + { .compatible = "mrvl,mmp-uart", }, + {} +}; + +static int serial_pxa_probe_dt(struct platform_device *pdev, + struct uart_pxa_port *sport) +{ + struct device_node *np = pdev->dev.of_node; + int ret; + + if (!np) + return 1; + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); + return ret; + } + sport->port.line = ret; + return 0; +} + +static int serial_pxa_probe(struct platform_device *dev) +{ + struct uart_pxa_port *sport; + struct resource *mmres; + int ret; + int irq; + + mmres = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!mmres) + return -ENODEV; + + irq = platform_get_irq(dev, 0); + if (irq < 0) + return irq; + + sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL); + if (!sport) + return -ENOMEM; + + sport->clk = clk_get(&dev->dev, NULL); + if (IS_ERR(sport->clk)) { + ret = PTR_ERR(sport->clk); + goto err_free; + } + + ret = clk_prepare(sport->clk); + if (ret) { + clk_put(sport->clk); + goto err_free; + } + + sport->port.type = PORT_PXA; + sport->port.iotype = UPIO_MEM; + sport->port.mapbase = mmres->start; + sport->port.irq = irq; + sport->port.fifosize = 64; + sport->port.ops = &serial_pxa_pops; + sport->port.dev = &dev->dev; + sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; + sport->port.uartclk = clk_get_rate(sport->clk); + sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PXA_CONSOLE); + + ret = serial_pxa_probe_dt(dev, sport); + if (ret > 0) + sport->port.line = dev->id; + else if (ret < 0) + goto err_clk; + if (sport->port.line >= ARRAY_SIZE(serial_pxa_ports)) { + dev_err(&dev->dev, "serial%d out of range\n", sport->port.line); + ret = -EINVAL; + goto err_clk; + } + snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1); + + sport->port.membase = ioremap(mmres->start, resource_size(mmres)); + if (!sport->port.membase) { + ret = -ENOMEM; + goto err_clk; + } + + serial_pxa_ports[sport->port.line] = sport; + + uart_add_one_port(&serial_pxa_reg, &sport->port); + platform_set_drvdata(dev, sport); + + return 0; + + err_clk: + clk_unprepare(sport->clk); + clk_put(sport->clk); + err_free: + kfree(sport); + return ret; +} + +static struct platform_driver serial_pxa_driver = { + .probe = serial_pxa_probe, + + .driver = { + .name = "pxa2xx-uart", +#ifdef CONFIG_PM + .pm = &serial_pxa_pm_ops, +#endif + .suppress_bind_attrs = true, + .of_match_table = serial_pxa_dt_ids, + }, +}; + + +/* 8250 driver for PXA serial ports should be used */ +static int __init serial_pxa_init(void) +{ + int ret; + + ret = uart_register_driver(&serial_pxa_reg); + if (ret != 0) + return ret; + + ret = platform_driver_register(&serial_pxa_driver); + if (ret != 0) + uart_unregister_driver(&serial_pxa_reg); + + 
return ret; +} +device_initcall(serial_pxa_init); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c new file mode 100644 index 000000000..d6f682ed1 --- /dev/null +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -0,0 +1,1615 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2018, The Linux foundation. All rights reserved. + +/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */ +#define __DISABLE_TRACE_MMIO__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* UART specific GENI registers */ +#define SE_UART_LOOPBACK_CFG 0x22c +#define SE_UART_IO_MACRO_CTRL 0x240 +#define SE_UART_TX_TRANS_CFG 0x25c +#define SE_UART_TX_WORD_LEN 0x268 +#define SE_UART_TX_STOP_BIT_LEN 0x26c +#define SE_UART_TX_TRANS_LEN 0x270 +#define SE_UART_RX_TRANS_CFG 0x280 +#define SE_UART_RX_WORD_LEN 0x28c +#define SE_UART_RX_STALE_CNT 0x294 +#define SE_UART_TX_PARITY_CFG 0x2a4 +#define SE_UART_RX_PARITY_CFG 0x2a8 +#define SE_UART_MANUAL_RFR 0x2ac + +/* SE_UART_TRANS_CFG */ +#define UART_TX_PAR_EN BIT(0) +#define UART_CTS_MASK BIT(1) + +/* SE_UART_TX_WORD_LEN */ +#define TX_WORD_LEN_MSK GENMASK(9, 0) + +/* SE_UART_TX_STOP_BIT_LEN */ +#define TX_STOP_BIT_LEN_MSK GENMASK(23, 0) +#define TX_STOP_BIT_LEN_1 0 +#define TX_STOP_BIT_LEN_1_5 1 +#define TX_STOP_BIT_LEN_2 2 + +/* SE_UART_TX_TRANS_LEN */ +#define TX_TRANS_LEN_MSK GENMASK(23, 0) + +/* SE_UART_RX_TRANS_CFG */ +#define UART_RX_INS_STATUS_BIT BIT(2) +#define UART_RX_PAR_EN BIT(3) + +/* SE_UART_RX_WORD_LEN */ +#define RX_WORD_LEN_MASK GENMASK(9, 0) + +/* SE_UART_RX_STALE_CNT */ +#define RX_STALE_CNT GENMASK(23, 0) + +/* SE_UART_TX_PARITY_CFG/RX_PARITY_CFG */ +#define PAR_CALC_EN BIT(0) +#define PAR_MODE_MSK GENMASK(2, 1) +#define PAR_MODE_SHFT 1 +#define PAR_EVEN 0x00 +#define PAR_ODD 0x01 +#define PAR_SPACE 0x10 +#define PAR_MARK 0x11 + +/* SE_UART_MANUAL_RFR register fields */ +#define UART_MANUAL_RFR_EN BIT(31) +#define UART_RFR_NOT_READY BIT(1) +#define UART_RFR_READY BIT(0) + +/* UART M_CMD OP codes */ +#define UART_START_TX 0x1 +#define UART_START_BREAK 0x4 +#define UART_STOP_BREAK 0x5 +/* UART S_CMD OP codes */ +#define UART_START_READ 0x1 +#define UART_PARAM 0x1 + +#define UART_OVERSAMPLING 32 +#define STALE_TIMEOUT 16 +#define DEFAULT_BITS_PER_CHAR 10 +#define GENI_UART_CONS_PORTS 1 +#define GENI_UART_PORTS 3 +#define DEF_FIFO_DEPTH_WORDS 16 +#define DEF_TX_WM 2 +#define DEF_FIFO_WIDTH_BITS 32 +#define UART_RX_WM 2 + +/* SE_UART_LOOPBACK_CFG */ +#define RX_TX_SORTED BIT(0) +#define CTS_RTS_SORTED BIT(1) +#define RX_TX_CTS_RTS_SORTED (RX_TX_SORTED | CTS_RTS_SORTED) + +/* UART pin swap value */ +#define DEFAULT_IO_MACRO_IO0_IO1_MASK GENMASK(3, 0) +#define IO_MACRO_IO0_SEL 0x3 +#define DEFAULT_IO_MACRO_IO2_IO3_MASK GENMASK(15, 4) +#define IO_MACRO_IO2_IO3_SWAP 0x4640 + +/* We always configure 4 bytes per FIFO word */ +#define BYTES_PER_FIFO_WORD 4 + +struct qcom_geni_private_data { + /* NOTE: earlycon port will have NULL here */ + struct uart_driver *drv; + + u32 poll_cached_bytes; + unsigned int poll_cached_bytes_cnt; + + u32 write_cached_bytes; + unsigned int write_cached_bytes_cnt; +}; + +struct qcom_geni_serial_port { + struct uart_port uport; + struct geni_se se; + const char *name; + u32 tx_fifo_depth; + u32 tx_fifo_width; + u32 rx_fifo_depth; + bool setup; + unsigned long clk_rate; + int (*handle_rx)(struct uart_port *uport, u32 bytes, 
bool drop); + unsigned int baud; + void *rx_fifo; + u32 loopback; + bool brk; + + unsigned int tx_remaining; + int wakeup_irq; + bool rx_tx_swap; + bool cts_rts_swap; + + struct qcom_geni_private_data private_data; +}; + +static const struct uart_ops qcom_geni_console_pops; +static const struct uart_ops qcom_geni_uart_pops; +static struct uart_driver qcom_geni_console_driver; +static struct uart_driver qcom_geni_uart_driver; +static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop); +static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop); +static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port); +static void qcom_geni_serial_stop_rx(struct uart_port *uport); +static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop); + +#define to_dev_port(ptr, member) \ + container_of(ptr, struct qcom_geni_serial_port, member) + +static struct qcom_geni_serial_port qcom_geni_uart_ports[GENI_UART_PORTS] = { + [0] = { + .uport = { + .iotype = UPIO_MEM, + .ops = &qcom_geni_uart_pops, + .flags = UPF_BOOT_AUTOCONF, + .line = 0, + }, + }, + [1] = { + .uport = { + .iotype = UPIO_MEM, + .ops = &qcom_geni_uart_pops, + .flags = UPF_BOOT_AUTOCONF, + .line = 1, + }, + }, + [2] = { + .uport = { + .iotype = UPIO_MEM, + .ops = &qcom_geni_uart_pops, + .flags = UPF_BOOT_AUTOCONF, + .line = 2, + }, + }, +}; + +static struct qcom_geni_serial_port qcom_geni_console_port = { + .uport = { + .iotype = UPIO_MEM, + .ops = &qcom_geni_console_pops, + .flags = UPF_BOOT_AUTOCONF, + .line = 0, + }, +}; + +static int qcom_geni_serial_request_port(struct uart_port *uport) +{ + struct platform_device *pdev = to_platform_device(uport->dev); + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + uport->membase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(uport->membase)) + return PTR_ERR(uport->membase); + port->se.base = uport->membase; + return 0; +} + +static void qcom_geni_serial_config_port(struct uart_port *uport, int cfg_flags) +{ + if (cfg_flags & UART_CONFIG_TYPE) { + uport->type = PORT_MSM; + qcom_geni_serial_request_port(uport); + } +} + +static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport) +{ + unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; + u32 geni_ios; + + if (uart_console(uport)) { + mctrl |= TIOCM_CTS; + } else { + geni_ios = readl(uport->membase + SE_GENI_IOS); + if (!(geni_ios & IO2_DATA_IN)) + mctrl |= TIOCM_CTS; + } + + return mctrl; +} + +static void qcom_geni_serial_set_mctrl(struct uart_port *uport, + unsigned int mctrl) +{ + u32 uart_manual_rfr = 0; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + if (uart_console(uport)) + return; + + if (mctrl & TIOCM_LOOP) + port->loopback = RX_TX_CTS_RTS_SORTED; + + if (!(mctrl & TIOCM_RTS) && !uport->suspended) + uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY; + writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR); +} + +static const char *qcom_geni_serial_get_type(struct uart_port *uport) +{ + return "MSM"; +} + +static struct qcom_geni_serial_port *get_port_from_line(int line, bool console) +{ + struct qcom_geni_serial_port *port; + int nr_ports = console ? GENI_UART_CONS_PORTS : GENI_UART_PORTS; + + if (line < 0 || line >= nr_ports) + return ERR_PTR(-ENXIO); + + port = console ? 
&qcom_geni_console_port : &qcom_geni_uart_ports[line]; + return port; +} + +static bool qcom_geni_serial_poll_bit(struct uart_port *uport, + int offset, int field, bool set) +{ + u32 reg; + struct qcom_geni_serial_port *port; + unsigned int baud; + unsigned int fifo_bits; + unsigned long timeout_us = 20000; + struct qcom_geni_private_data *private_data = uport->private_data; + + if (private_data->drv) { + port = to_dev_port(uport, uport); + baud = port->baud; + if (!baud) + baud = 115200; + fifo_bits = port->tx_fifo_depth * port->tx_fifo_width; + /* + * Total polling iterations based on FIFO worth of bytes to be + * sent at current baud. Add a little fluff to the wait. + */ + timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500; + } + + /* + * Use custom implementation instead of readl_poll_atomic since ktimer + * is not ready at the time of early console. + */ + timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10; + while (timeout_us) { + reg = readl(uport->membase + offset); + if ((bool)(reg & field) == set) + return true; + udelay(10); + timeout_us -= 10; + } + return false; +} + +static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size) +{ + u32 m_cmd; + + writel(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN); + m_cmd = UART_START_TX << M_OPCODE_SHFT; + writel(m_cmd, uport->membase + SE_GENI_M_CMD0); +} + +static void qcom_geni_serial_poll_tx_done(struct uart_port *uport) +{ + int done; + u32 irq_clear = M_CMD_DONE_EN; + + done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_DONE_EN, true); + if (!done) { + writel(M_GENI_CMD_ABORT, uport->membase + + SE_GENI_M_CMD_CTRL_REG); + irq_clear |= M_CMD_ABORT_EN; + qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_ABORT_EN, true); + } + writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR); +} + +static void qcom_geni_serial_abort_rx(struct uart_port *uport) +{ + u32 irq_clear = S_CMD_DONE_EN | S_CMD_ABORT_EN; + + writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG); + qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG, + S_GENI_CMD_ABORT, false); + writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR); + writel(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG); +} + +#ifdef CONFIG_CONSOLE_POLL + +static int qcom_geni_serial_get_char(struct uart_port *uport) +{ + struct qcom_geni_private_data *private_data = uport->private_data; + u32 status; + u32 word_cnt; + int ret; + + if (!private_data->poll_cached_bytes_cnt) { + status = readl(uport->membase + SE_GENI_M_IRQ_STATUS); + writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR); + + status = readl(uport->membase + SE_GENI_S_IRQ_STATUS); + writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR); + + status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS); + word_cnt = status & RX_FIFO_WC_MSK; + if (!word_cnt) + return NO_POLL_CHAR; + + if (word_cnt == 1 && (status & RX_LAST)) + /* + * NOTE: If RX_LAST_BYTE_VALID is 0 it needs to be + * treated as if it was BYTES_PER_FIFO_WORD. 
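+			 * (the check for zero just below applies that fallback)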
+ */ + private_data->poll_cached_bytes_cnt = + (status & RX_LAST_BYTE_VALID_MSK) >> + RX_LAST_BYTE_VALID_SHFT; + + if (private_data->poll_cached_bytes_cnt == 0) + private_data->poll_cached_bytes_cnt = BYTES_PER_FIFO_WORD; + + private_data->poll_cached_bytes = + readl(uport->membase + SE_GENI_RX_FIFOn); + } + + private_data->poll_cached_bytes_cnt--; + ret = private_data->poll_cached_bytes & 0xff; + private_data->poll_cached_bytes >>= 8; + + return ret; +} + +static void qcom_geni_serial_poll_put_char(struct uart_port *uport, + unsigned char c) +{ + writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); + qcom_geni_serial_setup_tx(uport, 1); + WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_TX_FIFO_WATERMARK_EN, true)); + writel(c, uport->membase + SE_GENI_TX_FIFOn); + writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + qcom_geni_serial_poll_tx_done(uport); +} +#endif + +#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE +static void qcom_geni_serial_wr_char(struct uart_port *uport, unsigned char ch) +{ + struct qcom_geni_private_data *private_data = uport->private_data; + + private_data->write_cached_bytes = + (private_data->write_cached_bytes >> 8) | (ch << 24); + private_data->write_cached_bytes_cnt++; + + if (private_data->write_cached_bytes_cnt == BYTES_PER_FIFO_WORD) { + writel(private_data->write_cached_bytes, + uport->membase + SE_GENI_TX_FIFOn); + private_data->write_cached_bytes_cnt = 0; + } +} + +static void +__qcom_geni_serial_console_write(struct uart_port *uport, const char *s, + unsigned int count) +{ + struct qcom_geni_private_data *private_data = uport->private_data; + + int i; + u32 bytes_to_send = count; + + for (i = 0; i < count; i++) { + /* + * uart_console_write() adds a carriage return for each newline. + * Account for additional bytes to be written. + */ + if (s[i] == '\n') + bytes_to_send++; + } + + writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); + qcom_geni_serial_setup_tx(uport, bytes_to_send); + for (i = 0; i < count; ) { + size_t chars_to_write = 0; + size_t avail = DEF_FIFO_DEPTH_WORDS - DEF_TX_WM; + + /* + * If the WM bit never set, then the Tx state machine is not + * in a valid state, so break, cancel/abort any existing + * command. Unfortunately the current data being written is + * lost. 
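+		 * The trailing qcom_geni_serial_poll_tx_done() call below then
+		 * aborts the command if it never completes.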
+ */ + if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_TX_FIFO_WATERMARK_EN, true)) + break; + chars_to_write = min_t(size_t, count - i, avail / 2); + uart_console_write(uport, s + i, chars_to_write, + qcom_geni_serial_wr_char); + writel(M_TX_FIFO_WATERMARK_EN, uport->membase + + SE_GENI_M_IRQ_CLEAR); + i += chars_to_write; + } + + if (private_data->write_cached_bytes_cnt) { + private_data->write_cached_bytes >>= BITS_PER_BYTE * + (BYTES_PER_FIFO_WORD - private_data->write_cached_bytes_cnt); + writel(private_data->write_cached_bytes, + uport->membase + SE_GENI_TX_FIFOn); + private_data->write_cached_bytes_cnt = 0; + } + + qcom_geni_serial_poll_tx_done(uport); +} + +static void qcom_geni_serial_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *uport; + struct qcom_geni_serial_port *port; + bool locked = true; + unsigned long flags; + u32 geni_status; + u32 irq_en; + + WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS); + + port = get_port_from_line(co->index, true); + if (IS_ERR(port)) + return; + + uport = &port->uport; + if (oops_in_progress) + locked = spin_trylock_irqsave(&uport->lock, flags); + else + spin_lock_irqsave(&uport->lock, flags); + + geni_status = readl(uport->membase + SE_GENI_STATUS); + + /* Cancel the current write to log the fault */ + if (!locked) { + geni_se_cancel_m_cmd(&port->se); + if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_CANCEL_EN, true)) { + geni_se_abort_m_cmd(&port->se); + qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_ABORT_EN, true); + writel(M_CMD_ABORT_EN, uport->membase + + SE_GENI_M_IRQ_CLEAR); + } + writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) { + /* + * It seems we can't interrupt existing transfers if all data + * has been sent, in which case we need to look for done first. 
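+		 * Once that transfer has drained, the TX watermark interrupt
+		 * is re-armed below if more data is still queued.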
+ */ + qcom_geni_serial_poll_tx_done(uport); + + if (!uart_circ_empty(&uport->state->xmit)) { + irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); + writel(irq_en | M_TX_FIFO_WATERMARK_EN, + uport->membase + SE_GENI_M_IRQ_EN); + } + } + + __qcom_geni_serial_console_write(uport, s, count); + + if (port->tx_remaining) + qcom_geni_serial_setup_tx(uport, port->tx_remaining); + + if (locked) + spin_unlock_irqrestore(&uport->lock, flags); +} + +static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop) +{ + u32 i; + unsigned char buf[sizeof(u32)]; + struct tty_port *tport; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + tport = &uport->state->port; + for (i = 0; i < bytes; ) { + int c; + int chunk = min_t(int, bytes - i, BYTES_PER_FIFO_WORD); + + ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, buf, 1); + i += chunk; + if (drop) + continue; + + for (c = 0; c < chunk; c++) { + int sysrq; + + uport->icount.rx++; + if (port->brk && buf[c] == 0) { + port->brk = false; + if (uart_handle_break(uport)) + continue; + } + + sysrq = uart_prepare_sysrq_char(uport, buf[c]); + + if (!sysrq) + tty_insert_flip_char(tport, buf[c], TTY_NORMAL); + } + } + if (!drop) + tty_flip_buffer_push(tport); + return 0; +} +#else +static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop) +{ + return -EPERM; +} + +#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */ + +static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop) +{ + struct tty_port *tport; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + u32 num_bytes_pw = port->tx_fifo_width / BITS_PER_BYTE; + u32 words = ALIGN(bytes, num_bytes_pw) / num_bytes_pw; + int ret; + + tport = &uport->state->port; + ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, port->rx_fifo, words); + if (drop) + return 0; + + ret = tty_insert_flip_string(tport, port->rx_fifo, bytes); + if (ret != bytes) { + dev_err(uport->dev, "%s:Unable to push data ret %d_bytes %d\n", + __func__, ret, bytes); + WARN_ON_ONCE(1); + } + uport->icount.rx += ret; + tty_flip_buffer_push(tport); + return ret; +} + +static void qcom_geni_serial_start_tx(struct uart_port *uport) +{ + u32 irq_en; + u32 status; + + status = readl(uport->membase + SE_GENI_STATUS); + if (status & M_GENI_CMD_ACTIVE) + return; + + if (!qcom_geni_serial_tx_empty(uport)) + return; + + irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); + irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN; + + writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); + writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN); +} + +static void qcom_geni_serial_stop_tx(struct uart_port *uport) +{ + u32 irq_en; + u32 status; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); + irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN); + writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG); + writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN); + status = readl(uport->membase + SE_GENI_STATUS); + /* Possible stop tx is called multiple times. 
*/
+	if (!(status & M_GENI_CMD_ACTIVE))
+		return;
+
+	geni_se_cancel_m_cmd(&port->se);
+	if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+						M_CMD_CANCEL_EN, true)) {
+		geni_se_abort_m_cmd(&port->se);
+		qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+						M_CMD_ABORT_EN, true);
+		writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+	}
+	writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_start_rx(struct uart_port *uport)
+{
+	u32 irq_en;
+	u32 status;
+	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+
+	status = readl(uport->membase + SE_GENI_STATUS);
+	if (status & S_GENI_CMD_ACTIVE)
+		qcom_geni_serial_stop_rx(uport);
+
+	geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
+
+	irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+	irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+	writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+	irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+	irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+	writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+}
+
+static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+{
+	u32 irq_en;
+	u32 status;
+	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+	u32 s_irq_status;
+
+	irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+	irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+	writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+	irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+	irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+	writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+
+	status = readl(uport->membase + SE_GENI_STATUS);
+	/* stop_rx may be called multiple times. */
+	if (!(status & S_GENI_CMD_ACTIVE))
+		return;
+
+	geni_se_cancel_s_cmd(&port->se);
+	qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+					S_CMD_CANCEL_EN, true);
+	/*
+	 * If the cancel times out, the secondary engine remains active
+	 * and the abort sequence below is executed.
+ */ + s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS); + /* Flush the Rx buffer */ + if (s_irq_status & S_RX_FIFO_LAST_EN) + qcom_geni_serial_handle_rx(uport, true); + writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR); + + status = readl(uport->membase + SE_GENI_STATUS); + if (status & S_GENI_CMD_ACTIVE) + qcom_geni_serial_abort_rx(uport); +} + +static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop) +{ + u32 status; + u32 word_cnt; + u32 last_word_byte_cnt; + u32 last_word_partial; + u32 total_bytes; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS); + word_cnt = status & RX_FIFO_WC_MSK; + last_word_partial = status & RX_LAST; + last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >> + RX_LAST_BYTE_VALID_SHFT; + + if (!word_cnt) + return; + total_bytes = BYTES_PER_FIFO_WORD * (word_cnt - 1); + if (last_word_partial && last_word_byte_cnt) + total_bytes += last_word_byte_cnt; + else + total_bytes += BYTES_PER_FIFO_WORD; + port->handle_rx(uport, total_bytes, drop); +} + +static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done, + bool active) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + struct circ_buf *xmit = &uport->state->xmit; + size_t avail; + size_t remaining; + size_t pending; + int i; + u32 status; + u32 irq_en; + unsigned int chunk; + int tail; + + status = readl(uport->membase + SE_GENI_TX_FIFO_STATUS); + + /* Complete the current tx command before taking newly added data */ + if (active) + pending = port->tx_remaining; + else + pending = uart_circ_chars_pending(xmit); + + /* All data has been transmitted and acknowledged as received */ + if (!pending && !status && done) { + qcom_geni_serial_stop_tx(uport); + goto out_write_wakeup; + } + + avail = port->tx_fifo_depth - (status & TX_FIFO_WC); + avail *= BYTES_PER_FIFO_WORD; + + tail = xmit->tail; + chunk = min(avail, pending); + if (!chunk) + goto out_write_wakeup; + + if (!port->tx_remaining) { + qcom_geni_serial_setup_tx(uport, pending); + port->tx_remaining = pending; + + irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); + if (!(irq_en & M_TX_FIFO_WATERMARK_EN)) + writel(irq_en | M_TX_FIFO_WATERMARK_EN, + uport->membase + SE_GENI_M_IRQ_EN); + } + + remaining = chunk; + for (i = 0; i < chunk; ) { + unsigned int tx_bytes; + u8 buf[sizeof(u32)]; + int c; + + memset(buf, 0, sizeof(buf)); + tx_bytes = min_t(size_t, remaining, BYTES_PER_FIFO_WORD); + + for (c = 0; c < tx_bytes ; c++) { + buf[c] = xmit->buf[tail++]; + tail &= UART_XMIT_SIZE - 1; + } + + iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1); + + i += tx_bytes; + uport->icount.tx += tx_bytes; + remaining -= tx_bytes; + port->tx_remaining -= tx_bytes; + } + + xmit->tail = tail; + + /* + * The tx fifo watermark is level triggered and latched. Though we had + * cleared it in qcom_geni_serial_isr it will have already reasserted + * so we must clear it again here after our writes. 
+ */ + writel(M_TX_FIFO_WATERMARK_EN, + uport->membase + SE_GENI_M_IRQ_CLEAR); + +out_write_wakeup: + if (!port->tx_remaining) { + irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); + if (irq_en & M_TX_FIFO_WATERMARK_EN) + writel(irq_en & ~M_TX_FIFO_WATERMARK_EN, + uport->membase + SE_GENI_M_IRQ_EN); + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(uport); +} + +static irqreturn_t qcom_geni_serial_isr(int isr, void *dev) +{ + u32 m_irq_en; + u32 m_irq_status; + u32 s_irq_status; + u32 geni_status; + struct uart_port *uport = dev; + bool drop_rx = false; + struct tty_port *tport = &uport->state->port; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + if (uport->suspended) + return IRQ_NONE; + + spin_lock(&uport->lock); + + m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS); + s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS); + geni_status = readl(uport->membase + SE_GENI_STATUS); + m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); + writel(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR); + writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR); + + if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN)) + goto out_unlock; + + if (s_irq_status & S_RX_FIFO_WR_ERR_EN) { + uport->icount.overrun++; + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + } + + if (m_irq_status & m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN)) + qcom_geni_serial_handle_tx(uport, m_irq_status & M_CMD_DONE_EN, + geni_status & M_GENI_CMD_ACTIVE); + + if (s_irq_status & S_GP_IRQ_0_EN || s_irq_status & S_GP_IRQ_1_EN) { + if (s_irq_status & S_GP_IRQ_0_EN) + uport->icount.parity++; + drop_rx = true; + } else if (s_irq_status & S_GP_IRQ_2_EN || + s_irq_status & S_GP_IRQ_3_EN) { + uport->icount.brk++; + port->brk = true; + } + + if (s_irq_status & S_RX_FIFO_WATERMARK_EN || + s_irq_status & S_RX_FIFO_LAST_EN) + qcom_geni_serial_handle_rx(uport, drop_rx); + +out_unlock: + uart_unlock_and_check_sysrq(uport); + + return IRQ_HANDLED; +} + +static int setup_fifos(struct qcom_geni_serial_port *port) +{ + struct uart_port *uport; + u32 old_rx_fifo_depth = port->rx_fifo_depth; + + uport = &port->uport; + port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se); + port->tx_fifo_width = geni_se_get_tx_fifo_width(&port->se); + port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se); + uport->fifosize = + (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE; + + if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) { + port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo, + port->rx_fifo_depth * sizeof(u32), + GFP_KERNEL); + if (!port->rx_fifo) + return -ENOMEM; + } + + return 0; +} + + +static void qcom_geni_serial_shutdown(struct uart_port *uport) +{ + disable_irq(uport->irq); +} + +static int qcom_geni_serial_port_setup(struct uart_port *uport) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT; + u32 proto; + u32 pin_swap; + int ret; + + proto = geni_se_read_proto(&port->se); + if (proto != GENI_SE_UART) { + dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto); + return -ENXIO; + } + + qcom_geni_serial_stop_rx(uport); + + ret = setup_fifos(port); + if (ret) + return ret; + + writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT); + + pin_swap = readl(uport->membase + SE_UART_IO_MACRO_CTRL); + if (port->rx_tx_swap) { + pin_swap &= ~DEFAULT_IO_MACRO_IO2_IO3_MASK; + pin_swap |= IO_MACRO_IO2_IO3_SWAP; + } + if (port->cts_rts_swap) { + 
pin_swap &= ~DEFAULT_IO_MACRO_IO0_IO1_MASK; + pin_swap |= IO_MACRO_IO0_SEL; + } + /* Configure this register if RX-TX, CTS-RTS pins are swapped */ + if (port->rx_tx_swap || port->cts_rts_swap) + writel(pin_swap, uport->membase + SE_UART_IO_MACRO_CTRL); + + /* + * Make an unconditional cancel on the main sequencer to reset + * it else we could end up in data loss scenarios. + */ + if (uart_console(uport)) + qcom_geni_serial_poll_tx_done(uport); + geni_se_config_packing(&port->se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD, + false, true, true); + geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2); + geni_se_select_mode(&port->se, GENI_SE_FIFO); + port->setup = true; + + return 0; +} + +static int qcom_geni_serial_startup(struct uart_port *uport) +{ + int ret; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + if (!port->setup) { + ret = qcom_geni_serial_port_setup(uport); + if (ret) + return ret; + } + enable_irq(uport->irq); + + return 0; +} + +static unsigned long find_clk_rate_in_tol(struct clk *clk, unsigned int desired_clk, + unsigned int *clk_div, unsigned int percent_tol) +{ + unsigned long freq; + unsigned long div, maxdiv; + u64 mult; + unsigned long offset, abs_tol, achieved; + + abs_tol = div_u64((u64)desired_clk * percent_tol, 100); + maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT; + div = 1; + while (div <= maxdiv) { + mult = (u64)div * desired_clk; + if (mult != (unsigned long)mult) + break; + + offset = div * abs_tol; + freq = clk_round_rate(clk, mult - offset); + + /* Can only get lower if we're done */ + if (freq < mult - offset) + break; + + /* + * Re-calculate div in case rounding skipped rates but we + * ended up at a good one, then check for a match. + */ + div = DIV_ROUND_CLOSEST(freq, desired_clk); + achieved = DIV_ROUND_CLOSEST(freq, div); + if (achieved <= desired_clk + abs_tol && + achieved >= desired_clk - abs_tol) { + *clk_div = div; + return freq; + } + + div = DIV_ROUND_UP(freq, desired_clk); + } + + return 0; +} + +static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud, + unsigned int sampling_rate, unsigned int *clk_div) +{ + unsigned long ser_clk; + unsigned long desired_clk; + + desired_clk = baud * sampling_rate; + if (!desired_clk) + return 0; + + /* + * try to find a clock rate within 2% tolerance, then within 5% + */ + ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 2); + if (!ser_clk) + ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 5); + + return ser_clk; +} + +static void qcom_geni_serial_set_termios(struct uart_port *uport, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud; + u32 bits_per_char; + u32 tx_trans_cfg; + u32 tx_parity_cfg; + u32 rx_trans_cfg; + u32 rx_parity_cfg; + u32 stop_bit_len; + unsigned int clk_div; + u32 ser_clk_cfg; + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + unsigned long clk_rate; + u32 ver, sampling_rate; + unsigned int avg_bw_core; + + qcom_geni_serial_stop_rx(uport); + /* baud rate */ + baud = uart_get_baud_rate(uport, termios, old, 300, 4000000); + port->baud = baud; + + sampling_rate = UART_OVERSAMPLING; + /* Sampling rate is halved for IP versions >= 2.5 */ + ver = geni_se_get_qup_hw_version(&port->se); + if (ver >= QUP_SE_VERSION_2_5) + sampling_rate /= 2; + + clk_rate = get_clk_div_rate(port->se.clk, baud, + sampling_rate, &clk_div); + if (!clk_rate) { + dev_err(port->se.dev, + "Couldn't find suitable clock rate for %u\n", + baud * sampling_rate); + goto out_restart_rx; + } + + dev_dbg(port->se.dev, 
"desired_rate-%u, clk_rate-%lu, clk_div-%u\n", + baud * sampling_rate, clk_rate, clk_div); + + uport->uartclk = clk_rate; + port->clk_rate = clk_rate; + dev_pm_opp_set_rate(uport->dev, clk_rate); + ser_clk_cfg = SER_CLK_EN; + ser_clk_cfg |= clk_div << CLK_DIV_SHFT; + + /* + * Bump up BW vote on CPU and CORE path as driver supports FIFO mode + * only. + */ + avg_bw_core = (baud > 115200) ? Bps_to_icc(CORE_2X_50_MHZ) + : GENI_DEFAULT_BW; + port->se.icc_paths[GENI_TO_CORE].avg_bw = avg_bw_core; + port->se.icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(baud); + geni_icc_set_bw(&port->se); + + /* parity */ + tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG); + tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG); + rx_trans_cfg = readl(uport->membase + SE_UART_RX_TRANS_CFG); + rx_parity_cfg = readl(uport->membase + SE_UART_RX_PARITY_CFG); + if (termios->c_cflag & PARENB) { + tx_trans_cfg |= UART_TX_PAR_EN; + rx_trans_cfg |= UART_RX_PAR_EN; + tx_parity_cfg |= PAR_CALC_EN; + rx_parity_cfg |= PAR_CALC_EN; + if (termios->c_cflag & PARODD) { + tx_parity_cfg |= PAR_ODD; + rx_parity_cfg |= PAR_ODD; + } else if (termios->c_cflag & CMSPAR) { + tx_parity_cfg |= PAR_SPACE; + rx_parity_cfg |= PAR_SPACE; + } else { + tx_parity_cfg |= PAR_EVEN; + rx_parity_cfg |= PAR_EVEN; + } + } else { + tx_trans_cfg &= ~UART_TX_PAR_EN; + rx_trans_cfg &= ~UART_RX_PAR_EN; + tx_parity_cfg &= ~PAR_CALC_EN; + rx_parity_cfg &= ~PAR_CALC_EN; + } + + /* bits per char */ + bits_per_char = tty_get_char_size(termios->c_cflag); + + /* stop bits */ + if (termios->c_cflag & CSTOPB) + stop_bit_len = TX_STOP_BIT_LEN_2; + else + stop_bit_len = TX_STOP_BIT_LEN_1; + + /* flow control, clear the CTS_MASK bit if using flow control. */ + if (termios->c_cflag & CRTSCTS) + tx_trans_cfg &= ~UART_CTS_MASK; + else + tx_trans_cfg |= UART_CTS_MASK; + + if (baud) + uart_update_timeout(uport, termios->c_cflag, baud); + + if (!uart_console(uport)) + writel(port->loopback, + uport->membase + SE_UART_LOOPBACK_CFG); + writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG); + writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG); + writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG); + writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG); + writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN); + writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN); + writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); + writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG); + writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG); +out_restart_rx: + qcom_geni_serial_start_rx(uport); +} + +static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport) +{ + return !readl(uport->membase + SE_GENI_TX_FIFO_STATUS); +} + +#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE +static int qcom_geni_console_setup(struct console *co, char *options) +{ + struct uart_port *uport; + struct qcom_geni_serial_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + if (co->index >= GENI_UART_CONS_PORTS || co->index < 0) + return -ENXIO; + + port = get_port_from_line(co->index, true); + if (IS_ERR(port)) { + pr_err("Invalid line %d\n", co->index); + return PTR_ERR(port); + } + + uport = &port->uport; + + if (unlikely(!uport->membase)) + return -ENXIO; + + if (!port->setup) { + ret = qcom_geni_serial_port_setup(uport); + if (ret) + return ret; + } + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(uport, co, baud, parity, 
bits, flow); +} + +static void qcom_geni_serial_earlycon_write(struct console *con, + const char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + + __qcom_geni_serial_console_write(&dev->port, s, n); +} + +#ifdef CONFIG_CONSOLE_POLL +static int qcom_geni_serial_earlycon_read(struct console *con, + char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + struct uart_port *uport = &dev->port; + int num_read = 0; + int ch; + + while (num_read < n) { + ch = qcom_geni_serial_get_char(uport); + if (ch == NO_POLL_CHAR) + break; + s[num_read++] = ch; + } + + return num_read; +} + +static void __init qcom_geni_serial_enable_early_read(struct geni_se *se, + struct console *con) +{ + geni_se_setup_s_cmd(se, UART_START_READ, 0); + con->read = qcom_geni_serial_earlycon_read; +} +#else +static inline void qcom_geni_serial_enable_early_read(struct geni_se *se, + struct console *con) { } +#endif + +static struct qcom_geni_private_data earlycon_private_data; + +static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, + const char *opt) +{ + struct uart_port *uport = &dev->port; + u32 tx_trans_cfg; + u32 tx_parity_cfg = 0; /* Disable Tx Parity */ + u32 rx_trans_cfg = 0; + u32 rx_parity_cfg = 0; /* Disable Rx Parity */ + u32 stop_bit_len = 0; /* Default stop bit length - 1 bit */ + u32 bits_per_char; + struct geni_se se; + + if (!uport->membase) + return -EINVAL; + + uport->private_data = &earlycon_private_data; + + memset(&se, 0, sizeof(se)); + se.base = uport->membase; + if (geni_se_read_proto(&se) != GENI_SE_UART) + return -ENXIO; + /* + * Ignore Flow control. + * n = 8. + */ + tx_trans_cfg = UART_CTS_MASK; + bits_per_char = BITS_PER_BYTE; + + /* + * Make an unconditional cancel on the main sequencer to reset + * it else we could end up in data loss scenarios. 
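+	 * The RX side is aborted as well before the FIFO mode is configured.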
+ */ + qcom_geni_serial_poll_tx_done(uport); + qcom_geni_serial_abort_rx(uport); + geni_se_config_packing(&se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD, + false, true, true); + geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2); + geni_se_select_mode(&se, GENI_SE_FIFO); + + writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG); + writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG); + writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG); + writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG); + writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN); + writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN); + writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); + + dev->con->write = qcom_geni_serial_earlycon_write; + dev->con->setup = NULL; + qcom_geni_serial_enable_early_read(&se, dev->con); + + return 0; +} +OF_EARLYCON_DECLARE(qcom_geni, "qcom,geni-debug-uart", + qcom_geni_serial_earlycon_setup); + +static int __init console_register(struct uart_driver *drv) +{ + return uart_register_driver(drv); +} + +static void console_unregister(struct uart_driver *drv) +{ + uart_unregister_driver(drv); +} + +static struct console cons_ops = { + .name = "ttyMSM", + .write = qcom_geni_serial_console_write, + .device = uart_console_device, + .setup = qcom_geni_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &qcom_geni_console_driver, +}; + +static struct uart_driver qcom_geni_console_driver = { + .owner = THIS_MODULE, + .driver_name = "qcom_geni_console", + .dev_name = "ttyMSM", + .nr = GENI_UART_CONS_PORTS, + .cons = &cons_ops, +}; +#else +static int console_register(struct uart_driver *drv) +{ + return 0; +} + +static void console_unregister(struct uart_driver *drv) +{ +} +#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */ + +static struct uart_driver qcom_geni_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "qcom_geni_uart", + .dev_name = "ttyHS", + .nr = GENI_UART_PORTS, +}; + +static void qcom_geni_serial_pm(struct uart_port *uport, + unsigned int new_state, unsigned int old_state) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + + /* If we've never been called, treat it as off */ + if (old_state == UART_PM_STATE_UNDEFINED) + old_state = UART_PM_STATE_OFF; + + if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) { + geni_icc_enable(&port->se); + if (port->clk_rate) + dev_pm_opp_set_rate(uport->dev, port->clk_rate); + geni_se_resources_on(&port->se); + } else if (new_state == UART_PM_STATE_OFF && + old_state == UART_PM_STATE_ON) { + geni_se_resources_off(&port->se); + dev_pm_opp_set_rate(uport->dev, 0); + geni_icc_disable(&port->se); + } +} + +static const struct uart_ops qcom_geni_console_pops = { + .tx_empty = qcom_geni_serial_tx_empty, + .stop_tx = qcom_geni_serial_stop_tx, + .start_tx = qcom_geni_serial_start_tx, + .stop_rx = qcom_geni_serial_stop_rx, + .start_rx = qcom_geni_serial_start_rx, + .set_termios = qcom_geni_serial_set_termios, + .startup = qcom_geni_serial_startup, + .request_port = qcom_geni_serial_request_port, + .config_port = qcom_geni_serial_config_port, + .shutdown = qcom_geni_serial_shutdown, + .type = qcom_geni_serial_get_type, + .set_mctrl = qcom_geni_serial_set_mctrl, + .get_mctrl = qcom_geni_serial_get_mctrl, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = qcom_geni_serial_get_char, + .poll_put_char = qcom_geni_serial_poll_put_char, +#endif + .pm = qcom_geni_serial_pm, +}; + +static const struct uart_ops qcom_geni_uart_pops = { + .tx_empty = 
qcom_geni_serial_tx_empty, + .stop_tx = qcom_geni_serial_stop_tx, + .start_tx = qcom_geni_serial_start_tx, + .stop_rx = qcom_geni_serial_stop_rx, + .set_termios = qcom_geni_serial_set_termios, + .startup = qcom_geni_serial_startup, + .request_port = qcom_geni_serial_request_port, + .config_port = qcom_geni_serial_config_port, + .shutdown = qcom_geni_serial_shutdown, + .type = qcom_geni_serial_get_type, + .set_mctrl = qcom_geni_serial_set_mctrl, + .get_mctrl = qcom_geni_serial_get_mctrl, + .pm = qcom_geni_serial_pm, +}; + +static int qcom_geni_serial_probe(struct platform_device *pdev) +{ + int ret = 0; + int line; + struct qcom_geni_serial_port *port; + struct uart_port *uport; + struct resource *res; + int irq; + bool console = false; + struct uart_driver *drv; + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,geni-debug-uart")) + console = true; + + if (console) { + drv = &qcom_geni_console_driver; + line = of_alias_get_id(pdev->dev.of_node, "serial"); + } else { + drv = &qcom_geni_uart_driver; + line = of_alias_get_id(pdev->dev.of_node, "serial"); + if (line == -ENODEV) /* compat with non-standard aliases */ + line = of_alias_get_id(pdev->dev.of_node, "hsuart"); + } + + port = get_port_from_line(line, console); + if (IS_ERR(port)) { + dev_err(&pdev->dev, "Invalid line %d\n", line); + return PTR_ERR(port); + } + + uport = &port->uport; + /* Don't allow 2 drivers to access the same port */ + if (uport->private_data) + return -ENODEV; + + uport->dev = &pdev->dev; + port->se.dev = &pdev->dev; + port->se.wrapper = dev_get_drvdata(pdev->dev.parent); + port->se.clk = devm_clk_get(&pdev->dev, "se"); + if (IS_ERR(port->se.clk)) { + ret = PTR_ERR(port->se.clk); + dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret); + return ret; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + uport->mapbase = res->start; + + port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS; + port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS; + port->tx_fifo_width = DEF_FIFO_WIDTH_BITS; + + if (!console) { + port->rx_fifo = devm_kcalloc(uport->dev, + port->rx_fifo_depth, sizeof(u32), GFP_KERNEL); + if (!port->rx_fifo) + return -ENOMEM; + } + + ret = geni_icc_get(&port->se, NULL); + if (ret) + return ret; + port->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW; + port->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW; + + /* Set BW for register access */ + ret = geni_icc_set_bw(&port->se); + if (ret) + return ret; + + port->name = devm_kasprintf(uport->dev, GFP_KERNEL, + "qcom_geni_serial_%s%d", + uart_console(uport) ? "console" : "uart", uport->line); + if (!port->name) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + uport->irq = irq; + uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_QCOM_GENI_CONSOLE); + + if (!console) + port->wakeup_irq = platform_get_irq_optional(pdev, 1); + + if (of_property_read_bool(pdev->dev.of_node, "rx-tx-swap")) + port->rx_tx_swap = true; + + if (of_property_read_bool(pdev->dev.of_node, "cts-rts-swap")) + port->cts_rts_swap = true; + + ret = devm_pm_opp_set_clkname(&pdev->dev, "se"); + if (ret) + return ret; + /* OPP table is optional */ + ret = devm_pm_opp_of_add_table(&pdev->dev); + if (ret && ret != -ENODEV) { + dev_err(&pdev->dev, "invalid OPP table in device tree\n"); + return ret; + } + + port->private_data.drv = drv; + uport->private_data = &port->private_data; + platform_set_drvdata(pdev, port); + port->handle_rx = console ? 
handle_rx_console : handle_rx_uart; + + irq_set_status_flags(uport->irq, IRQ_NOAUTOEN); + ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr, + IRQF_TRIGGER_HIGH, port->name, uport); + if (ret) { + dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret); + return ret; + } + + ret = uart_add_one_port(drv, uport); + if (ret) + return ret; + + if (port->wakeup_irq > 0) { + device_init_wakeup(&pdev->dev, true); + ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, + port->wakeup_irq); + if (ret) { + device_init_wakeup(&pdev->dev, false); + uart_remove_one_port(drv, uport); + return ret; + } + } + + return 0; +} + +static int qcom_geni_serial_remove(struct platform_device *pdev) +{ + struct qcom_geni_serial_port *port = platform_get_drvdata(pdev); + struct uart_driver *drv = port->private_data.drv; + + dev_pm_clear_wake_irq(&pdev->dev); + device_init_wakeup(&pdev->dev, false); + uart_remove_one_port(drv, &port->uport); + + return 0; +} + +static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev) +{ + struct qcom_geni_serial_port *port = dev_get_drvdata(dev); + struct uart_port *uport = &port->uport; + struct qcom_geni_private_data *private_data = uport->private_data; + + /* + * This is done so we can hit the lowest possible state in suspend + * even with no_console_suspend + */ + if (uart_console(uport)) { + geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ACTIVE_ONLY); + geni_icc_set_bw(&port->se); + } + return uart_suspend_port(private_data->drv, uport); +} + +static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev) +{ + int ret; + struct qcom_geni_serial_port *port = dev_get_drvdata(dev); + struct uart_port *uport = &port->uport; + struct qcom_geni_private_data *private_data = uport->private_data; + + ret = uart_resume_port(private_data->drv, uport); + if (uart_console(uport)) { + geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS); + geni_icc_set_bw(&port->se); + } + return ret; +} + +static const struct dev_pm_ops qcom_geni_serial_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend, + qcom_geni_serial_sys_resume) +}; + +static const struct of_device_id qcom_geni_serial_match_table[] = { + { .compatible = "qcom,geni-debug-uart", }, + { .compatible = "qcom,geni-uart", }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table); + +static struct platform_driver qcom_geni_serial_platform_driver = { + .remove = qcom_geni_serial_remove, + .probe = qcom_geni_serial_probe, + .driver = { + .name = "qcom_geni_serial", + .of_match_table = qcom_geni_serial_match_table, + .pm = &qcom_geni_serial_pm_ops, + }, +}; + +static int __init qcom_geni_serial_init(void) +{ + int ret; + + ret = console_register(&qcom_geni_console_driver); + if (ret) + return ret; + + ret = uart_register_driver(&qcom_geni_uart_driver); + if (ret) { + console_unregister(&qcom_geni_console_driver); + return ret; + } + + ret = platform_driver_register(&qcom_geni_serial_platform_driver); + if (ret) { + console_unregister(&qcom_geni_console_driver); + uart_unregister_driver(&qcom_geni_uart_driver); + } + return ret; +} +module_init(qcom_geni_serial_init); + +static void __exit qcom_geni_serial_exit(void) +{ + platform_driver_unregister(&qcom_geni_serial_platform_driver); + console_unregister(&qcom_geni_console_driver); + uart_unregister_driver(&qcom_geni_uart_driver); +} +module_exit(qcom_geni_serial_exit); + +MODULE_DESCRIPTION("Serial driver for GENI based QUP cores"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c new file 
mode 100644 index 000000000..0e387e214 --- /dev/null +++ b/drivers/tty/serial/rda-uart.c @@ -0,0 +1,829 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RDA8810PL serial device driver + * + * Copyright RDA Microelectronics Company Limited + * Copyright (c) 2017 Andreas Färber + * Copyright (c) 2018 Manivannan Sadhasivam + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RDA_UART_PORT_NUM 3 +#define RDA_UART_DEV_NAME "ttyRDA" + +#define RDA_UART_CTRL 0x00 +#define RDA_UART_STATUS 0x04 +#define RDA_UART_RXTX_BUFFER 0x08 +#define RDA_UART_IRQ_MASK 0x0c +#define RDA_UART_IRQ_CAUSE 0x10 +#define RDA_UART_IRQ_TRIGGERS 0x14 +#define RDA_UART_CMD_SET 0x18 +#define RDA_UART_CMD_CLR 0x1c + +/* UART_CTRL Bits */ +#define RDA_UART_ENABLE BIT(0) +#define RDA_UART_DBITS_8 BIT(1) +#define RDA_UART_TX_SBITS_2 BIT(2) +#define RDA_UART_PARITY_EN BIT(3) +#define RDA_UART_PARITY(x) (((x) & 0x3) << 4) +#define RDA_UART_PARITY_ODD RDA_UART_PARITY(0) +#define RDA_UART_PARITY_EVEN RDA_UART_PARITY(1) +#define RDA_UART_PARITY_SPACE RDA_UART_PARITY(2) +#define RDA_UART_PARITY_MARK RDA_UART_PARITY(3) +#define RDA_UART_DIV_MODE BIT(20) +#define RDA_UART_IRDA_EN BIT(21) +#define RDA_UART_DMA_EN BIT(22) +#define RDA_UART_FLOW_CNT_EN BIT(23) +#define RDA_UART_LOOP_BACK_EN BIT(24) +#define RDA_UART_RX_LOCK_ERR BIT(25) +#define RDA_UART_RX_BREAK_LEN(x) (((x) & 0xf) << 28) + +/* UART_STATUS Bits */ +#define RDA_UART_RX_FIFO(x) (((x) & 0x7f) << 0) +#define RDA_UART_RX_FIFO_MASK (0x7f << 0) +#define RDA_UART_TX_FIFO(x) (((x) & 0x1f) << 8) +#define RDA_UART_TX_FIFO_MASK (0x1f << 8) +#define RDA_UART_TX_ACTIVE BIT(14) +#define RDA_UART_RX_ACTIVE BIT(15) +#define RDA_UART_RX_OVERFLOW_ERR BIT(16) +#define RDA_UART_TX_OVERFLOW_ERR BIT(17) +#define RDA_UART_RX_PARITY_ERR BIT(18) +#define RDA_UART_RX_FRAMING_ERR BIT(19) +#define RDA_UART_RX_BREAK_INT BIT(20) +#define RDA_UART_DCTS BIT(24) +#define RDA_UART_CTS BIT(25) +#define RDA_UART_DTR BIT(28) +#define RDA_UART_CLK_ENABLED BIT(31) + +/* UART_RXTX_BUFFER Bits */ +#define RDA_UART_RX_DATA(x) (((x) & 0xff) << 0) +#define RDA_UART_TX_DATA(x) (((x) & 0xff) << 0) + +/* UART_IRQ_MASK Bits */ +#define RDA_UART_TX_MODEM_STATUS BIT(0) +#define RDA_UART_RX_DATA_AVAILABLE BIT(1) +#define RDA_UART_TX_DATA_NEEDED BIT(2) +#define RDA_UART_RX_TIMEOUT BIT(3) +#define RDA_UART_RX_LINE_ERR BIT(4) +#define RDA_UART_TX_DMA_DONE BIT(5) +#define RDA_UART_RX_DMA_DONE BIT(6) +#define RDA_UART_RX_DMA_TIMEOUT BIT(7) +#define RDA_UART_DTR_RISE BIT(8) +#define RDA_UART_DTR_FALL BIT(9) + +/* UART_IRQ_CAUSE Bits */ +#define RDA_UART_TX_MODEM_STATUS_U BIT(16) +#define RDA_UART_RX_DATA_AVAILABLE_U BIT(17) +#define RDA_UART_TX_DATA_NEEDED_U BIT(18) +#define RDA_UART_RX_TIMEOUT_U BIT(19) +#define RDA_UART_RX_LINE_ERR_U BIT(20) +#define RDA_UART_TX_DMA_DONE_U BIT(21) +#define RDA_UART_RX_DMA_DONE_U BIT(22) +#define RDA_UART_RX_DMA_TIMEOUT_U BIT(23) +#define RDA_UART_DTR_RISE_U BIT(24) +#define RDA_UART_DTR_FALL_U BIT(25) + +/* UART_TRIGGERS Bits */ +#define RDA_UART_RX_TRIGGER(x) (((x) & 0x1f) << 0) +#define RDA_UART_TX_TRIGGER(x) (((x) & 0xf) << 8) +#define RDA_UART_AFC_LEVEL(x) (((x) & 0x1f) << 16) + +/* UART_CMD_SET Bits */ +#define RDA_UART_RI BIT(0) +#define RDA_UART_DCD BIT(1) +#define RDA_UART_DSR BIT(2) +#define RDA_UART_TX_BREAK_CONTROL BIT(3) +#define RDA_UART_TX_FINISH_N_WAIT BIT(4) +#define RDA_UART_RTS BIT(5) +#define RDA_UART_RX_FIFO_RESET BIT(6) +#define RDA_UART_TX_FIFO_RESET BIT(7) + +#define RDA_UART_TX_FIFO_SIZE 16 + 
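+/*
+ * The RDA_UART_STATUS fields above expose the current RX/TX FIFO levels;
+ * RDA_UART_TX_FIFO_SIZE (16 bytes) is what the driver later reports as
+ * port->fifosize in probe.
+ */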
+static struct uart_driver rda_uart_driver; + +struct rda_uart_port { + struct uart_port port; + struct clk *clk; +}; + +#define to_rda_uart_port(port) container_of(port, struct rda_uart_port, port) + +static struct rda_uart_port *rda_uart_ports[RDA_UART_PORT_NUM]; + +static inline void rda_uart_write(struct uart_port *port, u32 val, + unsigned int off) +{ + writel(val, port->membase + off); +} + +static inline u32 rda_uart_read(struct uart_port *port, unsigned int off) +{ + return readl(port->membase + off); +} + +static unsigned int rda_uart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int ret; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + + val = rda_uart_read(port, RDA_UART_STATUS); + ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0; + + spin_unlock_irqrestore(&port->lock, flags); + + return ret; +} + +static unsigned int rda_uart_get_mctrl(struct uart_port *port) +{ + unsigned int mctrl = 0; + u32 cmd_set, status; + + cmd_set = rda_uart_read(port, RDA_UART_CMD_SET); + status = rda_uart_read(port, RDA_UART_STATUS); + if (cmd_set & RDA_UART_RTS) + mctrl |= TIOCM_RTS; + if (!(status & RDA_UART_CTS)) + mctrl |= TIOCM_CTS; + + return mctrl; +} + +static void rda_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 val; + + if (mctrl & TIOCM_RTS) { + val = rda_uart_read(port, RDA_UART_CMD_SET); + rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_SET); + } else { + /* Clear RTS to stop to receive. */ + val = rda_uart_read(port, RDA_UART_CMD_CLR); + rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_CLR); + } + + val = rda_uart_read(port, RDA_UART_CTRL); + + if (mctrl & TIOCM_LOOP) + val |= RDA_UART_LOOP_BACK_EN; + else + val &= ~RDA_UART_LOOP_BACK_EN; + + rda_uart_write(port, val, RDA_UART_CTRL); +} + +static void rda_uart_stop_tx(struct uart_port *port) +{ + u32 val; + + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val &= ~RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + + val = rda_uart_read(port, RDA_UART_CMD_SET); + val |= RDA_UART_TX_FIFO_RESET; + rda_uart_write(port, val, RDA_UART_CMD_SET); +} + +static void rda_uart_stop_rx(struct uart_port *port) +{ + u32 val; + + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val &= ~(RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT); + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + + /* Read Rx buffer before reset to avoid Rx timeout interrupt */ + val = rda_uart_read(port, RDA_UART_RXTX_BUFFER); + + val = rda_uart_read(port, RDA_UART_CMD_SET); + val |= RDA_UART_RX_FIFO_RESET; + rda_uart_write(port, val, RDA_UART_CMD_SET); +} + +static void rda_uart_start_tx(struct uart_port *port) +{ + u32 val; + + if (uart_tx_stopped(port)) { + rda_uart_stop_tx(port); + return; + } + + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val |= RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, val, RDA_UART_IRQ_MASK); +} + +static void rda_uart_change_baudrate(struct rda_uart_port *rda_port, + unsigned long baud) +{ + clk_set_rate(rda_port->clk, baud * 8); +} + +static void rda_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct rda_uart_port *rda_port = to_rda_uart_port(port); + unsigned long flags; + unsigned int ctrl, cmd_set, cmd_clr, triggers; + unsigned int baud; + u32 irq_mask; + + spin_lock_irqsave(&port->lock, flags); + + baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4); + rda_uart_change_baudrate(rda_port, baud); + + ctrl = rda_uart_read(port, RDA_UART_CTRL); + cmd_set = 
rda_uart_read(port, RDA_UART_CMD_SET); + cmd_clr = rda_uart_read(port, RDA_UART_CMD_CLR); + + switch (termios->c_cflag & CSIZE) { + case CS5: + case CS6: + dev_warn(port->dev, "bit size not supported, using 7 bits\n"); + fallthrough; + case CS7: + ctrl &= ~RDA_UART_DBITS_8; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS7; + break; + default: + ctrl |= RDA_UART_DBITS_8; + break; + } + + /* stop bits */ + if (termios->c_cflag & CSTOPB) + ctrl |= RDA_UART_TX_SBITS_2; + else + ctrl &= ~RDA_UART_TX_SBITS_2; + + /* parity check */ + if (termios->c_cflag & PARENB) { + ctrl |= RDA_UART_PARITY_EN; + + /* Mark or Space parity */ + if (termios->c_cflag & CMSPAR) { + if (termios->c_cflag & PARODD) + ctrl |= RDA_UART_PARITY_MARK; + else + ctrl |= RDA_UART_PARITY_SPACE; + } else if (termios->c_cflag & PARODD) { + ctrl |= RDA_UART_PARITY_ODD; + } else { + ctrl |= RDA_UART_PARITY_EVEN; + } + } else { + ctrl &= ~RDA_UART_PARITY_EN; + } + + /* Hardware handshake (RTS/CTS) */ + if (termios->c_cflag & CRTSCTS) { + ctrl |= RDA_UART_FLOW_CNT_EN; + cmd_set |= RDA_UART_RTS; + } else { + ctrl &= ~RDA_UART_FLOW_CNT_EN; + cmd_clr |= RDA_UART_RTS; + } + + ctrl |= RDA_UART_ENABLE; + ctrl &= ~RDA_UART_DMA_EN; + + triggers = (RDA_UART_AFC_LEVEL(20) | RDA_UART_RX_TRIGGER(16)); + irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK); + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + + rda_uart_write(port, triggers, RDA_UART_IRQ_TRIGGERS); + rda_uart_write(port, ctrl, RDA_UART_CTRL); + rda_uart_write(port, cmd_set, RDA_UART_CMD_SET); + rda_uart_write(port, cmd_clr, RDA_UART_CMD_CLR); + + rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + /* update the per-port timeout */ + uart_update_timeout(port, termios->c_cflag, baud); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void rda_uart_send_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned int ch; + u32 val; + + if (uart_tx_stopped(port)) + return; + + if (port->x_char) { + while (!(rda_uart_read(port, RDA_UART_STATUS) & + RDA_UART_TX_FIFO_MASK)) + cpu_relax(); + + rda_uart_write(port, port->x_char, RDA_UART_RXTX_BUFFER); + port->icount.tx++; + port->x_char = 0; + } + + while (rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK) { + if (uart_circ_empty(xmit)) + break; + + ch = xmit->buf[xmit->tail]; + rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (!uart_circ_empty(xmit)) { + /* Re-enable Tx FIFO interrupt */ + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val |= RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + } +} + +static void rda_uart_receive_chars(struct uart_port *port) +{ + u32 status, val; + + status = rda_uart_read(port, RDA_UART_STATUS); + while ((status & RDA_UART_RX_FIFO_MASK)) { + char flag = TTY_NORMAL; + + if (status & RDA_UART_RX_PARITY_ERR) { + port->icount.parity++; + flag = TTY_PARITY; + } + + if (status & RDA_UART_RX_FRAMING_ERR) { + port->icount.frame++; + flag = TTY_FRAME; + } + + if (status & RDA_UART_RX_OVERFLOW_ERR) { + port->icount.overrun++; + flag = TTY_OVERRUN; + } + + val = rda_uart_read(port, RDA_UART_RXTX_BUFFER); + val &= 0xff; + + port->icount.rx++; + tty_insert_flip_char(&port->state->port, val, flag); + + status = rda_uart_read(port, RDA_UART_STATUS); + } + + 
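+	/* Push the characters queued via tty_insert_flip_char() up to the tty layer. */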
tty_flip_buffer_push(&port->state->port); +} + +static irqreturn_t rda_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned long flags; + u32 val, irq_mask; + + spin_lock_irqsave(&port->lock, flags); + + /* Clear IRQ cause */ + val = rda_uart_read(port, RDA_UART_IRQ_CAUSE); + rda_uart_write(port, val, RDA_UART_IRQ_CAUSE); + + if (val & (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT)) + rda_uart_receive_chars(port); + + if (val & (RDA_UART_TX_DATA_NEEDED)) { + irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK); + irq_mask &= ~RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK); + + rda_uart_send_chars(port); + } + + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_HANDLED; +} + +static int rda_uart_startup(struct uart_port *port) +{ + unsigned long flags; + int ret; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + spin_unlock_irqrestore(&port->lock, flags); + + ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND, + "rda-uart", port); + if (ret) + return ret; + + spin_lock_irqsave(&port->lock, flags); + + val = rda_uart_read(port, RDA_UART_CTRL); + val |= RDA_UART_ENABLE; + rda_uart_write(port, val, RDA_UART_CTRL); + + /* enable rx interrupt */ + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT); + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void rda_uart_shutdown(struct uart_port *port) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + + rda_uart_stop_tx(port); + rda_uart_stop_rx(port); + + val = rda_uart_read(port, RDA_UART_CTRL); + val &= ~RDA_UART_ENABLE; + rda_uart_write(port, val, RDA_UART_CTRL); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *rda_uart_type(struct uart_port *port) +{ + return (port->type == PORT_RDA) ? "rda-uart" : NULL; +} + +static int rda_uart_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + if (!devm_request_mem_region(port->dev, port->mapbase, + resource_size(res), dev_name(port->dev))) + return -EBUSY; + + if (port->flags & UPF_IOREMAP) { + port->membase = devm_ioremap(port->dev, port->mapbase, + resource_size(res)); + if (!port->membase) + return -EBUSY; + } + + return 0; +} + +static void rda_uart_config_port(struct uart_port *port, int flags) +{ + unsigned long irq_flags; + + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_RDA; + rda_uart_request_port(port); + } + + spin_lock_irqsave(&port->lock, irq_flags); + + /* Clear mask, so no surprise interrupts. 
*/ + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + + /* Clear status register */ + rda_uart_write(port, 0, RDA_UART_STATUS); + + spin_unlock_irqrestore(&port->lock, irq_flags); +} + +static void rda_uart_release_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return; + + if (port->flags & UPF_IOREMAP) { + devm_release_mem_region(port->dev, port->mapbase, + resource_size(res)); + devm_iounmap(port->dev, port->membase); + port->membase = NULL; + } +} + +static int rda_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (port->type != PORT_RDA) + return -EINVAL; + + if (port->irq != ser->irq) + return -EINVAL; + + return 0; +} + +static const struct uart_ops rda_uart_ops = { + .tx_empty = rda_uart_tx_empty, + .get_mctrl = rda_uart_get_mctrl, + .set_mctrl = rda_uart_set_mctrl, + .start_tx = rda_uart_start_tx, + .stop_tx = rda_uart_stop_tx, + .stop_rx = rda_uart_stop_rx, + .startup = rda_uart_startup, + .shutdown = rda_uart_shutdown, + .set_termios = rda_uart_set_termios, + .type = rda_uart_type, + .request_port = rda_uart_request_port, + .release_port = rda_uart_release_port, + .config_port = rda_uart_config_port, + .verify_port = rda_uart_verify_port, +}; + +#ifdef CONFIG_SERIAL_RDA_CONSOLE + +static void rda_console_putchar(struct uart_port *port, unsigned char ch) +{ + if (!port->membase) + return; + + while (!(rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK)) + cpu_relax(); + + rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER); +} + +static void rda_uart_port_write(struct uart_port *port, const char *s, + u_int count) +{ + u32 old_irq_mask; + unsigned long flags; + int locked; + + local_irq_save(flags); + + if (port->sysrq) { + locked = 0; + } else if (oops_in_progress) { + locked = spin_trylock(&port->lock); + } else { + spin_lock(&port->lock); + locked = 1; + } + + old_irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK); + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + + uart_console_write(port, s, count, rda_console_putchar); + + /* wait until all contents have been sent out */ + while (!(rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK)) + cpu_relax(); + + rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK); + + if (locked) + spin_unlock(&port->lock); + + local_irq_restore(flags); +} + +static void rda_uart_console_write(struct console *co, const char *s, + u_int count) +{ + struct rda_uart_port *rda_port; + + rda_port = rda_uart_ports[co->index]; + if (!rda_port) + return; + + rda_uart_port_write(&rda_port->port, s, count); +} + +static int rda_uart_console_setup(struct console *co, char *options) +{ + struct rda_uart_port *rda_port; + int baud = 921600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= RDA_UART_PORT_NUM) + return -EINVAL; + + rda_port = rda_uart_ports[co->index]; + if (!rda_port || !rda_port->port.membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&rda_port->port, co, baud, parity, bits, flow); +} + +static struct console rda_uart_console = { + .name = RDA_UART_DEV_NAME, + .write = rda_uart_console_write, + .device = uart_console_device, + .setup = rda_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &rda_uart_driver, +}; + +static int __init rda_uart_console_init(void) +{ + register_console(&rda_uart_console); + + return 0; +} 
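+/*
+ * console_initcall() below makes the console known to the core early in
+ * boot; output only starts once the matching port has been probed and its
+ * registers mapped (rda_uart_console_setup() checks port.membase).
+ */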
+console_initcall(rda_uart_console_init); + +static void rda_uart_early_console_write(struct console *co, + const char *s, + u_int count) +{ + struct earlycon_device *dev = co->data; + + rda_uart_port_write(&dev->port, s, count); +} + +static int __init +rda_uart_early_console_setup(struct earlycon_device *device, const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = rda_uart_early_console_write; + + return 0; +} + +OF_EARLYCON_DECLARE(rda, "rda,8810pl-uart", + rda_uart_early_console_setup); + +#define RDA_UART_CONSOLE (&rda_uart_console) +#else +#define RDA_UART_CONSOLE NULL +#endif /* CONFIG_SERIAL_RDA_CONSOLE */ + +static struct uart_driver rda_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "rda-uart", + .dev_name = RDA_UART_DEV_NAME, + .nr = RDA_UART_PORT_NUM, + .cons = RDA_UART_CONSOLE, +}; + +static const struct of_device_id rda_uart_dt_matches[] = { + { .compatible = "rda,8810pl-uart" }, + { } +}; +MODULE_DEVICE_TABLE(of, rda_uart_dt_matches); + +static int rda_uart_probe(struct platform_device *pdev) +{ + struct resource *res_mem; + struct rda_uart_port *rda_port; + int ret, irq; + + if (pdev->dev.of_node) + pdev->id = of_alias_get_id(pdev->dev.of_node, "serial"); + + if (pdev->id < 0 || pdev->id >= RDA_UART_PORT_NUM) { + dev_err(&pdev->dev, "id %d out of range\n", pdev->id); + return -EINVAL; + } + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) { + dev_err(&pdev->dev, "could not get mem\n"); + return -ENODEV; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + if (rda_uart_ports[pdev->id]) { + dev_err(&pdev->dev, "port %d already allocated\n", pdev->id); + return -EBUSY; + } + + rda_port = devm_kzalloc(&pdev->dev, sizeof(*rda_port), GFP_KERNEL); + if (!rda_port) + return -ENOMEM; + + rda_port->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(rda_port->clk)) { + dev_err(&pdev->dev, "could not get clk\n"); + return PTR_ERR(rda_port->clk); + } + + rda_port->port.dev = &pdev->dev; + rda_port->port.regshift = 0; + rda_port->port.line = pdev->id; + rda_port->port.type = PORT_RDA; + rda_port->port.iotype = UPIO_MEM; + rda_port->port.mapbase = res_mem->start; + rda_port->port.irq = irq; + rda_port->port.uartclk = clk_get_rate(rda_port->clk); + if (rda_port->port.uartclk == 0) { + dev_err(&pdev->dev, "clock rate is zero\n"); + return -EINVAL; + } + rda_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | + UPF_LOW_LATENCY; + rda_port->port.x_char = 0; + rda_port->port.fifosize = RDA_UART_TX_FIFO_SIZE; + rda_port->port.ops = &rda_uart_ops; + + rda_uart_ports[pdev->id] = rda_port; + platform_set_drvdata(pdev, rda_port); + + ret = uart_add_one_port(&rda_uart_driver, &rda_port->port); + if (ret) + rda_uart_ports[pdev->id] = NULL; + + return ret; +} + +static int rda_uart_remove(struct platform_device *pdev) +{ + struct rda_uart_port *rda_port = platform_get_drvdata(pdev); + + uart_remove_one_port(&rda_uart_driver, &rda_port->port); + rda_uart_ports[pdev->id] = NULL; + + return 0; +} + +static struct platform_driver rda_uart_platform_driver = { + .probe = rda_uart_probe, + .remove = rda_uart_remove, + .driver = { + .name = "rda-uart", + .of_match_table = rda_uart_dt_matches, + }, +}; + +static int __init rda_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&rda_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&rda_uart_platform_driver); + if (ret) + uart_unregister_driver(&rda_uart_driver); + + return ret; +} + +static void __exit rda_uart_exit(void) +{ + 
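+	/* Tear down in the reverse order of rda_uart_init(). */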
platform_driver_unregister(&rda_uart_platform_driver); + uart_unregister_driver(&rda_uart_driver); +} + +module_init(rda_uart_init); +module_exit(rda_uart_exit); + +MODULE_AUTHOR("Manivannan Sadhasivam "); +MODULE_DESCRIPTION("RDA8810PL serial device driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c new file mode 100644 index 000000000..b81afb06f --- /dev/null +++ b/drivers/tty/serial/rp2.c @@ -0,0 +1,863 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Comtrol RocketPort EXPRESS/INFINITY cards + * + * Copyright (C) 2012 Kevin Cernekee + * + * Inspired by, and loosely based on: + * + * ar933x_uart.c + * Copyright (C) 2011 Gabor Juhos + * + * rocketport_infinity_express-linux-1.20.tar.gz + * Copyright (C) 2004-2011 Comtrol, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "rp2" + +#define RP2_FW_NAME "rp2.fw" +#define RP2_UCODE_BYTES 0x3f + +#define PORTS_PER_ASIC 16 +#define ALL_PORTS_MASK (BIT(PORTS_PER_ASIC) - 1) + +#define UART_CLOCK 44236800 +#define DEFAULT_BAUD_DIV (UART_CLOCK / (9600 * 16)) +#define FIFO_SIZE 512 + +/* BAR0 registers */ +#define RP2_FPGA_CTL0 0x110 +#define RP2_FPGA_CTL1 0x11c +#define RP2_IRQ_MASK 0x1ec +#define RP2_IRQ_MASK_EN_m BIT(0) +#define RP2_IRQ_STATUS 0x1f0 + +/* BAR1 registers */ +#define RP2_ASIC_SPACING 0x1000 +#define RP2_ASIC_OFFSET(i) ((i) << ilog2(RP2_ASIC_SPACING)) + +#define RP2_PORT_BASE 0x000 +#define RP2_PORT_SPACING 0x040 + +#define RP2_UCODE_BASE 0x400 +#define RP2_UCODE_SPACING 0x80 + +#define RP2_CLK_PRESCALER 0xc00 +#define RP2_CH_IRQ_STAT 0xc04 +#define RP2_CH_IRQ_MASK 0xc08 +#define RP2_ASIC_IRQ 0xd00 +#define RP2_ASIC_IRQ_EN_m BIT(20) +#define RP2_GLOBAL_CMD 0xd0c +#define RP2_ASIC_CFG 0xd04 + +/* port registers */ +#define RP2_DATA_DWORD 0x000 + +#define RP2_DATA_BYTE 0x008 +#define RP2_DATA_BYTE_ERR_PARITY_m BIT(8) +#define RP2_DATA_BYTE_ERR_OVERRUN_m BIT(9) +#define RP2_DATA_BYTE_ERR_FRAMING_m BIT(10) +#define RP2_DATA_BYTE_BREAK_m BIT(11) + +/* This lets uart_insert_char() drop bytes received on a !CREAD port */ +#define RP2_DUMMY_READ BIT(16) + +#define RP2_DATA_BYTE_EXCEPTION_MASK (RP2_DATA_BYTE_ERR_PARITY_m | \ + RP2_DATA_BYTE_ERR_OVERRUN_m | \ + RP2_DATA_BYTE_ERR_FRAMING_m | \ + RP2_DATA_BYTE_BREAK_m) + +#define RP2_RX_FIFO_COUNT 0x00c +#define RP2_TX_FIFO_COUNT 0x00e + +#define RP2_CHAN_STAT 0x010 +#define RP2_CHAN_STAT_RXDATA_m BIT(0) +#define RP2_CHAN_STAT_DCD_m BIT(3) +#define RP2_CHAN_STAT_DSR_m BIT(4) +#define RP2_CHAN_STAT_CTS_m BIT(5) +#define RP2_CHAN_STAT_RI_m BIT(6) +#define RP2_CHAN_STAT_OVERRUN_m BIT(13) +#define RP2_CHAN_STAT_DSR_CHANGED_m BIT(16) +#define RP2_CHAN_STAT_CTS_CHANGED_m BIT(17) +#define RP2_CHAN_STAT_CD_CHANGED_m BIT(18) +#define RP2_CHAN_STAT_RI_CHANGED_m BIT(22) +#define RP2_CHAN_STAT_TXEMPTY_m BIT(25) + +#define RP2_CHAN_STAT_MS_CHANGED_MASK (RP2_CHAN_STAT_DSR_CHANGED_m | \ + RP2_CHAN_STAT_CTS_CHANGED_m | \ + RP2_CHAN_STAT_CD_CHANGED_m | \ + RP2_CHAN_STAT_RI_CHANGED_m) + +#define RP2_TXRX_CTL 0x014 +#define RP2_TXRX_CTL_MSRIRQ_m BIT(0) +#define RP2_TXRX_CTL_RXIRQ_m BIT(2) +#define RP2_TXRX_CTL_RX_TRIG_s 3 +#define RP2_TXRX_CTL_RX_TRIG_m (0x3 << RP2_TXRX_CTL_RX_TRIG_s) +#define RP2_TXRX_CTL_RX_TRIG_1 (0x1 << RP2_TXRX_CTL_RX_TRIG_s) +#define RP2_TXRX_CTL_RX_TRIG_256 (0x2 << RP2_TXRX_CTL_RX_TRIG_s) +#define RP2_TXRX_CTL_RX_TRIG_448 (0x3 << 
RP2_TXRX_CTL_RX_TRIG_s) +#define RP2_TXRX_CTL_RX_EN_m BIT(5) +#define RP2_TXRX_CTL_RTSFLOW_m BIT(6) +#define RP2_TXRX_CTL_DTRFLOW_m BIT(7) +#define RP2_TXRX_CTL_TX_TRIG_s 16 +#define RP2_TXRX_CTL_TX_TRIG_m (0x3 << RP2_TXRX_CTL_RX_TRIG_s) +#define RP2_TXRX_CTL_DSRFLOW_m BIT(18) +#define RP2_TXRX_CTL_TXIRQ_m BIT(19) +#define RP2_TXRX_CTL_CTSFLOW_m BIT(23) +#define RP2_TXRX_CTL_TX_EN_m BIT(24) +#define RP2_TXRX_CTL_RTS_m BIT(25) +#define RP2_TXRX_CTL_DTR_m BIT(26) +#define RP2_TXRX_CTL_LOOP_m BIT(27) +#define RP2_TXRX_CTL_BREAK_m BIT(28) +#define RP2_TXRX_CTL_CMSPAR_m BIT(29) +#define RP2_TXRX_CTL_nPARODD_m BIT(30) +#define RP2_TXRX_CTL_PARENB_m BIT(31) + +#define RP2_UART_CTL 0x018 +#define RP2_UART_CTL_MODE_s 0 +#define RP2_UART_CTL_MODE_m (0x7 << RP2_UART_CTL_MODE_s) +#define RP2_UART_CTL_MODE_rs232 (0x1 << RP2_UART_CTL_MODE_s) +#define RP2_UART_CTL_FLUSH_RX_m BIT(3) +#define RP2_UART_CTL_FLUSH_TX_m BIT(4) +#define RP2_UART_CTL_RESET_CH_m BIT(5) +#define RP2_UART_CTL_XMIT_EN_m BIT(6) +#define RP2_UART_CTL_DATABITS_s 8 +#define RP2_UART_CTL_DATABITS_m (0x3 << RP2_UART_CTL_DATABITS_s) +#define RP2_UART_CTL_DATABITS_8 (0x3 << RP2_UART_CTL_DATABITS_s) +#define RP2_UART_CTL_DATABITS_7 (0x2 << RP2_UART_CTL_DATABITS_s) +#define RP2_UART_CTL_DATABITS_6 (0x1 << RP2_UART_CTL_DATABITS_s) +#define RP2_UART_CTL_DATABITS_5 (0x0 << RP2_UART_CTL_DATABITS_s) +#define RP2_UART_CTL_STOPBITS_m BIT(10) + +#define RP2_BAUD 0x01c + +/* ucode registers */ +#define RP2_TX_SWFLOW 0x02 +#define RP2_TX_SWFLOW_ena 0x81 +#define RP2_TX_SWFLOW_dis 0x9d + +#define RP2_RX_SWFLOW 0x0c +#define RP2_RX_SWFLOW_ena 0x81 +#define RP2_RX_SWFLOW_dis 0x8d + +#define RP2_RX_FIFO 0x37 +#define RP2_RX_FIFO_ena 0x08 +#define RP2_RX_FIFO_dis 0x81 + +static struct uart_driver rp2_uart_driver = { + .owner = THIS_MODULE, + .driver_name = DRV_NAME, + .dev_name = "ttyRP", + .nr = CONFIG_SERIAL_RP2_NR_UARTS, +}; + +struct rp2_card; + +struct rp2_uart_port { + struct uart_port port; + int idx; + int ignore_rx; + struct rp2_card *card; + void __iomem *asic_base; + void __iomem *base; + void __iomem *ucode; +}; + +struct rp2_card { + struct pci_dev *pdev; + struct rp2_uart_port *ports; + int n_ports; + int initialized_ports; + int minor_start; + int smpte; + void __iomem *bar0; + void __iomem *bar1; + spinlock_t card_lock; +}; + +#define RP_ID(prod) PCI_VDEVICE(RP, (prod)) +#define RP_CAP(ports, smpte) (((ports) << 8) | ((smpte) << 0)) + +static inline void rp2_decode_cap(const struct pci_device_id *id, + int *ports, int *smpte) +{ + *ports = id->driver_data >> 8; + *smpte = id->driver_data & 0xff; +} + +static DEFINE_SPINLOCK(rp2_minor_lock); +static int rp2_minor_next; + +static int rp2_alloc_ports(int n_ports) +{ + int ret = -ENOSPC; + + spin_lock(&rp2_minor_lock); + if (rp2_minor_next + n_ports <= CONFIG_SERIAL_RP2_NR_UARTS) { + /* sorry, no support for hot unplugging individual cards */ + ret = rp2_minor_next; + rp2_minor_next += n_ports; + } + spin_unlock(&rp2_minor_lock); + + return ret; +} + +static inline struct rp2_uart_port *port_to_up(struct uart_port *port) +{ + return container_of(port, struct rp2_uart_port, port); +} + +static void rp2_rmw(struct rp2_uart_port *up, int reg, + u32 clr_bits, u32 set_bits) +{ + u32 tmp = readl(up->base + reg); + tmp &= ~clr_bits; + tmp |= set_bits; + writel(tmp, up->base + reg); +} + +static void rp2_rmw_clr(struct rp2_uart_port *up, int reg, u32 val) +{ + rp2_rmw(up, reg, val, 0); +} + +static void rp2_rmw_set(struct rp2_uart_port *up, int reg, u32 val) +{ + rp2_rmw(up, reg, 0, val); +} + +static 
void rp2_mask_ch_irq(struct rp2_uart_port *up, int ch_num, + int is_enabled) +{ + unsigned long flags, irq_mask; + + spin_lock_irqsave(&up->card->card_lock, flags); + + irq_mask = readl(up->asic_base + RP2_CH_IRQ_MASK); + if (is_enabled) + irq_mask &= ~BIT(ch_num); + else + irq_mask |= BIT(ch_num); + writel(irq_mask, up->asic_base + RP2_CH_IRQ_MASK); + + spin_unlock_irqrestore(&up->card->card_lock, flags); +} + +static unsigned int rp2_uart_tx_empty(struct uart_port *port) +{ + struct rp2_uart_port *up = port_to_up(port); + unsigned long tx_fifo_bytes, flags; + + /* + * This should probably check the transmitter, not the FIFO. + * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is + * enabled. + */ + spin_lock_irqsave(&up->port.lock, flags); + tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT); + spin_unlock_irqrestore(&up->port.lock, flags); + + return tx_fifo_bytes ? 0 : TIOCSER_TEMT; +} + +static unsigned int rp2_uart_get_mctrl(struct uart_port *port) +{ + struct rp2_uart_port *up = port_to_up(port); + u32 status; + + status = readl(up->base + RP2_CHAN_STAT); + return ((status & RP2_CHAN_STAT_DCD_m) ? TIOCM_CAR : 0) | + ((status & RP2_CHAN_STAT_DSR_m) ? TIOCM_DSR : 0) | + ((status & RP2_CHAN_STAT_CTS_m) ? TIOCM_CTS : 0) | + ((status & RP2_CHAN_STAT_RI_m) ? TIOCM_RI : 0); +} + +static void rp2_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + rp2_rmw(port_to_up(port), RP2_TXRX_CTL, + RP2_TXRX_CTL_DTR_m | RP2_TXRX_CTL_RTS_m | RP2_TXRX_CTL_LOOP_m, + ((mctrl & TIOCM_DTR) ? RP2_TXRX_CTL_DTR_m : 0) | + ((mctrl & TIOCM_RTS) ? RP2_TXRX_CTL_RTS_m : 0) | + ((mctrl & TIOCM_LOOP) ? RP2_TXRX_CTL_LOOP_m : 0)); +} + +static void rp2_uart_start_tx(struct uart_port *port) +{ + rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m); +} + +static void rp2_uart_stop_tx(struct uart_port *port) +{ + rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m); +} + +static void rp2_uart_stop_rx(struct uart_port *port) +{ + rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_RXIRQ_m); +} + +static void rp2_uart_break_ctl(struct uart_port *port, int break_state) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m, + break_state ? RP2_TXRX_CTL_BREAK_m : 0); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void rp2_uart_enable_ms(struct uart_port *port) +{ + rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m); +} + +static void __rp2_uart_set_termios(struct rp2_uart_port *up, + unsigned long cfl, + unsigned long ifl, + unsigned int baud_div) +{ + /* baud rate divisor (calculated elsewhere). 0 = divide-by-1 */ + writew(baud_div - 1, up->base + RP2_BAUD); + + /* data bits and stop bits */ + rp2_rmw(up, RP2_UART_CTL, + RP2_UART_CTL_STOPBITS_m | RP2_UART_CTL_DATABITS_m, + ((cfl & CSTOPB) ? RP2_UART_CTL_STOPBITS_m : 0) | + (((cfl & CSIZE) == CS8) ? RP2_UART_CTL_DATABITS_8 : 0) | + (((cfl & CSIZE) == CS7) ? RP2_UART_CTL_DATABITS_7 : 0) | + (((cfl & CSIZE) == CS6) ? RP2_UART_CTL_DATABITS_6 : 0) | + (((cfl & CSIZE) == CS5) ? RP2_UART_CTL_DATABITS_5 : 0)); + + /* parity and hardware flow control */ + rp2_rmw(up, RP2_TXRX_CTL, + RP2_TXRX_CTL_PARENB_m | RP2_TXRX_CTL_nPARODD_m | + RP2_TXRX_CTL_CMSPAR_m | RP2_TXRX_CTL_DTRFLOW_m | + RP2_TXRX_CTL_DSRFLOW_m | RP2_TXRX_CTL_RTSFLOW_m | + RP2_TXRX_CTL_CTSFLOW_m, + ((cfl & PARENB) ? RP2_TXRX_CTL_PARENB_m : 0) | + ((cfl & PARODD) ? 0 : RP2_TXRX_CTL_nPARODD_m) | + ((cfl & CMSPAR) ? RP2_TXRX_CTL_CMSPAR_m : 0) | + ((cfl & CRTSCTS) ? 
(RP2_TXRX_CTL_RTSFLOW_m | + RP2_TXRX_CTL_CTSFLOW_m) : 0)); + + /* XON/XOFF software flow control */ + writeb((ifl & IXON) ? RP2_TX_SWFLOW_ena : RP2_TX_SWFLOW_dis, + up->ucode + RP2_TX_SWFLOW); + writeb((ifl & IXOFF) ? RP2_RX_SWFLOW_ena : RP2_RX_SWFLOW_dis, + up->ucode + RP2_RX_SWFLOW); +} + +static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new, + const struct ktermios *old) +{ + struct rp2_uart_port *up = port_to_up(port); + unsigned long flags; + unsigned int baud, baud_div; + + baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); + baud_div = uart_get_divisor(port, baud); + + if (tty_termios_baud_rate(new)) + tty_termios_encode_baud_rate(new, baud, baud); + + spin_lock_irqsave(&port->lock, flags); + + /* ignore all characters if CREAD is not set */ + port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ; + + __rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div); + uart_update_timeout(port, new->c_cflag, baud); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void rp2_rx_chars(struct rp2_uart_port *up) +{ + u16 bytes = readw(up->base + RP2_RX_FIFO_COUNT); + struct tty_port *port = &up->port.state->port; + + for (; bytes != 0; bytes--) { + u32 byte = readw(up->base + RP2_DATA_BYTE) | RP2_DUMMY_READ; + char ch = byte & 0xff; + + if (likely(!(byte & RP2_DATA_BYTE_EXCEPTION_MASK))) { + if (!uart_handle_sysrq_char(&up->port, ch)) + uart_insert_char(&up->port, byte, 0, ch, + TTY_NORMAL); + } else { + char flag = TTY_NORMAL; + + if (byte & RP2_DATA_BYTE_BREAK_m) + flag = TTY_BREAK; + else if (byte & RP2_DATA_BYTE_ERR_FRAMING_m) + flag = TTY_FRAME; + else if (byte & RP2_DATA_BYTE_ERR_PARITY_m) + flag = TTY_PARITY; + uart_insert_char(&up->port, byte, + RP2_DATA_BYTE_ERR_OVERRUN_m, ch, flag); + } + up->port.icount.rx++; + } + + tty_flip_buffer_push(port); +} + +static void rp2_tx_chars(struct rp2_uart_port *up) +{ + u16 max_tx = FIFO_SIZE - readw(up->base + RP2_TX_FIFO_COUNT); + struct circ_buf *xmit = &up->port.state->xmit; + + if (uart_tx_stopped(&up->port)) { + rp2_uart_stop_tx(&up->port); + return; + } + + for (; max_tx != 0; max_tx--) { + if (up->port.x_char) { + writeb(up->port.x_char, up->base + RP2_DATA_BYTE); + up->port.x_char = 0; + up->port.icount.tx++; + continue; + } + if (uart_circ_empty(xmit)) { + rp2_uart_stop_tx(&up->port); + break; + } + writeb(xmit->buf[xmit->tail], up->base + RP2_DATA_BYTE); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); +} + +static void rp2_ch_interrupt(struct rp2_uart_port *up) +{ + u32 status; + + spin_lock(&up->port.lock); + + /* + * The IRQ status bits are clear-on-write. Other status bits in + * this register aren't, so it's harmless to write to them. 
+ */ + status = readl(up->base + RP2_CHAN_STAT); + writel(status, up->base + RP2_CHAN_STAT); + + if (status & RP2_CHAN_STAT_RXDATA_m) + rp2_rx_chars(up); + if (status & RP2_CHAN_STAT_TXEMPTY_m) + rp2_tx_chars(up); + if (status & RP2_CHAN_STAT_MS_CHANGED_MASK) + wake_up_interruptible(&up->port.state->port.delta_msr_wait); + + spin_unlock(&up->port.lock); +} + +static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id) +{ + void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id); + int ch, handled = 0; + unsigned long status = readl(base + RP2_CH_IRQ_STAT) & + ~readl(base + RP2_CH_IRQ_MASK); + + for_each_set_bit(ch, &status, PORTS_PER_ASIC) { + rp2_ch_interrupt(&card->ports[ch]); + handled++; + } + return handled; +} + +static irqreturn_t rp2_uart_interrupt(int irq, void *dev_id) +{ + struct rp2_card *card = dev_id; + int handled; + + handled = rp2_asic_interrupt(card, 0); + if (card->n_ports >= PORTS_PER_ASIC) + handled += rp2_asic_interrupt(card, 1); + + return handled ? IRQ_HANDLED : IRQ_NONE; +} + +static inline void rp2_flush_fifos(struct rp2_uart_port *up) +{ + rp2_rmw_set(up, RP2_UART_CTL, + RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m); + readl(up->base + RP2_UART_CTL); + udelay(10); + rp2_rmw_clr(up, RP2_UART_CTL, + RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m); +} + +static int rp2_uart_startup(struct uart_port *port) +{ + struct rp2_uart_port *up = port_to_up(port); + + rp2_flush_fifos(up); + rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m, RP2_TXRX_CTL_RXIRQ_m); + rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_RX_TRIG_m, + RP2_TXRX_CTL_RX_TRIG_1); + rp2_rmw(up, RP2_CHAN_STAT, 0, 0); + rp2_mask_ch_irq(up, up->idx, 1); + + return 0; +} + +static void rp2_uart_shutdown(struct uart_port *port) +{ + struct rp2_uart_port *up = port_to_up(port); + unsigned long flags; + + rp2_uart_break_ctl(port, 0); + + spin_lock_irqsave(&port->lock, flags); + rp2_mask_ch_irq(up, up->idx, 0); + rp2_rmw(up, RP2_CHAN_STAT, 0, 0); + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *rp2_uart_type(struct uart_port *port) +{ + return (port->type == PORT_RP2) ? "RocketPort 2 UART" : NULL; +} + +static void rp2_uart_release_port(struct uart_port *port) +{ + /* Nothing to release ... 
*/ +} + +static int rp2_uart_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +static void rp2_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_RP2; +} + +static int rp2_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (ser->type != PORT_UNKNOWN && ser->type != PORT_RP2) + return -EINVAL; + + return 0; +} + +static const struct uart_ops rp2_uart_ops = { + .tx_empty = rp2_uart_tx_empty, + .set_mctrl = rp2_uart_set_mctrl, + .get_mctrl = rp2_uart_get_mctrl, + .stop_tx = rp2_uart_stop_tx, + .start_tx = rp2_uart_start_tx, + .stop_rx = rp2_uart_stop_rx, + .enable_ms = rp2_uart_enable_ms, + .break_ctl = rp2_uart_break_ctl, + .startup = rp2_uart_startup, + .shutdown = rp2_uart_shutdown, + .set_termios = rp2_uart_set_termios, + .type = rp2_uart_type, + .release_port = rp2_uart_release_port, + .request_port = rp2_uart_request_port, + .config_port = rp2_uart_config_port, + .verify_port = rp2_uart_verify_port, +}; + +static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id) +{ + void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id); + u32 clk_cfg; + + writew(1, base + RP2_GLOBAL_CMD); + readw(base + RP2_GLOBAL_CMD); + msleep(100); + writel(0, base + RP2_CLK_PRESCALER); + + /* TDM clock configuration */ + clk_cfg = readw(base + RP2_ASIC_CFG); + clk_cfg = (clk_cfg & ~BIT(8)) | BIT(9); + writew(clk_cfg, base + RP2_ASIC_CFG); + + /* IRQ routing */ + writel(ALL_PORTS_MASK, base + RP2_CH_IRQ_MASK); + writel(RP2_ASIC_IRQ_EN_m, base + RP2_ASIC_IRQ); +} + +static void rp2_init_card(struct rp2_card *card) +{ + writel(4, card->bar0 + RP2_FPGA_CTL0); + writel(0, card->bar0 + RP2_FPGA_CTL1); + + rp2_reset_asic(card, 0); + if (card->n_ports >= PORTS_PER_ASIC) + rp2_reset_asic(card, 1); + + writel(RP2_IRQ_MASK_EN_m, card->bar0 + RP2_IRQ_MASK); +} + +static void rp2_init_port(struct rp2_uart_port *up, const struct firmware *fw) +{ + int i; + + writel(RP2_UART_CTL_RESET_CH_m, up->base + RP2_UART_CTL); + readl(up->base + RP2_UART_CTL); + udelay(1); + + writel(0, up->base + RP2_TXRX_CTL); + writel(0, up->base + RP2_UART_CTL); + readl(up->base + RP2_UART_CTL); + udelay(1); + + rp2_flush_fifos(up); + + for (i = 0; i < min_t(int, fw->size, RP2_UCODE_BYTES); i++) + writeb(fw->data[i], up->ucode + i); + + __rp2_uart_set_termios(up, CS8 | CREAD | CLOCAL, 0, DEFAULT_BAUD_DIV); + rp2_uart_set_mctrl(&up->port, 0); + + writeb(RP2_RX_FIFO_ena, up->ucode + RP2_RX_FIFO); + rp2_rmw(up, RP2_UART_CTL, RP2_UART_CTL_MODE_m, + RP2_UART_CTL_XMIT_EN_m | RP2_UART_CTL_MODE_rs232); + rp2_rmw_set(up, RP2_TXRX_CTL, + RP2_TXRX_CTL_TX_EN_m | RP2_TXRX_CTL_RX_EN_m); +} + +static void rp2_remove_ports(struct rp2_card *card) +{ + int i; + + for (i = 0; i < card->initialized_ports; i++) + uart_remove_one_port(&rp2_uart_driver, &card->ports[i].port); + card->initialized_ports = 0; +} + +static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw) +{ + resource_size_t phys_base; + int i, rc = 0; + + phys_base = pci_resource_start(card->pdev, 1); + + for (i = 0; i < card->n_ports; i++) { + struct rp2_uart_port *rp = &card->ports[i]; + struct uart_port *p; + int j = (unsigned)i % PORTS_PER_ASIC; + + rp->asic_base = card->bar1; + rp->base = card->bar1 + RP2_PORT_BASE + j*RP2_PORT_SPACING; + rp->ucode = card->bar1 + RP2_UCODE_BASE + j*RP2_UCODE_SPACING; + rp->card = card; + rp->idx = j; + + p = &rp->port; + p->line = card->minor_start + i; + p->dev = &card->pdev->dev; + p->type = PORT_RP2; + 
p->iotype = UPIO_MEM32; + p->uartclk = UART_CLOCK; + p->regshift = 2; + p->fifosize = FIFO_SIZE; + p->ops = &rp2_uart_ops; + p->irq = card->pdev->irq; + p->membase = rp->base; + p->mapbase = phys_base + RP2_PORT_BASE + j*RP2_PORT_SPACING; + + if (i >= PORTS_PER_ASIC) { + rp->asic_base += RP2_ASIC_SPACING; + rp->base += RP2_ASIC_SPACING; + rp->ucode += RP2_ASIC_SPACING; + p->mapbase += RP2_ASIC_SPACING; + } + + rp2_init_port(rp, fw); + rc = uart_add_one_port(&rp2_uart_driver, p); + if (rc) { + dev_err(&card->pdev->dev, + "error registering port %d: %d\n", i, rc); + rp2_remove_ports(card); + break; + } + card->initialized_ports++; + } + + return rc; +} + +static int rp2_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const struct firmware *fw; + struct rp2_card *card; + struct rp2_uart_port *ports; + void __iomem * const *bars; + int rc; + + card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + pci_set_drvdata(pdev, card); + spin_lock_init(&card->card_lock); + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pcim_iomap_regions_request_all(pdev, 0x03, DRV_NAME); + if (rc) + return rc; + + bars = pcim_iomap_table(pdev); + card->bar0 = bars[0]; + card->bar1 = bars[1]; + card->pdev = pdev; + + rp2_decode_cap(id, &card->n_ports, &card->smpte); + dev_info(&pdev->dev, "found new card with %d ports\n", card->n_ports); + + card->minor_start = rp2_alloc_ports(card->n_ports); + if (card->minor_start < 0) { + dev_err(&pdev->dev, + "too many ports (try increasing CONFIG_SERIAL_RP2_NR_UARTS)\n"); + return -EINVAL; + } + + rp2_init_card(card); + + ports = devm_kcalloc(&pdev->dev, card->n_ports, sizeof(*ports), + GFP_KERNEL); + if (!ports) + return -ENOMEM; + card->ports = ports; + + rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev); + if (rc < 0) { + dev_err(&pdev->dev, "cannot find '%s' firmware image\n", + RP2_FW_NAME); + return rc; + } + + rc = rp2_load_firmware(card, fw); + + release_firmware(fw); + if (rc < 0) + return rc; + + rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, + IRQF_SHARED, DRV_NAME, card); + if (rc) + return rc; + + return 0; +} + +static void rp2_remove(struct pci_dev *pdev) +{ + struct rp2_card *card = pci_get_drvdata(pdev); + + rp2_remove_ports(card); +} + +static const struct pci_device_id rp2_pci_tbl[] = { + + /* RocketPort INFINITY cards */ + + { RP_ID(0x0040), RP_CAP(8, 0) }, /* INF Octa, RJ45, selectable */ + { RP_ID(0x0041), RP_CAP(32, 0) }, /* INF 32, ext interface */ + { RP_ID(0x0042), RP_CAP(8, 0) }, /* INF Octa, ext interface */ + { RP_ID(0x0043), RP_CAP(16, 0) }, /* INF 16, ext interface */ + { RP_ID(0x0044), RP_CAP(4, 0) }, /* INF Quad, DB, selectable */ + { RP_ID(0x0045), RP_CAP(8, 0) }, /* INF Octa, DB, selectable */ + { RP_ID(0x0046), RP_CAP(4, 0) }, /* INF Quad, ext interface */ + { RP_ID(0x0047), RP_CAP(4, 0) }, /* INF Quad, RJ45 */ + { RP_ID(0x004a), RP_CAP(4, 0) }, /* INF Plus, Quad */ + { RP_ID(0x004b), RP_CAP(8, 0) }, /* INF Plus, Octa */ + { RP_ID(0x004c), RP_CAP(8, 0) }, /* INF III, Octa */ + { RP_ID(0x004d), RP_CAP(4, 0) }, /* INF III, Quad */ + { RP_ID(0x004e), RP_CAP(2, 0) }, /* INF Plus, 2, RS232 */ + { RP_ID(0x004f), RP_CAP(2, 1) }, /* INF Plus, 2, SMPTE */ + { RP_ID(0x0050), RP_CAP(4, 0) }, /* INF Plus, Quad, RJ45 */ + { RP_ID(0x0051), RP_CAP(8, 0) }, /* INF Plus, Octa, RJ45 */ + { RP_ID(0x0052), RP_CAP(8, 1) }, /* INF Octa, SMPTE */ + + /* RocketPort EXPRESS cards */ + + { RP_ID(0x0060), RP_CAP(8, 0) }, /* EXP Octa, RJ45, selectable */ + { RP_ID(0x0061), 
RP_CAP(32, 0) }, /* EXP 32, ext interface */ + { RP_ID(0x0062), RP_CAP(8, 0) }, /* EXP Octa, ext interface */ + { RP_ID(0x0063), RP_CAP(16, 0) }, /* EXP 16, ext interface */ + { RP_ID(0x0064), RP_CAP(4, 0) }, /* EXP Quad, DB, selectable */ + { RP_ID(0x0065), RP_CAP(8, 0) }, /* EXP Octa, DB, selectable */ + { RP_ID(0x0066), RP_CAP(4, 0) }, /* EXP Quad, ext interface */ + { RP_ID(0x0067), RP_CAP(4, 0) }, /* EXP Quad, RJ45 */ + { RP_ID(0x0068), RP_CAP(8, 0) }, /* EXP Octa, RJ11 */ + { RP_ID(0x0072), RP_CAP(8, 1) }, /* EXP Octa, SMPTE */ + { } +}; +MODULE_DEVICE_TABLE(pci, rp2_pci_tbl); + +static struct pci_driver rp2_pci_driver = { + .name = DRV_NAME, + .id_table = rp2_pci_tbl, + .probe = rp2_probe, + .remove = rp2_remove, +}; + +static int __init rp2_uart_init(void) +{ + int rc; + + rc = uart_register_driver(&rp2_uart_driver); + if (rc) + return rc; + + rc = pci_register_driver(&rp2_pci_driver); + if (rc) { + uart_unregister_driver(&rp2_uart_driver); + return rc; + } + + return 0; +} + +static void __exit rp2_uart_exit(void) +{ + pci_unregister_driver(&rp2_pci_driver); + uart_unregister_driver(&rp2_uart_driver); +} + +module_init(rp2_uart_init); +module_exit(rp2_uart_exit); + +MODULE_DESCRIPTION("Comtrol RocketPort EXPRESS/INFINITY driver"); +MODULE_AUTHOR("Kevin Cernekee "); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(RP2_FW_NAME); diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c new file mode 100644 index 000000000..dd9e3253c --- /dev/null +++ b/drivers/tty/serial/sa1100.c @@ -0,0 +1,948 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for SA11x0 serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright (C) 2000 Deep Blue Solutions Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "serial_mctrl_gpio.h" + +/* We've been assigned a range on the "Low-density serial ports" major */ +#define SERIAL_SA1100_MAJOR 204 +#define MINOR_START 5 + +#define NR_PORTS 3 + +#define SA1100_ISR_PASS_LIMIT 256 + +/* + * Convert from ignore_status_mask or read_status_mask to UTSR[01] + */ +#define SM_TO_UTSR0(x) ((x) & 0xff) +#define SM_TO_UTSR1(x) ((x) >> 8) +#define UTSR0_TO_SM(x) ((x)) +#define UTSR1_TO_SM(x) ((x) << 8) + +#define UART_GET_UTCR0(sport) __raw_readl((sport)->port.membase + UTCR0) +#define UART_GET_UTCR1(sport) __raw_readl((sport)->port.membase + UTCR1) +#define UART_GET_UTCR2(sport) __raw_readl((sport)->port.membase + UTCR2) +#define UART_GET_UTCR3(sport) __raw_readl((sport)->port.membase + UTCR3) +#define UART_GET_UTSR0(sport) __raw_readl((sport)->port.membase + UTSR0) +#define UART_GET_UTSR1(sport) __raw_readl((sport)->port.membase + UTSR1) +#define UART_GET_CHAR(sport) __raw_readl((sport)->port.membase + UTDR) + +#define UART_PUT_UTCR0(sport,v) __raw_writel((v),(sport)->port.membase + UTCR0) +#define UART_PUT_UTCR1(sport,v) __raw_writel((v),(sport)->port.membase + UTCR1) +#define UART_PUT_UTCR2(sport,v) __raw_writel((v),(sport)->port.membase + UTCR2) +#define UART_PUT_UTCR3(sport,v) __raw_writel((v),(sport)->port.membase + UTCR3) +#define UART_PUT_UTSR0(sport,v) __raw_writel((v),(sport)->port.membase + UTSR0) +#define UART_PUT_UTSR1(sport,v) __raw_writel((v),(sport)->port.membase + UTSR1) +#define UART_PUT_CHAR(sport,v) __raw_writel((v),(sport)->port.membase + UTDR) + +/* + * This is the size of our serial port register set. 
+ */ +#define UART_PORT_SIZE 0x24 + +/* + * This determines how often we check the modem status signals + * for any change. They generally aren't connected to an IRQ + * so we have to poll them. We also check immediately before + * filling the TX fifo incase CTS has been dropped. + */ +#define MCTRL_TIMEOUT (250*HZ/1000) + +struct sa1100_port { + struct uart_port port; + struct timer_list timer; + unsigned int old_status; + struct mctrl_gpios *gpios; +}; + +/* + * Handle any change of modem status signal since we were last called. + */ +static void sa1100_mctrl_check(struct sa1100_port *sport) +{ + unsigned int status, changed; + + status = sport->port.ops->get_mctrl(&sport->port); + changed = status ^ sport->old_status; + + if (changed == 0) + return; + + sport->old_status = status; + + if (changed & TIOCM_RI) + sport->port.icount.rng++; + if (changed & TIOCM_DSR) + sport->port.icount.dsr++; + if (changed & TIOCM_CAR) + uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); + if (changed & TIOCM_CTS) + uart_handle_cts_change(&sport->port, status & TIOCM_CTS); + + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); +} + +/* + * This is our per-port timeout handler, for checking the + * modem status signals. + */ +static void sa1100_timeout(struct timer_list *t) +{ + struct sa1100_port *sport = from_timer(sport, t, timer); + unsigned long flags; + + if (sport->port.state) { + spin_lock_irqsave(&sport->port.lock, flags); + sa1100_mctrl_check(sport); + spin_unlock_irqrestore(&sport->port.lock, flags); + + mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); + } +} + +/* + * interrupts disabled on entry + */ +static void sa1100_stop_tx(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + u32 utcr3; + + utcr3 = UART_GET_UTCR3(sport); + UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_TIE); + sport->port.read_status_mask &= ~UTSR0_TO_SM(UTSR0_TFS); +} + +/* + * port locked and interrupts disabled + */ +static void sa1100_start_tx(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + u32 utcr3; + + utcr3 = UART_GET_UTCR3(sport); + sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_TFS); + UART_PUT_UTCR3(sport, utcr3 | UTCR3_TIE); +} + +/* + * Interrupts enabled + */ +static void sa1100_stop_rx(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + u32 utcr3; + + utcr3 = UART_GET_UTCR3(sport); + UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_RIE); +} + +/* + * Set the modem control timer to fire immediately. 
+ */ +static void sa1100_enable_ms(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + mod_timer(&sport->timer, jiffies); + + mctrl_gpio_enable_ms(sport->gpios); +} + +static void +sa1100_rx_chars(struct sa1100_port *sport) +{ + unsigned int status, ch, flg; + + status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | + UTSR0_TO_SM(UART_GET_UTSR0(sport)); + while (status & UTSR1_TO_SM(UTSR1_RNE)) { + ch = UART_GET_CHAR(sport); + + sport->port.icount.rx++; + + flg = TTY_NORMAL; + + /* + * note that the error handling code is + * out of the main execution path + */ + if (status & UTSR1_TO_SM(UTSR1_PRE | UTSR1_FRE | UTSR1_ROR)) { + if (status & UTSR1_TO_SM(UTSR1_PRE)) + sport->port.icount.parity++; + else if (status & UTSR1_TO_SM(UTSR1_FRE)) + sport->port.icount.frame++; + if (status & UTSR1_TO_SM(UTSR1_ROR)) + sport->port.icount.overrun++; + + status &= sport->port.read_status_mask; + + if (status & UTSR1_TO_SM(UTSR1_PRE)) + flg = TTY_PARITY; + else if (status & UTSR1_TO_SM(UTSR1_FRE)) + flg = TTY_FRAME; + + sport->port.sysrq = 0; + } + + if (uart_handle_sysrq_char(&sport->port, ch)) + goto ignore_char; + + uart_insert_char(&sport->port, status, UTSR1_TO_SM(UTSR1_ROR), ch, flg); + + ignore_char: + status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | + UTSR0_TO_SM(UART_GET_UTSR0(sport)); + } + + tty_flip_buffer_push(&sport->port.state->port); +} + +static void sa1100_tx_chars(struct sa1100_port *sport) +{ + struct circ_buf *xmit = &sport->port.state->xmit; + + if (sport->port.x_char) { + UART_PUT_CHAR(sport, sport->port.x_char); + sport->port.icount.tx++; + sport->port.x_char = 0; + return; + } + + /* + * Check the modem control lines before + * transmitting anything. + */ + sa1100_mctrl_check(sport); + + if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { + sa1100_stop_tx(&sport->port); + return; + } + + /* + * Tried using FIFO (not checking TNF) for fifo fill: + * still had the '4 bytes repeated' problem. + */ + while (UART_GET_UTSR1(sport) & UTSR1_TNF) { + UART_PUT_CHAR(sport, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + sport->port.icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&sport->port); + + if (uart_circ_empty(xmit)) + sa1100_stop_tx(&sport->port); +} + +static irqreturn_t sa1100_int(int irq, void *dev_id) +{ + struct sa1100_port *sport = dev_id; + unsigned int status, pass_counter = 0; + + spin_lock(&sport->port.lock); + status = UART_GET_UTSR0(sport); + status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS; + do { + if (status & (UTSR0_RFS | UTSR0_RID)) { + /* Clear the receiver idle bit, if set */ + if (status & UTSR0_RID) + UART_PUT_UTSR0(sport, UTSR0_RID); + sa1100_rx_chars(sport); + } + + /* Clear the relevant break bits */ + if (status & (UTSR0_RBB | UTSR0_REB)) + UART_PUT_UTSR0(sport, status & (UTSR0_RBB | UTSR0_REB)); + + if (status & UTSR0_RBB) + sport->port.icount.brk++; + + if (status & UTSR0_REB) + uart_handle_break(&sport->port); + + if (status & UTSR0_TFS) + sa1100_tx_chars(sport); + if (pass_counter++ > SA1100_ISR_PASS_LIMIT) + break; + status = UART_GET_UTSR0(sport); + status &= SM_TO_UTSR0(sport->port.read_status_mask) | + ~UTSR0_TFS; + } while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID)); + spin_unlock(&sport->port.lock); + + return IRQ_HANDLED; +} + +/* + * Return TIOCSER_TEMT when transmitter is not busy. 
+ */ +static unsigned int sa1100_tx_empty(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT; +} + +static unsigned int sa1100_get_mctrl(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + int ret = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; + + mctrl_gpio_get(sport->gpios, &ret); + + return ret; +} + +static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + mctrl_gpio_set(sport->gpios, mctrl); +} + +/* + * Interrupts always disabled. + */ +static void sa1100_break_ctl(struct uart_port *port, int break_state) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + unsigned long flags; + unsigned int utcr3; + + spin_lock_irqsave(&sport->port.lock, flags); + utcr3 = UART_GET_UTCR3(sport); + if (break_state == -1) + utcr3 |= UTCR3_BRK; + else + utcr3 &= ~UTCR3_BRK; + UART_PUT_UTCR3(sport, utcr3); + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static int sa1100_startup(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + int retval; + + /* + * Allocate the IRQ + */ + retval = request_irq(sport->port.irq, sa1100_int, 0, + "sa11x0-uart", sport); + if (retval) + return retval; + + /* + * Finally, clear and enable interrupts + */ + UART_PUT_UTSR0(sport, -1); + UART_PUT_UTCR3(sport, UTCR3_RXE | UTCR3_TXE | UTCR3_RIE); + + /* + * Enable modem status interrupts + */ + spin_lock_irq(&sport->port.lock); + sa1100_enable_ms(&sport->port); + spin_unlock_irq(&sport->port.lock); + + return 0; +} + +static void sa1100_shutdown(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + /* + * Stop our timer. + */ + del_timer_sync(&sport->timer); + + /* + * Free the interrupt + */ + free_irq(sport->port.irq, sport); + + /* + * Disable all interrupts, port and break condition. + */ + UART_PUT_UTCR3(sport, 0); +} + +static void +sa1100_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + unsigned long flags; + unsigned int utcr0, old_utcr3, baud, quot; + unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; + + /* + * We only support CS7 and CS8. + */ + while ((termios->c_cflag & CSIZE) != CS7 && + (termios->c_cflag & CSIZE) != CS8) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= old_csize; + old_csize = CS8; + } + + if ((termios->c_cflag & CSIZE) == CS8) + utcr0 = UTCR0_DSS; + else + utcr0 = 0; + + if (termios->c_cflag & CSTOPB) + utcr0 |= UTCR0_SBS; + if (termios->c_cflag & PARENB) { + utcr0 |= UTCR0_PE; + if (!(termios->c_cflag & PARODD)) + utcr0 |= UTCR0_OES; + } + + /* + * Ask the core to calculate the divisor for us. 
+ */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = uart_get_divisor(port, baud); + + del_timer_sync(&sport->timer); + + spin_lock_irqsave(&sport->port.lock, flags); + + sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS); + sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR); + if (termios->c_iflag & INPCK) + sport->port.read_status_mask |= + UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE); + if (termios->c_iflag & (BRKINT | PARMRK)) + sport->port.read_status_mask |= + UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB); + + /* + * Characters to ignore + */ + sport->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= + UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE); + if (termios->c_iflag & IGNBRK) { + sport->port.ignore_status_mask |= + UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB); + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + sport->port.ignore_status_mask |= + UTSR1_TO_SM(UTSR1_ROR); + } + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* + * disable interrupts and drain transmitter + */ + old_utcr3 = UART_GET_UTCR3(sport); + UART_PUT_UTCR3(sport, old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)); + + while (UART_GET_UTSR1(sport) & UTSR1_TBY) + barrier(); + + /* then, disable everything */ + UART_PUT_UTCR3(sport, 0); + + /* set the parity, stop bits and data size */ + UART_PUT_UTCR0(sport, utcr0); + + /* set the baud rate */ + quot -= 1; + UART_PUT_UTCR1(sport, ((quot & 0xf00) >> 8)); + UART_PUT_UTCR2(sport, (quot & 0xff)); + + UART_PUT_UTSR0(sport, -1); + + UART_PUT_UTCR3(sport, old_utcr3); + + if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) + sa1100_enable_ms(&sport->port); + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + +static const char *sa1100_type(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + return sport->port.type == PORT_SA1100 ? "SA1100" : NULL; +} + +/* + * Release the memory region(s) being used by 'port'. + */ +static void sa1100_release_port(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + release_mem_region(sport->port.mapbase, UART_PORT_SIZE); +} + +/* + * Request the memory region(s) being used by 'port'. + */ +static int sa1100_request_port(struct uart_port *port) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + return request_mem_region(sport->port.mapbase, UART_PORT_SIZE, + "sa11x0-uart") != NULL ? 0 : -EBUSY; +} + +/* + * Configure/autoconfigure the port. + */ +static void sa1100_config_port(struct uart_port *port, int flags) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + if (flags & UART_CONFIG_TYPE && + sa1100_request_port(&sport->port) == 0) + sport->port.type = PORT_SA1100; +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). 
+ * The only change we allow are to the flags and type, and + * even then only between PORT_SA1100 and PORT_UNKNOWN + */ +static int +sa1100_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100) + ret = -EINVAL; + if (sport->port.irq != ser->irq) + ret = -EINVAL; + if (ser->io_type != SERIAL_IO_MEM) + ret = -EINVAL; + if (sport->port.uartclk / 16 != ser->baud_base) + ret = -EINVAL; + if ((void *)sport->port.mapbase != ser->iomem_base) + ret = -EINVAL; + if (sport->port.iobase != ser->port) + ret = -EINVAL; + if (ser->hub6 != 0) + ret = -EINVAL; + return ret; +} + +static struct uart_ops sa1100_pops = { + .tx_empty = sa1100_tx_empty, + .set_mctrl = sa1100_set_mctrl, + .get_mctrl = sa1100_get_mctrl, + .stop_tx = sa1100_stop_tx, + .start_tx = sa1100_start_tx, + .stop_rx = sa1100_stop_rx, + .enable_ms = sa1100_enable_ms, + .break_ctl = sa1100_break_ctl, + .startup = sa1100_startup, + .shutdown = sa1100_shutdown, + .set_termios = sa1100_set_termios, + .type = sa1100_type, + .release_port = sa1100_release_port, + .request_port = sa1100_request_port, + .config_port = sa1100_config_port, + .verify_port = sa1100_verify_port, +}; + +static struct sa1100_port sa1100_ports[NR_PORTS]; + +/* + * Setup the SA1100 serial ports. Note that we don't include the IrDA + * port here since we have our own SIR/FIR driver (see drivers/net/irda) + * + * Note also that we support "console=ttySAx" where "x" is either 0 or 1. + * Which serial port this ends up being depends on the machine you're + * running this kernel on. I'm not convinced that this is a good idea, + * but that's the way it traditionally works. + * + * Note that NanoEngine UART3 becomes UART2, and UART2 is no longer + * used here. + */ +static void __init sa1100_init_ports(void) +{ + static int first = 1; + int i; + + if (!first) + return; + first = 0; + + for (i = 0; i < NR_PORTS; i++) { + sa1100_ports[i].port.uartclk = 3686400; + sa1100_ports[i].port.ops = &sa1100_pops; + sa1100_ports[i].port.fifosize = 8; + sa1100_ports[i].port.line = i; + sa1100_ports[i].port.iotype = UPIO_MEM; + timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0); + } + + /* + * make transmit lines outputs, so that when the port + * is closed, the output is in the MARK state. + */ + PPDR |= PPC_TXD1 | PPC_TXD3; + PPSR |= PPC_TXD1 | PPC_TXD3; +} + +void sa1100_register_uart_fns(struct sa1100_port_fns *fns) +{ + if (fns->get_mctrl) + sa1100_pops.get_mctrl = fns->get_mctrl; + if (fns->set_mctrl) + sa1100_pops.set_mctrl = fns->set_mctrl; + + sa1100_pops.pm = fns->pm; + /* + * FIXME: fns->set_wake is unused - this should be called from + * the suspend() callback if device_may_wakeup(dev)) is set. 
+ */ +} + +void __init sa1100_register_uart(int idx, int port) +{ + if (idx >= NR_PORTS) { + printk(KERN_ERR "%s: bad index number %d\n", __func__, idx); + return; + } + + switch (port) { + case 1: + sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0; + sa1100_ports[idx].port.mapbase = _Ser1UTCR0; + sa1100_ports[idx].port.irq = IRQ_Ser1UART; + sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF; + break; + + case 2: + sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0; + sa1100_ports[idx].port.mapbase = _Ser2UTCR0; + sa1100_ports[idx].port.irq = IRQ_Ser2ICP; + sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF; + break; + + case 3: + sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0; + sa1100_ports[idx].port.mapbase = _Ser3UTCR0; + sa1100_ports[idx].port.irq = IRQ_Ser3UART; + sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF; + break; + + default: + printk(KERN_ERR "%s: bad port number %d\n", __func__, port); + } +} + + +#ifdef CONFIG_SERIAL_SA1100_CONSOLE +static void sa1100_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct sa1100_port *sport = + container_of(port, struct sa1100_port, port); + + while (!(UART_GET_UTSR1(sport) & UTSR1_TNF)) + barrier(); + UART_PUT_CHAR(sport, ch); +} + +/* + * Interrupts are disabled on entering + */ +static void +sa1100_console_write(struct console *co, const char *s, unsigned int count) +{ + struct sa1100_port *sport = &sa1100_ports[co->index]; + unsigned int old_utcr3, status; + + /* + * First, save UTCR3 and then disable interrupts + */ + old_utcr3 = UART_GET_UTCR3(sport); + UART_PUT_UTCR3(sport, (old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)) | + UTCR3_TXE); + + uart_console_write(&sport->port, s, count, sa1100_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore UTCR3 + */ + do { + status = UART_GET_UTSR1(sport); + } while (status & UTSR1_TBY); + UART_PUT_UTCR3(sport, old_utcr3); +} + +/* + * If the port was already initialised (eg, by a boot loader), + * try to determine the current setup. + */ +static void __init +sa1100_console_get_options(struct sa1100_port *sport, int *baud, + int *parity, int *bits) +{ + unsigned int utcr3; + + utcr3 = UART_GET_UTCR3(sport) & (UTCR3_RXE | UTCR3_TXE); + if (utcr3 == (UTCR3_RXE | UTCR3_TXE)) { + /* ok, the port was enabled */ + unsigned int utcr0, quot; + + utcr0 = UART_GET_UTCR0(sport); + + *parity = 'n'; + if (utcr0 & UTCR0_PE) { + if (utcr0 & UTCR0_OES) + *parity = 'e'; + else + *parity = 'o'; + } + + if (utcr0 & UTCR0_DSS) + *bits = 8; + else + *bits = 7; + + quot = UART_GET_UTCR2(sport) | UART_GET_UTCR1(sport) << 8; + quot &= 0xfff; + *baud = sport->port.uartclk / (16 * (quot + 1)); + } +} + +static int __init +sa1100_console_setup(struct console *co, char *options) +{ + struct sa1100_port *sport; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
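+ * For example, booting with "console=ttySA0,115200n8" arrives here with
+ * co->index == 0 and options == "115200n8" for uart_parse_options();
+ * without an options string, the current hardware settings are read back
+ * via sa1100_console_get_options() instead.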
+ */ + if (co->index == -1 || co->index >= NR_PORTS) + co->index = 0; + sport = &sa1100_ports[co->index]; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + sa1100_console_get_options(sport, &baud, &parity, &bits); + + return uart_set_options(&sport->port, co, baud, parity, bits, flow); +} + +static struct uart_driver sa1100_reg; +static struct console sa1100_console = { + .name = "ttySA", + .write = sa1100_console_write, + .device = uart_console_device, + .setup = sa1100_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sa1100_reg, +}; + +static int __init sa1100_rs_console_init(void) +{ + sa1100_init_ports(); + register_console(&sa1100_console); + return 0; +} +console_initcall(sa1100_rs_console_init); + +#define SA1100_CONSOLE &sa1100_console +#else +#define SA1100_CONSOLE NULL +#endif + +static struct uart_driver sa1100_reg = { + .owner = THIS_MODULE, + .driver_name = "ttySA", + .dev_name = "ttySA", + .major = SERIAL_SA1100_MAJOR, + .minor = MINOR_START, + .nr = NR_PORTS, + .cons = SA1100_CONSOLE, +}; + +static int sa1100_serial_suspend(struct platform_device *dev, pm_message_t state) +{ + struct sa1100_port *sport = platform_get_drvdata(dev); + + if (sport) + uart_suspend_port(&sa1100_reg, &sport->port); + + return 0; +} + +static int sa1100_serial_resume(struct platform_device *dev) +{ + struct sa1100_port *sport = platform_get_drvdata(dev); + + if (sport) + uart_resume_port(&sa1100_reg, &sport->port); + + return 0; +} + +static int sa1100_serial_add_one_port(struct sa1100_port *sport, struct platform_device *dev) +{ + sport->port.dev = &dev->dev; + sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SA1100_CONSOLE); + + // mctrl_gpio_init() requires that the GPIO driver supports interrupts, + // but we need to support GPIO drivers for hardware that has no such + // interrupts. Use mctrl_gpio_init_noauto() instead. 
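+ // With the _noauto variant the GPIO descriptors are only looked up -
+ // no interrupts are claimed for them - and the lines are simply read in
+ // sa1100_get_mctrl() and driven from sa1100_set_mctrl() above.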
+ sport->gpios = mctrl_gpio_init_noauto(sport->port.dev, 0); + if (IS_ERR(sport->gpios)) { + int err = PTR_ERR(sport->gpios); + + dev_err(sport->port.dev, "failed to get mctrl gpios: %d\n", + err); + + if (err == -EPROBE_DEFER) + return err; + + sport->gpios = NULL; + } + + platform_set_drvdata(dev, sport); + + return uart_add_one_port(&sa1100_reg, &sport->port); +} + +static int sa1100_serial_probe(struct platform_device *dev) +{ + struct resource *res; + int i; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + for (i = 0; i < NR_PORTS; i++) + if (sa1100_ports[i].port.mapbase == res->start) + break; + if (i == NR_PORTS) + return -ENODEV; + + sa1100_serial_add_one_port(&sa1100_ports[i], dev); + + return 0; +} + +static int sa1100_serial_remove(struct platform_device *pdev) +{ + struct sa1100_port *sport = platform_get_drvdata(pdev); + + if (sport) + uart_remove_one_port(&sa1100_reg, &sport->port); + + return 0; +} + +static struct platform_driver sa11x0_serial_driver = { + .probe = sa1100_serial_probe, + .remove = sa1100_serial_remove, + .suspend = sa1100_serial_suspend, + .resume = sa1100_serial_resume, + .driver = { + .name = "sa11x0-uart", + }, +}; + +static int __init sa1100_serial_init(void) +{ + int ret; + + printk(KERN_INFO "Serial: SA11x0 driver\n"); + + sa1100_init_ports(); + + ret = uart_register_driver(&sa1100_reg); + if (ret == 0) { + ret = platform_driver_register(&sa11x0_serial_driver); + if (ret) + uart_unregister_driver(&sa1100_reg); + } + return ret; +} + +static void __exit sa1100_serial_exit(void) +{ + platform_driver_unregister(&sa11x0_serial_driver); + uart_unregister_driver(&sa1100_reg); +} + +module_init(sa1100_serial_init); +module_exit(sa1100_serial_exit); + +MODULE_AUTHOR("Deep Blue Solutions Ltd"); +MODULE_DESCRIPTION("SA1100 generic serial port driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_SA1100_MAJOR); +MODULE_ALIAS("platform:sa11x0-uart"); diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c new file mode 100644 index 000000000..aa2c51b84 --- /dev/null +++ b/drivers/tty/serial/samsung_tty.c @@ -0,0 +1,3118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver core for Samsung SoC onboard UARTs. + * + * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + */ + +/* Note on 2410 error handling + * + * The s3c2410 manual has a love/hate affair with the contents of the + * UERSTAT register in the UART blocks, and keeps marking some of the + * error bits as reserved. Having checked with the s3c2410x01, + * it copes with BREAKs properly, so I am happy to ignore the RESERVED + * feature from the latter versions of the manual. 
+ * + * If it becomes aparrent that latter versions of the 2410 remove these + * bits, then action will have to be taken to differentiate the versions + * and change the policy on BREAK + * + * BJD, 04-Nov-2004 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* UART name and device definitions */ + +#define S3C24XX_SERIAL_NAME "ttySAC" +#define S3C24XX_SERIAL_MAJOR 204 +#define S3C24XX_SERIAL_MINOR 64 + +#ifdef CONFIG_ARM64 +#define UART_NR 12 +#else +#define UART_NR CONFIG_SERIAL_SAMSUNG_UARTS +#endif + +#define S3C24XX_TX_PIO 1 +#define S3C24XX_TX_DMA 2 +#define S3C24XX_RX_PIO 1 +#define S3C24XX_RX_DMA 2 + +/* flag to ignore all characters coming in */ +#define RXSTAT_DUMMY_READ (0x10000000) + +enum s3c24xx_port_type { + TYPE_S3C24XX, + TYPE_S3C6400, + TYPE_APPLE_S5L, +}; + +struct s3c24xx_uart_info { + const char *name; + enum s3c24xx_port_type type; + unsigned int port_type; + unsigned int fifosize; + unsigned long rx_fifomask; + unsigned long rx_fifoshift; + unsigned long rx_fifofull; + unsigned long tx_fifomask; + unsigned long tx_fifoshift; + unsigned long tx_fifofull; + unsigned int def_clk_sel; + unsigned long num_clks; + unsigned long clksel_mask; + unsigned long clksel_shift; + unsigned long ucon_mask; + + /* uart port features */ + + unsigned int has_divslot:1; +}; + +struct s3c24xx_serial_drv_data { + const struct s3c24xx_uart_info info; + const struct s3c2410_uartcfg def_cfg; + const unsigned int fifosize[UART_NR]; +}; + +struct s3c24xx_uart_dma { + unsigned int rx_chan_id; + unsigned int tx_chan_id; + + struct dma_slave_config rx_conf; + struct dma_slave_config tx_conf; + + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; + + dma_addr_t rx_addr; + dma_addr_t tx_addr; + + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; + + char *rx_buf; + + dma_addr_t tx_transfer_addr; + + size_t rx_size; + size_t tx_size; + + struct dma_async_tx_descriptor *tx_desc; + struct dma_async_tx_descriptor *rx_desc; + + int tx_bytes_requested; + int rx_bytes_requested; +}; + +struct s3c24xx_uart_port { + unsigned char rx_claimed; + unsigned char tx_claimed; + unsigned char rx_enabled; + unsigned char tx_enabled; + unsigned int pm_level; + unsigned long baudclk_rate; + unsigned int min_dma_size; + + unsigned int rx_irq; + unsigned int tx_irq; + + unsigned int tx_in_progress; + unsigned int tx_mode; + unsigned int rx_mode; + + const struct s3c24xx_uart_info *info; + struct clk *clk; + struct clk *baudclk; + struct uart_port port; + const struct s3c24xx_serial_drv_data *drv_data; + + /* reference to platform data */ + const struct s3c2410_uartcfg *cfg; + + struct s3c24xx_uart_dma *dma; + +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ + struct notifier_block freq_transition; +#endif +}; + +static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport); + +/* conversion functions */ + +#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev) + +/* register access controls */ + +#define portaddr(port, reg) ((port)->membase + (reg)) +#define portaddrl(port, reg) \ + ((unsigned long *)(unsigned long)((port)->membase + (reg))) + +static u32 rd_reg(const struct uart_port *port, u32 reg) +{ + switch (port->iotype) { + case UPIO_MEM: + return readb_relaxed(portaddr(port, reg)); + case UPIO_MEM32: + return readl_relaxed(portaddr(port, reg)); + default: + return 0; + } + return 0; +} + +#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg))) 
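+
+/*
+ * The rd_reg() helper above and wr_reg() below respect the port's iotype
+ * (byte accesses for UPIO_MEM, 32-bit accesses for UPIO_MEM32) and are
+ * used where the access width matters, e.g. the URXH/UTXH data registers.
+ * The rd_regl()/wr_regl() macros always perform 32-bit accesses and are
+ * what most of the UCON/UFCON/ULCON handling below uses.
+ */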
+ +static void wr_reg(const struct uart_port *port, u32 reg, u32 val) +{ + switch (port->iotype) { + case UPIO_MEM: + writeb_relaxed(val, portaddr(port, reg)); + break; + case UPIO_MEM32: + writel_relaxed(val, portaddr(port, reg)); + break; + } +} + +#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg)) + +/* Byte-order aware bit setting/clearing functions. */ + +static inline void s3c24xx_set_bit(const struct uart_port *port, int idx, + unsigned int reg) +{ + unsigned long flags; + u32 val; + + local_irq_save(flags); + val = rd_regl(port, reg); + val |= (1 << idx); + wr_regl(port, reg, val); + local_irq_restore(flags); +} + +static inline void s3c24xx_clear_bit(const struct uart_port *port, int idx, + unsigned int reg) +{ + unsigned long flags; + u32 val; + + local_irq_save(flags); + val = rd_regl(port, reg); + val &= ~(1 << idx); + wr_regl(port, reg, val); + local_irq_restore(flags); +} + +static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port) +{ + return container_of(port, struct s3c24xx_uart_port, port); +} + +/* translate a port to the device name */ + +static inline const char *s3c24xx_serial_portname(const struct uart_port *port) +{ + return to_platform_device(port->dev)->name; +} + +static int s3c24xx_serial_txempty_nofifo(const struct uart_port *port) +{ + return rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE; +} + +static void s3c24xx_serial_rx_enable(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + unsigned long flags; + unsigned int ucon, ufcon; + int count = 10000; + + spin_lock_irqsave(&port->lock, flags); + + while (--count && !s3c24xx_serial_txempty_nofifo(port)) + udelay(100); + + ufcon = rd_regl(port, S3C2410_UFCON); + ufcon |= S3C2410_UFCON_RESETRX; + wr_regl(port, S3C2410_UFCON, ufcon); + + ucon = rd_regl(port, S3C2410_UCON); + ucon |= S3C2410_UCON_RXIRQMODE; + wr_regl(port, S3C2410_UCON, ucon); + + ourport->rx_enabled = 1; + spin_unlock_irqrestore(&port->lock, flags); +} + +static void s3c24xx_serial_rx_disable(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + unsigned long flags; + unsigned int ucon; + + spin_lock_irqsave(&port->lock, flags); + + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~S3C2410_UCON_RXIRQMODE; + wr_regl(port, S3C2410_UCON, ucon); + + ourport->rx_enabled = 0; + spin_unlock_irqrestore(&port->lock, flags); +} + +static void s3c24xx_serial_stop_tx(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + struct s3c24xx_uart_dma *dma = ourport->dma; + struct circ_buf *xmit = &port->state->xmit; + struct dma_tx_state state; + int count; + + if (!ourport->tx_enabled) + return; + + switch (ourport->info->type) { + case TYPE_S3C6400: + s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM); + break; + case TYPE_APPLE_S5L: + s3c24xx_clear_bit(port, APPLE_S5L_UCON_TXTHRESH_ENA, S3C2410_UCON); + break; + default: + disable_irq_nosync(ourport->tx_irq); + break; + } + + if (dma && dma->tx_chan && ourport->tx_in_progress == S3C24XX_TX_DMA) { + dmaengine_pause(dma->tx_chan); + dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state); + dmaengine_terminate_all(dma->tx_chan); + dma_sync_single_for_cpu(dma->tx_chan->device->dev, + dma->tx_transfer_addr, dma->tx_size, + DMA_TO_DEVICE); + async_tx_ack(dma->tx_desc); + count = dma->tx_bytes_requested - state.residue; + xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + port->icount.tx += count; + } + + ourport->tx_enabled = 0; + ourport->tx_in_progress = 0; + + if 
(port->flags & UPF_CONS_FLOW) + s3c24xx_serial_rx_enable(port); + + ourport->tx_mode = 0; +} + +static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport); + +static void s3c24xx_serial_tx_dma_complete(void *args) +{ + struct s3c24xx_uart_port *ourport = args; + struct uart_port *port = &ourport->port; + struct circ_buf *xmit = &port->state->xmit; + struct s3c24xx_uart_dma *dma = ourport->dma; + struct dma_tx_state state; + unsigned long flags; + int count; + + dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state); + count = dma->tx_bytes_requested - state.residue; + async_tx_ack(dma->tx_desc); + + dma_sync_single_for_cpu(dma->tx_chan->device->dev, + dma->tx_transfer_addr, dma->tx_size, + DMA_TO_DEVICE); + + spin_lock_irqsave(&port->lock, flags); + + xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + port->icount.tx += count; + ourport->tx_in_progress = 0; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + s3c24xx_serial_start_next_tx(ourport); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void enable_tx_dma(struct s3c24xx_uart_port *ourport) +{ + const struct uart_port *port = &ourport->port; + u32 ucon; + + /* Mask Tx interrupt */ + switch (ourport->info->type) { + case TYPE_S3C6400: + s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM); + break; + case TYPE_APPLE_S5L: + WARN_ON(1); // No DMA + break; + default: + disable_irq_nosync(ourport->tx_irq); + break; + } + + /* Enable tx dma mode */ + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK); + ucon |= S3C64XX_UCON_TXBURST_1; + ucon |= S3C64XX_UCON_TXMODE_DMA; + wr_regl(port, S3C2410_UCON, ucon); + + ourport->tx_mode = S3C24XX_TX_DMA; +} + +static void enable_tx_pio(struct s3c24xx_uart_port *ourport) +{ + const struct uart_port *port = &ourport->port; + u32 ucon, ufcon; + + /* Set ufcon txtrig */ + ourport->tx_in_progress = S3C24XX_TX_PIO; + ufcon = rd_regl(port, S3C2410_UFCON); + wr_regl(port, S3C2410_UFCON, ufcon); + + /* Enable tx pio mode */ + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~(S3C64XX_UCON_TXMODE_MASK); + ucon |= S3C64XX_UCON_TXMODE_CPU; + wr_regl(port, S3C2410_UCON, ucon); + + /* Unmask Tx interrupt */ + switch (ourport->info->type) { + case TYPE_S3C6400: + s3c24xx_clear_bit(port, S3C64XX_UINTM_TXD, + S3C64XX_UINTM); + break; + case TYPE_APPLE_S5L: + ucon |= APPLE_S5L_UCON_TXTHRESH_ENA_MSK; + wr_regl(port, S3C2410_UCON, ucon); + break; + default: + enable_irq(ourport->tx_irq); + break; + } + + ourport->tx_mode = S3C24XX_TX_PIO; + + /* + * The Apple version only has edge triggered TX IRQs, so we need + * to kick off the process by sending some characters here. 
+ */ + if (ourport->info->type == TYPE_APPLE_S5L) + s3c24xx_serial_tx_chars(ourport); +} + +static void s3c24xx_serial_start_tx_pio(struct s3c24xx_uart_port *ourport) +{ + if (ourport->tx_mode != S3C24XX_TX_PIO) + enable_tx_pio(ourport); +} + +static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport, + unsigned int count) +{ + struct uart_port *port = &ourport->port; + struct circ_buf *xmit = &port->state->xmit; + struct s3c24xx_uart_dma *dma = ourport->dma; + + if (ourport->tx_mode != S3C24XX_TX_DMA) + enable_tx_dma(ourport); + + dma->tx_size = count & ~(dma_get_cache_alignment() - 1); + dma->tx_transfer_addr = dma->tx_addr + xmit->tail; + + dma_sync_single_for_device(dma->tx_chan->device->dev, + dma->tx_transfer_addr, dma->tx_size, + DMA_TO_DEVICE); + + dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan, + dma->tx_transfer_addr, dma->tx_size, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); + if (!dma->tx_desc) { + dev_err(ourport->port.dev, "Unable to get desc for Tx\n"); + return -EIO; + } + + dma->tx_desc->callback = s3c24xx_serial_tx_dma_complete; + dma->tx_desc->callback_param = ourport; + dma->tx_bytes_requested = dma->tx_size; + + ourport->tx_in_progress = S3C24XX_TX_DMA; + dma->tx_cookie = dmaengine_submit(dma->tx_desc); + dma_async_issue_pending(dma->tx_chan); + return 0; +} + +static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport) +{ + struct uart_port *port = &ourport->port; + struct circ_buf *xmit = &port->state->xmit; + unsigned long count; + + /* Get data size up to the end of buffer */ + count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + + if (!count) { + s3c24xx_serial_stop_tx(port); + return; + } + + if (!ourport->dma || !ourport->dma->tx_chan || + count < ourport->min_dma_size || + xmit->tail & (dma_get_cache_alignment() - 1)) + s3c24xx_serial_start_tx_pio(ourport); + else + s3c24xx_serial_start_tx_dma(ourport, count); +} + +static void s3c24xx_serial_start_tx(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + struct circ_buf *xmit = &port->state->xmit; + + if (!ourport->tx_enabled) { + if (port->flags & UPF_CONS_FLOW) + s3c24xx_serial_rx_disable(port); + + ourport->tx_enabled = 1; + if (!ourport->dma || !ourport->dma->tx_chan) + s3c24xx_serial_start_tx_pio(ourport); + } + + if (ourport->dma && ourport->dma->tx_chan) { + if (!uart_circ_empty(xmit) && !ourport->tx_in_progress) + s3c24xx_serial_start_next_tx(ourport); + } +} + +static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport, + struct tty_port *tty, int count) +{ + struct s3c24xx_uart_dma *dma = ourport->dma; + int copied; + + if (!count) + return; + + dma_sync_single_for_cpu(dma->rx_chan->device->dev, dma->rx_addr, + dma->rx_size, DMA_FROM_DEVICE); + + ourport->port.icount.rx += count; + if (!tty) { + dev_err(ourport->port.dev, "No tty port\n"); + return; + } + copied = tty_insert_flip_string(tty, + ((unsigned char *)(ourport->dma->rx_buf)), count); + if (copied != count) { + WARN_ON(1); + dev_err(ourport->port.dev, "RxData copy to tty layer failed\n"); + } +} + +static void s3c24xx_serial_stop_rx(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + struct s3c24xx_uart_dma *dma = ourport->dma; + struct tty_port *t = &port->state->port; + struct dma_tx_state state; + enum dma_status dma_status; + unsigned int received; + + if (ourport->rx_enabled) { + dev_dbg(port->dev, "stopping rx\n"); + switch (ourport->info->type) { + case TYPE_S3C6400: + s3c24xx_set_bit(port, S3C64XX_UINTM_RXD, + 
S3C64XX_UINTM); + break; + case TYPE_APPLE_S5L: + s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON); + s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON); + break; + default: + disable_irq_nosync(ourport->rx_irq); + break; + } + ourport->rx_enabled = 0; + } + if (dma && dma->rx_chan) { + dmaengine_pause(dma->tx_chan); + dma_status = dmaengine_tx_status(dma->rx_chan, + dma->rx_cookie, &state); + if (dma_status == DMA_IN_PROGRESS || + dma_status == DMA_PAUSED) { + received = dma->rx_bytes_requested - state.residue; + dmaengine_terminate_all(dma->rx_chan); + s3c24xx_uart_copy_rx_to_tty(ourport, t, received); + } + } +} + +static inline const struct s3c24xx_uart_info + *s3c24xx_port_to_info(struct uart_port *port) +{ + return to_ourport(port)->info; +} + +static inline const struct s3c2410_uartcfg + *s3c24xx_port_to_cfg(const struct uart_port *port) +{ + const struct s3c24xx_uart_port *ourport; + + if (port->dev == NULL) + return NULL; + + ourport = container_of(port, struct s3c24xx_uart_port, port); + return ourport->cfg; +} + +static int s3c24xx_serial_rx_fifocnt(const struct s3c24xx_uart_port *ourport, + unsigned long ufstat) +{ + const struct s3c24xx_uart_info *info = ourport->info; + + if (ufstat & info->rx_fifofull) + return ourport->port.fifosize; + + return (ufstat & info->rx_fifomask) >> info->rx_fifoshift; +} + +static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport); +static void s3c24xx_serial_rx_dma_complete(void *args) +{ + struct s3c24xx_uart_port *ourport = args; + struct uart_port *port = &ourport->port; + + struct s3c24xx_uart_dma *dma = ourport->dma; + struct tty_port *t = &port->state->port; + struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port); + + struct dma_tx_state state; + unsigned long flags; + int received; + + dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state); + received = dma->rx_bytes_requested - state.residue; + async_tx_ack(dma->rx_desc); + + spin_lock_irqsave(&port->lock, flags); + + if (received) + s3c24xx_uart_copy_rx_to_tty(ourport, t, received); + + if (tty) { + tty_flip_buffer_push(t); + tty_kref_put(tty); + } + + s3c64xx_start_rx_dma(ourport); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport) +{ + struct s3c24xx_uart_dma *dma = ourport->dma; + + dma_sync_single_for_device(dma->rx_chan->device->dev, dma->rx_addr, + dma->rx_size, DMA_FROM_DEVICE); + + dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan, + dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!dma->rx_desc) { + dev_err(ourport->port.dev, "Unable to get desc for Rx\n"); + return; + } + + dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete; + dma->rx_desc->callback_param = ourport; + dma->rx_bytes_requested = dma->rx_size; + + dma->rx_cookie = dmaengine_submit(dma->rx_desc); + dma_async_issue_pending(dma->rx_chan); +} + +/* ? - where has parity gone?? 
*/ +#define S3C2410_UERSTAT_PARITY (0x1000) + +static void enable_rx_dma(struct s3c24xx_uart_port *ourport) +{ + struct uart_port *port = &ourport->port; + unsigned int ucon; + + /* set Rx mode to DMA mode */ + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~(S3C64XX_UCON_RXBURST_MASK | + S3C64XX_UCON_TIMEOUT_MASK | + S3C64XX_UCON_EMPTYINT_EN | + S3C64XX_UCON_DMASUS_EN | + S3C64XX_UCON_TIMEOUT_EN | + S3C64XX_UCON_RXMODE_MASK); + ucon |= S3C64XX_UCON_RXBURST_1 | + 0xf << S3C64XX_UCON_TIMEOUT_SHIFT | + S3C64XX_UCON_EMPTYINT_EN | + S3C64XX_UCON_TIMEOUT_EN | + S3C64XX_UCON_RXMODE_DMA; + wr_regl(port, S3C2410_UCON, ucon); + + ourport->rx_mode = S3C24XX_RX_DMA; +} + +static void enable_rx_pio(struct s3c24xx_uart_port *ourport) +{ + struct uart_port *port = &ourport->port; + unsigned int ucon; + + /* set Rx mode to DMA mode */ + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~S3C64XX_UCON_RXMODE_MASK; + ucon |= S3C64XX_UCON_RXMODE_CPU; + + /* Apple types use these bits for IRQ masks */ + if (ourport->info->type != TYPE_APPLE_S5L) { + ucon &= ~(S3C64XX_UCON_TIMEOUT_MASK | + S3C64XX_UCON_EMPTYINT_EN | + S3C64XX_UCON_DMASUS_EN | + S3C64XX_UCON_TIMEOUT_EN); + ucon |= 0xf << S3C64XX_UCON_TIMEOUT_SHIFT | + S3C64XX_UCON_TIMEOUT_EN; + } + wr_regl(port, S3C2410_UCON, ucon); + + ourport->rx_mode = S3C24XX_RX_PIO; +} + +static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport); + +static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id) +{ + unsigned int utrstat, received; + struct s3c24xx_uart_port *ourport = dev_id; + struct uart_port *port = &ourport->port; + struct s3c24xx_uart_dma *dma = ourport->dma; + struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port); + struct tty_port *t = &port->state->port; + struct dma_tx_state state; + + utrstat = rd_regl(port, S3C2410_UTRSTAT); + rd_regl(port, S3C2410_UFSTAT); + + spin_lock(&port->lock); + + if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) { + s3c64xx_start_rx_dma(ourport); + if (ourport->rx_mode == S3C24XX_RX_PIO) + enable_rx_dma(ourport); + goto finish; + } + + if (ourport->rx_mode == S3C24XX_RX_DMA) { + dmaengine_pause(dma->rx_chan); + dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state); + dmaengine_terminate_all(dma->rx_chan); + received = dma->rx_bytes_requested - state.residue; + s3c24xx_uart_copy_rx_to_tty(ourport, t, received); + + enable_rx_pio(ourport); + } + + s3c24xx_serial_rx_drain_fifo(ourport); + + if (tty) { + tty_flip_buffer_push(t); + tty_kref_put(tty); + } + + wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT); + +finish: + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport) +{ + struct uart_port *port = &ourport->port; + unsigned int ufcon, ch, flag, ufstat, uerstat; + unsigned int fifocnt = 0; + int max_count = port->fifosize; + + while (max_count-- > 0) { + /* + * Receive all characters known to be in FIFO + * before reading FIFO level again + */ + if (fifocnt == 0) { + ufstat = rd_regl(port, S3C2410_UFSTAT); + fifocnt = s3c24xx_serial_rx_fifocnt(ourport, ufstat); + if (fifocnt == 0) + break; + } + fifocnt--; + + uerstat = rd_regl(port, S3C2410_UERSTAT); + ch = rd_reg(port, S3C2410_URXH); + + if (port->flags & UPF_CONS_FLOW) { + int txe = s3c24xx_serial_txempty_nofifo(port); + + if (ourport->rx_enabled) { + if (!txe) { + ourport->rx_enabled = 0; + continue; + } + } else { + if (txe) { + ufcon = rd_regl(port, S3C2410_UFCON); + ufcon |= S3C2410_UFCON_RESETRX; + wr_regl(port, S3C2410_UFCON, ufcon); + ourport->rx_enabled 
= 1; + return; + } + continue; + } + } + + /* insert the character into the buffer */ + + flag = TTY_NORMAL; + port->icount.rx++; + + if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) { + dev_dbg(port->dev, + "rxerr: port ch=0x%02x, rxs=0x%08x\n", + ch, uerstat); + + /* check for break */ + if (uerstat & S3C2410_UERSTAT_BREAK) { + dev_dbg(port->dev, "break!\n"); + port->icount.brk++; + if (uart_handle_break(port)) + continue; /* Ignore character */ + } + + if (uerstat & S3C2410_UERSTAT_FRAME) + port->icount.frame++; + if (uerstat & S3C2410_UERSTAT_OVERRUN) + port->icount.overrun++; + + uerstat &= port->read_status_mask; + + if (uerstat & S3C2410_UERSTAT_BREAK) + flag = TTY_BREAK; + else if (uerstat & S3C2410_UERSTAT_PARITY) + flag = TTY_PARITY; + else if (uerstat & (S3C2410_UERSTAT_FRAME | + S3C2410_UERSTAT_OVERRUN)) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(port, ch)) + continue; /* Ignore character */ + + uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN, + ch, flag); + } + + tty_flip_buffer_push(&port->state->port); +} + +static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id) +{ + struct s3c24xx_uart_port *ourport = dev_id; + struct uart_port *port = &ourport->port; + + spin_lock(&port->lock); + s3c24xx_serial_rx_drain_fifo(ourport); + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t s3c24xx_serial_rx_irq(int irq, void *dev_id) +{ + struct s3c24xx_uart_port *ourport = dev_id; + + if (ourport->dma && ourport->dma->rx_chan) + return s3c24xx_serial_rx_chars_dma(dev_id); + return s3c24xx_serial_rx_chars_pio(dev_id); +} + +static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) +{ + struct uart_port *port = &ourport->port; + struct circ_buf *xmit = &port->state->xmit; + int count, dma_count = 0; + + count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + + if (ourport->dma && ourport->dma->tx_chan && + count >= ourport->min_dma_size) { + int align = dma_get_cache_alignment() - + (xmit->tail & (dma_get_cache_alignment() - 1)); + if (count - align >= ourport->min_dma_size) { + dma_count = count - align; + count = align; + } + } + + if (port->x_char) { + wr_reg(port, S3C2410_UTXH, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + /* if there isn't anything more to transmit, or the uart is now + * stopped, disable the uart and exit + */ + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + s3c24xx_serial_stop_tx(port); + return; + } + + /* try and drain the buffer... 
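+ * For example, assuming dma_get_cache_alignment() is 64 and xmit->tail is
+ * 120, the alignment step above leaves count == 8 (with the remainder in
+ * dma_count, provided count - 8 was still >= min_dma_size): the loop below
+ * then sends those 8 bytes by PIO, FIFO space permitting, the tail lands
+ * on a 64-byte boundary, and the rest is handed to
+ * s3c24xx_serial_start_tx_dma().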
*/ + + if (count > port->fifosize) { + count = port->fifosize; + dma_count = 0; + } + + while (!uart_circ_empty(xmit) && count > 0) { + if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull) + break; + + wr_reg(port, S3C2410_UTXH, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + count--; + } + + if (!count && dma_count) { + s3c24xx_serial_start_tx_dma(ourport, dma_count); + return; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + s3c24xx_serial_stop_tx(port); +} + +static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id) +{ + struct s3c24xx_uart_port *ourport = id; + struct uart_port *port = &ourport->port; + + spin_lock(&port->lock); + + s3c24xx_serial_tx_chars(ourport); + + spin_unlock(&port->lock); + return IRQ_HANDLED; +} + +/* interrupt handler for s3c64xx and later SoC's.*/ +static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id) +{ + const struct s3c24xx_uart_port *ourport = id; + const struct uart_port *port = &ourport->port; + unsigned int pend = rd_regl(port, S3C64XX_UINTP); + irqreturn_t ret = IRQ_HANDLED; + + if (pend & S3C64XX_UINTM_RXD_MSK) { + ret = s3c24xx_serial_rx_irq(irq, id); + wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_RXD_MSK); + } + if (pend & S3C64XX_UINTM_TXD_MSK) { + ret = s3c24xx_serial_tx_irq(irq, id); + wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_TXD_MSK); + } + return ret; +} + +/* interrupt handler for Apple SoC's.*/ +static irqreturn_t apple_serial_handle_irq(int irq, void *id) +{ + const struct s3c24xx_uart_port *ourport = id; + const struct uart_port *port = &ourport->port; + unsigned int pend = rd_regl(port, S3C2410_UTRSTAT); + irqreturn_t ret = IRQ_NONE; + + if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO)) { + wr_regl(port, S3C2410_UTRSTAT, + APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO); + ret = s3c24xx_serial_rx_irq(irq, id); + } + if (pend & APPLE_S5L_UTRSTAT_TXTHRESH) { + wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_TXTHRESH); + ret = s3c24xx_serial_tx_irq(irq, id); + } + + return ret; +} + +static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port) +{ + const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); + unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT); + unsigned long ufcon = rd_regl(port, S3C2410_UFCON); + + if (ufcon & S3C2410_UFCON_FIFOMODE) { + if ((ufstat & info->tx_fifomask) != 0 || + (ufstat & info->tx_fifofull)) + return 0; + + return 1; + } + + return s3c24xx_serial_txempty_nofifo(port); +} + +/* no modem control lines */ +static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port) +{ + unsigned int umstat = rd_reg(port, S3C2410_UMSTAT); + + if (umstat & S3C2410_UMSTAT_CTS) + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; + else + return TIOCM_CAR | TIOCM_DSR; +} + +static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int umcon = rd_regl(port, S3C2410_UMCON); + unsigned int ucon = rd_regl(port, S3C2410_UCON); + + if (mctrl & TIOCM_RTS) + umcon |= S3C2410_UMCOM_RTS_LOW; + else + umcon &= ~S3C2410_UMCOM_RTS_LOW; + + wr_regl(port, S3C2410_UMCON, umcon); + + if (mctrl & TIOCM_LOOP) + ucon |= S3C2410_UCON_LOOPBACK; + else + ucon &= ~S3C2410_UCON_LOOPBACK; + + wr_regl(port, S3C2410_UCON, ucon); +} + +static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state) +{ + unsigned long flags; + unsigned int ucon; + + spin_lock_irqsave(&port->lock, flags); + + ucon = rd_regl(port, S3C2410_UCON); 
+ + if (break_state) + ucon |= S3C2410_UCON_SBREAK; + else + ucon &= ~S3C2410_UCON_SBREAK; + + wr_regl(port, S3C2410_UCON, ucon); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) +{ + struct s3c24xx_uart_dma *dma = p->dma; + struct dma_slave_caps dma_caps; + const char *reason = NULL; + int ret; + + /* Default slave configuration parameters */ + dma->rx_conf.direction = DMA_DEV_TO_MEM; + dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH; + dma->rx_conf.src_maxburst = 1; + + dma->tx_conf.direction = DMA_MEM_TO_DEV; + dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH; + dma->tx_conf.dst_maxburst = 1; + + dma->rx_chan = dma_request_chan(p->port.dev, "rx"); + + if (IS_ERR(dma->rx_chan)) { + reason = "DMA RX channel request failed"; + ret = PTR_ERR(dma->rx_chan); + goto err_warn; + } + + ret = dma_get_slave_caps(dma->rx_chan, &dma_caps); + if (ret < 0 || + dma_caps.residue_granularity < DMA_RESIDUE_GRANULARITY_BURST) { + reason = "insufficient DMA RX engine capabilities"; + ret = -EOPNOTSUPP; + goto err_release_rx; + } + + dmaengine_slave_config(dma->rx_chan, &dma->rx_conf); + + dma->tx_chan = dma_request_chan(p->port.dev, "tx"); + if (IS_ERR(dma->tx_chan)) { + reason = "DMA TX channel request failed"; + ret = PTR_ERR(dma->tx_chan); + goto err_release_rx; + } + + ret = dma_get_slave_caps(dma->tx_chan, &dma_caps); + if (ret < 0 || + dma_caps.residue_granularity < DMA_RESIDUE_GRANULARITY_BURST) { + reason = "insufficient DMA TX engine capabilities"; + ret = -EOPNOTSUPP; + goto err_release_tx; + } + + dmaengine_slave_config(dma->tx_chan, &dma->tx_conf); + + /* RX buffer */ + dma->rx_size = PAGE_SIZE; + + dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL); + if (!dma->rx_buf) { + ret = -ENOMEM; + goto err_release_tx; + } + + dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf, + dma->rx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dma->rx_chan->device->dev, dma->rx_addr)) { + reason = "DMA mapping error for RX buffer"; + ret = -EIO; + goto err_free_rx; + } + + /* TX buffer */ + dma->tx_addr = dma_map_single(dma->tx_chan->device->dev, + p->port.state->xmit.buf, UART_XMIT_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dma->tx_chan->device->dev, dma->tx_addr)) { + reason = "DMA mapping error for TX buffer"; + ret = -EIO; + goto err_unmap_rx; + } + + return 0; + +err_unmap_rx: + dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr, + dma->rx_size, DMA_FROM_DEVICE); +err_free_rx: + kfree(dma->rx_buf); +err_release_tx: + dma_release_channel(dma->tx_chan); +err_release_rx: + dma_release_channel(dma->rx_chan); +err_warn: + if (reason) + dev_warn(p->port.dev, "%s, DMA will not be used\n", reason); + return ret; +} + +static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p) +{ + struct s3c24xx_uart_dma *dma = p->dma; + + if (dma->rx_chan) { + dmaengine_terminate_all(dma->rx_chan); + dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr, + dma->rx_size, DMA_FROM_DEVICE); + kfree(dma->rx_buf); + dma_release_channel(dma->rx_chan); + dma->rx_chan = NULL; + } + + if (dma->tx_chan) { + dmaengine_terminate_all(dma->tx_chan); + dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr, + UART_XMIT_SIZE, DMA_TO_DEVICE); + dma_release_channel(dma->tx_chan); + dma->tx_chan = NULL; + } +} + +static void s3c24xx_serial_shutdown(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport 
= to_ourport(port); + + if (ourport->tx_claimed) { + free_irq(ourport->tx_irq, ourport); + ourport->tx_enabled = 0; + ourport->tx_claimed = 0; + ourport->tx_mode = 0; + } + + if (ourport->rx_claimed) { + free_irq(ourport->rx_irq, ourport); + ourport->rx_claimed = 0; + ourport->rx_enabled = 0; + } + + if (ourport->dma) + s3c24xx_serial_release_dma(ourport); + + ourport->tx_in_progress = 0; +} + +static void s3c64xx_serial_shutdown(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + + ourport->tx_enabled = 0; + ourport->tx_mode = 0; + ourport->rx_enabled = 0; + + free_irq(port->irq, ourport); + + wr_regl(port, S3C64XX_UINTP, 0xf); + wr_regl(port, S3C64XX_UINTM, 0xf); + + if (ourport->dma) + s3c24xx_serial_release_dma(ourport); + + ourport->tx_in_progress = 0; +} + +static void apple_s5l_serial_shutdown(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + + unsigned int ucon; + + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK | + APPLE_S5L_UCON_RXTHRESH_ENA_MSK | + APPLE_S5L_UCON_RXTO_ENA_MSK); + wr_regl(port, S3C2410_UCON, ucon); + + wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS); + + free_irq(port->irq, ourport); + + ourport->tx_enabled = 0; + ourport->tx_mode = 0; + ourport->rx_enabled = 0; + + if (ourport->dma) + s3c24xx_serial_release_dma(ourport); + + ourport->tx_in_progress = 0; +} + +static int s3c24xx_serial_startup(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + int ret; + + ourport->rx_enabled = 1; + + ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_irq, 0, + s3c24xx_serial_portname(port), ourport); + + if (ret != 0) { + dev_err(port->dev, "cannot get irq %d\n", ourport->rx_irq); + return ret; + } + + ourport->rx_claimed = 1; + + dev_dbg(port->dev, "requesting tx irq...\n"); + + ourport->tx_enabled = 1; + + ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_irq, 0, + s3c24xx_serial_portname(port), ourport); + + if (ret) { + dev_err(port->dev, "cannot get irq %d\n", ourport->tx_irq); + goto err; + } + + ourport->tx_claimed = 1; + + /* the port reset code should have done the correct + * register setup for the port controls + */ + + return ret; + +err: + s3c24xx_serial_shutdown(port); + return ret; +} + +static int s3c64xx_serial_startup(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + unsigned long flags; + unsigned int ufcon; + int ret; + + wr_regl(port, S3C64XX_UINTM, 0xf); + if (ourport->dma) { + ret = s3c24xx_serial_request_dma(ourport); + if (ret < 0) { + devm_kfree(port->dev, ourport->dma); + ourport->dma = NULL; + } + } + + ret = request_irq(port->irq, s3c64xx_serial_handle_irq, IRQF_SHARED, + s3c24xx_serial_portname(port), ourport); + if (ret) { + dev_err(port->dev, "cannot get irq %d\n", port->irq); + return ret; + } + + /* For compatibility with s3c24xx Soc's */ + ourport->rx_enabled = 1; + ourport->tx_enabled = 0; + + spin_lock_irqsave(&port->lock, flags); + + ufcon = rd_regl(port, S3C2410_UFCON); + ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8; + if (!uart_console(port)) + ufcon |= S3C2410_UFCON_RESETTX; + wr_regl(port, S3C2410_UFCON, ufcon); + + enable_rx_pio(ourport); + + spin_unlock_irqrestore(&port->lock, flags); + + /* Enable Rx Interrupt */ + s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM); + + return ret; +} + +static int apple_s5l_serial_startup(struct uart_port *port) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + unsigned long flags; + 
unsigned int ufcon; + int ret; + + wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS); + + ret = request_irq(port->irq, apple_serial_handle_irq, 0, + s3c24xx_serial_portname(port), ourport); + if (ret) { + dev_err(port->dev, "cannot get irq %d\n", port->irq); + return ret; + } + + /* For compatibility with s3c24xx Soc's */ + ourport->rx_enabled = 1; + ourport->tx_enabled = 0; + + spin_lock_irqsave(&port->lock, flags); + + ufcon = rd_regl(port, S3C2410_UFCON); + ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8; + if (!uart_console(port)) + ufcon |= S3C2410_UFCON_RESETTX; + wr_regl(port, S3C2410_UFCON, ufcon); + + enable_rx_pio(ourport); + + spin_unlock_irqrestore(&port->lock, flags); + + /* Enable Rx Interrupt */ + s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON); + s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON); + + return ret; +} + +/* power power management control */ + +static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level, + unsigned int old) +{ + struct s3c24xx_uart_port *ourport = to_ourport(port); + int timeout = 10000; + + ourport->pm_level = level; + + switch (level) { + case 3: + while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) + udelay(100); + + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + + clk_disable_unprepare(ourport->clk); + break; + + case 0: + clk_prepare_enable(ourport->clk); + + if (!IS_ERR(ourport->baudclk)) + clk_prepare_enable(ourport->baudclk); + break; + default: + dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level); + } +} + +/* baud rate calculation + * + * The UARTs on the S3C2410/S3C2440 can take their clocks from a number + * of different sources, including the peripheral clock ("pclk") and an + * external clock ("uclk"). The S3C2440 also adds the core clock ("fclk") + * with a programmable extra divisor. + * + * The following code goes through the clock sources, and calculates the + * baud clocks (and the resultant actual baud rates) and then tries to + * pick the closest one and select that. 
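+ * Illustrative numbers: with a 66 MHz source clock and a requested
+ * 115200 baud, the non-DIVSLOT formula below gives
+ * quot = (66000000 + 8 * 115200) / (16 * 115200) = 36, i.e. an actual
+ * rate of 66000000 / (16 * 36), roughly 114583 baud (about 0.5% low);
+ * whichever clock source deviates least from the request is kept.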
+ * + */ + +#define MAX_CLK_NAME_LENGTH 15 + +static inline int s3c24xx_serial_getsource(struct uart_port *port) +{ + const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); + unsigned int ucon; + + if (info->num_clks == 1) + return 0; + + ucon = rd_regl(port, S3C2410_UCON); + ucon &= info->clksel_mask; + return ucon >> info->clksel_shift; +} + +static void s3c24xx_serial_setsource(struct uart_port *port, + unsigned int clk_sel) +{ + const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); + unsigned int ucon; + + if (info->num_clks == 1) + return; + + ucon = rd_regl(port, S3C2410_UCON); + if ((ucon & info->clksel_mask) >> info->clksel_shift == clk_sel) + return; + + ucon &= ~info->clksel_mask; + ucon |= clk_sel << info->clksel_shift; + wr_regl(port, S3C2410_UCON, ucon); +} + +static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, + unsigned int req_baud, struct clk **best_clk, + unsigned int *clk_num) +{ + const struct s3c24xx_uart_info *info = ourport->info; + struct clk *clk; + unsigned long rate; + unsigned int cnt, baud, quot, best_quot = 0; + char clkname[MAX_CLK_NAME_LENGTH]; + int calc_deviation, deviation = (1 << 30) - 1; + + for (cnt = 0; cnt < info->num_clks; cnt++) { + /* Keep selected clock if provided */ + if (ourport->cfg->clk_sel && + !(ourport->cfg->clk_sel & (1 << cnt))) + continue; + + sprintf(clkname, "clk_uart_baud%d", cnt); + clk = clk_get(ourport->port.dev, clkname); + if (IS_ERR(clk)) + continue; + + rate = clk_get_rate(clk); + if (!rate) { + dev_err(ourport->port.dev, + "Failed to get clock rate for %s.\n", clkname); + clk_put(clk); + continue; + } + + if (ourport->info->has_divslot) { + unsigned long div = rate / req_baud; + + /* The UDIVSLOT register on the newer UARTs allows us to + * get a divisor adjustment of 1/16th on the baud clock. + * + * We don't keep the UDIVSLOT value (the 16ths we + * calculated by not multiplying the baud by 16) as it + * is easy enough to recalculate. + */ + + quot = div / 16; + baud = rate / div; + } else { + quot = (rate + (8 * req_baud)) / (16 * req_baud); + baud = rate / (quot * 16); + } + quot--; + + calc_deviation = req_baud - baud; + if (calc_deviation < 0) + calc_deviation = -calc_deviation; + + if (calc_deviation < deviation) { + /* + * If we find a better clk, release the previous one, if + * any. + */ + if (!IS_ERR(*best_clk)) + clk_put(*best_clk); + *best_clk = clk; + best_quot = quot; + *clk_num = cnt; + deviation = calc_deviation; + } else { + clk_put(clk); + } + } + + return best_quot; +} + +/* udivslot_table[] + * + * This table takes the fractional value of the baud divisor and gives + * the recommended setting for the UDIVSLOT register. + */ +static const u16 udivslot_table[16] = { + [0] = 0x0000, + [1] = 0x0080, + [2] = 0x0808, + [3] = 0x0888, + [4] = 0x2222, + [5] = 0x4924, + [6] = 0x4A52, + [7] = 0x54AA, + [8] = 0x5555, + [9] = 0xD555, + [10] = 0xD5D5, + [11] = 0xDDD5, + [12] = 0xDDDD, + [13] = 0xDFDD, + [14] = 0xDFDF, + [15] = 0xFFDF, +}; + +static void s3c24xx_serial_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + const struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port); + struct s3c24xx_uart_port *ourport = to_ourport(port); + struct clk *clk = ERR_PTR(-EINVAL); + unsigned long flags; + unsigned int baud, quot, clk_sel = 0; + unsigned int ulcon; + unsigned int umcon; + unsigned int udivslot = 0; + + /* + * We don't support modem control lines. 
+ */ + termios->c_cflag &= ~(HUPCL | CMSPAR); + termios->c_cflag |= CLOCAL; + + /* + * Ask the core to calculate the divisor for us. + */ + + baud = uart_get_baud_rate(port, termios, old, 0, 3000000); + quot = s3c24xx_serial_getclk(ourport, baud, &clk, &clk_sel); + if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) + quot = port->custom_divisor; + if (IS_ERR(clk)) + return; + + /* check to see if we need to change clock source */ + + if (ourport->baudclk != clk) { + clk_prepare_enable(clk); + + s3c24xx_serial_setsource(port, clk_sel); + + if (!IS_ERR(ourport->baudclk)) { + clk_disable_unprepare(ourport->baudclk); + ourport->baudclk = ERR_PTR(-EINVAL); + } + + ourport->baudclk = clk; + ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0; + } + + if (ourport->info->has_divslot) { + unsigned int div = ourport->baudclk_rate / baud; + + if (cfg->has_fracval) { + udivslot = (div & 15); + dev_dbg(port->dev, "fracval = %04x\n", udivslot); + } else { + udivslot = udivslot_table[div & 15]; + dev_dbg(port->dev, "udivslot = %04x (div %d)\n", + udivslot, div & 15); + } + } + + switch (termios->c_cflag & CSIZE) { + case CS5: + dev_dbg(port->dev, "config: 5bits/char\n"); + ulcon = S3C2410_LCON_CS5; + break; + case CS6: + dev_dbg(port->dev, "config: 6bits/char\n"); + ulcon = S3C2410_LCON_CS6; + break; + case CS7: + dev_dbg(port->dev, "config: 7bits/char\n"); + ulcon = S3C2410_LCON_CS7; + break; + case CS8: + default: + dev_dbg(port->dev, "config: 8bits/char\n"); + ulcon = S3C2410_LCON_CS8; + break; + } + + /* preserve original lcon IR settings */ + ulcon |= (cfg->ulcon & S3C2410_LCON_IRM); + + if (termios->c_cflag & CSTOPB) + ulcon |= S3C2410_LCON_STOPB; + + if (termios->c_cflag & PARENB) { + if (termios->c_cflag & PARODD) + ulcon |= S3C2410_LCON_PODD; + else + ulcon |= S3C2410_LCON_PEVEN; + } else { + ulcon |= S3C2410_LCON_PNONE; + } + + spin_lock_irqsave(&port->lock, flags); + + dev_dbg(port->dev, + "setting ulcon to %08x, brddiv to %d, udivslot %08x\n", + ulcon, quot, udivslot); + + wr_regl(port, S3C2410_ULCON, ulcon); + wr_regl(port, S3C2410_UBRDIV, quot); + + port->status &= ~UPSTAT_AUTOCTS; + + umcon = rd_regl(port, S3C2410_UMCON); + if (termios->c_cflag & CRTSCTS) { + umcon |= S3C2410_UMCOM_AFC; + /* Disable RTS when RX FIFO contains 63 bytes */ + umcon &= ~S3C2412_UMCON_AFC_8; + port->status = UPSTAT_AUTOCTS; + } else { + umcon &= ~S3C2410_UMCOM_AFC; + } + wr_regl(port, S3C2410_UMCON, umcon); + + if (ourport->info->has_divslot) + wr_regl(port, S3C2443_DIVSLOT, udivslot); + + dev_dbg(port->dev, + "uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n", + rd_regl(port, S3C2410_ULCON), + rd_regl(port, S3C2410_UCON), + rd_regl(port, S3C2410_UFCON)); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* + * Which character status flags are we interested in? + */ + port->read_status_mask = S3C2410_UERSTAT_OVERRUN; + if (termios->c_iflag & INPCK) + port->read_status_mask |= S3C2410_UERSTAT_FRAME | + S3C2410_UERSTAT_PARITY; + /* + * Which character status flags should we ignore? + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= S3C2410_UERSTAT_OVERRUN; + if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR) + port->ignore_status_mask |= S3C2410_UERSTAT_FRAME; + + /* + * Ignore all characters if CREAD is not set. 
+ */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= RXSTAT_DUMMY_READ; + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *s3c24xx_serial_type(struct uart_port *port) +{ + const struct s3c24xx_uart_port *ourport = to_ourport(port); + + switch (ourport->info->type) { + case TYPE_S3C24XX: + return "S3C24XX"; + case TYPE_S3C6400: + return "S3C6400/10"; + case TYPE_APPLE_S5L: + return "APPLE S5L"; + default: + return NULL; + } +} + +static void s3c24xx_serial_config_port(struct uart_port *port, int flags) +{ + const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); + + if (flags & UART_CONFIG_TYPE) + port->type = info->port_type; +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). + */ +static int +s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); + + if (ser->type != PORT_UNKNOWN && ser->type != info->port_type) + return -EINVAL; + + return 0; +} + +#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE + +static struct console s3c24xx_serial_console; + +static void __init s3c24xx_serial_register_console(void) +{ + register_console(&s3c24xx_serial_console); +} + +static void s3c24xx_serial_unregister_console(void) +{ + if (s3c24xx_serial_console.flags & CON_ENABLED) + unregister_console(&s3c24xx_serial_console); +} + +#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console +#else +static inline void s3c24xx_serial_register_console(void) { } +static inline void s3c24xx_serial_unregister_console(void) { } +#define S3C24XX_SERIAL_CONSOLE NULL +#endif + +#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL) +static int s3c24xx_serial_get_poll_char(struct uart_port *port); +static void s3c24xx_serial_put_poll_char(struct uart_port *port, + unsigned char c); +#endif + +static const struct uart_ops s3c24xx_serial_ops = { + .pm = s3c24xx_serial_pm, + .tx_empty = s3c24xx_serial_tx_empty, + .get_mctrl = s3c24xx_serial_get_mctrl, + .set_mctrl = s3c24xx_serial_set_mctrl, + .stop_tx = s3c24xx_serial_stop_tx, + .start_tx = s3c24xx_serial_start_tx, + .stop_rx = s3c24xx_serial_stop_rx, + .break_ctl = s3c24xx_serial_break_ctl, + .startup = s3c24xx_serial_startup, + .shutdown = s3c24xx_serial_shutdown, + .set_termios = s3c24xx_serial_set_termios, + .type = s3c24xx_serial_type, + .config_port = s3c24xx_serial_config_port, + .verify_port = s3c24xx_serial_verify_port, +#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL) + .poll_get_char = s3c24xx_serial_get_poll_char, + .poll_put_char = s3c24xx_serial_put_poll_char, +#endif +}; + +static const struct uart_ops s3c64xx_serial_ops = { + .pm = s3c24xx_serial_pm, + .tx_empty = s3c24xx_serial_tx_empty, + .get_mctrl = s3c24xx_serial_get_mctrl, + .set_mctrl = s3c24xx_serial_set_mctrl, + .stop_tx = s3c24xx_serial_stop_tx, + .start_tx = s3c24xx_serial_start_tx, + .stop_rx = s3c24xx_serial_stop_rx, + .break_ctl = s3c24xx_serial_break_ctl, + .startup = s3c64xx_serial_startup, + .shutdown = s3c64xx_serial_shutdown, + .set_termios = s3c24xx_serial_set_termios, + .type = s3c24xx_serial_type, + .config_port = s3c24xx_serial_config_port, + .verify_port = s3c24xx_serial_verify_port, +#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL) + .poll_get_char = s3c24xx_serial_get_poll_char, + .poll_put_char = s3c24xx_serial_put_poll_char, +#endif +}; + +static const struct uart_ops apple_s5l_serial_ops = { + .pm = s3c24xx_serial_pm, + .tx_empty = s3c24xx_serial_tx_empty, + 
.get_mctrl = s3c24xx_serial_get_mctrl, + .set_mctrl = s3c24xx_serial_set_mctrl, + .stop_tx = s3c24xx_serial_stop_tx, + .start_tx = s3c24xx_serial_start_tx, + .stop_rx = s3c24xx_serial_stop_rx, + .break_ctl = s3c24xx_serial_break_ctl, + .startup = apple_s5l_serial_startup, + .shutdown = apple_s5l_serial_shutdown, + .set_termios = s3c24xx_serial_set_termios, + .type = s3c24xx_serial_type, + .config_port = s3c24xx_serial_config_port, + .verify_port = s3c24xx_serial_verify_port, +#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL) + .poll_get_char = s3c24xx_serial_get_poll_char, + .poll_put_char = s3c24xx_serial_put_poll_char, +#endif +}; + +static struct uart_driver s3c24xx_uart_drv = { + .owner = THIS_MODULE, + .driver_name = "s3c2410_serial", + .nr = UART_NR, + .cons = S3C24XX_SERIAL_CONSOLE, + .dev_name = S3C24XX_SERIAL_NAME, + .major = S3C24XX_SERIAL_MAJOR, + .minor = S3C24XX_SERIAL_MINOR, +}; + +static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR]; + +static void s3c24xx_serial_init_port_default(int index) { + struct uart_port *port = &s3c24xx_serial_ports[index].port; + + spin_lock_init(&port->lock); + + port->iotype = UPIO_MEM; + port->uartclk = 0; + port->fifosize = 16; + port->ops = &s3c24xx_serial_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->line = index; +} + +/* s3c24xx_serial_resetport + * + * reset the fifos and other the settings. + */ + +static void s3c24xx_serial_resetport(struct uart_port *port, + const struct s3c2410_uartcfg *cfg) +{ + const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); + unsigned long ucon = rd_regl(port, S3C2410_UCON); + + ucon &= (info->clksel_mask | info->ucon_mask); + wr_regl(port, S3C2410_UCON, ucon | cfg->ucon); + + /* reset both fifos */ + wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); + wr_regl(port, S3C2410_UFCON, cfg->ufcon); + + /* some delay is required after fifo reset */ + udelay(1); +} + +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ + +static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct s3c24xx_uart_port *port; + struct uart_port *uport; + + port = container_of(nb, struct s3c24xx_uart_port, freq_transition); + uport = &port->port; + + /* check to see if port is enabled */ + + if (port->pm_level != 0) + return 0; + + /* try and work out if the baudrate is changing, we can detect + * a change in rate, but we do not have support for detecting + * a disturbance in the clock-rate over the change. + */ + + if (IS_ERR(port->baudclk)) + goto exit; + + if (port->baudclk_rate == clk_get_rate(port->baudclk)) + goto exit; + + if (val == CPUFREQ_PRECHANGE) { + /* we should really shut the port down whilst the + * frequency change is in progress. 
+ */ + + } else if (val == CPUFREQ_POSTCHANGE) { + struct ktermios *termios; + struct tty_struct *tty; + + if (uport->state == NULL) + goto exit; + + tty = uport->state->port.tty; + + if (tty == NULL) + goto exit; + + termios = &tty->termios; + + if (termios == NULL) { + dev_warn(uport->dev, "%s: no termios?\n", __func__); + goto exit; + } + + s3c24xx_serial_set_termios(uport, termios, NULL); + } + +exit: + return 0; +} + +static inline int +s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port) +{ + port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition; + + return cpufreq_register_notifier(&port->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +static inline void +s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port) +{ + cpufreq_unregister_notifier(&port->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +#else +static inline int +s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port) +{ + return 0; +} + +static inline void +s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port) +{ +} +#endif + +static int s3c24xx_serial_enable_baudclk(struct s3c24xx_uart_port *ourport) +{ + struct device *dev = ourport->port.dev; + const struct s3c24xx_uart_info *info = ourport->info; + char clk_name[MAX_CLK_NAME_LENGTH]; + unsigned int clk_sel; + struct clk *clk; + int clk_num; + int ret; + + clk_sel = ourport->cfg->clk_sel ? : info->def_clk_sel; + for (clk_num = 0; clk_num < info->num_clks; clk_num++) { + if (!(clk_sel & (1 << clk_num))) + continue; + + sprintf(clk_name, "clk_uart_baud%d", clk_num); + clk = clk_get(dev, clk_name); + if (IS_ERR(clk)) + continue; + + ret = clk_prepare_enable(clk); + if (ret) { + clk_put(clk); + continue; + } + + ourport->baudclk = clk; + ourport->baudclk_rate = clk_get_rate(clk); + s3c24xx_serial_setsource(&ourport->port, clk_num); + + return 0; + } + + return -EINVAL; +} + +/* s3c24xx_serial_init_port + * + * initialise a single serial port from the platform device given + */ + +static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, + struct platform_device *platdev) +{ + struct uart_port *port = &ourport->port; + const struct s3c2410_uartcfg *cfg = ourport->cfg; + struct resource *res; + int ret; + + if (platdev == NULL) + return -ENODEV; + + if (port->mapbase != 0) + return -EINVAL; + + /* setup info for port */ + port->dev = &platdev->dev; + + port->uartclk = 1; + + if (cfg->uart_flags & UPF_CONS_FLOW) { + dev_dbg(port->dev, "enabling flow control\n"); + port->flags |= UPF_CONS_FLOW; + } + + /* sort our the physical and virtual addresses for each UART */ + + res = platform_get_resource(platdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(port->dev, "failed to find memory resource for uart\n"); + return -EINVAL; + } + + dev_dbg(port->dev, "resource %pR)\n", res); + + port->membase = devm_ioremap_resource(port->dev, res); + if (IS_ERR(port->membase)) { + dev_err(port->dev, "failed to remap controller address\n"); + return -EBUSY; + } + + port->mapbase = res->start; + ret = platform_get_irq(platdev, 0); + if (ret < 0) { + port->irq = 0; + } else { + port->irq = ret; + ourport->rx_irq = ret; + ourport->tx_irq = ret + 1; + } + + switch (ourport->info->type) { + case TYPE_S3C24XX: + ret = platform_get_irq(platdev, 1); + if (ret > 0) + ourport->tx_irq = ret; + break; + default: + break; + } + + /* + * DMA is currently supported only on DT platforms, if DMA properties + * are specified. 
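+	 * Only the per-port DMA bookkeeping structure is allocated at probe
+	 * time (the devm_kzalloc() below); no channels are requested here.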
+ */ + if (platdev->dev.of_node && of_find_property(platdev->dev.of_node, + "dmas", NULL)) { + ourport->dma = devm_kzalloc(port->dev, + sizeof(*ourport->dma), + GFP_KERNEL); + if (!ourport->dma) { + ret = -ENOMEM; + goto err; + } + } + + ourport->clk = clk_get(&platdev->dev, "uart"); + if (IS_ERR(ourport->clk)) { + pr_err("%s: Controller clock not found\n", + dev_name(&platdev->dev)); + ret = PTR_ERR(ourport->clk); + goto err; + } + + ret = clk_prepare_enable(ourport->clk); + if (ret) { + pr_err("uart: clock failed to prepare+enable: %d\n", ret); + clk_put(ourport->clk); + goto err; + } + + ret = s3c24xx_serial_enable_baudclk(ourport); + if (ret) + pr_warn("uart: failed to enable baudclk\n"); + + /* Keep all interrupts masked and cleared */ + switch (ourport->info->type) { + case TYPE_S3C6400: + wr_regl(port, S3C64XX_UINTM, 0xf); + wr_regl(port, S3C64XX_UINTP, 0xf); + wr_regl(port, S3C64XX_UINTSP, 0xf); + break; + case TYPE_APPLE_S5L: { + unsigned int ucon; + + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK | + APPLE_S5L_UCON_RXTHRESH_ENA_MSK | + APPLE_S5L_UCON_RXTO_ENA_MSK); + wr_regl(port, S3C2410_UCON, ucon); + + wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS); + break; + } + default: + break; + } + + dev_dbg(port->dev, "port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n", + &port->mapbase, port->membase, port->irq, + ourport->rx_irq, ourport->tx_irq, port->uartclk); + + /* reset the fifos (and setup the uart) */ + s3c24xx_serial_resetport(port, cfg); + + return 0; + +err: + port->mapbase = 0; + return ret; +} + +/* Device driver serial port probe */ + +static int probe_index; + +static inline const struct s3c24xx_serial_drv_data * +s3c24xx_get_driver_data(struct platform_device *pdev) +{ + if (dev_of_node(&pdev->dev)) + return of_device_get_match_data(&pdev->dev); + + return (struct s3c24xx_serial_drv_data *) + platform_get_device_id(pdev)->driver_data; +} + +static int s3c24xx_serial_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct s3c24xx_uart_port *ourport; + int index = probe_index; + int ret, prop = 0; + + if (np) { + ret = of_alias_get_id(np, "serial"); + if (ret >= 0) + index = ret; + } + + if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", index); + return -EINVAL; + } + ourport = &s3c24xx_serial_ports[index]; + + s3c24xx_serial_init_port_default(index); + + ourport->drv_data = s3c24xx_get_driver_data(pdev); + if (!ourport->drv_data) { + dev_err(&pdev->dev, "could not find driver data\n"); + return -ENODEV; + } + + ourport->baudclk = ERR_PTR(-EINVAL); + ourport->info = &ourport->drv_data->info; + ourport->cfg = (dev_get_platdata(&pdev->dev)) ? 
+ dev_get_platdata(&pdev->dev) : + &ourport->drv_data->def_cfg; + + switch (ourport->info->type) { + case TYPE_S3C24XX: + ourport->port.ops = &s3c24xx_serial_ops; + break; + case TYPE_S3C6400: + ourport->port.ops = &s3c64xx_serial_ops; + break; + case TYPE_APPLE_S5L: + ourport->port.ops = &apple_s5l_serial_ops; + break; + } + + if (np) { + of_property_read_u32(np, + "samsung,uart-fifosize", &ourport->port.fifosize); + + if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { + switch (prop) { + case 1: + ourport->port.iotype = UPIO_MEM; + break; + case 4: + ourport->port.iotype = UPIO_MEM32; + break; + default: + dev_warn(&pdev->dev, "unsupported reg-io-width (%d)\n", + prop); + return -EINVAL; + } + } + } + + if (ourport->drv_data->fifosize[index]) + ourport->port.fifosize = ourport->drv_data->fifosize[index]; + else if (ourport->info->fifosize) + ourport->port.fifosize = ourport->info->fifosize; + ourport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SAMSUNG_CONSOLE); + + /* + * DMA transfers must be aligned at least to cache line size, + * so find minimal transfer size suitable for DMA mode + */ + ourport->min_dma_size = max_t(int, ourport->port.fifosize, + dma_get_cache_alignment()); + + dev_dbg(&pdev->dev, "%s: initialising port %p...\n", __func__, ourport); + + ret = s3c24xx_serial_init_port(ourport, pdev); + if (ret < 0) + return ret; + + if (!s3c24xx_uart_drv.state) { + ret = uart_register_driver(&s3c24xx_uart_drv); + if (ret < 0) { + pr_err("Failed to register Samsung UART driver\n"); + return ret; + } + } + + dev_dbg(&pdev->dev, "%s: adding port\n", __func__); + uart_add_one_port(&s3c24xx_uart_drv, &ourport->port); + platform_set_drvdata(pdev, &ourport->port); + + /* + * Deactivate the clock enabled in s3c24xx_serial_init_port here, + * so that a potential re-enablement through the pm-callback overlaps + * and keeps the clock enabled in this case. 
+ */ + clk_disable_unprepare(ourport->clk); + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + + ret = s3c24xx_serial_cpufreq_register(ourport); + if (ret < 0) + dev_err(&pdev->dev, "failed to add cpufreq notifier\n"); + + probe_index++; + + return 0; +} + +static int s3c24xx_serial_remove(struct platform_device *dev) +{ + struct uart_port *port = s3c24xx_dev_to_port(&dev->dev); + + if (port) { + s3c24xx_serial_cpufreq_deregister(to_ourport(port)); + uart_remove_one_port(&s3c24xx_uart_drv, port); + } + + uart_unregister_driver(&s3c24xx_uart_drv); + + return 0; +} + +/* UART power management code */ +#ifdef CONFIG_PM_SLEEP +static int s3c24xx_serial_suspend(struct device *dev) +{ + struct uart_port *port = s3c24xx_dev_to_port(dev); + + if (port) + uart_suspend_port(&s3c24xx_uart_drv, port); + + return 0; +} + +static int s3c24xx_serial_resume(struct device *dev) +{ + struct uart_port *port = s3c24xx_dev_to_port(dev); + struct s3c24xx_uart_port *ourport = to_ourport(port); + + if (port) { + clk_prepare_enable(ourport->clk); + if (!IS_ERR(ourport->baudclk)) + clk_prepare_enable(ourport->baudclk); + s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port)); + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + clk_disable_unprepare(ourport->clk); + + uart_resume_port(&s3c24xx_uart_drv, port); + } + + return 0; +} + +static int s3c24xx_serial_resume_noirq(struct device *dev) +{ + struct uart_port *port = s3c24xx_dev_to_port(dev); + struct s3c24xx_uart_port *ourport = to_ourport(port); + + if (port) { + /* restore IRQ mask */ + switch (ourport->info->type) { + case TYPE_S3C6400: { + unsigned int uintm = 0xf; + + if (ourport->tx_enabled) + uintm &= ~S3C64XX_UINTM_TXD_MSK; + if (ourport->rx_enabled) + uintm &= ~S3C64XX_UINTM_RXD_MSK; + clk_prepare_enable(ourport->clk); + if (!IS_ERR(ourport->baudclk)) + clk_prepare_enable(ourport->baudclk); + wr_regl(port, S3C64XX_UINTM, uintm); + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + clk_disable_unprepare(ourport->clk); + break; + } + case TYPE_APPLE_S5L: { + unsigned int ucon; + int ret; + + ret = clk_prepare_enable(ourport->clk); + if (ret) { + dev_err(dev, "clk_enable clk failed: %d\n", ret); + return ret; + } + if (!IS_ERR(ourport->baudclk)) { + ret = clk_prepare_enable(ourport->baudclk); + if (ret) { + dev_err(dev, "clk_enable baudclk failed: %d\n", ret); + clk_disable_unprepare(ourport->clk); + return ret; + } + } + + ucon = rd_regl(port, S3C2410_UCON); + + ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK | + APPLE_S5L_UCON_RXTHRESH_ENA_MSK | + APPLE_S5L_UCON_RXTO_ENA_MSK); + + if (ourport->tx_enabled) + ucon |= APPLE_S5L_UCON_TXTHRESH_ENA_MSK; + if (ourport->rx_enabled) + ucon |= APPLE_S5L_UCON_RXTHRESH_ENA_MSK | + APPLE_S5L_UCON_RXTO_ENA_MSK; + + wr_regl(port, S3C2410_UCON, ucon); + + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + clk_disable_unprepare(ourport->clk); + break; + } + default: + break; + } + } + + return 0; +} + +static const struct dev_pm_ops s3c24xx_serial_pm_ops = { + .suspend = s3c24xx_serial_suspend, + .resume = s3c24xx_serial_resume, + .resume_noirq = s3c24xx_serial_resume_noirq, +}; +#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops) + +#else /* !CONFIG_PM_SLEEP */ + +#define SERIAL_SAMSUNG_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +/* Console code */ + +#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE + +static struct uart_port *cons_uart; + +static int +s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon) +{ + 
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port); + unsigned long ufstat, utrstat; + + if (ufcon & S3C2410_UFCON_FIFOMODE) { + /* fifo mode - check amount of data in fifo registers... */ + + ufstat = rd_regl(port, S3C2410_UFSTAT); + return (ufstat & info->tx_fifofull) ? 0 : 1; + } + + /* in non-fifo mode, we go and use the tx buffer empty */ + + utrstat = rd_regl(port, S3C2410_UTRSTAT); + return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0; +} + +static bool +s3c24xx_port_configured(unsigned int ucon) +{ + /* consider the serial port configured if the tx/rx mode set */ + return (ucon & 0xf) != 0; +} + +#ifdef CONFIG_CONSOLE_POLL +/* + * Console polling routines for writing and reading from the uart while + * in an interrupt or debug context. + */ + +static int s3c24xx_serial_get_poll_char(struct uart_port *port) +{ + const struct s3c24xx_uart_port *ourport = to_ourport(port); + unsigned int ufstat; + + ufstat = rd_regl(port, S3C2410_UFSTAT); + if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0) + return NO_POLL_CHAR; + + return rd_reg(port, S3C2410_URXH); +} + +static void s3c24xx_serial_put_poll_char(struct uart_port *port, + unsigned char c) +{ + unsigned int ufcon = rd_regl(port, S3C2410_UFCON); + unsigned int ucon = rd_regl(port, S3C2410_UCON); + + /* not possible to xmit on unconfigured port */ + if (!s3c24xx_port_configured(ucon)) + return; + + while (!s3c24xx_serial_console_txrdy(port, ufcon)) + cpu_relax(); + wr_reg(port, S3C2410_UTXH, c); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +static void +s3c24xx_serial_console_putchar(struct uart_port *port, unsigned char ch) +{ + unsigned int ufcon = rd_regl(port, S3C2410_UFCON); + + while (!s3c24xx_serial_console_txrdy(port, ufcon)) + cpu_relax(); + wr_reg(port, S3C2410_UTXH, ch); +} + +static void +s3c24xx_serial_console_write(struct console *co, const char *s, + unsigned int count) +{ + unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON); + unsigned long flags; + bool locked = true; + + /* not possible to xmit on unconfigured port */ + if (!s3c24xx_port_configured(ucon)) + return; + + if (cons_uart->sysrq) + locked = false; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&cons_uart->lock, flags); + else + spin_lock_irqsave(&cons_uart->lock, flags); + + uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar); + + if (locked) + spin_unlock_irqrestore(&cons_uart->lock, flags); +} + +/* Shouldn't be __init, as it can be instantiated from other module */ +static void +s3c24xx_serial_get_options(struct uart_port *port, int *baud, + int *parity, int *bits) +{ + struct clk *clk; + unsigned int ulcon; + unsigned int ucon; + unsigned int ubrdiv; + unsigned long rate; + unsigned int clk_sel; + char clk_name[MAX_CLK_NAME_LENGTH]; + + ulcon = rd_regl(port, S3C2410_ULCON); + ucon = rd_regl(port, S3C2410_UCON); + ubrdiv = rd_regl(port, S3C2410_UBRDIV); + + if (s3c24xx_port_configured(ucon)) { + switch (ulcon & S3C2410_LCON_CSMASK) { + case S3C2410_LCON_CS5: + *bits = 5; + break; + case S3C2410_LCON_CS6: + *bits = 6; + break; + case S3C2410_LCON_CS7: + *bits = 7; + break; + case S3C2410_LCON_CS8: + default: + *bits = 8; + break; + } + + switch (ulcon & S3C2410_LCON_PMASK) { + case S3C2410_LCON_PEVEN: + *parity = 'e'; + break; + + case S3C2410_LCON_PODD: + *parity = 'o'; + break; + + case S3C2410_LCON_PNONE: + default: + *parity = 'n'; + } + + /* now calculate the baud rate */ + + clk_sel = s3c24xx_serial_getsource(port); + sprintf(clk_name, "clk_uart_baud%d", clk_sel); + + clk = clk_get(port->dev, clk_name); + 
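+	/*
+	 * The register state gives baud = rate / (16 * (UBRDIV + 1)); for
+	 * example, a 1.8432 MHz source clock with UBRDIV left at 0 works
+	 * out to 115200 baud (the numbers are purely illustrative).
+	 */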
if (!IS_ERR(clk)) + rate = clk_get_rate(clk); + else + rate = 1; + + *baud = rate / (16 * (ubrdiv + 1)); + dev_dbg(port->dev, "calculated baud %d\n", *baud); + } +} + +/* Shouldn't be __init, as it can be instantiated from other module */ +static int +s3c24xx_serial_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* is this a valid port */ + + if (co->index == -1 || co->index >= UART_NR) + co->index = 0; + + port = &s3c24xx_serial_ports[co->index].port; + + /* is the port configured? */ + + if (port->mapbase == 0x0) + return -ENODEV; + + cons_uart = port; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + s3c24xx_serial_get_options(port, &baud, &parity, &bits); + + dev_dbg(port->dev, "baud %d\n", baud); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console s3c24xx_serial_console = { + .name = S3C24XX_SERIAL_NAME, + .device = uart_console_device, + .flags = CON_PRINTBUFFER, + .index = -1, + .write = s3c24xx_serial_console_write, + .setup = s3c24xx_serial_console_setup, + .data = &s3c24xx_uart_drv, +}; +#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */ + +#ifdef CONFIG_CPU_S3C2410 +static const struct s3c24xx_serial_drv_data s3c2410_serial_drv_data = { + .info = { + .name = "Samsung S3C2410 UART", + .type = TYPE_S3C24XX, + .port_type = PORT_S3C2410, + .fifosize = 16, + .rx_fifomask = S3C2410_UFSTAT_RXMASK, + .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT, + .rx_fifofull = S3C2410_UFSTAT_RXFULL, + .tx_fifofull = S3C2410_UFSTAT_TXFULL, + .tx_fifomask = S3C2410_UFSTAT_TXMASK, + .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT, + .def_clk_sel = S3C2410_UCON_CLKSEL0, + .num_clks = 2, + .clksel_mask = S3C2410_UCON_CLKMASK, + .clksel_shift = S3C2410_UCON_CLKSHIFT, + }, + .def_cfg = { + .ucon = S3C2410_UCON_DEFAULT, + .ufcon = S3C2410_UFCON_DEFAULT, + }, +}; +#define S3C2410_SERIAL_DRV_DATA (&s3c2410_serial_drv_data) +#else +#define S3C2410_SERIAL_DRV_DATA NULL +#endif + +#ifdef CONFIG_CPU_S3C2412 +static const struct s3c24xx_serial_drv_data s3c2412_serial_drv_data = { + .info = { + .name = "Samsung S3C2412 UART", + .type = TYPE_S3C24XX, + .port_type = PORT_S3C2412, + .fifosize = 64, + .has_divslot = 1, + .rx_fifomask = S3C2440_UFSTAT_RXMASK, + .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT, + .rx_fifofull = S3C2440_UFSTAT_RXFULL, + .tx_fifofull = S3C2440_UFSTAT_TXFULL, + .tx_fifomask = S3C2440_UFSTAT_TXMASK, + .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT, + .def_clk_sel = S3C2410_UCON_CLKSEL2, + .num_clks = 4, + .clksel_mask = S3C2412_UCON_CLKMASK, + .clksel_shift = S3C2412_UCON_CLKSHIFT, + }, + .def_cfg = { + .ucon = S3C2410_UCON_DEFAULT, + .ufcon = S3C2410_UFCON_DEFAULT, + }, +}; +#define S3C2412_SERIAL_DRV_DATA (&s3c2412_serial_drv_data) +#else +#define S3C2412_SERIAL_DRV_DATA NULL +#endif + +#if defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2416) || \ + defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2442) +static const struct s3c24xx_serial_drv_data s3c2440_serial_drv_data = { + .info = { + .name = "Samsung S3C2440 UART", + .type = TYPE_S3C24XX, + .port_type = PORT_S3C2440, + .fifosize = 64, + .has_divslot = 1, + .rx_fifomask = S3C2440_UFSTAT_RXMASK, + .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT, + .rx_fifofull = S3C2440_UFSTAT_RXFULL, + .tx_fifofull = S3C2440_UFSTAT_TXFULL, + .tx_fifomask = 
S3C2440_UFSTAT_TXMASK, + .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT, + .def_clk_sel = S3C2410_UCON_CLKSEL2, + .num_clks = 4, + .clksel_mask = S3C2412_UCON_CLKMASK, + .clksel_shift = S3C2412_UCON_CLKSHIFT, + .ucon_mask = S3C2440_UCON0_DIVMASK, + }, + .def_cfg = { + .ucon = S3C2410_UCON_DEFAULT, + .ufcon = S3C2410_UFCON_DEFAULT, + }, +}; +#define S3C2440_SERIAL_DRV_DATA (&s3c2440_serial_drv_data) +#else +#define S3C2440_SERIAL_DRV_DATA NULL +#endif + +#if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410) +static const struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = { + .info = { + .name = "Samsung S3C6400 UART", + .type = TYPE_S3C6400, + .port_type = PORT_S3C6400, + .fifosize = 64, + .has_divslot = 1, + .rx_fifomask = S3C2440_UFSTAT_RXMASK, + .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT, + .rx_fifofull = S3C2440_UFSTAT_RXFULL, + .tx_fifofull = S3C2440_UFSTAT_TXFULL, + .tx_fifomask = S3C2440_UFSTAT_TXMASK, + .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT, + .def_clk_sel = S3C2410_UCON_CLKSEL2, + .num_clks = 4, + .clksel_mask = S3C6400_UCON_CLKMASK, + .clksel_shift = S3C6400_UCON_CLKSHIFT, + }, + .def_cfg = { + .ucon = S3C2410_UCON_DEFAULT, + .ufcon = S3C2410_UFCON_DEFAULT, + }, +}; +#define S3C6400_SERIAL_DRV_DATA (&s3c6400_serial_drv_data) +#else +#define S3C6400_SERIAL_DRV_DATA NULL +#endif + +#ifdef CONFIG_CPU_S5PV210 +static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = { + .info = { + .name = "Samsung S5PV210 UART", + .type = TYPE_S3C6400, + .port_type = PORT_S3C6400, + .has_divslot = 1, + .rx_fifomask = S5PV210_UFSTAT_RXMASK, + .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, + .rx_fifofull = S5PV210_UFSTAT_RXFULL, + .tx_fifofull = S5PV210_UFSTAT_TXFULL, + .tx_fifomask = S5PV210_UFSTAT_TXMASK, + .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, + .def_clk_sel = S3C2410_UCON_CLKSEL0, + .num_clks = 2, + .clksel_mask = S5PV210_UCON_CLKMASK, + .clksel_shift = S5PV210_UCON_CLKSHIFT, + }, + .def_cfg = { + .ucon = S5PV210_UCON_DEFAULT, + .ufcon = S5PV210_UFCON_DEFAULT, + }, + .fifosize = { 256, 64, 16, 16 }, +}; +#define S5PV210_SERIAL_DRV_DATA (&s5pv210_serial_drv_data) +#else +#define S5PV210_SERIAL_DRV_DATA NULL +#endif + +#if defined(CONFIG_ARCH_EXYNOS) +#define EXYNOS_COMMON_SERIAL_DRV_DATA() \ + .info = { \ + .name = "Samsung Exynos UART", \ + .type = TYPE_S3C6400, \ + .port_type = PORT_S3C6400, \ + .has_divslot = 1, \ + .rx_fifomask = S5PV210_UFSTAT_RXMASK, \ + .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \ + .rx_fifofull = S5PV210_UFSTAT_RXFULL, \ + .tx_fifofull = S5PV210_UFSTAT_TXFULL, \ + .tx_fifomask = S5PV210_UFSTAT_TXMASK, \ + .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \ + .def_clk_sel = S3C2410_UCON_CLKSEL0, \ + .num_clks = 1, \ + .clksel_mask = 0, \ + .clksel_shift = 0, \ + }, \ + .def_cfg = { \ + .ucon = S5PV210_UCON_DEFAULT, \ + .ufcon = S5PV210_UFCON_DEFAULT, \ + .has_fracval = 1, \ + } \ + +static const struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = { + EXYNOS_COMMON_SERIAL_DRV_DATA(), + .fifosize = { 256, 64, 16, 16 }, +}; + +static const struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = { + EXYNOS_COMMON_SERIAL_DRV_DATA(), + .fifosize = { 64, 256, 16, 256 }, +}; + +static const struct s3c24xx_serial_drv_data exynos850_serial_drv_data = { + EXYNOS_COMMON_SERIAL_DRV_DATA(), + .fifosize = { 256, 64, 64, 64 }, +}; + +#define EXYNOS4210_SERIAL_DRV_DATA (&exynos4210_serial_drv_data) +#define EXYNOS5433_SERIAL_DRV_DATA (&exynos5433_serial_drv_data) +#define EXYNOS850_SERIAL_DRV_DATA (&exynos850_serial_drv_data) + +#else +#define EXYNOS4210_SERIAL_DRV_DATA NULL 
+#define EXYNOS5433_SERIAL_DRV_DATA NULL +#define EXYNOS850_SERIAL_DRV_DATA NULL +#endif + +#ifdef CONFIG_ARCH_APPLE +static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = { + .info = { + .name = "Apple S5L UART", + .type = TYPE_APPLE_S5L, + .port_type = PORT_8250, + .fifosize = 16, + .rx_fifomask = S3C2410_UFSTAT_RXMASK, + .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT, + .rx_fifofull = S3C2410_UFSTAT_RXFULL, + .tx_fifofull = S3C2410_UFSTAT_TXFULL, + .tx_fifomask = S3C2410_UFSTAT_TXMASK, + .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT, + .def_clk_sel = S3C2410_UCON_CLKSEL0, + .num_clks = 1, + .clksel_mask = 0, + .clksel_shift = 0, + .ucon_mask = APPLE_S5L_UCON_MASK, + }, + .def_cfg = { + .ucon = APPLE_S5L_UCON_DEFAULT, + .ufcon = S3C2410_UFCON_DEFAULT, + }, +}; +#define S5L_SERIAL_DRV_DATA (&s5l_serial_drv_data) +#else +#define S5L_SERIAL_DRV_DATA NULL +#endif + +#if defined(CONFIG_ARCH_ARTPEC) +static const struct s3c24xx_serial_drv_data artpec8_serial_drv_data = { + .info = { + .name = "Axis ARTPEC-8 UART", + .type = TYPE_S3C6400, + .port_type = PORT_S3C6400, + .fifosize = 64, + .has_divslot = 1, + .rx_fifomask = S5PV210_UFSTAT_RXMASK, + .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, + .rx_fifofull = S5PV210_UFSTAT_RXFULL, + .tx_fifofull = S5PV210_UFSTAT_TXFULL, + .tx_fifomask = S5PV210_UFSTAT_TXMASK, + .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, + .def_clk_sel = S3C2410_UCON_CLKSEL0, + .num_clks = 1, + .clksel_mask = 0, + .clksel_shift = 0, + }, + .def_cfg = { + .ucon = S5PV210_UCON_DEFAULT, + .ufcon = S5PV210_UFCON_DEFAULT, + .has_fracval = 1, + } +}; +#define ARTPEC8_SERIAL_DRV_DATA (&artpec8_serial_drv_data) +#else +#define ARTPEC8_SERIAL_DRV_DATA (NULL) +#endif + +static const struct platform_device_id s3c24xx_serial_driver_ids[] = { + { + .name = "s3c2410-uart", + .driver_data = (kernel_ulong_t)S3C2410_SERIAL_DRV_DATA, + }, { + .name = "s3c2412-uart", + .driver_data = (kernel_ulong_t)S3C2412_SERIAL_DRV_DATA, + }, { + .name = "s3c2440-uart", + .driver_data = (kernel_ulong_t)S3C2440_SERIAL_DRV_DATA, + }, { + .name = "s3c6400-uart", + .driver_data = (kernel_ulong_t)S3C6400_SERIAL_DRV_DATA, + }, { + .name = "s5pv210-uart", + .driver_data = (kernel_ulong_t)S5PV210_SERIAL_DRV_DATA, + }, { + .name = "exynos4210-uart", + .driver_data = (kernel_ulong_t)EXYNOS4210_SERIAL_DRV_DATA, + }, { + .name = "exynos5433-uart", + .driver_data = (kernel_ulong_t)EXYNOS5433_SERIAL_DRV_DATA, + }, { + .name = "s5l-uart", + .driver_data = (kernel_ulong_t)S5L_SERIAL_DRV_DATA, + }, { + .name = "exynos850-uart", + .driver_data = (kernel_ulong_t)EXYNOS850_SERIAL_DRV_DATA, + }, { + .name = "artpec8-uart", + .driver_data = (kernel_ulong_t)ARTPEC8_SERIAL_DRV_DATA, + }, + { }, +}; +MODULE_DEVICE_TABLE(platform, s3c24xx_serial_driver_ids); + +#ifdef CONFIG_OF +static const struct of_device_id s3c24xx_uart_dt_match[] = { + { .compatible = "samsung,s3c2410-uart", + .data = S3C2410_SERIAL_DRV_DATA }, + { .compatible = "samsung,s3c2412-uart", + .data = S3C2412_SERIAL_DRV_DATA }, + { .compatible = "samsung,s3c2440-uart", + .data = S3C2440_SERIAL_DRV_DATA }, + { .compatible = "samsung,s3c6400-uart", + .data = S3C6400_SERIAL_DRV_DATA }, + { .compatible = "samsung,s5pv210-uart", + .data = S5PV210_SERIAL_DRV_DATA }, + { .compatible = "samsung,exynos4210-uart", + .data = EXYNOS4210_SERIAL_DRV_DATA }, + { .compatible = "samsung,exynos5433-uart", + .data = EXYNOS5433_SERIAL_DRV_DATA }, + { .compatible = "apple,s5l-uart", + .data = S5L_SERIAL_DRV_DATA }, + { .compatible = "samsung,exynos850-uart", + .data = EXYNOS850_SERIAL_DRV_DATA }, + { 
.compatible = "axis,artpec8-uart", + .data = ARTPEC8_SERIAL_DRV_DATA }, + {}, +}; +MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match); +#endif + +static struct platform_driver samsung_serial_driver = { + .probe = s3c24xx_serial_probe, + .remove = s3c24xx_serial_remove, + .id_table = s3c24xx_serial_driver_ids, + .driver = { + .name = "samsung-uart", + .pm = SERIAL_SAMSUNG_PM_OPS, + .of_match_table = of_match_ptr(s3c24xx_uart_dt_match), + }, +}; + +static int __init samsung_serial_init(void) +{ + int ret; + + s3c24xx_serial_register_console(); + + ret = platform_driver_register(&samsung_serial_driver); + if (ret) { + s3c24xx_serial_unregister_console(); + return ret; + } + + return 0; +} + +static void __exit samsung_serial_exit(void) +{ + platform_driver_unregister(&samsung_serial_driver); + s3c24xx_serial_unregister_console(); +} + +module_init(samsung_serial_init); +module_exit(samsung_serial_exit); + +#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE +/* + * Early console. + */ + +static void wr_reg_barrier(const struct uart_port *port, u32 reg, u32 val) +{ + switch (port->iotype) { + case UPIO_MEM: + writeb(val, portaddr(port, reg)); + break; + case UPIO_MEM32: + writel(val, portaddr(port, reg)); + break; + } +} + +struct samsung_early_console_data { + u32 txfull_mask; + u32 rxfifo_mask; +}; + +static void samsung_early_busyuart(const struct uart_port *port) +{ + while (!(readl(port->membase + S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXFE)) + ; +} + +static void samsung_early_busyuart_fifo(const struct uart_port *port) +{ + const struct samsung_early_console_data *data = port->private_data; + + while (readl(port->membase + S3C2410_UFSTAT) & data->txfull_mask) + ; +} + +static void samsung_early_putc(struct uart_port *port, unsigned char c) +{ + if (readl(port->membase + S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) + samsung_early_busyuart_fifo(port); + else + samsung_early_busyuart(port); + + wr_reg_barrier(port, S3C2410_UTXH, c); +} + +static void samsung_early_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, samsung_early_putc); +} + +static int samsung_early_read(struct console *con, char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + const struct samsung_early_console_data *data = dev->port.private_data; + int ch, ufstat, num_read = 0; + + while (num_read < n) { + ufstat = rd_regl(&dev->port, S3C2410_UFSTAT); + if (!(ufstat & data->rxfifo_mask)) + break; + ch = rd_reg(&dev->port, S3C2410_URXH); + if (ch == NO_POLL_CHAR) + break; + + s[num_read++] = ch; + } + + return num_read; +} + +static int __init samsung_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = samsung_early_write; + device->con->read = samsung_early_read; + return 0; +} + +/* S3C2410 */ +static struct samsung_early_console_data s3c2410_early_console_data = { + .txfull_mask = S3C2410_UFSTAT_TXFULL, + .rxfifo_mask = S3C2410_UFSTAT_RXFULL | S3C2410_UFSTAT_RXMASK, +}; + +static int __init s3c2410_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + device->port.private_data = &s3c2410_early_console_data; + return samsung_early_console_setup(device, opt); +} + +OF_EARLYCON_DECLARE(s3c2410, "samsung,s3c2410-uart", + s3c2410_early_console_setup); + +/* S3C2412, S3C2440, S3C64xx */ +static struct samsung_early_console_data s3c2440_early_console_data = { + .txfull_mask = S3C2440_UFSTAT_TXFULL, + .rxfifo_mask = 
S3C2440_UFSTAT_RXFULL | S3C2440_UFSTAT_RXMASK, +}; + +static int __init s3c2440_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + device->port.private_data = &s3c2440_early_console_data; + return samsung_early_console_setup(device, opt); +} + +OF_EARLYCON_DECLARE(s3c2412, "samsung,s3c2412-uart", + s3c2440_early_console_setup); +OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart", + s3c2440_early_console_setup); +OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart", + s3c2440_early_console_setup); + +/* S5PV210, Exynos */ +static struct samsung_early_console_data s5pv210_early_console_data = { + .txfull_mask = S5PV210_UFSTAT_TXFULL, + .rxfifo_mask = S5PV210_UFSTAT_RXFULL | S5PV210_UFSTAT_RXMASK, +}; + +static int __init s5pv210_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + device->port.private_data = &s5pv210_early_console_data; + return samsung_early_console_setup(device, opt); +} + +OF_EARLYCON_DECLARE(s5pv210, "samsung,s5pv210-uart", + s5pv210_early_console_setup); +OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart", + s5pv210_early_console_setup); +OF_EARLYCON_DECLARE(artpec8, "axis,artpec8-uart", + s5pv210_early_console_setup); + +/* Apple S5L */ +static int __init apple_s5l_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + /* Close enough to S3C2410 for earlycon... */ + device->port.private_data = &s3c2410_early_console_data; + +#ifdef CONFIG_ARM64 + /* ... but we need to override the existing fixmap entry as nGnRnE */ + __set_fixmap(FIX_EARLYCON_MEM_BASE, device->port.mapbase, + __pgprot(PROT_DEVICE_nGnRnE)); +#endif + return samsung_early_console_setup(device, opt); +} + +OF_EARLYCON_DECLARE(s5l, "apple,s5l-uart", apple_s5l_early_console_setup); +#endif + +MODULE_ALIAS("platform:samsung-uart"); +MODULE_DESCRIPTION("Samsung SoC Serial port driver"); +MODULE_AUTHOR("Ben Dooks "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c new file mode 100644 index 000000000..c5d2b6cdc --- /dev/null +++ b/drivers/tty/serial/sb1250-duart.c @@ -0,0 +1,964 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Support for the asynchronous serial interface (DUART) included + * in the BCM1250 and derived System-On-a-Chip (SOC) devices. + * + * Copyright (c) 2007 Maciej W. 
Rozycki + * + * Derived from drivers/char/sb1250_duart.c for which the following + * copyright applies: + * + * Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation + * + * References: + * + * "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + + +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) +#include +#include + +#define SBD_CHANREGS(line) A_BCM1480_DUART_CHANREG((line), 0) +#define SBD_CTRLREGS(line) A_BCM1480_DUART_CTRLREG((line), 0) +#define SBD_INT(line) (K_BCM1480_INT_UART_0 + (line)) + +#define DUART_CHANREG_SPACING BCM1480_DUART_CHANREG_SPACING + +#define R_DUART_IMRREG(line) R_BCM1480_DUART_IMRREG(line) +#define R_DUART_INCHREG(line) R_BCM1480_DUART_INCHREG(line) +#define R_DUART_ISRREG(line) R_BCM1480_DUART_ISRREG(line) + +#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) +#include +#include + +#define SBD_CHANREGS(line) A_DUART_CHANREG((line), 0) +#define SBD_CTRLREGS(line) A_DUART_CTRLREG(0) +#define SBD_INT(line) (K_INT_UART_0 + (line)) + +#else +#error invalid SB1250 UART configuration + +#endif + + +MODULE_AUTHOR("Maciej W. Rozycki "); +MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver"); +MODULE_LICENSE("GPL"); + + +#define DUART_MAX_CHIP 2 +#define DUART_MAX_SIDE 2 + +/* + * Per-port state. + */ +struct sbd_port { + struct sbd_duart *duart; + struct uart_port port; + unsigned char __iomem *memctrl; + int tx_stopped; + int initialised; +}; + +/* + * Per-DUART state for the shared register space. + */ +struct sbd_duart { + struct sbd_port sport[2]; + unsigned long mapctrl; + refcount_t map_guard; +}; + +#define to_sport(uport) container_of(uport, struct sbd_port, port) + +static struct sbd_duart sbd_duarts[DUART_MAX_CHIP]; + + +/* + * Reading and writing SB1250 DUART registers. + * + * There are three register spaces: two per-channel ones and + * a shared one. We have to define accessors appropriately. + * All registers are 64-bit and all but the Baud Rate Clock + * registers only define 8 least significant bits. There is + * also a workaround to take into account. Raw accessors use + * the full register width, but cooked ones truncate it + * intentionally so that the rest of the driver does not care. + */ +static u64 __read_sbdchn(struct sbd_port *sport, int reg) +{ + void __iomem *csr = sport->port.membase + reg; + + return __raw_readq(csr); +} + +static u64 __read_sbdshr(struct sbd_port *sport, int reg) +{ + void __iomem *csr = sport->memctrl + reg; + + return __raw_readq(csr); +} + +static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value) +{ + void __iomem *csr = sport->port.membase + reg; + + __raw_writeq(value, csr); +} + +static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value) +{ + void __iomem *csr = sport->memctrl + reg; + + __raw_writeq(value, csr); +} + +/* + * In bug 1956, we get glitches that can mess up uart registers. This + * "read-mode-reg after any register access" is an accepted workaround. 
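+ * The cooked accessors below therefore issue two dummy mode-register
+ * reads after every access when CONFIG_SB1_PASS_2_WORKAROUNDS is
+ * enabled.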
+ */ +static void __war_sbd1956(struct sbd_port *sport) +{ + __read_sbdchn(sport, R_DUART_MODE_REG_1); + __read_sbdchn(sport, R_DUART_MODE_REG_2); +} + +static unsigned char read_sbdchn(struct sbd_port *sport, int reg) +{ + unsigned char retval; + + retval = __read_sbdchn(sport, reg); + if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS)) + __war_sbd1956(sport); + return retval; +} + +static unsigned char read_sbdshr(struct sbd_port *sport, int reg) +{ + unsigned char retval; + + retval = __read_sbdshr(sport, reg); + if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS)) + __war_sbd1956(sport); + return retval; +} + +static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value) +{ + __write_sbdchn(sport, reg, value); + if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS)) + __war_sbd1956(sport); +} + +static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value) +{ + __write_sbdshr(sport, reg, value); + if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS)) + __war_sbd1956(sport); +} + + +static int sbd_receive_ready(struct sbd_port *sport) +{ + return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY; +} + +static int sbd_receive_drain(struct sbd_port *sport) +{ + int loops = 10000; + + while (sbd_receive_ready(sport) && --loops) + read_sbdchn(sport, R_DUART_RX_HOLD); + return loops; +} + +static int __maybe_unused sbd_transmit_ready(struct sbd_port *sport) +{ + return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY; +} + +static int __maybe_unused sbd_transmit_drain(struct sbd_port *sport) +{ + int loops = 10000; + + while (!sbd_transmit_ready(sport) && --loops) + udelay(2); + return loops; +} + +static int sbd_transmit_empty(struct sbd_port *sport) +{ + return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT; +} + +static int sbd_line_drain(struct sbd_port *sport) +{ + int loops = 10000; + + while (!sbd_transmit_empty(sport) && --loops) + udelay(2); + return loops; +} + + +static unsigned int sbd_tx_empty(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + + return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0; +} + +static unsigned int sbd_get_mctrl(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + unsigned int mctrl, status; + + status = read_sbdshr(sport, R_DUART_IN_PORT); + status >>= (uport->line) % 2; + mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) | + (!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) | + (!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) | + (!(status & M_DUART_IN_PIN2_VAL) ? 
TIOCM_DSR : 0); + return mctrl; +} + +static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl) +{ + struct sbd_port *sport = to_sport(uport); + unsigned int clr = 0, set = 0, mode2; + + if (mctrl & TIOCM_DTR) + set |= M_DUART_SET_OPR2; + else + clr |= M_DUART_CLR_OPR2; + if (mctrl & TIOCM_RTS) + set |= M_DUART_SET_OPR0; + else + clr |= M_DUART_CLR_OPR0; + clr <<= (uport->line) % 2; + set <<= (uport->line) % 2; + + mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2); + mode2 &= ~M_DUART_CHAN_MODE; + if (mctrl & TIOCM_LOOP) + mode2 |= V_DUART_CHAN_MODE_LCL_LOOP; + else + mode2 |= V_DUART_CHAN_MODE_NORMAL; + + write_sbdshr(sport, R_DUART_CLEAR_OPR, clr); + write_sbdshr(sport, R_DUART_SET_OPR, set); + write_sbdchn(sport, R_DUART_MODE_REG_2, mode2); +} + +static void sbd_stop_tx(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + + write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS); + sport->tx_stopped = 1; +}; + +static void sbd_start_tx(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + unsigned int mask; + + /* Enable tx interrupts. */ + mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2)); + mask |= M_DUART_IMR_TX; + write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask); + + /* Go!, go!, go!... */ + write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN); + sport->tx_stopped = 0; +}; + +static void sbd_stop_rx(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + + write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0); +}; + +static void sbd_enable_ms(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + + write_sbdchn(sport, R_DUART_AUXCTL_X, + M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA); +} + +static void sbd_break_ctl(struct uart_port *uport, int break_state) +{ + struct sbd_port *sport = to_sport(uport); + + if (break_state == -1) + write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK); + else + write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK); +} + + +static void sbd_receive_chars(struct sbd_port *sport) +{ + struct uart_port *uport = &sport->port; + struct uart_icount *icount; + unsigned int status, ch, flag; + int count; + + for (count = 16; count; count--) { + status = read_sbdchn(sport, R_DUART_STATUS); + if (!(status & M_DUART_RX_RDY)) + break; + + ch = read_sbdchn(sport, R_DUART_RX_HOLD); + + flag = TTY_NORMAL; + + icount = &uport->icount; + icount->rx++; + + if (unlikely(status & + (M_DUART_RCVD_BRK | M_DUART_FRM_ERR | + M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) { + if (status & M_DUART_RCVD_BRK) { + icount->brk++; + if (uart_handle_break(uport)) + continue; + } else if (status & M_DUART_FRM_ERR) + icount->frame++; + else if (status & M_DUART_PARITY_ERR) + icount->parity++; + if (status & M_DUART_OVRUN_ERR) + icount->overrun++; + + status &= uport->read_status_mask; + if (status & M_DUART_RCVD_BRK) + flag = TTY_BREAK; + else if (status & M_DUART_FRM_ERR) + flag = TTY_FRAME; + else if (status & M_DUART_PARITY_ERR) + flag = TTY_PARITY; + } + + if (uart_handle_sysrq_char(uport, ch)) + continue; + + uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag); + } + + tty_flip_buffer_push(&uport->state->port); +} + +static void sbd_transmit_chars(struct sbd_port *sport) +{ + struct uart_port *uport = &sport->port; + struct circ_buf *xmit = &sport->port.state->xmit; + unsigned int mask; + int stop_tx; + + /* XON/XOFF chars. 
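+	 * An out-of-band x_char is transmitted ahead of anything queued in
+	 * the circular transmit buffer.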
*/ + if (sport->port.x_char) { + write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char); + sport->port.icount.tx++; + sport->port.x_char = 0; + return; + } + + /* If nothing to do or stopped or hardware stopped. */ + stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)); + + /* Send char. */ + if (!stop_tx) { + write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + sport->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&sport->port); + } + + /* Are we are done? */ + if (stop_tx || uart_circ_empty(xmit)) { + /* Disable tx interrupts. */ + mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2)); + mask &= ~M_DUART_IMR_TX; + write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask); + } +} + +static void sbd_status_handle(struct sbd_port *sport) +{ + struct uart_port *uport = &sport->port; + unsigned int delta; + + delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2)); + delta >>= (uport->line) % 2; + + if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG)) + uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL)); + + if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG)) + uport->icount.dsr++; + + if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) << + S_DUART_IN_PIN_CHNG)) + wake_up_interruptible(&uport->state->port.delta_msr_wait); +} + +static irqreturn_t sbd_interrupt(int irq, void *dev_id) +{ + struct sbd_port *sport = dev_id; + struct uart_port *uport = &sport->port; + irqreturn_t status = IRQ_NONE; + unsigned int intstat; + int count; + + for (count = 16; count; count--) { + intstat = read_sbdshr(sport, + R_DUART_ISRREG((uport->line) % 2)); + intstat &= read_sbdshr(sport, + R_DUART_IMRREG((uport->line) % 2)); + intstat &= M_DUART_ISR_ALL; + if (!intstat) + break; + + if (intstat & M_DUART_ISR_RX) + sbd_receive_chars(sport); + if (intstat & M_DUART_ISR_IN) + sbd_status_handle(sport); + if (intstat & M_DUART_ISR_TX) + sbd_transmit_chars(sport); + + status = IRQ_HANDLED; + } + + return status; +} + + +static int sbd_startup(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + unsigned int mode1; + int ret; + + ret = request_irq(sport->port.irq, sbd_interrupt, + IRQF_SHARED, "sb1250-duart", sport); + if (ret) + return ret; + + /* Clear the receive FIFO. */ + sbd_receive_drain(sport); + + /* Clear the interrupt registers. */ + write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT); + read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2)); + + /* Set rx/tx interrupt to FIFO available. */ + mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1); + mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT); + write_sbdchn(sport, R_DUART_MODE_REG_1, mode1); + + /* Disable tx, enable rx. */ + write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN); + sport->tx_stopped = 1; + + /* Enable interrupts. */ + write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), + M_DUART_IMR_IN | M_DUART_IMR_RX); + + return 0; +} + +static void sbd_shutdown(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + + write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS); + sport->tx_stopped = 1; + free_irq(sport->port.irq, sport); +} + + +static void sbd_init_port(struct sbd_port *sport) +{ + struct uart_port *uport = &sport->port; + + if (sport->initialised) + return; + + /* There is no DUART reset feature, so just set some sane defaults. 
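+	 * (both FIFOs reset, 8-bit characters, all channel interrupts masked).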
*/ + write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX); + write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX); + write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8); + write_sbdchn(sport, R_DUART_MODE_REG_2, 0); + write_sbdchn(sport, R_DUART_FULL_CTL, + V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15)); + write_sbdchn(sport, R_DUART_OPCR_X, 0); + write_sbdchn(sport, R_DUART_AUXCTL_X, 0); + write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0); + + sport->initialised = 1; +} + +static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios, + const struct ktermios *old_termios) +{ + struct sbd_port *sport = to_sport(uport); + unsigned int mode1 = 0, mode2 = 0, aux = 0; + unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0; + unsigned int oldmode1, oldmode2, oldaux; + unsigned int baud, brg; + unsigned int command; + + mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD | + M_DUART_BITS_PER_CHAR); + mode2mask |= ~M_DUART_STOP_BIT_LEN_2; + auxmask |= ~M_DUART_CTS_CHNG_ENA; + + /* Byte size. */ + switch (termios->c_cflag & CSIZE) { + case CS5: + case CS6: + /* Unsupported, leave unchanged. */ + mode1mask |= M_DUART_PARITY_MODE; + break; + case CS7: + mode1 |= V_DUART_BITS_PER_CHAR_7; + break; + case CS8: + default: + mode1 |= V_DUART_BITS_PER_CHAR_8; + break; + } + + /* Parity and stop bits. */ + if (termios->c_cflag & CSTOPB) + mode2 |= M_DUART_STOP_BIT_LEN_2; + else + mode2 |= M_DUART_STOP_BIT_LEN_1; + if (termios->c_cflag & PARENB) + mode1 |= V_DUART_PARITY_MODE_ADD; + else + mode1 |= V_DUART_PARITY_MODE_NONE; + if (termios->c_cflag & PARODD) + mode1 |= M_DUART_PARITY_TYPE_ODD; + else + mode1 |= M_DUART_PARITY_TYPE_EVEN; + + baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000); + brg = V_DUART_BAUD_RATE(baud); + /* The actual lower bound is 1221bps, so compensate. 
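+	 * Lower requested rates are simply clamped to the maximum value the
+	 * baud rate clock counter can hold.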
*/ + if (brg > M_DUART_CLK_COUNTER) + brg = M_DUART_CLK_COUNTER; + + uart_update_timeout(uport, termios->c_cflag, baud); + + uport->read_status_mask = M_DUART_OVRUN_ERR; + if (termios->c_iflag & INPCK) + uport->read_status_mask |= M_DUART_FRM_ERR | + M_DUART_PARITY_ERR; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + uport->read_status_mask |= M_DUART_RCVD_BRK; + + uport->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + uport->ignore_status_mask |= M_DUART_FRM_ERR | + M_DUART_PARITY_ERR; + if (termios->c_iflag & IGNBRK) { + uport->ignore_status_mask |= M_DUART_RCVD_BRK; + if (termios->c_iflag & IGNPAR) + uport->ignore_status_mask |= M_DUART_OVRUN_ERR; + } + + if (termios->c_cflag & CREAD) + command = M_DUART_RX_EN; + else + command = M_DUART_RX_DIS; + + if (termios->c_cflag & CRTSCTS) + aux |= M_DUART_CTS_CHNG_ENA; + else + aux &= ~M_DUART_CTS_CHNG_ENA; + + spin_lock(&uport->lock); + + if (sport->tx_stopped) + command |= M_DUART_TX_DIS; + else + command |= M_DUART_TX_EN; + + oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask; + oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask; + oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask; + + if (!sport->tx_stopped) + sbd_line_drain(sport); + write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS); + + write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1); + write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2); + write_sbdchn(sport, R_DUART_CLK_SEL, brg); + write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux); + + write_sbdchn(sport, R_DUART_CMD, command); + + spin_unlock(&uport->lock); +} + + +static const char *sbd_type(struct uart_port *uport) +{ + return "SB1250 DUART"; +} + +static void sbd_release_port(struct uart_port *uport) +{ + struct sbd_port *sport = to_sport(uport); + struct sbd_duart *duart = sport->duart; + + iounmap(sport->memctrl); + sport->memctrl = NULL; + iounmap(uport->membase); + uport->membase = NULL; + + if(refcount_dec_and_test(&duart->map_guard)) + release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING); + release_mem_region(uport->mapbase, DUART_CHANREG_SPACING); +} + +static int sbd_map_port(struct uart_port *uport) +{ + const char *err = KERN_ERR "sbd: Cannot map MMIO\n"; + struct sbd_port *sport = to_sport(uport); + struct sbd_duart *duart = sport->duart; + + if (!uport->membase) + uport->membase = ioremap(uport->mapbase, + DUART_CHANREG_SPACING); + if (!uport->membase) { + printk(err); + return -ENOMEM; + } + + if (!sport->memctrl) + sport->memctrl = ioremap(duart->mapctrl, + DUART_CHANREG_SPACING); + if (!sport->memctrl) { + printk(err); + iounmap(uport->membase); + uport->membase = NULL; + return -ENOMEM; + } + + return 0; +} + +static int sbd_request_port(struct uart_port *uport) +{ + const char *err = KERN_ERR "sbd: Unable to reserve MMIO resource\n"; + struct sbd_duart *duart = to_sport(uport)->duart; + int ret = 0; + + if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING, + "sb1250-duart")) { + printk(err); + return -EBUSY; + } + refcount_inc(&duart->map_guard); + if (refcount_read(&duart->map_guard) == 1) { + if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING, + "sb1250-duart")) { + refcount_dec(&duart->map_guard); + printk(err); + ret = -EBUSY; + } + } + if (!ret) { + ret = sbd_map_port(uport); + if (ret) { + if (refcount_dec_and_test(&duart->map_guard)) + release_mem_region(duart->mapctrl, + DUART_CHANREG_SPACING); + } + } + if (ret) { + release_mem_region(uport->mapbase, DUART_CHANREG_SPACING); + return ret; + } + return 0; 
+} + +static void sbd_config_port(struct uart_port *uport, int flags) +{ + struct sbd_port *sport = to_sport(uport); + + if (flags & UART_CONFIG_TYPE) { + if (sbd_request_port(uport)) + return; + + uport->type = PORT_SB1250_DUART; + + sbd_init_port(sport); + } +} + +static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART) + ret = -EINVAL; + if (ser->irq != uport->irq) + ret = -EINVAL; + if (ser->baud_base != uport->uartclk / 16) + ret = -EINVAL; + return ret; +} + + +static const struct uart_ops sbd_ops = { + .tx_empty = sbd_tx_empty, + .set_mctrl = sbd_set_mctrl, + .get_mctrl = sbd_get_mctrl, + .stop_tx = sbd_stop_tx, + .start_tx = sbd_start_tx, + .stop_rx = sbd_stop_rx, + .enable_ms = sbd_enable_ms, + .break_ctl = sbd_break_ctl, + .startup = sbd_startup, + .shutdown = sbd_shutdown, + .set_termios = sbd_set_termios, + .type = sbd_type, + .release_port = sbd_release_port, + .request_port = sbd_request_port, + .config_port = sbd_config_port, + .verify_port = sbd_verify_port, +}; + +/* Initialize SB1250 DUART port structures. */ +static void __init sbd_probe_duarts(void) +{ + static int probed; + int chip, side; + int max_lines, line; + + if (probed) + return; + + /* Set the number of available units based on the SOC type. */ + switch (soc_type) { + case K_SYS_SOC_TYPE_BCM1x55: + case K_SYS_SOC_TYPE_BCM1x80: + max_lines = 4; + break; + default: + /* Assume at least two serial ports at the normal address. */ + max_lines = 2; + break; + } + + probed = 1; + + for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines; + chip++) { + sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line); + + for (side = 0; side < DUART_MAX_SIDE && line < max_lines; + side++, line++) { + struct sbd_port *sport = &sbd_duarts[chip].sport[side]; + struct uart_port *uport = &sport->port; + + sport->duart = &sbd_duarts[chip]; + + uport->irq = SBD_INT(line); + uport->uartclk = 100000000 / 20 * 16; + uport->fifosize = 16; + uport->iotype = UPIO_MEM; + uport->flags = UPF_BOOT_AUTOCONF; + uport->ops = &sbd_ops; + uport->line = line; + uport->mapbase = SBD_CHANREGS(line); + uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SB1250_DUART_CONSOLE); + } + } +} + + +#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE +/* + * Serial console stuff. Very basic, polling driver for doing serial + * console output. The console_lock is held by the caller, so we + * shouldn't be interrupted for more console activity. + */ +static void sbd_console_putchar(struct uart_port *uport, unsigned char ch) +{ + struct sbd_port *sport = to_sport(uport); + + sbd_transmit_drain(sport); + write_sbdchn(sport, R_DUART_TX_HOLD, ch); +} + +static void sbd_console_write(struct console *co, const char *s, + unsigned int count) +{ + int chip = co->index / DUART_MAX_SIDE; + int side = co->index % DUART_MAX_SIDE; + struct sbd_port *sport = &sbd_duarts[chip].sport[side]; + struct uart_port *uport = &sport->port; + unsigned long flags; + unsigned int mask; + + /* Disable transmit interrupts and enable the transmitter. */ + spin_lock_irqsave(&uport->lock, flags); + mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2)); + write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), + mask & ~M_DUART_IMR_TX); + write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN); + spin_unlock_irqrestore(&uport->lock, flags); + + uart_console_write(&sport->port, s, count, sbd_console_putchar); + + /* Restore transmit interrupts and the transmitter enable. 
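+	 * The line is drained first so that restoring the old state does not
+	 * cut off the message just written; if the transmitter had been
+	 * stopped, it is disabled again.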
*/ + spin_lock_irqsave(&uport->lock, flags); + sbd_line_drain(sport); + if (sport->tx_stopped) + write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS); + write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask); + spin_unlock_irqrestore(&uport->lock, flags); +} + +static int __init sbd_console_setup(struct console *co, char *options) +{ + int chip = co->index / DUART_MAX_SIDE; + int side = co->index % DUART_MAX_SIDE; + struct sbd_port *sport = &sbd_duarts[chip].sport[side]; + struct uart_port *uport = &sport->port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + if (!sport->duart) + return -ENXIO; + + ret = sbd_map_port(uport); + if (ret) + return ret; + + sbd_init_port(sport); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + return uart_set_options(uport, co, baud, parity, bits, flow); +} + +static struct uart_driver sbd_reg; +static struct console sbd_console = { + .name = "duart", + .write = sbd_console_write, + .device = uart_console_device, + .setup = sbd_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sbd_reg +}; + +static int __init sbd_serial_console_init(void) +{ + sbd_probe_duarts(); + register_console(&sbd_console); + + return 0; +} + +console_initcall(sbd_serial_console_init); + +#define SERIAL_SB1250_DUART_CONSOLE &sbd_console +#else +#define SERIAL_SB1250_DUART_CONSOLE NULL +#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */ + + +static struct uart_driver sbd_reg = { + .owner = THIS_MODULE, + .driver_name = "sb1250_duart", + .dev_name = "duart", + .major = TTY_MAJOR, + .minor = SB1250_DUART_MINOR_BASE, + .nr = DUART_MAX_CHIP * DUART_MAX_SIDE, + .cons = SERIAL_SB1250_DUART_CONSOLE, +}; + +/* Set up the driver and register it. */ +static int __init sbd_init(void) +{ + int i, ret; + + sbd_probe_duarts(); + + ret = uart_register_driver(&sbd_reg); + if (ret) + return ret; + + for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) { + struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE]; + struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE]; + struct uart_port *uport = &sport->port; + + if (sport->duart) + uart_add_one_port(&sbd_reg, uport); + } + + return 0; +} + +/* Unload the driver. Unregister stuff, get ready to go away. 
*/ +static void __exit sbd_exit(void) +{ + int i; + + for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) { + struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE]; + struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE]; + struct uart_port *uport = &sport->port; + + if (sport->duart) + uart_remove_one_port(&sbd_reg, uport); + } + + uart_unregister_driver(&sbd_reg); +} + +module_init(sbd_init); +module_exit(sbd_exit); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c new file mode 100644 index 000000000..e331b57d6 --- /dev/null +++ b/drivers/tty/serial/sc16is7xx.c @@ -0,0 +1,1900 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SC16IS7xx tty serial driver - Copyright (C) 2014 GridPoint + * Author: Jon Ringle + * + * Based on max310x.c, by Alexander Shiyan + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SC16IS7XX_NAME "sc16is7xx" +#define SC16IS7XX_MAX_DEVS 8 + +/* SC16IS7XX register definitions */ +#define SC16IS7XX_RHR_REG (0x00) /* RX FIFO */ +#define SC16IS7XX_THR_REG (0x00) /* TX FIFO */ +#define SC16IS7XX_IER_REG (0x01) /* Interrupt enable */ +#define SC16IS7XX_IIR_REG (0x02) /* Interrupt Identification */ +#define SC16IS7XX_FCR_REG (0x02) /* FIFO control */ +#define SC16IS7XX_LCR_REG (0x03) /* Line Control */ +#define SC16IS7XX_MCR_REG (0x04) /* Modem Control */ +#define SC16IS7XX_LSR_REG (0x05) /* Line Status */ +#define SC16IS7XX_MSR_REG (0x06) /* Modem Status */ +#define SC16IS7XX_SPR_REG (0x07) /* Scratch Pad */ +#define SC16IS7XX_TXLVL_REG (0x08) /* TX FIFO level */ +#define SC16IS7XX_RXLVL_REG (0x09) /* RX FIFO level */ +#define SC16IS7XX_IODIR_REG (0x0a) /* I/O Direction + * - only on 75x/76x + */ +#define SC16IS7XX_IOSTATE_REG (0x0b) /* I/O State + * - only on 75x/76x + */ +#define SC16IS7XX_IOINTENA_REG (0x0c) /* I/O Interrupt Enable + * - only on 75x/76x + */ +#define SC16IS7XX_IOCONTROL_REG (0x0e) /* I/O Control + * - only on 75x/76x + */ +#define SC16IS7XX_EFCR_REG (0x0f) /* Extra Features Control */ + +/* TCR/TLR Register set: Only if ((MCR[2] == 1) && (EFR[4] == 1)) */ +#define SC16IS7XX_TCR_REG (0x06) /* Transmit control */ +#define SC16IS7XX_TLR_REG (0x07) /* Trigger level */ + +/* Special Register set: Only if ((LCR[7] == 1) && (LCR != 0xBF)) */ +#define SC16IS7XX_DLL_REG (0x00) /* Divisor Latch Low */ +#define SC16IS7XX_DLH_REG (0x01) /* Divisor Latch High */ + +/* Enhanced Register set: Only if (LCR == 0xBF) */ +#define SC16IS7XX_EFR_REG (0x02) /* Enhanced Features */ +#define SC16IS7XX_XON1_REG (0x04) /* Xon1 word */ +#define SC16IS7XX_XON2_REG (0x05) /* Xon2 word */ +#define SC16IS7XX_XOFF1_REG (0x06) /* Xoff1 word */ +#define SC16IS7XX_XOFF2_REG (0x07) /* Xoff2 word */ + +/* IER register bits */ +#define SC16IS7XX_IER_RDI_BIT (1 << 0) /* Enable RX data interrupt */ +#define SC16IS7XX_IER_THRI_BIT (1 << 1) /* Enable TX holding register + * interrupt */ +#define SC16IS7XX_IER_RLSI_BIT (1 << 2) /* Enable RX line status + * interrupt */ +#define SC16IS7XX_IER_MSI_BIT (1 << 3) /* Enable Modem status + * interrupt */ + +/* IER register bits - write only if (EFR[4] == 1) */ +#define SC16IS7XX_IER_SLEEP_BIT (1 << 4) /* Enable Sleep mode */ +#define SC16IS7XX_IER_XOFFI_BIT (1 << 5) /* Enable Xoff interrupt */ +#define SC16IS7XX_IER_RTSI_BIT (1 << 6) /* Enable nRTS interrupt */ +#define SC16IS7XX_IER_CTSI_BIT (1 << 7) /* Enable nCTS 
interrupt */ + +/* FCR register bits */ +#define SC16IS7XX_FCR_FIFO_BIT (1 << 0) /* Enable FIFO */ +#define SC16IS7XX_FCR_RXRESET_BIT (1 << 1) /* Reset RX FIFO */ +#define SC16IS7XX_FCR_TXRESET_BIT (1 << 2) /* Reset TX FIFO */ +#define SC16IS7XX_FCR_RXLVLL_BIT (1 << 6) /* RX Trigger level LSB */ +#define SC16IS7XX_FCR_RXLVLH_BIT (1 << 7) /* RX Trigger level MSB */ + +/* FCR register bits - write only if (EFR[4] == 1) */ +#define SC16IS7XX_FCR_TXLVLL_BIT (1 << 4) /* TX Trigger level LSB */ +#define SC16IS7XX_FCR_TXLVLH_BIT (1 << 5) /* TX Trigger level MSB */ + +/* IIR register bits */ +#define SC16IS7XX_IIR_NO_INT_BIT (1 << 0) /* No interrupts pending */ +#define SC16IS7XX_IIR_ID_MASK 0x3e /* Mask for the interrupt ID */ +#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */ +#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */ +#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */ +#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */ +#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt + * - only on 75x/76x + */ +#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state + * - only on 75x/76x + */ +#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */ +#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state + * from active (LOW) + * to inactive (HIGH) + */ +/* LCR register bits */ +#define SC16IS7XX_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */ +#define SC16IS7XX_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1 + * + * Word length bits table: + * 00 -> 5 bit words + * 01 -> 6 bit words + * 10 -> 7 bit words + * 11 -> 8 bit words + */ +#define SC16IS7XX_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit + * + * STOP length bit table: + * 0 -> 1 stop bit + * 1 -> 1-1.5 stop bits if + * word length is 5, + * 2 stop bits otherwise + */ +#define SC16IS7XX_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */ +#define SC16IS7XX_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */ +#define SC16IS7XX_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */ +#define SC16IS7XX_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */ +#define SC16IS7XX_LCR_DLAB_BIT (1 << 7) /* Divisor Latch enable */ +#define SC16IS7XX_LCR_WORD_LEN_5 (0x00) +#define SC16IS7XX_LCR_WORD_LEN_6 (0x01) +#define SC16IS7XX_LCR_WORD_LEN_7 (0x02) +#define SC16IS7XX_LCR_WORD_LEN_8 (0x03) +#define SC16IS7XX_LCR_CONF_MODE_A SC16IS7XX_LCR_DLAB_BIT /* Special + * reg set */ +#define SC16IS7XX_LCR_CONF_MODE_B 0xBF /* Enhanced + * reg set */ + +/* MCR register bits */ +#define SC16IS7XX_MCR_DTR_BIT (1 << 0) /* DTR complement + * - only on 75x/76x + */ +#define SC16IS7XX_MCR_RTS_BIT (1 << 1) /* RTS complement */ +#define SC16IS7XX_MCR_TCRTLR_BIT (1 << 2) /* TCR/TLR register enable */ +#define SC16IS7XX_MCR_LOOP_BIT (1 << 4) /* Enable loopback test mode */ +#define SC16IS7XX_MCR_XONANY_BIT (1 << 5) /* Enable Xon Any + * - write enabled + * if (EFR[4] == 1) + */ +#define SC16IS7XX_MCR_IRDA_BIT (1 << 6) /* Enable IrDA mode + * - write enabled + * if (EFR[4] == 1) + */ +#define SC16IS7XX_MCR_CLKSEL_BIT (1 << 7) /* Divide clock by 4 + * - write enabled + * if (EFR[4] == 1) + */ + +/* LSR register bits */ +#define SC16IS7XX_LSR_DR_BIT (1 << 0) /* Receiver data ready */ +#define SC16IS7XX_LSR_OE_BIT (1 << 1) /* Overrun Error */ +#define SC16IS7XX_LSR_PE_BIT (1 << 2) /* Parity Error */ +#define SC16IS7XX_LSR_FE_BIT (1 << 3) /* Frame Error */ +#define SC16IS7XX_LSR_BI_BIT (1 << 4) /* Break Interrupt */ +#define SC16IS7XX_LSR_BRK_ERROR_MASK 0x1E /* BI, FE, PE, OE bits */ +#define 
SC16IS7XX_LSR_THRE_BIT (1 << 5) /* TX holding register empty */ +#define SC16IS7XX_LSR_TEMT_BIT (1 << 6) /* Transmitter empty */ +#define SC16IS7XX_LSR_FIFOE_BIT (1 << 7) /* Fifo Error */ + +/* MSR register bits */ +#define SC16IS7XX_MSR_DCTS_BIT (1 << 0) /* Delta CTS Clear To Send */ +#define SC16IS7XX_MSR_DDSR_BIT (1 << 1) /* Delta DSR Data Set Ready + * or (IO4) + * - only on 75x/76x + */ +#define SC16IS7XX_MSR_DRI_BIT (1 << 2) /* Delta RI Ring Indicator + * or (IO7) + * - only on 75x/76x + */ +#define SC16IS7XX_MSR_DCD_BIT (1 << 3) /* Delta CD Carrier Detect + * or (IO6) + * - only on 75x/76x + */ +#define SC16IS7XX_MSR_CTS_BIT (1 << 4) /* CTS */ +#define SC16IS7XX_MSR_DSR_BIT (1 << 5) /* DSR (IO4) + * - only on 75x/76x + */ +#define SC16IS7XX_MSR_RI_BIT (1 << 6) /* RI (IO7) + * - only on 75x/76x + */ +#define SC16IS7XX_MSR_CD_BIT (1 << 7) /* CD (IO6) + * - only on 75x/76x + */ +#define SC16IS7XX_MSR_DELTA_MASK 0x0F /* Any of the delta bits! */ + +/* + * TCR register bits + * TCR trigger levels are available from 0 to 60 characters with a granularity + * of four. + * The programmer must program the TCR such that TCR[3:0] > TCR[7:4]. There is + * no built-in hardware check to make sure this condition is met. Also, the TCR + * must be programmed with this condition before auto RTS or software flow + * control is enabled to avoid spurious operation of the device. + */ +#define SC16IS7XX_TCR_RX_HALT(words) ((((words) / 4) & 0x0f) << 0) +#define SC16IS7XX_TCR_RX_RESUME(words) ((((words) / 4) & 0x0f) << 4) + +/* + * TLR register bits + * If TLR[3:0] or TLR[7:4] are logical 0, the selectable trigger levels via the + * FIFO Control Register (FCR) are used for the transmit and receive FIFO + * trigger levels. Trigger levels from 4 characters to 60 characters are + * available with a granularity of four. + * + * When the trigger level setting in TLR is zero, the SC16IS740/750/760 uses the + * trigger level setting defined in FCR. If TLR has non-zero trigger level value + * the trigger level defined in FCR is discarded. This applies to both transmit + * FIFO and receive FIFO trigger level setting. + * + * When TLR is used for RX trigger level control, FCR[7:6] should be left at the + * default state, that is, '00'. 
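+ *
+ * For example (a hypothetical setting, derived from the macros below):
+ * SC16IS7XX_TLR_RX_TRIGGER(16) | SC16IS7XX_TLR_TX_TRIGGER(32) encodes to
+ * 0x48. This driver does not program TLR and instead sets the TCR
+ * halt/resume levels in sc16is7xx_startup().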
+ */ +#define SC16IS7XX_TLR_TX_TRIGGER(words) ((((words) / 4) & 0x0f) << 0) +#define SC16IS7XX_TLR_RX_TRIGGER(words) ((((words) / 4) & 0x0f) << 4) + +/* IOControl register bits (Only 750/760) */ +#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */ +#define SC16IS7XX_IOCONTROL_MODEM_A_BIT (1 << 1) /* Enable GPIO[7:4] as modem A pins */ +#define SC16IS7XX_IOCONTROL_MODEM_B_BIT (1 << 2) /* Enable GPIO[3:0] as modem B pins */ +#define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */ + +/* EFCR register bits */ +#define SC16IS7XX_EFCR_9BIT_MODE_BIT (1 << 0) /* Enable 9-bit or Multidrop + * mode (RS485) */ +#define SC16IS7XX_EFCR_RXDISABLE_BIT (1 << 1) /* Disable receiver */ +#define SC16IS7XX_EFCR_TXDISABLE_BIT (1 << 2) /* Disable transmitter */ +#define SC16IS7XX_EFCR_AUTO_RS485_BIT (1 << 4) /* Auto RS485 RTS direction */ +#define SC16IS7XX_EFCR_RTS_INVERT_BIT (1 << 5) /* RTS output inversion */ +#define SC16IS7XX_EFCR_IRDA_MODE_BIT (1 << 7) /* IrDA mode + * 0 = rate upto 115.2 kbit/s + * - Only 750/760 + * 1 = rate upto 1.152 Mbit/s + * - Only 760 + */ + +/* EFR register bits */ +#define SC16IS7XX_EFR_AUTORTS_BIT (1 << 6) /* Auto RTS flow ctrl enable */ +#define SC16IS7XX_EFR_AUTOCTS_BIT (1 << 7) /* Auto CTS flow ctrl enable */ +#define SC16IS7XX_EFR_XOFF2_DETECT_BIT (1 << 5) /* Enable Xoff2 detection */ +#define SC16IS7XX_EFR_ENABLE_BIT (1 << 4) /* Enable enhanced functions + * and writing to IER[7:4], + * FCR[5:4], MCR[7:5] + */ +#define SC16IS7XX_EFR_SWFLOW3_BIT (1 << 3) /* SWFLOW bit 3 */ +#define SC16IS7XX_EFR_SWFLOW2_BIT (1 << 2) /* SWFLOW bit 2 + * + * SWFLOW bits 3 & 2 table: + * 00 -> no transmitter flow + * control + * 01 -> transmitter generates + * XON2 and XOFF2 + * 10 -> transmitter generates + * XON1 and XOFF1 + * 11 -> transmitter generates + * XON1, XON2, XOFF1 and + * XOFF2 + */ +#define SC16IS7XX_EFR_SWFLOW1_BIT (1 << 1) /* SWFLOW bit 2 */ +#define SC16IS7XX_EFR_SWFLOW0_BIT (1 << 0) /* SWFLOW bit 3 + * + * SWFLOW bits 3 & 2 table: + * 00 -> no received flow + * control + * 01 -> receiver compares + * XON2 and XOFF2 + * 10 -> receiver compares + * XON1 and XOFF1 + * 11 -> receiver compares + * XON1, XON2, XOFF1 and + * XOFF2 + */ +#define SC16IS7XX_EFR_FLOWCTRL_BITS (SC16IS7XX_EFR_AUTORTS_BIT | \ + SC16IS7XX_EFR_AUTOCTS_BIT | \ + SC16IS7XX_EFR_XOFF2_DETECT_BIT | \ + SC16IS7XX_EFR_SWFLOW3_BIT | \ + SC16IS7XX_EFR_SWFLOW2_BIT | \ + SC16IS7XX_EFR_SWFLOW1_BIT | \ + SC16IS7XX_EFR_SWFLOW0_BIT) + + +/* Misc definitions */ +#define SC16IS7XX_SPI_READ_BIT BIT(7) +#define SC16IS7XX_FIFO_SIZE (64) +#define SC16IS7XX_GPIOS_PER_BANK 4 + +struct sc16is7xx_devtype { + char name[10]; + int nr_gpio; + int nr_uart; +}; + +#define SC16IS7XX_RECONF_MD (1 << 0) +#define SC16IS7XX_RECONF_IER (1 << 1) +#define SC16IS7XX_RECONF_RS485 (1 << 2) + +struct sc16is7xx_one_config { + unsigned int flags; + u8 ier_mask; + u8 ier_val; +}; + +struct sc16is7xx_one { + struct uart_port port; + struct regmap *regmap; + struct mutex efr_lock; /* EFR registers access */ + struct kthread_work tx_work; + struct kthread_work reg_work; + struct kthread_delayed_work ms_work; + struct sc16is7xx_one_config config; + bool irda_mode; + unsigned int old_mctrl; +}; + +struct sc16is7xx_port { + const struct sc16is7xx_devtype *devtype; + struct clk *clk; +#ifdef CONFIG_GPIOLIB + struct gpio_chip gpio; + unsigned long gpio_valid_mask; +#endif + u8 mctrl_mask; + unsigned char buf[SC16IS7XX_FIFO_SIZE]; + struct kthread_worker kworker; + struct task_struct *kworker_task; + struct sc16is7xx_one 
p[]; +}; + +static unsigned long sc16is7xx_lines; + +static struct uart_driver sc16is7xx_uart = { + .owner = THIS_MODULE, + .dev_name = "ttySC", + .nr = SC16IS7XX_MAX_DEVS, +}; + +static void sc16is7xx_ier_set(struct uart_port *port, u8 bit); +static void sc16is7xx_stop_tx(struct uart_port *port); + +#define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e))) +#define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e))) + +static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + unsigned int val = 0; + + regmap_read(one->regmap, reg, &val); + + return val; +} + +static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + regmap_write(one->regmap, reg, val); +} + +static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + regmap_noinc_read(one->regmap, SC16IS7XX_RHR_REG, s->buf, rxlen); +} + +static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + /* + * Don't send zero-length data, at least on SPI it confuses the chip + * delivering wrong TXLVL data. + */ + if (unlikely(!to_send)) + return; + + regmap_noinc_write(one->regmap, SC16IS7XX_THR_REG, s->buf, to_send); +} + +static void sc16is7xx_port_update(struct uart_port *port, u8 reg, + u8 mask, u8 val) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + regmap_update_bits(one->regmap, reg, mask, val); +} + +static void sc16is7xx_power(struct uart_port *port, int on) +{ + sc16is7xx_port_update(port, SC16IS7XX_IER_REG, + SC16IS7XX_IER_SLEEP_BIT, + on ? 
0 : SC16IS7XX_IER_SLEEP_BIT); +} + +static const struct sc16is7xx_devtype sc16is74x_devtype = { + .name = "SC16IS74X", + .nr_gpio = 0, + .nr_uart = 1, +}; + +static const struct sc16is7xx_devtype sc16is750_devtype = { + .name = "SC16IS750", + .nr_gpio = 8, + .nr_uart = 1, +}; + +static const struct sc16is7xx_devtype sc16is752_devtype = { + .name = "SC16IS752", + .nr_gpio = 8, + .nr_uart = 2, +}; + +static const struct sc16is7xx_devtype sc16is760_devtype = { + .name = "SC16IS760", + .nr_gpio = 8, + .nr_uart = 1, +}; + +static const struct sc16is7xx_devtype sc16is762_devtype = { + .name = "SC16IS762", + .nr_gpio = 8, + .nr_uart = 2, +}; + +static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case SC16IS7XX_RHR_REG: + case SC16IS7XX_IIR_REG: + case SC16IS7XX_LSR_REG: + case SC16IS7XX_MSR_REG: + case SC16IS7XX_TXLVL_REG: + case SC16IS7XX_RXLVL_REG: + case SC16IS7XX_IOSTATE_REG: + case SC16IS7XX_IOCONTROL_REG: + return true; + default: + break; + } + + return false; +} + +static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg) +{ + switch (reg) { + case SC16IS7XX_RHR_REG: + return true; + default: + break; + } + + return false; +} + +static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg) +{ + return reg == SC16IS7XX_RHR_REG; +} + +static int sc16is7xx_set_baud(struct uart_port *port, int baud) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + u8 lcr; + u8 prescaler = 0; + unsigned long clk = port->uartclk, div = clk / 16 / baud; + + if (div > 0xffff) { + prescaler = SC16IS7XX_MCR_CLKSEL_BIT; + div /= 4; + } + + /* In an amazing feat of design, the Enhanced Features Register shares + * the address of the Interrupt Identification Register, and is + * switched in by writing a magic value (0xbf) to the Line Control + * Register. Any interrupt firing during this time will see the EFR + * where it expects the IIR to be, leading to "Unexpected interrupt" + * messages. + * + * Prevent this possibility by claiming a mutex while accessing the + * EFR, and claiming the same mutex from within the interrupt handler. + * This is similar to disabling the interrupt, but that doesn't work + * because the bulk of the interrupt processing is run as a workqueue + * job in thread context. 
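+ *
+ * The resulting access pattern, used below and in sc16is7xx_set_termios(),
+ * is roughly:
+ *   mutex_lock(&one->efr_lock);
+ *   write SC16IS7XX_LCR_CONF_MODE_B to the LCR;
+ *   access the EFR with regcache bypass enabled;
+ *   restore the saved LCR;
+ *   mutex_unlock(&one->efr_lock);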
+ */ + mutex_lock(&one->efr_lock); + + lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); + + /* Open the LCR divisors for configuration */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, + SC16IS7XX_LCR_CONF_MODE_B); + + /* Enable enhanced features */ + regcache_cache_bypass(one->regmap, true); + sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, + SC16IS7XX_EFR_ENABLE_BIT, + SC16IS7XX_EFR_ENABLE_BIT); + + regcache_cache_bypass(one->regmap, false); + + /* Put LCR back to the normal mode */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + + mutex_unlock(&one->efr_lock); + + sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, + SC16IS7XX_MCR_CLKSEL_BIT, + prescaler); + + /* Open the LCR divisors for configuration */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, + SC16IS7XX_LCR_CONF_MODE_A); + + /* Write the new divisor */ + regcache_cache_bypass(one->regmap, true); + sc16is7xx_port_write(port, SC16IS7XX_DLH_REG, div / 256); + sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256); + regcache_cache_bypass(one->regmap, false); + + /* Put LCR back to the normal mode */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + + return DIV_ROUND_CLOSEST(clk / 16, div); +} + +static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, + unsigned int iir) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + unsigned int lsr = 0, ch, flag, bytes_read, i; + bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC) ? true : false; + + if (unlikely(rxlen >= sizeof(s->buf))) { + dev_warn_ratelimited(port->dev, + "ttySC%i: Possible RX FIFO overrun: %d\n", + port->line, rxlen); + port->icount.buf_overrun++; + /* Ensure sanity of RX level */ + rxlen = sizeof(s->buf); + } + + while (rxlen) { + /* Only read lsr if there are possible errors in FIFO */ + if (read_lsr) { + lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG); + if (!(lsr & SC16IS7XX_LSR_FIFOE_BIT)) + read_lsr = false; /* No errors left in FIFO */ + } else + lsr = 0; + + if (read_lsr) { + s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); + bytes_read = 1; + } else { + sc16is7xx_fifo_read(port, rxlen); + bytes_read = rxlen; + } + + lsr &= SC16IS7XX_LSR_BRK_ERROR_MASK; + + port->icount.rx++; + flag = TTY_NORMAL; + + if (unlikely(lsr)) { + if (lsr & SC16IS7XX_LSR_BI_BIT) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } else if (lsr & SC16IS7XX_LSR_PE_BIT) + port->icount.parity++; + else if (lsr & SC16IS7XX_LSR_FE_BIT) + port->icount.frame++; + else if (lsr & SC16IS7XX_LSR_OE_BIT) + port->icount.overrun++; + + lsr &= port->read_status_mask; + if (lsr & SC16IS7XX_LSR_BI_BIT) + flag = TTY_BREAK; + else if (lsr & SC16IS7XX_LSR_PE_BIT) + flag = TTY_PARITY; + else if (lsr & SC16IS7XX_LSR_FE_BIT) + flag = TTY_FRAME; + else if (lsr & SC16IS7XX_LSR_OE_BIT) + flag = TTY_OVERRUN; + } + + for (i = 0; i < bytes_read; ++i) { + ch = s->buf[i]; + if (uart_handle_sysrq_char(port, ch)) + continue; + + if (lsr & port->ignore_status_mask) + continue; + + uart_insert_char(port, lsr, SC16IS7XX_LSR_OE_BIT, ch, + flag); + } + rxlen -= bytes_read; + } + + tty_flip_buffer_push(&port->state->port); +} + +static void sc16is7xx_handle_tx(struct uart_port *port) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct circ_buf *xmit = &port->state->xmit; + unsigned int txlen, to_send, i; + unsigned long flags; + + if (unlikely(port->x_char)) { + sc16is7xx_port_write(port, SC16IS7XX_THR_REG, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + 
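+		/*
+		 * sc16is7xx_stop_tx() only queues an IER update but must be
+		 * called with the port lock held (see the lockdep assertion in
+		 * sc16is7xx_ier_clear()), hence the lock/unlock around it.
+		 */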
uart_port_lock_irqsave(port, &flags); + sc16is7xx_stop_tx(port); + uart_port_unlock_irqrestore(port, flags); + return; + } + + /* Get length of data pending in circular buffer */ + to_send = uart_circ_chars_pending(xmit); + if (likely(to_send)) { + /* Limit to size of TX FIFO */ + txlen = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG); + if (txlen > SC16IS7XX_FIFO_SIZE) { + dev_err_ratelimited(port->dev, + "chip reports %d free bytes in TX fifo, but it only has %d", + txlen, SC16IS7XX_FIFO_SIZE); + txlen = 0; + } + to_send = (to_send > txlen) ? txlen : to_send; + + /* Add data to send */ + port->icount.tx += to_send; + + /* Convert to linear buffer */ + for (i = 0; i < to_send; ++i) { + s->buf[i] = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + } + + sc16is7xx_fifo_write(port, to_send); + } + + uart_port_lock_irqsave(port, &flags); + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + sc16is7xx_stop_tx(port); + else + sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT); + uart_port_unlock_irqrestore(port, flags); +} + +static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port) +{ + u8 msr = sc16is7xx_port_read(port, SC16IS7XX_MSR_REG); + unsigned int mctrl = 0; + + mctrl |= (msr & SC16IS7XX_MSR_CTS_BIT) ? TIOCM_CTS : 0; + mctrl |= (msr & SC16IS7XX_MSR_DSR_BIT) ? TIOCM_DSR : 0; + mctrl |= (msr & SC16IS7XX_MSR_CD_BIT) ? TIOCM_CAR : 0; + mctrl |= (msr & SC16IS7XX_MSR_RI_BIT) ? TIOCM_RNG : 0; + return mctrl; +} + +static void sc16is7xx_update_mlines(struct sc16is7xx_one *one) +{ + struct uart_port *port = &one->port; + unsigned long flags; + unsigned int status, changed; + + lockdep_assert_held_once(&one->efr_lock); + + status = sc16is7xx_get_hwmctrl(port); + changed = status ^ one->old_mctrl; + + if (changed == 0) + return; + + one->old_mctrl = status; + + uart_port_lock_irqsave(port, &flags); + if ((changed & TIOCM_RNG) && (status & TIOCM_RNG)) + port->icount.rng++; + if (changed & TIOCM_DSR) + port->icount.dsr++; + if (changed & TIOCM_CAR) + uart_handle_dcd_change(port, status & TIOCM_CAR); + if (changed & TIOCM_CTS) + uart_handle_cts_change(port, status & TIOCM_CTS); + + wake_up_interruptible(&port->state->port.delta_msr_wait); + uart_port_unlock_irqrestore(port, flags); +} + +static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) +{ + bool rc = true; + unsigned int iir, rxlen; + struct uart_port *port = &s->p[portno].port; + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + mutex_lock(&one->efr_lock); + + iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG); + if (iir & SC16IS7XX_IIR_NO_INT_BIT) { + rc = false; + goto out_port_irq; + } + + iir &= SC16IS7XX_IIR_ID_MASK; + + switch (iir) { + case SC16IS7XX_IIR_RDI_SRC: + case SC16IS7XX_IIR_RLSE_SRC: + case SC16IS7XX_IIR_RTOI_SRC: + case SC16IS7XX_IIR_XOFFI_SRC: + rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG); + + /* + * There is a silicon bug that makes the chip report a + * time-out interrupt but no data in the FIFO. This is + * described in errata section 18.1.4. + * + * When this happens, read one byte from the FIFO to + * clear the interrupt. 
+ */ + if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen) + rxlen = 1; + + if (rxlen) + sc16is7xx_handle_rx(port, rxlen, iir); + break; + /* CTSRTS interrupt comes only when CTS goes inactive */ + case SC16IS7XX_IIR_CTSRTS_SRC: + case SC16IS7XX_IIR_MSI_SRC: + sc16is7xx_update_mlines(one); + break; + case SC16IS7XX_IIR_THRI_SRC: + sc16is7xx_handle_tx(port); + break; + default: + dev_err_ratelimited(port->dev, + "ttySC%i: Unexpected interrupt: %x", + port->line, iir); + break; + } + +out_port_irq: + mutex_unlock(&one->efr_lock); + + return rc; +} + +static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) +{ + bool keep_polling; + + struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id; + + do { + int i; + + keep_polling = false; + + for (i = 0; i < s->devtype->nr_uart; ++i) + keep_polling |= sc16is7xx_port_irq(s, i); + } while (keep_polling); + + return IRQ_HANDLED; +} + +static void sc16is7xx_tx_proc(struct kthread_work *ws) +{ + struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + if ((port->rs485.flags & SER_RS485_ENABLED) && + (port->rs485.delay_rts_before_send > 0)) + msleep(port->rs485.delay_rts_before_send); + + mutex_lock(&one->efr_lock); + sc16is7xx_handle_tx(port); + mutex_unlock(&one->efr_lock); +} + +static void sc16is7xx_reconf_rs485(struct uart_port *port) +{ + const u32 mask = SC16IS7XX_EFCR_AUTO_RS485_BIT | + SC16IS7XX_EFCR_RTS_INVERT_BIT; + u32 efcr = 0; + struct serial_rs485 *rs485 = &port->rs485; + unsigned long irqflags; + + uart_port_lock_irqsave(port, &irqflags); + if (rs485->flags & SER_RS485_ENABLED) { + efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT; + + if (rs485->flags & SER_RS485_RTS_AFTER_SEND) + efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT; + } + uart_port_unlock_irqrestore(port, irqflags); + + sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr); +} + +static void sc16is7xx_reg_proc(struct kthread_work *ws) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(ws, reg_work); + struct sc16is7xx_one_config config; + unsigned long irqflags; + + uart_port_lock_irqsave(&one->port, &irqflags); + config = one->config; + memset(&one->config, 0, sizeof(one->config)); + uart_port_unlock_irqrestore(&one->port, irqflags); + + if (config.flags & SC16IS7XX_RECONF_MD) { + u8 mcr = 0; + + /* Device ignores RTS setting when hardware flow is enabled */ + if (one->port.mctrl & TIOCM_RTS) + mcr |= SC16IS7XX_MCR_RTS_BIT; + + if (one->port.mctrl & TIOCM_DTR) + mcr |= SC16IS7XX_MCR_DTR_BIT; + + if (one->port.mctrl & TIOCM_LOOP) + mcr |= SC16IS7XX_MCR_LOOP_BIT; + sc16is7xx_port_update(&one->port, SC16IS7XX_MCR_REG, + SC16IS7XX_MCR_RTS_BIT | + SC16IS7XX_MCR_DTR_BIT | + SC16IS7XX_MCR_LOOP_BIT, + mcr); + } + + if (config.flags & SC16IS7XX_RECONF_IER) + sc16is7xx_port_update(&one->port, SC16IS7XX_IER_REG, + config.ier_mask, config.ier_val); + + if (config.flags & SC16IS7XX_RECONF_RS485) + sc16is7xx_reconf_rs485(&one->port); +} + +static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + lockdep_assert_held_once(&port->lock); + + one->config.flags |= SC16IS7XX_RECONF_IER; + one->config.ier_mask |= bit; + one->config.ier_val &= ~bit; + kthread_queue_work(&s->kworker, &one->reg_work); +} + +static void sc16is7xx_ier_set(struct uart_port *port, u8 bit) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + 
lockdep_assert_held_once(&port->lock); + + one->config.flags |= SC16IS7XX_RECONF_IER; + one->config.ier_mask |= bit; + one->config.ier_val |= bit; + kthread_queue_work(&s->kworker, &one->reg_work); +} + +static void sc16is7xx_stop_tx(struct uart_port *port) +{ + sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT); +} + +static void sc16is7xx_stop_rx(struct uart_port *port) +{ + sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT); +} + +static void sc16is7xx_ms_proc(struct kthread_work *ws) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(ws, ms_work.work); + struct sc16is7xx_port *s = dev_get_drvdata(one->port.dev); + + if (one->port.state) { + mutex_lock(&one->efr_lock); + sc16is7xx_update_mlines(one); + mutex_unlock(&one->efr_lock); + + kthread_queue_delayed_work(&s->kworker, &one->ms_work, HZ); + } +} + +static void sc16is7xx_enable_ms(struct uart_port *port) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + + lockdep_assert_held_once(&port->lock); + + kthread_queue_delayed_work(&s->kworker, &one->ms_work, 0); +} + +static void sc16is7xx_start_tx(struct uart_port *port) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + kthread_queue_work(&s->kworker, &one->tx_work); +} + +static void sc16is7xx_throttle(struct uart_port *port) +{ + unsigned long flags; + + /* + * Hardware flow control is enabled and thus the device ignores RTS + * value set in MCR register. Stop reading data from RX FIFO so the + * AutoRTS feature will de-activate RTS output. + */ + uart_port_lock_irqsave(port, &flags); + sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT); + uart_port_unlock_irqrestore(port, flags); +} + +static void sc16is7xx_unthrottle(struct uart_port *port) +{ + unsigned long flags; + + uart_port_lock_irqsave(port, &flags); + sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT); + uart_port_unlock_irqrestore(port, flags); +} + +static unsigned int sc16is7xx_tx_empty(struct uart_port *port) +{ + unsigned int lsr; + + lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG); + + return (lsr & SC16IS7XX_LSR_TEMT_BIT) ? TIOCSER_TEMT : 0; +} + +static unsigned int sc16is7xx_get_mctrl(struct uart_port *port) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + /* Called with port lock taken so we can only return cached value */ + return one->old_mctrl; +} + +static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + one->config.flags |= SC16IS7XX_RECONF_MD; + kthread_queue_work(&s->kworker, &one->reg_work); +} + +static void sc16is7xx_break_ctl(struct uart_port *port, int break_state) +{ + sc16is7xx_port_update(port, SC16IS7XX_LCR_REG, + SC16IS7XX_LCR_TXBREAK_BIT, + break_state ? 
SC16IS7XX_LCR_TXBREAK_BIT : 0); +} + +static void sc16is7xx_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + unsigned int lcr, flow = 0; + int baud; + unsigned long flags; + + kthread_cancel_delayed_work_sync(&one->ms_work); + + /* Mask termios capabilities we don't support */ + termios->c_cflag &= ~CMSPAR; + + /* Word size */ + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr = SC16IS7XX_LCR_WORD_LEN_5; + break; + case CS6: + lcr = SC16IS7XX_LCR_WORD_LEN_6; + break; + case CS7: + lcr = SC16IS7XX_LCR_WORD_LEN_7; + break; + case CS8: + lcr = SC16IS7XX_LCR_WORD_LEN_8; + break; + default: + lcr = SC16IS7XX_LCR_WORD_LEN_8; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + break; + } + + /* Parity */ + if (termios->c_cflag & PARENB) { + lcr |= SC16IS7XX_LCR_PARITY_BIT; + if (!(termios->c_cflag & PARODD)) + lcr |= SC16IS7XX_LCR_EVENPARITY_BIT; + } + + /* Stop bits */ + if (termios->c_cflag & CSTOPB) + lcr |= SC16IS7XX_LCR_STOPLEN_BIT; /* 2 stops */ + + /* Set read status mask */ + port->read_status_mask = SC16IS7XX_LSR_OE_BIT; + if (termios->c_iflag & INPCK) + port->read_status_mask |= SC16IS7XX_LSR_PE_BIT | + SC16IS7XX_LSR_FE_BIT; + if (termios->c_iflag & (BRKINT | PARMRK)) + port->read_status_mask |= SC16IS7XX_LSR_BI_BIT; + + /* Set status ignore mask */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNBRK) + port->ignore_status_mask |= SC16IS7XX_LSR_BI_BIT; + if (!(termios->c_cflag & CREAD)) + port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK; + + /* As above, claim the mutex while accessing the EFR. */ + mutex_lock(&one->efr_lock); + + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, + SC16IS7XX_LCR_CONF_MODE_B); + + /* Configure flow control */ + regcache_cache_bypass(one->regmap, true); + sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]); + sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]); + + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + if (termios->c_cflag & CRTSCTS) { + flow |= SC16IS7XX_EFR_AUTOCTS_BIT | + SC16IS7XX_EFR_AUTORTS_BIT; + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + } + if (termios->c_iflag & IXON) + flow |= SC16IS7XX_EFR_SWFLOW3_BIT; + if (termios->c_iflag & IXOFF) + flow |= SC16IS7XX_EFR_SWFLOW1_BIT; + + sc16is7xx_port_update(port, + SC16IS7XX_EFR_REG, + SC16IS7XX_EFR_FLOWCTRL_BITS, + flow); + regcache_cache_bypass(one->regmap, false); + + /* Update LCR register */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + + mutex_unlock(&one->efr_lock); + + /* Get baud rate generator configuration */ + baud = uart_get_baud_rate(port, termios, old, + port->uartclk / 16 / 4 / 0xffff, + port->uartclk / 16); + + /* Setup baudrate generator */ + baud = sc16is7xx_set_baud(port, baud); + + uart_port_lock_irqsave(port, &flags); + + /* Update timeout according to new baud rate */ + uart_update_timeout(port, termios->c_cflag, baud); + + if (UART_ENABLE_MS(port, termios->c_cflag)) + sc16is7xx_enable_ms(port); + + uart_port_unlock_irqrestore(port, flags); +} + +static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + if (rs485->flags & SER_RS485_ENABLED) { + /* + * RTS signal is handled by HW, it's timing can't be influenced. 
+ * However, it's sometimes useful to delay TX even without RTS + * control therefore we try to handle .delay_rts_before_send. + */ + if (rs485->delay_rts_after_send) + return -EINVAL; + } + + one->config.flags |= SC16IS7XX_RECONF_RS485; + kthread_queue_work(&s->kworker, &one->reg_work); + + return 0; +} + +static int sc16is7xx_startup(struct uart_port *port) +{ + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + unsigned int val; + unsigned long flags; + + sc16is7xx_power(port, 1); + + /* Reset FIFOs*/ + val = SC16IS7XX_FCR_RXRESET_BIT | SC16IS7XX_FCR_TXRESET_BIT; + sc16is7xx_port_write(port, SC16IS7XX_FCR_REG, val); + udelay(5); + sc16is7xx_port_write(port, SC16IS7XX_FCR_REG, + SC16IS7XX_FCR_FIFO_BIT); + + /* Enable EFR */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, + SC16IS7XX_LCR_CONF_MODE_B); + + regcache_cache_bypass(one->regmap, true); + + /* Enable write access to enhanced features and internal clock div */ + sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, + SC16IS7XX_EFR_ENABLE_BIT, + SC16IS7XX_EFR_ENABLE_BIT); + + /* Enable TCR/TLR */ + sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, + SC16IS7XX_MCR_TCRTLR_BIT, + SC16IS7XX_MCR_TCRTLR_BIT); + + /* Configure flow control levels */ + /* Flow control halt level 48, resume level 24 */ + sc16is7xx_port_write(port, SC16IS7XX_TCR_REG, + SC16IS7XX_TCR_RX_RESUME(24) | + SC16IS7XX_TCR_RX_HALT(48)); + + regcache_cache_bypass(one->regmap, false); + + /* Now, initialize the UART */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8); + + /* Enable IrDA mode if requested in DT */ + /* This bit must be written with LCR[7] = 0 */ + sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, + SC16IS7XX_MCR_IRDA_BIT, + one->irda_mode ? + SC16IS7XX_MCR_IRDA_BIT : 0); + + /* Enable the Rx and Tx FIFO */ + sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, + SC16IS7XX_EFCR_RXDISABLE_BIT | + SC16IS7XX_EFCR_TXDISABLE_BIT, + 0); + + /* Enable RX, CTS change and modem lines interrupts */ + val = SC16IS7XX_IER_RDI_BIT | SC16IS7XX_IER_CTSI_BIT | + SC16IS7XX_IER_MSI_BIT; + sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val); + + /* Enable modem status polling */ + uart_port_lock_irqsave(port, &flags); + sc16is7xx_enable_ms(port); + uart_port_unlock_irqrestore(port, flags); + + return 0; +} + +static void sc16is7xx_shutdown(struct uart_port *port) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); + + kthread_cancel_delayed_work_sync(&one->ms_work); + + /* Disable all interrupts */ + sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0); + /* Disable TX/RX */ + sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, + SC16IS7XX_EFCR_RXDISABLE_BIT | + SC16IS7XX_EFCR_TXDISABLE_BIT, + SC16IS7XX_EFCR_RXDISABLE_BIT | + SC16IS7XX_EFCR_TXDISABLE_BIT); + + sc16is7xx_power(port, 0); + + kthread_flush_worker(&s->kworker); +} + +static const char *sc16is7xx_type(struct uart_port *port) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + + return (port->type == PORT_SC16IS7XX) ? 
s->devtype->name : NULL; +} + +static int sc16is7xx_request_port(struct uart_port *port) +{ + /* Do nothing */ + return 0; +} + +static void sc16is7xx_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_SC16IS7XX; +} + +static int sc16is7xx_verify_port(struct uart_port *port, + struct serial_struct *s) +{ + if ((s->type != PORT_UNKNOWN) && (s->type != PORT_SC16IS7XX)) + return -EINVAL; + if (s->irq != port->irq) + return -EINVAL; + + return 0; +} + +static void sc16is7xx_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + sc16is7xx_power(port, (state == UART_PM_STATE_ON) ? 1 : 0); +} + +static void sc16is7xx_null_void(struct uart_port *port) +{ + /* Do nothing */ +} + +static const struct uart_ops sc16is7xx_ops = { + .tx_empty = sc16is7xx_tx_empty, + .set_mctrl = sc16is7xx_set_mctrl, + .get_mctrl = sc16is7xx_get_mctrl, + .stop_tx = sc16is7xx_stop_tx, + .start_tx = sc16is7xx_start_tx, + .throttle = sc16is7xx_throttle, + .unthrottle = sc16is7xx_unthrottle, + .stop_rx = sc16is7xx_stop_rx, + .enable_ms = sc16is7xx_enable_ms, + .break_ctl = sc16is7xx_break_ctl, + .startup = sc16is7xx_startup, + .shutdown = sc16is7xx_shutdown, + .set_termios = sc16is7xx_set_termios, + .type = sc16is7xx_type, + .request_port = sc16is7xx_request_port, + .release_port = sc16is7xx_null_void, + .config_port = sc16is7xx_config_port, + .verify_port = sc16is7xx_verify_port, + .pm = sc16is7xx_pm, +}; + +#ifdef CONFIG_GPIOLIB +static int sc16is7xx_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + unsigned int val; + struct sc16is7xx_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[0].port; + + val = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG); + + return !!(val & BIT(offset)); +} + +static void sc16is7xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +{ + struct sc16is7xx_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[0].port; + + sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset), + val ? BIT(offset) : 0); +} + +static int sc16is7xx_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + struct sc16is7xx_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[0].port; + + sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), 0); + + return 0; +} + +static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int val) +{ + struct sc16is7xx_port *s = gpiochip_get_data(chip); + struct uart_port *port = &s->p[0].port; + u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG); + + if (val) + state |= BIT(offset); + else + state &= ~BIT(offset); + + /* + * If we write IOSTATE first, and then IODIR, the output value is not + * transferred to the corresponding I/O pin. + * The datasheet states that each register bit will be transferred to + * the corresponding I/O pin programmed as output when writing to + * IOSTATE. Therefore, configure direction first with IODIR, and then + * set value after with IOSTATE. 
+ */ + sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), + BIT(offset)); + sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state); + + return 0; +} + +static int sc16is7xx_gpio_init_valid_mask(struct gpio_chip *chip, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct sc16is7xx_port *s = gpiochip_get_data(chip); + + *valid_mask = s->gpio_valid_mask; + + return 0; +} + +static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s) +{ + struct device *dev = s->p[0].port.dev; + + if (!s->devtype->nr_gpio) + return 0; + + switch (s->mctrl_mask) { + case 0: + s->gpio_valid_mask = GENMASK(7, 0); + break; + case SC16IS7XX_IOCONTROL_MODEM_A_BIT: + s->gpio_valid_mask = GENMASK(3, 0); + break; + case SC16IS7XX_IOCONTROL_MODEM_B_BIT: + s->gpio_valid_mask = GENMASK(7, 4); + break; + default: + break; + } + + if (s->gpio_valid_mask == 0) + return 0; + + s->gpio.owner = THIS_MODULE; + s->gpio.parent = dev; + s->gpio.label = dev_name(dev); + s->gpio.init_valid_mask = sc16is7xx_gpio_init_valid_mask; + s->gpio.direction_input = sc16is7xx_gpio_direction_input; + s->gpio.get = sc16is7xx_gpio_get; + s->gpio.direction_output = sc16is7xx_gpio_direction_output; + s->gpio.set = sc16is7xx_gpio_set; + s->gpio.base = -1; + s->gpio.ngpio = s->devtype->nr_gpio; + s->gpio.can_sleep = 1; + + return gpiochip_add_data(&s->gpio, s); +} +#endif + +/* + * Configure ports designated to operate as modem control lines. + */ +static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s, + struct regmap *regmap) +{ + int i; + int ret; + int count; + u32 mctrl_port[2]; + struct device *dev = s->p[0].port.dev; + + count = device_property_count_u32(dev, "nxp,modem-control-line-ports"); + if (count < 0 || count > ARRAY_SIZE(mctrl_port)) + return 0; + + ret = device_property_read_u32_array(dev, "nxp,modem-control-line-ports", + mctrl_port, count); + if (ret) + return ret; + + s->mctrl_mask = 0; + + for (i = 0; i < count; i++) { + /* Use GPIO lines as modem control lines */ + if (mctrl_port[i] == 0) + s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_A_BIT; + else if (mctrl_port[i] == 1) + s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_B_BIT; + } + + if (s->mctrl_mask) + regmap_update_bits( + regmap, + SC16IS7XX_IOCONTROL_REG, + SC16IS7XX_IOCONTROL_MODEM_A_BIT | + SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask); + + return 0; +} + +static const struct serial_rs485 sc16is7xx_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, /* Not supported but keep returning -EINVAL */ +}; + +static int sc16is7xx_probe(struct device *dev, + const struct sc16is7xx_devtype *devtype, + struct regmap *regmaps[], int irq) +{ + unsigned long freq = 0, *pfreq = dev_get_platdata(dev); + unsigned int val; + u32 uartclk = 0; + int i, ret; + struct sc16is7xx_port *s; + + for (i = 0; i < devtype->nr_uart; i++) + if (IS_ERR(regmaps[i])) + return PTR_ERR(regmaps[i]); + + /* + * This device does not have an identification register that would + * tell us if we are really connected to the correct device. + * The best we can do is to check if communication is at all possible. + * + * Note: regmap[0] is used in the probe function to access registers + * common to all channels/ports, as it is guaranteed to be present on + * all variants. 
+ */ + ret = regmap_read(regmaps[0], SC16IS7XX_LSR_REG, &val); + if (ret < 0) + return -EPROBE_DEFER; + + /* Alloc port structure */ + s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL); + if (!s) { + dev_err(dev, "Error allocating port structure\n"); + return -ENOMEM; + } + + /* Always ask for fixed clock rate from a property. */ + device_property_read_u32(dev, "clock-frequency", &uartclk); + + s->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(s->clk)) + return PTR_ERR(s->clk); + + ret = clk_prepare_enable(s->clk); + if (ret) + return ret; + + freq = clk_get_rate(s->clk); + if (freq == 0) { + if (uartclk) + freq = uartclk; + if (pfreq) + freq = *pfreq; + if (freq) + dev_dbg(dev, "Clock frequency: %luHz\n", freq); + else + return -EINVAL; + } + + s->devtype = devtype; + dev_set_drvdata(dev, s); + + kthread_init_worker(&s->kworker); + s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker, + "sc16is7xx"); + if (IS_ERR(s->kworker_task)) { + ret = PTR_ERR(s->kworker_task); + goto out_clk; + } + sched_set_fifo(s->kworker_task); + + /* reset device, purging any pending irq / data */ + regmap_write(regmaps[0], SC16IS7XX_IOCONTROL_REG, + SC16IS7XX_IOCONTROL_SRESET_BIT); + + for (i = 0; i < devtype->nr_uart; ++i) { + s->p[i].port.line = find_first_zero_bit(&sc16is7xx_lines, + SC16IS7XX_MAX_DEVS); + if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) { + ret = -ERANGE; + goto out_ports; + } + + /* Initialize port data */ + s->p[i].port.dev = dev; + s->p[i].port.irq = irq; + s->p[i].port.type = PORT_SC16IS7XX; + s->p[i].port.fifosize = SC16IS7XX_FIFO_SIZE; + s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY; + s->p[i].port.iobase = i; + /* + * Use all ones as membase to make sure uart_configure_port() in + * serial_core.c does not abort for SPI/I2C devices where the + * membase address is not applicable. 
+ */ + s->p[i].port.membase = (void __iomem *)~0; + s->p[i].port.iotype = UPIO_PORT; + s->p[i].port.uartclk = freq; + s->p[i].port.rs485_config = sc16is7xx_config_rs485; + s->p[i].port.rs485_supported = sc16is7xx_rs485_supported; + s->p[i].port.ops = &sc16is7xx_ops; + s->p[i].old_mctrl = 0; + s->p[i].regmap = regmaps[i]; + + mutex_init(&s->p[i].efr_lock); + + ret = uart_get_rs485_mode(&s->p[i].port); + if (ret) + goto out_ports; + + /* Disable all interrupts */ + sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0); + /* Disable TX/RX */ + sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFCR_REG, + SC16IS7XX_EFCR_RXDISABLE_BIT | + SC16IS7XX_EFCR_TXDISABLE_BIT); + + /* Initialize kthread work structs */ + kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc); + kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc); + kthread_init_delayed_work(&s->p[i].ms_work, sc16is7xx_ms_proc); + + /* Register port */ + ret = uart_add_one_port(&sc16is7xx_uart, &s->p[i].port); + if (ret) + goto out_ports; + + set_bit(s->p[i].port.line, &sc16is7xx_lines); + + /* Enable EFR */ + sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, + SC16IS7XX_LCR_CONF_MODE_B); + + regcache_cache_bypass(regmaps[i], true); + + /* Enable write access to enhanced features */ + sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG, + SC16IS7XX_EFR_ENABLE_BIT); + + regcache_cache_bypass(regmaps[i], false); + + /* Restore access to general registers */ + sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00); + + /* Go to suspend mode */ + sc16is7xx_power(&s->p[i].port, 0); + } + + if (dev->of_node) { + struct property *prop; + const __be32 *p; + u32 u; + + of_property_for_each_u32(dev->of_node, "irda-mode-ports", + prop, p, u) + if (u < devtype->nr_uart) + s->p[u].irda_mode = true; + } + + ret = sc16is7xx_setup_mctrl_ports(s, regmaps[0]); + if (ret) + goto out_ports; + +#ifdef CONFIG_GPIOLIB + ret = sc16is7xx_setup_gpio_chip(s); + if (ret) + goto out_ports; +#endif + + /* + * Setup interrupt. We first try to acquire the IRQ line as level IRQ. + * If that succeeds, we can allow sharing the interrupt as well. + * In case the interrupt controller doesn't support that, we fall + * back to a non-shared falling-edge trigger. 
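+ *
+ * In both attempts IRQF_ONESHOT is required: the IRQ core rejects a
+ * threaded handler that has no primary handler unless the line is kept
+ * masked until the thread has completed.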
+ */ + ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq, + IRQF_TRIGGER_LOW | IRQF_SHARED | + IRQF_ONESHOT, + dev_name(dev), s); + if (!ret) + return 0; + + ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(dev), s); + if (!ret) + return 0; + +#ifdef CONFIG_GPIOLIB + if (s->gpio_valid_mask) + gpiochip_remove(&s->gpio); +#endif + +out_ports: + for (i = 0; i < devtype->nr_uart; i++) + if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines)) + uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port); + + kthread_stop(s->kworker_task); + +out_clk: + clk_disable_unprepare(s->clk); + + return ret; +} + +static void sc16is7xx_remove(struct device *dev) +{ + struct sc16is7xx_port *s = dev_get_drvdata(dev); + int i; + +#ifdef CONFIG_GPIOLIB + if (s->gpio_valid_mask) + gpiochip_remove(&s->gpio); +#endif + + for (i = 0; i < s->devtype->nr_uart; i++) { + kthread_cancel_delayed_work_sync(&s->p[i].ms_work); + if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines)) + uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port); + sc16is7xx_power(&s->p[i].port, 0); + } + + kthread_flush_worker(&s->kworker); + kthread_stop(s->kworker_task); + + clk_disable_unprepare(s->clk); +} + +static const struct of_device_id __maybe_unused sc16is7xx_dt_ids[] = { + { .compatible = "nxp,sc16is740", .data = &sc16is74x_devtype, }, + { .compatible = "nxp,sc16is741", .data = &sc16is74x_devtype, }, + { .compatible = "nxp,sc16is750", .data = &sc16is750_devtype, }, + { .compatible = "nxp,sc16is752", .data = &sc16is752_devtype, }, + { .compatible = "nxp,sc16is760", .data = &sc16is760_devtype, }, + { .compatible = "nxp,sc16is762", .data = &sc16is762_devtype, }, + { } +}; +MODULE_DEVICE_TABLE(of, sc16is7xx_dt_ids); + +static struct regmap_config regcfg = { + .reg_bits = 5, + .pad_bits = 3, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .volatile_reg = sc16is7xx_regmap_volatile, + .precious_reg = sc16is7xx_regmap_precious, + .writeable_noinc_reg = sc16is7xx_regmap_noinc, + .readable_noinc_reg = sc16is7xx_regmap_noinc, + .max_raw_read = SC16IS7XX_FIFO_SIZE, + .max_raw_write = SC16IS7XX_FIFO_SIZE, + .max_register = SC16IS7XX_EFCR_REG, +}; + +static const char *sc16is7xx_regmap_name(u8 port_id) +{ + switch (port_id) { + case 0: return "port0"; + case 1: return "port1"; + default: + WARN_ON(true); + return NULL; + } +} + +static unsigned int sc16is7xx_regmap_port_mask(unsigned int port_id) +{ + /* CH1,CH0 are at bits 2:1. */ + return port_id << 1; +} + +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI +static int sc16is7xx_spi_probe(struct spi_device *spi) +{ + const struct sc16is7xx_devtype *devtype; + struct regmap *regmaps[2]; + unsigned int i; + int ret; + + /* Setup SPI bus */ + spi->bits_per_word = 8; + /* For all variants, only mode 0 is supported */ + if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0) + return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n"); + + spi->mode = spi->mode ? : SPI_MODE_0; + spi->max_speed_hz = spi->max_speed_hz ? : 4 * HZ_PER_MHZ; + ret = spi_setup(spi); + if (ret) + return ret; + + if (spi->dev.of_node) { + devtype = device_get_match_data(&spi->dev); + if (!devtype) + return -ENODEV; + } else { + const struct spi_device_id *id_entry = spi_get_device_id(spi); + + devtype = (struct sc16is7xx_devtype *)id_entry->driver_data; + } + + for (i = 0; i < devtype->nr_uart; i++) { + regcfg.name = sc16is7xx_regmap_name(i); + /* + * If read_flag_mask is 0, the regmap code sets it to a default + * of 0x80. 
Since we specify our own mask, we must add the READ + * bit ourselves: + */ + regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i) | + SC16IS7XX_SPI_READ_BIT; + regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i); + regmaps[i] = devm_regmap_init_spi(spi, ®cfg); + } + + return sc16is7xx_probe(&spi->dev, devtype, regmaps, spi->irq); +} + +static void sc16is7xx_spi_remove(struct spi_device *spi) +{ + sc16is7xx_remove(&spi->dev); +} + +static const struct spi_device_id sc16is7xx_spi_id_table[] = { + { "sc16is74x", (kernel_ulong_t)&sc16is74x_devtype, }, + { "sc16is740", (kernel_ulong_t)&sc16is74x_devtype, }, + { "sc16is741", (kernel_ulong_t)&sc16is74x_devtype, }, + { "sc16is750", (kernel_ulong_t)&sc16is750_devtype, }, + { "sc16is752", (kernel_ulong_t)&sc16is752_devtype, }, + { "sc16is760", (kernel_ulong_t)&sc16is760_devtype, }, + { "sc16is762", (kernel_ulong_t)&sc16is762_devtype, }, + { } +}; + +MODULE_DEVICE_TABLE(spi, sc16is7xx_spi_id_table); + +static struct spi_driver sc16is7xx_spi_uart_driver = { + .driver = { + .name = SC16IS7XX_NAME, + .of_match_table = sc16is7xx_dt_ids, + }, + .probe = sc16is7xx_spi_probe, + .remove = sc16is7xx_spi_remove, + .id_table = sc16is7xx_spi_id_table, +}; + +MODULE_ALIAS("spi:sc16is7xx"); +#endif + +#ifdef CONFIG_SERIAL_SC16IS7XX_I2C +static int sc16is7xx_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + const struct sc16is7xx_devtype *devtype; + struct regmap *regmaps[2]; + unsigned int i; + + if (i2c->dev.of_node) { + devtype = device_get_match_data(&i2c->dev); + if (!devtype) + return -ENODEV; + } else { + devtype = (struct sc16is7xx_devtype *)id->driver_data; + } + + for (i = 0; i < devtype->nr_uart; i++) { + regcfg.name = sc16is7xx_regmap_name(i); + regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i); + regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i); + regmaps[i] = devm_regmap_init_i2c(i2c, ®cfg); + } + + return sc16is7xx_probe(&i2c->dev, devtype, regmaps, i2c->irq); +} + +static void sc16is7xx_i2c_remove(struct i2c_client *client) +{ + sc16is7xx_remove(&client->dev); +} + +static const struct i2c_device_id sc16is7xx_i2c_id_table[] = { + { "sc16is74x", (kernel_ulong_t)&sc16is74x_devtype, }, + { "sc16is740", (kernel_ulong_t)&sc16is74x_devtype, }, + { "sc16is741", (kernel_ulong_t)&sc16is74x_devtype, }, + { "sc16is750", (kernel_ulong_t)&sc16is750_devtype, }, + { "sc16is752", (kernel_ulong_t)&sc16is752_devtype, }, + { "sc16is760", (kernel_ulong_t)&sc16is760_devtype, }, + { "sc16is762", (kernel_ulong_t)&sc16is762_devtype, }, + { } +}; +MODULE_DEVICE_TABLE(i2c, sc16is7xx_i2c_id_table); + +static struct i2c_driver sc16is7xx_i2c_uart_driver = { + .driver = { + .name = SC16IS7XX_NAME, + .of_match_table = sc16is7xx_dt_ids, + }, + .probe = sc16is7xx_i2c_probe, + .remove = sc16is7xx_i2c_remove, + .id_table = sc16is7xx_i2c_id_table, +}; + +#endif + +static int __init sc16is7xx_init(void) +{ + int ret; + + ret = uart_register_driver(&sc16is7xx_uart); + if (ret) { + pr_err("Registering UART driver failed\n"); + return ret; + } + +#ifdef CONFIG_SERIAL_SC16IS7XX_I2C + ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); + if (ret < 0) { + pr_err("failed to init sc16is7xx i2c --> %d\n", ret); + goto err_i2c; + } +#endif + +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI + ret = spi_register_driver(&sc16is7xx_spi_uart_driver); + if (ret < 0) { + pr_err("failed to init sc16is7xx spi --> %d\n", ret); + goto err_spi; + } +#endif + return ret; + +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI +err_spi: +#endif +#ifdef CONFIG_SERIAL_SC16IS7XX_I2C + 
i2c_del_driver(&sc16is7xx_i2c_uart_driver); +err_i2c: +#endif + uart_unregister_driver(&sc16is7xx_uart); + return ret; +} +module_init(sc16is7xx_init); + +static void __exit sc16is7xx_exit(void) +{ +#ifdef CONFIG_SERIAL_SC16IS7XX_I2C + i2c_del_driver(&sc16is7xx_i2c_uart_driver); +#endif + +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI + spi_unregister_driver(&sc16is7xx_spi_uart_driver); +#endif + uart_unregister_driver(&sc16is7xx_uart); +} +module_exit(sc16is7xx_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jon Ringle "); +MODULE_DESCRIPTION("SC16IS7XX serial driver"); diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c new file mode 100644 index 000000000..dd98509f5 --- /dev/null +++ b/drivers/tty/serial/sccnxp.c @@ -0,0 +1,1069 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * NXP (Philips) SCC+++(SCN+++) serial driver + * + * Copyright (C) 2012 Alexander Shiyan + * + * Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SCCNXP_NAME "uart-sccnxp" +#define SCCNXP_MAJOR 204 +#define SCCNXP_MINOR 205 + +#define SCCNXP_MR_REG (0x00) +# define MR0_BAUD_NORMAL (0 << 0) +# define MR0_BAUD_EXT1 (1 << 0) +# define MR0_BAUD_EXT2 (5 << 0) +# define MR0_FIFO (1 << 3) +# define MR0_TXLVL (1 << 4) +# define MR1_BITS_5 (0 << 0) +# define MR1_BITS_6 (1 << 0) +# define MR1_BITS_7 (2 << 0) +# define MR1_BITS_8 (3 << 0) +# define MR1_PAR_EVN (0 << 2) +# define MR1_PAR_ODD (1 << 2) +# define MR1_PAR_NO (4 << 2) +# define MR2_STOP1 (7 << 0) +# define MR2_STOP2 (0xf << 0) +#define SCCNXP_SR_REG (0x01) +# define SR_RXRDY (1 << 0) +# define SR_FULL (1 << 1) +# define SR_TXRDY (1 << 2) +# define SR_TXEMT (1 << 3) +# define SR_OVR (1 << 4) +# define SR_PE (1 << 5) +# define SR_FE (1 << 6) +# define SR_BRK (1 << 7) +#define SCCNXP_CSR_REG (SCCNXP_SR_REG) +# define CSR_TIMER_MODE (0x0d) +#define SCCNXP_CR_REG (0x02) +# define CR_RX_ENABLE (1 << 0) +# define CR_RX_DISABLE (1 << 1) +# define CR_TX_ENABLE (1 << 2) +# define CR_TX_DISABLE (1 << 3) +# define CR_CMD_MRPTR1 (0x01 << 4) +# define CR_CMD_RX_RESET (0x02 << 4) +# define CR_CMD_TX_RESET (0x03 << 4) +# define CR_CMD_STATUS_RESET (0x04 << 4) +# define CR_CMD_BREAK_RESET (0x05 << 4) +# define CR_CMD_START_BREAK (0x06 << 4) +# define CR_CMD_STOP_BREAK (0x07 << 4) +# define CR_CMD_MRPTR0 (0x0b << 4) +#define SCCNXP_RHR_REG (0x03) +#define SCCNXP_THR_REG SCCNXP_RHR_REG +#define SCCNXP_IPCR_REG (0x04) +#define SCCNXP_ACR_REG SCCNXP_IPCR_REG +# define ACR_BAUD0 (0 << 7) +# define ACR_BAUD1 (1 << 7) +# define ACR_TIMER_MODE (6 << 4) +#define SCCNXP_ISR_REG (0x05) +#define SCCNXP_IMR_REG SCCNXP_ISR_REG +# define IMR_TXRDY (1 << 0) +# define IMR_RXRDY (1 << 1) +# define ISR_TXRDY(x) (1 << ((x * 4) + 0)) +# define ISR_RXRDY(x) (1 << ((x * 4) + 1)) +#define SCCNXP_CTPU_REG (0x06) +#define SCCNXP_CTPL_REG (0x07) +#define SCCNXP_IPR_REG (0x0d) +#define SCCNXP_OPCR_REG SCCNXP_IPR_REG +#define SCCNXP_SOP_REG (0x0e) +#define SCCNXP_START_COUNTER_REG SCCNXP_SOP_REG +#define SCCNXP_ROP_REG (0x0f) + +/* Route helpers */ +#define MCTRL_MASK(sig) (0xf << (sig)) +#define MCTRL_IBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_IP0) +#define MCTRL_OBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_OP0) + +#define SCCNXP_HAVE_IO 0x00000001 +#define SCCNXP_HAVE_MR0 0x00000002 + +struct sccnxp_chip { + const char *name; + unsigned int nr; + unsigned long freq_min; + unsigned long freq_std; + 
unsigned long freq_max; + unsigned int flags; + unsigned int fifosize; + /* Time between read/write cycles */ + unsigned int trwd; +}; + +struct sccnxp_port { + struct uart_driver uart; + struct uart_port port[SCCNXP_MAX_UARTS]; + bool opened[SCCNXP_MAX_UARTS]; + + int irq; + u8 imr; + + struct sccnxp_chip *chip; + +#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE + struct console console; +#endif + + spinlock_t lock; + + bool poll; + struct timer_list timer; + + struct sccnxp_pdata pdata; + + struct regulator *regulator; +}; + +static const struct sccnxp_chip sc2681 = { + .name = "SC2681", + .nr = 2, + .freq_min = 1000000, + .freq_std = 3686400, + .freq_max = 4000000, + .flags = SCCNXP_HAVE_IO, + .fifosize = 3, + .trwd = 200, +}; + +static const struct sccnxp_chip sc2691 = { + .name = "SC2691", + .nr = 1, + .freq_min = 1000000, + .freq_std = 3686400, + .freq_max = 4000000, + .flags = 0, + .fifosize = 3, + .trwd = 150, +}; + +static const struct sccnxp_chip sc2692 = { + .name = "SC2692", + .nr = 2, + .freq_min = 1000000, + .freq_std = 3686400, + .freq_max = 4000000, + .flags = SCCNXP_HAVE_IO, + .fifosize = 3, + .trwd = 30, +}; + +static const struct sccnxp_chip sc2891 = { + .name = "SC2891", + .nr = 1, + .freq_min = 100000, + .freq_std = 3686400, + .freq_max = 8000000, + .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0, + .fifosize = 16, + .trwd = 27, +}; + +static const struct sccnxp_chip sc2892 = { + .name = "SC2892", + .nr = 2, + .freq_min = 100000, + .freq_std = 3686400, + .freq_max = 8000000, + .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0, + .fifosize = 16, + .trwd = 17, +}; + +static const struct sccnxp_chip sc28202 = { + .name = "SC28202", + .nr = 2, + .freq_min = 1000000, + .freq_std = 14745600, + .freq_max = 50000000, + .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0, + .fifosize = 256, + .trwd = 10, +}; + +static const struct sccnxp_chip sc68681 = { + .name = "SC68681", + .nr = 2, + .freq_min = 1000000, + .freq_std = 3686400, + .freq_max = 4000000, + .flags = SCCNXP_HAVE_IO, + .fifosize = 3, + .trwd = 200, +}; + +static const struct sccnxp_chip sc68692 = { + .name = "SC68692", + .nr = 2, + .freq_min = 1000000, + .freq_std = 3686400, + .freq_max = 4000000, + .flags = SCCNXP_HAVE_IO, + .fifosize = 3, + .trwd = 200, +}; + +static u8 sccnxp_read(struct uart_port *port, u8 reg) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + u8 ret; + + ret = readb(port->membase + (reg << port->regshift)); + + ndelay(s->chip->trwd); + + return ret; +} + +static void sccnxp_write(struct uart_port *port, u8 reg, u8 v) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + + writeb(v, port->membase + (reg << port->regshift)); + + ndelay(s->chip->trwd); +} + +static u8 sccnxp_port_read(struct uart_port *port, u8 reg) +{ + return sccnxp_read(port, (port->line << 3) + reg); +} + +static void sccnxp_port_write(struct uart_port *port, u8 reg, u8 v) +{ + sccnxp_write(port, (port->line << 3) + reg, v); +} + +static int sccnxp_update_best_err(int a, int b, int *besterr) +{ + int err = abs(a - b); + + if (*besterr > err) { + *besterr = err; + return 0; + } + + return 1; +} + +static const struct { + u8 csr; + u8 acr; + u8 mr0; + int baud; +} baud_std[] = { + { 0, ACR_BAUD0, MR0_BAUD_NORMAL, 50, }, + { 0, ACR_BAUD1, MR0_BAUD_NORMAL, 75, }, + { 1, ACR_BAUD0, MR0_BAUD_NORMAL, 110, }, + { 2, ACR_BAUD0, MR0_BAUD_NORMAL, 134, }, + { 3, ACR_BAUD1, MR0_BAUD_NORMAL, 150, }, + { 3, ACR_BAUD0, MR0_BAUD_NORMAL, 200, }, + { 4, ACR_BAUD0, MR0_BAUD_NORMAL, 300, }, + { 0, ACR_BAUD1, MR0_BAUD_EXT1, 450, }, + { 1, ACR_BAUD0, MR0_BAUD_EXT2, 880, 
}, + { 3, ACR_BAUD1, MR0_BAUD_EXT1, 900, }, + { 5, ACR_BAUD0, MR0_BAUD_NORMAL, 600, }, + { 7, ACR_BAUD0, MR0_BAUD_NORMAL, 1050, }, + { 2, ACR_BAUD0, MR0_BAUD_EXT2, 1076, }, + { 6, ACR_BAUD0, MR0_BAUD_NORMAL, 1200, }, + { 10, ACR_BAUD1, MR0_BAUD_NORMAL, 1800, }, + { 7, ACR_BAUD1, MR0_BAUD_NORMAL, 2000, }, + { 8, ACR_BAUD0, MR0_BAUD_NORMAL, 2400, }, + { 5, ACR_BAUD1, MR0_BAUD_EXT1, 3600, }, + { 9, ACR_BAUD0, MR0_BAUD_NORMAL, 4800, }, + { 10, ACR_BAUD0, MR0_BAUD_NORMAL, 7200, }, + { 11, ACR_BAUD0, MR0_BAUD_NORMAL, 9600, }, + { 8, ACR_BAUD0, MR0_BAUD_EXT1, 14400, }, + { 12, ACR_BAUD1, MR0_BAUD_NORMAL, 19200, }, + { 9, ACR_BAUD0, MR0_BAUD_EXT1, 28800, }, + { 12, ACR_BAUD0, MR0_BAUD_NORMAL, 38400, }, + { 11, ACR_BAUD0, MR0_BAUD_EXT1, 57600, }, + { 12, ACR_BAUD1, MR0_BAUD_EXT1, 115200, }, + { 12, ACR_BAUD0, MR0_BAUD_EXT1, 230400, }, + { 0, 0, 0, 0 } +}; + +static int sccnxp_set_baud(struct uart_port *port, int baud) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + int div_std, tmp_baud, bestbaud = INT_MAX, besterr = INT_MAX; + struct sccnxp_chip *chip = s->chip; + u8 i, acr = 0, csr = 0, mr0 = 0; + + /* Find divisor to load to the timer preset registers */ + div_std = DIV_ROUND_CLOSEST(port->uartclk, 2 * 16 * baud); + if ((div_std >= 2) && (div_std <= 0xffff)) { + bestbaud = DIV_ROUND_CLOSEST(port->uartclk, 2 * 16 * div_std); + sccnxp_update_best_err(baud, bestbaud, &besterr); + csr = CSR_TIMER_MODE; + sccnxp_port_write(port, SCCNXP_CTPU_REG, div_std >> 8); + sccnxp_port_write(port, SCCNXP_CTPL_REG, div_std); + /* Issue start timer/counter command */ + sccnxp_port_read(port, SCCNXP_START_COUNTER_REG); + } + + /* Find best baud from table */ + for (i = 0; baud_std[i].baud && besterr; i++) { + if (baud_std[i].mr0 && !(chip->flags & SCCNXP_HAVE_MR0)) + continue; + div_std = DIV_ROUND_CLOSEST(chip->freq_std, baud_std[i].baud); + tmp_baud = DIV_ROUND_CLOSEST(port->uartclk, div_std); + if (!sccnxp_update_best_err(baud, tmp_baud, &besterr)) { + acr = baud_std[i].acr; + csr = baud_std[i].csr; + mr0 = baud_std[i].mr0; + bestbaud = tmp_baud; + } + } + + if (chip->flags & SCCNXP_HAVE_MR0) { + /* Enable FIFO, set half level for TX */ + mr0 |= MR0_FIFO | MR0_TXLVL; + /* Update MR0 */ + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR0); + sccnxp_port_write(port, SCCNXP_MR_REG, mr0); + } + + sccnxp_port_write(port, SCCNXP_ACR_REG, acr | ACR_TIMER_MODE); + sccnxp_port_write(port, SCCNXP_CSR_REG, (csr << 4) | csr); + + if (baud != bestbaud) + dev_dbg(port->dev, "Baudrate desired: %i, calculated: %i\n", + baud, bestbaud); + + return bestbaud; +} + +static void sccnxp_enable_irq(struct uart_port *port, int mask) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + + s->imr |= mask << (port->line * 4); + sccnxp_write(port, SCCNXP_IMR_REG, s->imr); +} + +static void sccnxp_disable_irq(struct uart_port *port, int mask) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + + s->imr &= ~(mask << (port->line * 4)); + sccnxp_write(port, SCCNXP_IMR_REG, s->imr); +} + +static void sccnxp_set_bit(struct uart_port *port, int sig, int state) +{ + u8 bitmask; + struct sccnxp_port *s = dev_get_drvdata(port->dev); + + if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(sig)) { + bitmask = 1 << MCTRL_OBIT(s->pdata.mctrl_cfg[port->line], sig); + if (state) + sccnxp_write(port, SCCNXP_SOP_REG, bitmask); + else + sccnxp_write(port, SCCNXP_ROP_REG, bitmask); + } +} + +static void sccnxp_handle_rx(struct uart_port *port) +{ + u8 sr; + unsigned int ch, flag; + + for (;;) { + sr = sccnxp_port_read(port, 
SCCNXP_SR_REG); + if (!(sr & SR_RXRDY)) + break; + sr &= SR_PE | SR_FE | SR_OVR | SR_BRK; + + ch = sccnxp_port_read(port, SCCNXP_RHR_REG); + + port->icount.rx++; + flag = TTY_NORMAL; + + if (unlikely(sr)) { + if (sr & SR_BRK) { + port->icount.brk++; + sccnxp_port_write(port, SCCNXP_CR_REG, + CR_CMD_BREAK_RESET); + if (uart_handle_break(port)) + continue; + } else if (sr & SR_PE) + port->icount.parity++; + else if (sr & SR_FE) + port->icount.frame++; + else if (sr & SR_OVR) { + port->icount.overrun++; + sccnxp_port_write(port, SCCNXP_CR_REG, + CR_CMD_STATUS_RESET); + } + + sr &= port->read_status_mask; + if (sr & SR_BRK) + flag = TTY_BREAK; + else if (sr & SR_PE) + flag = TTY_PARITY; + else if (sr & SR_FE) + flag = TTY_FRAME; + else if (sr & SR_OVR) + flag = TTY_OVERRUN; + } + + if (uart_handle_sysrq_char(port, ch)) + continue; + + if (sr & port->ignore_status_mask) + continue; + + uart_insert_char(port, sr, SR_OVR, ch, flag); + } + + tty_flip_buffer_push(&port->state->port); +} + +static void sccnxp_handle_tx(struct uart_port *port) +{ + u8 sr; + struct circ_buf *xmit = &port->state->xmit; + struct sccnxp_port *s = dev_get_drvdata(port->dev); + + if (unlikely(port->x_char)) { + sccnxp_port_write(port, SCCNXP_THR_REG, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + /* Disable TX if FIFO is empty */ + if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXEMT) { + sccnxp_disable_irq(port, IMR_TXRDY); + + /* Set direction to input */ + if (s->chip->flags & SCCNXP_HAVE_IO) + sccnxp_set_bit(port, DIR_OP, 0); + } + return; + } + + while (!uart_circ_empty(xmit)) { + sr = sccnxp_port_read(port, SCCNXP_SR_REG); + if (!(sr & SR_TXRDY)) + break; + + sccnxp_port_write(port, SCCNXP_THR_REG, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void sccnxp_handle_events(struct sccnxp_port *s) +{ + int i; + u8 isr; + + do { + isr = sccnxp_read(&s->port[0], SCCNXP_ISR_REG); + isr &= s->imr; + if (!isr) + break; + + for (i = 0; i < s->uart.nr; i++) { + if (s->opened[i] && (isr & ISR_RXRDY(i))) + sccnxp_handle_rx(&s->port[i]); + if (s->opened[i] && (isr & ISR_TXRDY(i))) + sccnxp_handle_tx(&s->port[i]); + } + } while (1); +} + +static void sccnxp_timer(struct timer_list *t) +{ + struct sccnxp_port *s = from_timer(s, t, timer); + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + sccnxp_handle_events(s); + spin_unlock_irqrestore(&s->lock, flags); + + mod_timer(&s->timer, jiffies + usecs_to_jiffies(s->pdata.poll_time_us)); +} + +static irqreturn_t sccnxp_ist(int irq, void *dev_id) +{ + struct sccnxp_port *s = (struct sccnxp_port *)dev_id; + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + sccnxp_handle_events(s); + spin_unlock_irqrestore(&s->lock, flags); + + return IRQ_HANDLED; +} + +static void sccnxp_start_tx(struct uart_port *port) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + + /* Set direction to output */ + if (s->chip->flags & SCCNXP_HAVE_IO) + sccnxp_set_bit(port, DIR_OP, 1); + + sccnxp_enable_irq(port, IMR_TXRDY); + + spin_unlock_irqrestore(&s->lock, flags); +} + +static void sccnxp_stop_tx(struct uart_port *port) +{ + /* Do nothing */ +} + +static void sccnxp_stop_rx(struct uart_port *port) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned long flags; + + 
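+	/* Disable the receiver for this channel under the driver lock */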
spin_lock_irqsave(&s->lock, flags); + sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE); + spin_unlock_irqrestore(&s->lock, flags); +} + +static unsigned int sccnxp_tx_empty(struct uart_port *port) +{ + u8 val; + unsigned long flags; + struct sccnxp_port *s = dev_get_drvdata(port->dev); + + spin_lock_irqsave(&s->lock, flags); + val = sccnxp_port_read(port, SCCNXP_SR_REG); + spin_unlock_irqrestore(&s->lock, flags); + + return (val & SR_TXEMT) ? TIOCSER_TEMT : 0; +} + +static void sccnxp_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned long flags; + + if (!(s->chip->flags & SCCNXP_HAVE_IO)) + return; + + spin_lock_irqsave(&s->lock, flags); + + sccnxp_set_bit(port, DTR_OP, mctrl & TIOCM_DTR); + sccnxp_set_bit(port, RTS_OP, mctrl & TIOCM_RTS); + + spin_unlock_irqrestore(&s->lock, flags); +} + +static unsigned int sccnxp_get_mctrl(struct uart_port *port) +{ + u8 bitmask, ipr; + unsigned long flags; + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR; + + if (!(s->chip->flags & SCCNXP_HAVE_IO)) + return mctrl; + + spin_lock_irqsave(&s->lock, flags); + + ipr = ~sccnxp_read(port, SCCNXP_IPCR_REG); + + if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(DSR_IP)) { + bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], + DSR_IP); + mctrl &= ~TIOCM_DSR; + mctrl |= (ipr & bitmask) ? TIOCM_DSR : 0; + } + if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(CTS_IP)) { + bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], + CTS_IP); + mctrl &= ~TIOCM_CTS; + mctrl |= (ipr & bitmask) ? TIOCM_CTS : 0; + } + if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(DCD_IP)) { + bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], + DCD_IP); + mctrl &= ~TIOCM_CAR; + mctrl |= (ipr & bitmask) ? TIOCM_CAR : 0; + } + if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(RNG_IP)) { + bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], + RNG_IP); + mctrl &= ~TIOCM_RNG; + mctrl |= (ipr & bitmask) ? TIOCM_RNG : 0; + } + + spin_unlock_irqrestore(&s->lock, flags); + + return mctrl; +} + +static void sccnxp_break_ctl(struct uart_port *port, int break_state) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + sccnxp_port_write(port, SCCNXP_CR_REG, break_state ? 
+ CR_CMD_START_BREAK : CR_CMD_STOP_BREAK); + spin_unlock_irqrestore(&s->lock, flags); +} + +static void sccnxp_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned long flags; + u8 mr1, mr2; + int baud; + + spin_lock_irqsave(&s->lock, flags); + + /* Mask termios capabilities we don't support */ + termios->c_cflag &= ~CMSPAR; + + /* Disable RX & TX, reset break condition, status and FIFOs */ + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET | + CR_RX_DISABLE | CR_TX_DISABLE); + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET); + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET); + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET); + + /* Word size */ + switch (termios->c_cflag & CSIZE) { + case CS5: + mr1 = MR1_BITS_5; + break; + case CS6: + mr1 = MR1_BITS_6; + break; + case CS7: + mr1 = MR1_BITS_7; + break; + case CS8: + default: + mr1 = MR1_BITS_8; + break; + } + + /* Parity */ + if (termios->c_cflag & PARENB) { + if (termios->c_cflag & PARODD) + mr1 |= MR1_PAR_ODD; + } else + mr1 |= MR1_PAR_NO; + + /* Stop bits */ + mr2 = (termios->c_cflag & CSTOPB) ? MR2_STOP2 : MR2_STOP1; + + /* Update desired format */ + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR1); + sccnxp_port_write(port, SCCNXP_MR_REG, mr1); + sccnxp_port_write(port, SCCNXP_MR_REG, mr2); + + /* Set read status mask */ + port->read_status_mask = SR_OVR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= SR_PE | SR_FE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= SR_BRK; + + /* Set status ignore mask */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNBRK) + port->ignore_status_mask |= SR_BRK; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= SR_PE; + if (!(termios->c_cflag & CREAD)) + port->ignore_status_mask |= SR_PE | SR_OVR | SR_FE | SR_BRK; + + /* Setup baudrate */ + baud = uart_get_baud_rate(port, termios, old, 50, + (s->chip->flags & SCCNXP_HAVE_MR0) ? 
+ 230400 : 38400); + baud = sccnxp_set_baud(port, baud); + + /* Update timeout according to new baud rate */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* Report actual baudrate back to core */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + /* Enable RX & TX */ + sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE); + + spin_unlock_irqrestore(&s->lock, flags); +} + +static int sccnxp_startup(struct uart_port *port) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + + if (s->chip->flags & SCCNXP_HAVE_IO) { + /* Outputs are controlled manually */ + sccnxp_write(port, SCCNXP_OPCR_REG, 0); + } + + /* Reset break condition, status and FIFOs */ + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET); + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET); + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET); + sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET); + + /* Enable RX & TX */ + sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE); + + /* Enable RX interrupt */ + sccnxp_enable_irq(port, IMR_RXRDY); + + s->opened[port->line] = 1; + + spin_unlock_irqrestore(&s->lock, flags); + + return 0; +} + +static void sccnxp_shutdown(struct uart_port *port) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + + s->opened[port->line] = 0; + + /* Disable interrupts */ + sccnxp_disable_irq(port, IMR_TXRDY | IMR_RXRDY); + + /* Disable TX & RX */ + sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE | CR_TX_DISABLE); + + /* Leave direction to input */ + if (s->chip->flags & SCCNXP_HAVE_IO) + sccnxp_set_bit(port, DIR_OP, 0); + + spin_unlock_irqrestore(&s->lock, flags); +} + +static const char *sccnxp_type(struct uart_port *port) +{ + struct sccnxp_port *s = dev_get_drvdata(port->dev); + + return (port->type == PORT_SC26XX) ? 
s->chip->name : NULL; +} + +static void sccnxp_release_port(struct uart_port *port) +{ + /* Do nothing */ +} + +static int sccnxp_request_port(struct uart_port *port) +{ + /* Do nothing */ + return 0; +} + +static void sccnxp_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_SC26XX; +} + +static int sccnxp_verify_port(struct uart_port *port, struct serial_struct *s) +{ + if ((s->type == PORT_UNKNOWN) || (s->type == PORT_SC26XX)) + return 0; + if (s->irq == port->irq) + return 0; + + return -EINVAL; +} + +static const struct uart_ops sccnxp_ops = { + .tx_empty = sccnxp_tx_empty, + .set_mctrl = sccnxp_set_mctrl, + .get_mctrl = sccnxp_get_mctrl, + .stop_tx = sccnxp_stop_tx, + .start_tx = sccnxp_start_tx, + .stop_rx = sccnxp_stop_rx, + .break_ctl = sccnxp_break_ctl, + .startup = sccnxp_startup, + .shutdown = sccnxp_shutdown, + .set_termios = sccnxp_set_termios, + .type = sccnxp_type, + .release_port = sccnxp_release_port, + .request_port = sccnxp_request_port, + .config_port = sccnxp_config_port, + .verify_port = sccnxp_verify_port, +}; + +#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE +static void sccnxp_console_putchar(struct uart_port *port, unsigned char c) +{ + int tryes = 100000; + + while (tryes--) { + if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXRDY) { + sccnxp_port_write(port, SCCNXP_THR_REG, c); + break; + } + barrier(); + } +} + +static void sccnxp_console_write(struct console *co, const char *c, unsigned n) +{ + struct sccnxp_port *s = (struct sccnxp_port *)co->data; + struct uart_port *port = &s->port[co->index]; + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + uart_console_write(port, c, n, sccnxp_console_putchar); + spin_unlock_irqrestore(&s->lock, flags); +} + +static int sccnxp_console_setup(struct console *co, char *options) +{ + struct sccnxp_port *s = (struct sccnxp_port *)co->data; + struct uart_port *port = &s->port[(co->index > 0) ? 
co->index : 0]; + int baud = 9600, bits = 8, parity = 'n', flow = 'n'; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} +#endif + +static const struct platform_device_id sccnxp_id_table[] = { + { .name = "sc2681", .driver_data = (kernel_ulong_t)&sc2681, }, + { .name = "sc2691", .driver_data = (kernel_ulong_t)&sc2691, }, + { .name = "sc2692", .driver_data = (kernel_ulong_t)&sc2692, }, + { .name = "sc2891", .driver_data = (kernel_ulong_t)&sc2891, }, + { .name = "sc2892", .driver_data = (kernel_ulong_t)&sc2892, }, + { .name = "sc28202", .driver_data = (kernel_ulong_t)&sc28202, }, + { .name = "sc68681", .driver_data = (kernel_ulong_t)&sc68681, }, + { .name = "sc68692", .driver_data = (kernel_ulong_t)&sc68692, }, + { } +}; +MODULE_DEVICE_TABLE(platform, sccnxp_id_table); + +static int sccnxp_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct sccnxp_pdata *pdata = dev_get_platdata(&pdev->dev); + int i, ret, uartclk; + struct sccnxp_port *s; + void __iomem *membase; + struct clk *clk; + + membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(membase)) + return PTR_ERR(membase); + + s = devm_kzalloc(&pdev->dev, sizeof(struct sccnxp_port), GFP_KERNEL); + if (!s) { + dev_err(&pdev->dev, "Error allocating port structure\n"); + return -ENOMEM; + } + platform_set_drvdata(pdev, s); + + spin_lock_init(&s->lock); + + s->chip = (struct sccnxp_chip *)pdev->id_entry->driver_data; + + s->regulator = devm_regulator_get(&pdev->dev, "vcc"); + if (!IS_ERR(s->regulator)) { + ret = regulator_enable(s->regulator); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable regulator: %i\n", ret); + return ret; + } + } else if (PTR_ERR(s->regulator) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + if (ret == -EPROBE_DEFER) + goto err_out; + uartclk = 0; + } else { + ret = clk_prepare_enable(clk); + if (ret) + goto err_out; + + ret = devm_add_action_or_reset(&pdev->dev, + (void(*)(void *))clk_disable_unprepare, + clk); + if (ret) + goto err_out; + + uartclk = clk_get_rate(clk); + } + + if (!uartclk) { + dev_notice(&pdev->dev, "Using default clock frequency\n"); + uartclk = s->chip->freq_std; + } + + /* Check input frequency */ + if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) { + dev_err(&pdev->dev, "Frequency out of bounds\n"); + ret = -EINVAL; + goto err_out; + } + + if (pdata) + memcpy(&s->pdata, pdata, sizeof(struct sccnxp_pdata)); + + if (s->pdata.poll_time_us) { + dev_info(&pdev->dev, "Using poll mode, resolution %u usecs\n", + s->pdata.poll_time_us); + s->poll = 1; + } + + if (!s->poll) { + s->irq = platform_get_irq(pdev, 0); + if (s->irq < 0) { + ret = -ENXIO; + goto err_out; + } + } + + s->uart.owner = THIS_MODULE; + s->uart.dev_name = "ttySC"; + s->uart.major = SCCNXP_MAJOR; + s->uart.minor = SCCNXP_MINOR; + s->uart.nr = s->chip->nr; +#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE + s->uart.cons = &s->console; + s->uart.cons->device = uart_console_device; + s->uart.cons->write = sccnxp_console_write; + s->uart.cons->setup = sccnxp_console_setup; + s->uart.cons->flags = CON_PRINTBUFFER; + s->uart.cons->index = -1; + s->uart.cons->data = s; + strcpy(s->uart.cons->name, "ttySC"); +#endif + ret = uart_register_driver(&s->uart); + if (ret) { + dev_err(&pdev->dev, "Registering UART driver failed\n"); + goto err_out; + } + + for (i = 0; i < s->uart.nr; i++) { 
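+		/* Fill in the per-channel uart_port and register it with the serial core */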
+ s->port[i].line = i; + s->port[i].dev = &pdev->dev; + s->port[i].irq = s->irq; + s->port[i].type = PORT_SC26XX; + s->port[i].fifosize = s->chip->fifosize; + s->port[i].flags = UPF_SKIP_TEST | UPF_FIXED_TYPE; + s->port[i].iotype = UPIO_MEM; + s->port[i].mapbase = res->start; + s->port[i].membase = membase; + s->port[i].regshift = s->pdata.reg_shift; + s->port[i].uartclk = uartclk; + s->port[i].ops = &sccnxp_ops; + s->port[i].has_sysrq = IS_ENABLED(CONFIG_SERIAL_SCCNXP_CONSOLE); + uart_add_one_port(&s->uart, &s->port[i]); + /* Set direction to input */ + if (s->chip->flags & SCCNXP_HAVE_IO) + sccnxp_set_bit(&s->port[i], DIR_OP, 0); + } + + /* Disable interrupts */ + s->imr = 0; + sccnxp_write(&s->port[0], SCCNXP_IMR_REG, 0); + + if (!s->poll) { + ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL, + sccnxp_ist, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + dev_name(&pdev->dev), s); + if (!ret) + return 0; + + dev_err(&pdev->dev, "Unable to reguest IRQ %i\n", s->irq); + } else { + timer_setup(&s->timer, sccnxp_timer, 0); + mod_timer(&s->timer, jiffies + + usecs_to_jiffies(s->pdata.poll_time_us)); + return 0; + } + + uart_unregister_driver(&s->uart); +err_out: + if (!IS_ERR(s->regulator)) + regulator_disable(s->regulator); + + return ret; +} + +static int sccnxp_remove(struct platform_device *pdev) +{ + int i; + struct sccnxp_port *s = platform_get_drvdata(pdev); + + if (!s->poll) + devm_free_irq(&pdev->dev, s->irq, s); + else + del_timer_sync(&s->timer); + + for (i = 0; i < s->uart.nr; i++) + uart_remove_one_port(&s->uart, &s->port[i]); + + uart_unregister_driver(&s->uart); + + if (!IS_ERR(s->regulator)) + return regulator_disable(s->regulator); + + return 0; +} + +static struct platform_driver sccnxp_uart_driver = { + .driver = { + .name = SCCNXP_NAME, + }, + .probe = sccnxp_probe, + .remove = sccnxp_remove, + .id_table = sccnxp_id_table, +}; +module_platform_driver(sccnxp_uart_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Alexander Shiyan "); +MODULE_DESCRIPTION("SCCNXP serial driver"); diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c new file mode 100644 index 000000000..7aa2b5b67 --- /dev/null +++ b/drivers/tty/serial/serial-tegra.c @@ -0,0 +1,1710 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * serial_tegra.c + * + * High-speed serial driver for NVIDIA Tegra SoCs + * + * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved. + * + * Author: Laxman Dewangan + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TEGRA_UART_TYPE "TEGRA_UART" +#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE) +#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3) + +#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096 +#define TEGRA_UART_LSR_TXFIFO_FULL 0x100 +#define TEGRA_UART_IER_EORD 0x20 +#define TEGRA_UART_MCR_RTS_EN 0x40 +#define TEGRA_UART_MCR_CTS_EN 0x20 +#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \ + UART_LSR_PE | UART_LSR_FE) +#define TEGRA_UART_IRDA_CSR 0x08 +#define TEGRA_UART_SIR_ENABLED 0x80 + +#define TEGRA_UART_TX_PIO 1 +#define TEGRA_UART_TX_DMA 2 +#define TEGRA_UART_MIN_DMA 16 +#define TEGRA_UART_FIFO_SIZE 32 + +/* + * Tx fifo trigger level setting in tegra uart is in + * reverse way then conventional uart. 
+ */ +#define TEGRA_UART_TX_TRIG_16B 0x00 +#define TEGRA_UART_TX_TRIG_8B 0x10 +#define TEGRA_UART_TX_TRIG_4B 0x20 +#define TEGRA_UART_TX_TRIG_1B 0x30 + +#define TEGRA_UART_MAXIMUM 8 + +/* Default UART setting when started: 115200 no parity, stop, 8 data bits */ +#define TEGRA_UART_DEFAULT_BAUD 115200 +#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8 + +/* Tx transfer mode */ +#define TEGRA_TX_PIO 1 +#define TEGRA_TX_DMA 2 + +#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40 + +/** + * struct tegra_uart_chip_data: SOC specific data. + * + * @tx_fifo_full_status: Status flag available for checking tx fifo full. + * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not. + * Tegra30 does not allow this. + * @support_clk_src_div: Clock source support the clock divider. + * @fifo_mode_enable_status: Is FIFO mode enabled? + * @uart_max_port: Maximum number of UART ports + * @max_dma_burst_bytes: Maximum size of DMA bursts + * @error_tolerance_low_range: Lowest number in the error tolerance range + * @error_tolerance_high_range: Highest number in the error tolerance range + */ +struct tegra_uart_chip_data { + bool tx_fifo_full_status; + bool allow_txfifo_reset_fifo_mode; + bool support_clk_src_div; + bool fifo_mode_enable_status; + int uart_max_port; + int max_dma_burst_bytes; + int error_tolerance_low_range; + int error_tolerance_high_range; +}; + +struct tegra_baud_tolerance { + u32 lower_range_baud; + u32 upper_range_baud; + s32 tolerance; +}; + +struct tegra_uart_port { + struct uart_port uport; + const struct tegra_uart_chip_data *cdata; + + struct clk *uart_clk; + struct reset_control *rst; + unsigned int current_baud; + + /* Register shadow */ + unsigned long fcr_shadow; + unsigned long mcr_shadow; + unsigned long lcr_shadow; + unsigned long ier_shadow; + bool rts_active; + + int tx_in_progress; + unsigned int tx_bytes; + + bool enable_modem_interrupt; + + bool rx_timeout; + int rx_in_progress; + int symb_bit; + + struct dma_chan *rx_dma_chan; + struct dma_chan *tx_dma_chan; + dma_addr_t rx_dma_buf_phys; + dma_addr_t tx_dma_buf_phys; + unsigned char *rx_dma_buf_virt; + unsigned char *tx_dma_buf_virt; + struct dma_async_tx_descriptor *tx_dma_desc; + struct dma_async_tx_descriptor *rx_dma_desc; + dma_cookie_t tx_cookie; + dma_cookie_t rx_cookie; + unsigned int tx_bytes_requested; + unsigned int rx_bytes_requested; + struct tegra_baud_tolerance *baud_tolerance; + int n_adjustable_baud_rates; + int required_rate; + int configured_rate; + bool use_rx_pio; + bool use_tx_pio; + bool rx_dma_active; +}; + +static void tegra_uart_start_next_tx(struct tegra_uart_port *tup); +static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup); +static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup, + bool dma_to_memory); + +static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup, + unsigned long reg) +{ + return readl(tup->uport.membase + (reg << tup->uport.regshift)); +} + +static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val, + unsigned long reg) +{ + writel(val, tup->uport.membase + (reg << tup->uport.regshift)); +} + +static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u) +{ + return container_of(u, struct tegra_uart_port, uport); +} + +static unsigned int tegra_uart_get_mctrl(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + + /* + * RI - Ring detector is active + * CD/DCD/CAR - Carrier detect is always active. For some reason + * linux has different names for carrier detect. 
+ * DSR - Data Set ready is active as the hardware doesn't support it. + * Don't know if the linux support this yet? + * CTS - Clear to send. Always set to active, as the hardware handles + * CTS automatically. + */ + if (tup->enable_modem_interrupt) + return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS; + return TIOCM_CTS; +} + +static void set_rts(struct tegra_uart_port *tup, bool active) +{ + unsigned long mcr; + + mcr = tup->mcr_shadow; + if (active) + mcr |= TEGRA_UART_MCR_RTS_EN; + else + mcr &= ~TEGRA_UART_MCR_RTS_EN; + if (mcr != tup->mcr_shadow) { + tegra_uart_write(tup, mcr, UART_MCR); + tup->mcr_shadow = mcr; + } +} + +static void set_dtr(struct tegra_uart_port *tup, bool active) +{ + unsigned long mcr; + + mcr = tup->mcr_shadow; + if (active) + mcr |= UART_MCR_DTR; + else + mcr &= ~UART_MCR_DTR; + if (mcr != tup->mcr_shadow) { + tegra_uart_write(tup, mcr, UART_MCR); + tup->mcr_shadow = mcr; + } +} + +static void set_loopbk(struct tegra_uart_port *tup, bool active) +{ + unsigned long mcr = tup->mcr_shadow; + + if (active) + mcr |= UART_MCR_LOOP; + else + mcr &= ~UART_MCR_LOOP; + + if (mcr != tup->mcr_shadow) { + tegra_uart_write(tup, mcr, UART_MCR); + tup->mcr_shadow = mcr; + } +} + +static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + int enable; + + tup->rts_active = !!(mctrl & TIOCM_RTS); + set_rts(tup, tup->rts_active); + + enable = !!(mctrl & TIOCM_DTR); + set_dtr(tup, enable); + + enable = !!(mctrl & TIOCM_LOOP); + set_loopbk(tup, enable); +} + +static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + unsigned long lcr; + + lcr = tup->lcr_shadow; + if (break_ctl) + lcr |= UART_LCR_SBC; + else + lcr &= ~UART_LCR_SBC; + tegra_uart_write(tup, lcr, UART_LCR); + tup->lcr_shadow = lcr; +} + +/** + * tegra_uart_wait_cycle_time: Wait for N UART clock periods + * + * @tup: Tegra serial port data structure. + * @cycles: Number of clock periods to wait. + * + * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART + * clock speed is 16X the current baud rate. + */ +static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup, + unsigned int cycles) +{ + if (tup->current_baud) + udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16)); +} + +/* Wait for a symbol-time. 
*/ +static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup, + unsigned int syms) +{ + if (tup->current_baud) + udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000, + tup->current_baud)); +} + +static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup) +{ + unsigned long iir; + unsigned int tmout = 100; + + do { + iir = tegra_uart_read(tup, UART_IIR); + if (iir & TEGRA_UART_FCR_IIR_FIFO_EN) + return 0; + udelay(1); + } while (--tmout); + + return -ETIMEDOUT; +} + +static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits) +{ + unsigned long fcr = tup->fcr_shadow; + unsigned int lsr, tmout = 10000; + + if (tup->rts_active) + set_rts(tup, false); + + if (tup->cdata->allow_txfifo_reset_fifo_mode) { + fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + tegra_uart_write(tup, fcr, UART_FCR); + } else { + fcr &= ~UART_FCR_ENABLE_FIFO; + tegra_uart_write(tup, fcr, UART_FCR); + udelay(60); + fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + tegra_uart_write(tup, fcr, UART_FCR); + fcr |= UART_FCR_ENABLE_FIFO; + tegra_uart_write(tup, fcr, UART_FCR); + if (tup->cdata->fifo_mode_enable_status) + tegra_uart_wait_fifo_mode_enabled(tup); + } + + /* Dummy read to ensure the write is posted */ + tegra_uart_read(tup, UART_SCR); + + /* + * For all tegra devices (up to t210), there is a hardware issue that + * requires software to wait for 32 UART clock periods for the flush + * to propagate, otherwise data could be lost. + */ + tegra_uart_wait_cycle_time(tup, 32); + + do { + lsr = tegra_uart_read(tup, UART_LSR); + if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR)) + break; + udelay(1); + } while (--tmout); + + if (tup->rts_active) + set_rts(tup, true); +} + +static long tegra_get_tolerance_rate(struct tegra_uart_port *tup, + unsigned int baud, long rate) +{ + int i; + + for (i = 0; i < tup->n_adjustable_baud_rates; ++i) { + if (baud >= tup->baud_tolerance[i].lower_range_baud && + baud <= tup->baud_tolerance[i].upper_range_baud) + return (rate + (rate * + tup->baud_tolerance[i].tolerance) / 10000); + } + + return rate; +} + +static int tegra_check_rate_in_range(struct tegra_uart_port *tup) +{ + long diff; + + diff = ((long)(tup->configured_rate - tup->required_rate) * 10000) + / tup->required_rate; + if (diff < (tup->cdata->error_tolerance_low_range * 100) || + diff > (tup->cdata->error_tolerance_high_range * 100)) { + dev_err(tup->uport.dev, + "configured baud rate is out of range by %ld", diff); + return -EIO; + } + + return 0; +} + +static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud) +{ + unsigned long rate; + unsigned int divisor; + unsigned long lcr; + unsigned long flags; + int ret; + + if (tup->current_baud == baud) + return 0; + + if (tup->cdata->support_clk_src_div) { + rate = baud * 16; + tup->required_rate = rate; + + if (tup->n_adjustable_baud_rates) + rate = tegra_get_tolerance_rate(tup, baud, rate); + + ret = clk_set_rate(tup->uart_clk, rate); + if (ret < 0) { + dev_err(tup->uport.dev, + "clk_set_rate() failed for rate %lu\n", rate); + return ret; + } + tup->configured_rate = clk_get_rate(tup->uart_clk); + divisor = 1; + ret = tegra_check_rate_in_range(tup); + if (ret < 0) + return ret; + } else { + rate = clk_get_rate(tup->uart_clk); + divisor = DIV_ROUND_CLOSEST(rate, baud * 16); + } + + spin_lock_irqsave(&tup->uport.lock, flags); + lcr = tup->lcr_shadow; + lcr |= UART_LCR_DLAB; + tegra_uart_write(tup, lcr, UART_LCR); + + tegra_uart_write(tup, divisor & 0xFF, UART_TX); + tegra_uart_write(tup, ((divisor 
>> 8) & 0xFF), UART_IER); + + lcr &= ~UART_LCR_DLAB; + tegra_uart_write(tup, lcr, UART_LCR); + + /* Dummy read to ensure the write is posted */ + tegra_uart_read(tup, UART_SCR); + spin_unlock_irqrestore(&tup->uport.lock, flags); + + tup->current_baud = baud; + + /* wait two character intervals at new rate */ + tegra_uart_wait_sym_time(tup, 2); + return 0; +} + +static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup, + unsigned long lsr) +{ + char flag = TTY_NORMAL; + + if (unlikely(lsr & TEGRA_UART_LSR_ANY)) { + if (lsr & UART_LSR_OE) { + /* Overrun error */ + flag = TTY_OVERRUN; + tup->uport.icount.overrun++; + dev_dbg(tup->uport.dev, "Got overrun errors\n"); + } else if (lsr & UART_LSR_PE) { + /* Parity error */ + flag = TTY_PARITY; + tup->uport.icount.parity++; + dev_dbg(tup->uport.dev, "Got Parity errors\n"); + } else if (lsr & UART_LSR_FE) { + flag = TTY_FRAME; + tup->uport.icount.frame++; + dev_dbg(tup->uport.dev, "Got frame errors\n"); + } else if (lsr & UART_LSR_BI) { + /* + * Break error + * If FIFO read error without any data, reset Rx FIFO + */ + if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE)) + tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR); + if (tup->uport.ignore_status_mask & UART_LSR_BI) + return TTY_BREAK; + flag = TTY_BREAK; + tup->uport.icount.brk++; + dev_dbg(tup->uport.dev, "Got Break\n"); + } + uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag); + } + + return flag; +} + +static int tegra_uart_request_port(struct uart_port *u) +{ + return 0; +} + +static void tegra_uart_release_port(struct uart_port *u) +{ + /* Nothing to do here */ +} + +static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes) +{ + struct circ_buf *xmit = &tup->uport.state->xmit; + int i; + + for (i = 0; i < max_bytes; i++) { + BUG_ON(uart_circ_empty(xmit)); + if (tup->cdata->tx_fifo_full_status) { + unsigned long lsr = tegra_uart_read(tup, UART_LSR); + if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL)) + break; + } + tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + tup->uport.icount.tx++; + } +} + +static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup, + unsigned int bytes) +{ + if (bytes > TEGRA_UART_MIN_DMA) + bytes = TEGRA_UART_MIN_DMA; + + tup->tx_in_progress = TEGRA_UART_TX_PIO; + tup->tx_bytes = bytes; + tup->ier_shadow |= UART_IER_THRI; + tegra_uart_write(tup, tup->ier_shadow, UART_IER); +} + +static void tegra_uart_tx_dma_complete(void *args) +{ + struct tegra_uart_port *tup = args; + struct circ_buf *xmit = &tup->uport.state->xmit; + struct dma_tx_state state; + unsigned long flags; + unsigned int count; + + dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); + count = tup->tx_bytes_requested - state.residue; + async_tx_ack(tup->tx_dma_desc); + spin_lock_irqsave(&tup->uport.lock, flags); + uart_xmit_advance(&tup->uport, count); + tup->tx_in_progress = 0; + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&tup->uport); + tegra_uart_start_next_tx(tup); + spin_unlock_irqrestore(&tup->uport.lock, flags); +} + +static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup, + unsigned long count) +{ + struct circ_buf *xmit = &tup->uport.state->xmit; + dma_addr_t tx_phys_addr; + + tup->tx_bytes = count & ~(0xF); + tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail; + + dma_sync_single_for_device(tup->uport.dev, tx_phys_addr, + tup->tx_bytes, DMA_TO_DEVICE); + + tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan, + tx_phys_addr, tup->tx_bytes, 
DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT); + if (!tup->tx_dma_desc) { + dev_err(tup->uport.dev, "Not able to get desc for Tx\n"); + return -EIO; + } + + tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete; + tup->tx_dma_desc->callback_param = tup; + tup->tx_in_progress = TEGRA_UART_TX_DMA; + tup->tx_bytes_requested = tup->tx_bytes; + tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc); + dma_async_issue_pending(tup->tx_dma_chan); + return 0; +} + +static void tegra_uart_start_next_tx(struct tegra_uart_port *tup) +{ + unsigned long tail; + unsigned long count; + struct circ_buf *xmit = &tup->uport.state->xmit; + + if (!tup->current_baud) + return; + + tail = (unsigned long)&xmit->buf[xmit->tail]; + count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + if (!count) + return; + + if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA) + tegra_uart_start_pio_tx(tup, count); + else if (BYTES_TO_ALIGN(tail) > 0) + tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail)); + else + tegra_uart_start_tx_dma(tup, count); +} + +/* Called by serial core driver with u->lock taken. */ +static void tegra_uart_start_tx(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + struct circ_buf *xmit = &u->state->xmit; + + if (!uart_circ_empty(xmit) && !tup->tx_in_progress) + tegra_uart_start_next_tx(tup); +} + +static unsigned int tegra_uart_tx_empty(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + unsigned int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&u->lock, flags); + if (!tup->tx_in_progress) { + unsigned long lsr = tegra_uart_read(tup, UART_LSR); + if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS) + ret = TIOCSER_TEMT; + } + spin_unlock_irqrestore(&u->lock, flags); + return ret; +} + +static void tegra_uart_stop_tx(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + struct dma_tx_state state; + unsigned int count; + + if (tup->tx_in_progress != TEGRA_UART_TX_DMA) + return; + + dmaengine_pause(tup->tx_dma_chan); + dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); + dmaengine_terminate_all(tup->tx_dma_chan); + count = tup->tx_bytes_requested - state.residue; + async_tx_ack(tup->tx_dma_desc); + uart_xmit_advance(&tup->uport, count); + tup->tx_in_progress = 0; +} + +static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup) +{ + struct circ_buf *xmit = &tup->uport.state->xmit; + + tegra_uart_fill_tx_fifo(tup, tup->tx_bytes); + tup->tx_in_progress = 0; + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&tup->uport); + tegra_uart_start_next_tx(tup); +} + +static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, + struct tty_port *port) +{ + do { + char flag = TTY_NORMAL; + unsigned long lsr = 0; + unsigned char ch; + + lsr = tegra_uart_read(tup, UART_LSR); + if (!(lsr & UART_LSR_DR)) + break; + + flag = tegra_uart_decode_rx_error(tup, lsr); + if (flag != TTY_NORMAL) + continue; + + ch = (unsigned char) tegra_uart_read(tup, UART_RX); + tup->uport.icount.rx++; + + if (uart_handle_sysrq_char(&tup->uport, ch)) + continue; + + if (tup->uport.ignore_status_mask & UART_LSR_DR) + continue; + + tty_insert_flip_char(port, ch, flag); + } while (1); +} + +static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, + struct tty_port *port, + unsigned int count) +{ + int copied; + + /* If count is zero, then there is no data to be copied */ + if (!count) + return; + + tup->uport.icount.rx += count; + + if (tup->uport.ignore_status_mask & UART_LSR_DR) + return; + + 
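+	/* Give the CPU ownership of the DMA buffer before copying it into the TTY layer */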
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys, + count, DMA_FROM_DEVICE); + copied = tty_insert_flip_string(port, + ((unsigned char *)(tup->rx_dma_buf_virt)), count); + if (copied != count) { + WARN_ON(1); + dev_err(tup->uport.dev, "RxData copy to tty layer failed\n"); + } + dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys, + count, DMA_TO_DEVICE); +} + +static void do_handle_rx_pio(struct tegra_uart_port *tup) +{ + struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); + struct tty_port *port = &tup->uport.state->port; + + tegra_uart_handle_rx_pio(tup, port); + if (tty) { + tty_flip_buffer_push(port); + tty_kref_put(tty); + } +} + +static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup, + unsigned int residue) +{ + struct tty_port *port = &tup->uport.state->port; + unsigned int count; + + async_tx_ack(tup->rx_dma_desc); + count = tup->rx_bytes_requested - residue; + + /* If we are here, DMA is stopped */ + tegra_uart_copy_rx_to_tty(tup, port, count); + + do_handle_rx_pio(tup); +} + +static void tegra_uart_rx_dma_complete(void *args) +{ + struct tegra_uart_port *tup = args; + struct uart_port *u = &tup->uport; + unsigned long flags; + struct dma_tx_state state; + enum dma_status status; + + spin_lock_irqsave(&u->lock, flags); + + status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); + + if (status == DMA_IN_PROGRESS) { + dev_dbg(tup->uport.dev, "RX DMA is in progress\n"); + goto done; + } + + /* Deactivate flow control to stop sender */ + if (tup->rts_active) + set_rts(tup, false); + + tup->rx_dma_active = false; + tegra_uart_rx_buffer_push(tup, 0); + tegra_uart_start_rx_dma(tup); + + /* Activate flow control to start transfer */ + if (tup->rts_active) + set_rts(tup, true); + +done: + spin_unlock_irqrestore(&u->lock, flags); +} + +static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup) +{ + struct dma_tx_state state; + + if (!tup->rx_dma_active) { + do_handle_rx_pio(tup); + return; + } + + dmaengine_pause(tup->rx_dma_chan); + dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); + dmaengine_terminate_all(tup->rx_dma_chan); + + tegra_uart_rx_buffer_push(tup, state.residue); + tup->rx_dma_active = false; +} + +static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup) +{ + /* Deactivate flow control to stop sender */ + if (tup->rts_active) + set_rts(tup, false); + + tegra_uart_terminate_rx_dma(tup); + + if (tup->rts_active) + set_rts(tup, true); +} + +static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup) +{ + unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE; + + if (tup->rx_dma_active) + return 0; + + tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan, + tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!tup->rx_dma_desc) { + dev_err(tup->uport.dev, "Not able to get desc for Rx\n"); + return -EIO; + } + + tup->rx_dma_active = true; + tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete; + tup->rx_dma_desc->callback_param = tup; + tup->rx_bytes_requested = count; + tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc); + dma_async_issue_pending(tup->rx_dma_chan); + return 0; +} + +static void tegra_uart_handle_modem_signal_change(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + unsigned long msr; + + msr = tegra_uart_read(tup, UART_MSR); + if (!(msr & UART_MSR_ANY_DELTA)) + return; + + if (msr & UART_MSR_TERI) + tup->uport.icount.rng++; + if (msr & UART_MSR_DDSR) + tup->uport.icount.dsr++; + /* We may only get DDCD 
when HW init and reset */ + if (msr & UART_MSR_DDCD) + uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD); + /* Will start/stop_tx accordingly */ + if (msr & UART_MSR_DCTS) + uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS); +} + +static irqreturn_t tegra_uart_isr(int irq, void *data) +{ + struct tegra_uart_port *tup = data; + struct uart_port *u = &tup->uport; + unsigned long iir; + unsigned long ier; + bool is_rx_start = false; + bool is_rx_int = false; + unsigned long flags; + + spin_lock_irqsave(&u->lock, flags); + while (1) { + iir = tegra_uart_read(tup, UART_IIR); + if (iir & UART_IIR_NO_INT) { + if (!tup->use_rx_pio && is_rx_int) { + tegra_uart_handle_rx_dma(tup); + if (tup->rx_in_progress) { + ier = tup->ier_shadow; + ier |= (UART_IER_RLSI | UART_IER_RTOIE | + TEGRA_UART_IER_EORD | UART_IER_RDI); + tup->ier_shadow = ier; + tegra_uart_write(tup, ier, UART_IER); + } + } else if (is_rx_start) { + tegra_uart_start_rx_dma(tup); + } + spin_unlock_irqrestore(&u->lock, flags); + return IRQ_HANDLED; + } + + switch ((iir >> 1) & 0x7) { + case 0: /* Modem signal change interrupt */ + tegra_uart_handle_modem_signal_change(u); + break; + + case 1: /* Transmit interrupt only triggered when using PIO */ + tup->ier_shadow &= ~UART_IER_THRI; + tegra_uart_write(tup, tup->ier_shadow, UART_IER); + tegra_uart_handle_tx_pio(tup); + break; + + case 4: /* End of data */ + case 6: /* Rx timeout */ + if (!tup->use_rx_pio) { + is_rx_int = tup->rx_in_progress; + /* Disable Rx interrupts */ + ier = tup->ier_shadow; + ier &= ~(UART_IER_RDI | UART_IER_RLSI | + UART_IER_RTOIE | TEGRA_UART_IER_EORD); + tup->ier_shadow = ier; + tegra_uart_write(tup, ier, UART_IER); + break; + } + fallthrough; + case 2: /* Receive */ + if (!tup->use_rx_pio) { + is_rx_start = tup->rx_in_progress; + tup->ier_shadow &= ~UART_IER_RDI; + tegra_uart_write(tup, tup->ier_shadow, + UART_IER); + } else { + do_handle_rx_pio(tup); + } + break; + + case 3: /* Receive error */ + tegra_uart_decode_rx_error(tup, + tegra_uart_read(tup, UART_LSR)); + break; + + case 5: /* break nothing to handle */ + case 7: /* break nothing to handle */ + break; + } + } +} + +static void tegra_uart_stop_rx(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + struct tty_port *port = &tup->uport.state->port; + unsigned long ier; + + if (tup->rts_active) + set_rts(tup, false); + + if (!tup->rx_in_progress) + return; + + tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */ + + ier = tup->ier_shadow; + ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | + TEGRA_UART_IER_EORD); + tup->ier_shadow = ier; + tegra_uart_write(tup, ier, UART_IER); + tup->rx_in_progress = 0; + + if (!tup->use_rx_pio) + tegra_uart_terminate_rx_dma(tup); + else + tegra_uart_handle_rx_pio(tup, port); +} + +static void tegra_uart_hw_deinit(struct tegra_uart_port *tup) +{ + unsigned long flags; + unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud); + unsigned long fifo_empty_time = tup->uport.fifosize * char_time; + unsigned long wait_time; + unsigned long lsr; + unsigned long msr; + unsigned long mcr; + + /* Disable interrupts */ + tegra_uart_write(tup, 0, UART_IER); + + lsr = tegra_uart_read(tup, UART_LSR); + if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) { + msr = tegra_uart_read(tup, UART_MSR); + mcr = tegra_uart_read(tup, UART_MCR); + if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS)) + dev_err(tup->uport.dev, + "Tx Fifo not empty, CTS disabled, waiting\n"); + + /* Wait for Tx fifo to be empty */ + while ((lsr & 
UART_LSR_TEMT) != UART_LSR_TEMT) { + wait_time = min(fifo_empty_time, 100lu); + udelay(wait_time); + fifo_empty_time -= wait_time; + if (!fifo_empty_time) { + msr = tegra_uart_read(tup, UART_MSR); + mcr = tegra_uart_read(tup, UART_MCR); + if ((mcr & TEGRA_UART_MCR_CTS_EN) && + (msr & UART_MSR_CTS)) + dev_err(tup->uport.dev, + "Slave not ready\n"); + break; + } + lsr = tegra_uart_read(tup, UART_LSR); + } + } + + spin_lock_irqsave(&tup->uport.lock, flags); + /* Reset the Rx and Tx FIFOs */ + tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); + tup->current_baud = 0; + spin_unlock_irqrestore(&tup->uport.lock, flags); + + tup->rx_in_progress = 0; + tup->tx_in_progress = 0; + + if (!tup->use_rx_pio) + tegra_uart_dma_channel_free(tup, true); + if (!tup->use_tx_pio) + tegra_uart_dma_channel_free(tup, false); + + clk_disable_unprepare(tup->uart_clk); +} + +static int tegra_uart_hw_init(struct tegra_uart_port *tup) +{ + int ret; + + tup->fcr_shadow = 0; + tup->mcr_shadow = 0; + tup->lcr_shadow = 0; + tup->ier_shadow = 0; + tup->current_baud = 0; + + ret = clk_prepare_enable(tup->uart_clk); + if (ret) { + dev_err(tup->uport.dev, "could not enable clk\n"); + return ret; + } + + /* Reset the UART controller to clear all previous status.*/ + reset_control_assert(tup->rst); + udelay(10); + reset_control_deassert(tup->rst); + + tup->rx_in_progress = 0; + tup->tx_in_progress = 0; + + /* + * Set the trigger level + * + * For PIO mode: + * + * For receive, this will interrupt the CPU after that many number of + * bytes are received, for the remaining bytes the receive timeout + * interrupt is received. Rx high watermark is set to 4. + * + * For transmit, if the trasnmit interrupt is enabled, this will + * interrupt the CPU when the number of entries in the FIFO reaches the + * low watermark. Tx low watermark is set to 16 bytes. + * + * For DMA mode: + * + * Set the Tx trigger to 16. This should match the DMA burst size that + * programmed in the DMA registers. + */ + tup->fcr_shadow = UART_FCR_ENABLE_FIFO; + + if (tup->use_rx_pio) { + tup->fcr_shadow |= UART_FCR_R_TRIG_11; + } else { + if (tup->cdata->max_dma_burst_bytes == 8) + tup->fcr_shadow |= UART_FCR_R_TRIG_10; + else + tup->fcr_shadow |= UART_FCR_R_TRIG_01; + } + + tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B; + tegra_uart_write(tup, tup->fcr_shadow, UART_FCR); + + /* Dummy read to ensure the write is posted */ + tegra_uart_read(tup, UART_SCR); + + if (tup->cdata->fifo_mode_enable_status) { + ret = tegra_uart_wait_fifo_mode_enabled(tup); + if (ret < 0) { + clk_disable_unprepare(tup->uart_clk); + dev_err(tup->uport.dev, + "Failed to enable FIFO mode: %d\n", ret); + return ret; + } + } else { + /* + * For all tegra devices (up to t210), there is a hardware + * issue that requires software to wait for 3 UART clock + * periods after enabling the TX fifo, otherwise data could + * be lost. 
+ */ + tegra_uart_wait_cycle_time(tup, 3); + } + + /* + * Initialize the UART with default configuration + * (115200, N, 8, 1) so that the receive DMA buffer may be + * enqueued + */ + ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD); + if (ret < 0) { + clk_disable_unprepare(tup->uart_clk); + dev_err(tup->uport.dev, "Failed to set baud rate\n"); + return ret; + } + if (!tup->use_rx_pio) { + tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR; + tup->fcr_shadow |= UART_FCR_DMA_SELECT; + tegra_uart_write(tup, tup->fcr_shadow, UART_FCR); + } else { + tegra_uart_write(tup, tup->fcr_shadow, UART_FCR); + } + tup->rx_in_progress = 1; + + /* + * Enable IE_RXS for the receive status interrupts like line errors. + * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd. + * + * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when + * the DATA is sitting in the FIFO and couldn't be transferred to the + * DMA as the DMA size alignment (4 bytes) is not met. EORD will be + * triggered when there is a pause of the incomming data stream for 4 + * characters long. + * + * For pauses in the data which is not aligned to 4 bytes, we get + * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first + * then the EORD. + */ + tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI; + + /* + * If using DMA mode, enable EORD interrupt to notify about RX + * completion. + */ + if (!tup->use_rx_pio) + tup->ier_shadow |= TEGRA_UART_IER_EORD; + + tegra_uart_write(tup, tup->ier_shadow, UART_IER); + return 0; +} + +static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup, + bool dma_to_memory) +{ + if (dma_to_memory) { + dmaengine_terminate_all(tup->rx_dma_chan); + dma_release_channel(tup->rx_dma_chan); + dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE, + tup->rx_dma_buf_virt, tup->rx_dma_buf_phys); + tup->rx_dma_chan = NULL; + tup->rx_dma_buf_phys = 0; + tup->rx_dma_buf_virt = NULL; + } else { + dmaengine_terminate_all(tup->tx_dma_chan); + dma_release_channel(tup->tx_dma_chan); + dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys, + UART_XMIT_SIZE, DMA_TO_DEVICE); + tup->tx_dma_chan = NULL; + tup->tx_dma_buf_phys = 0; + tup->tx_dma_buf_virt = NULL; + } +} + +static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup, + bool dma_to_memory) +{ + struct dma_chan *dma_chan; + unsigned char *dma_buf; + dma_addr_t dma_phys; + int ret; + struct dma_slave_config dma_sconfig; + + dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? 
"rx" : "tx"); + if (IS_ERR(dma_chan)) { + ret = PTR_ERR(dma_chan); + dev_err(tup->uport.dev, + "DMA channel alloc failed: %d\n", ret); + return ret; + } + + if (dma_to_memory) { + dma_buf = dma_alloc_coherent(tup->uport.dev, + TEGRA_UART_RX_DMA_BUFFER_SIZE, + &dma_phys, GFP_KERNEL); + if (!dma_buf) { + dev_err(tup->uport.dev, + "Not able to allocate the dma buffer\n"); + dma_release_channel(dma_chan); + return -ENOMEM; + } + dma_sync_single_for_device(tup->uport.dev, dma_phys, + TEGRA_UART_RX_DMA_BUFFER_SIZE, + DMA_TO_DEVICE); + dma_sconfig.src_addr = tup->uport.mapbase; + dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes; + tup->rx_dma_chan = dma_chan; + tup->rx_dma_buf_virt = dma_buf; + tup->rx_dma_buf_phys = dma_phys; + } else { + dma_phys = dma_map_single(tup->uport.dev, + tup->uport.state->xmit.buf, UART_XMIT_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(tup->uport.dev, dma_phys)) { + dev_err(tup->uport.dev, "dma_map_single tx failed\n"); + dma_release_channel(dma_chan); + return -ENOMEM; + } + dma_buf = tup->uport.state->xmit.buf; + dma_sconfig.dst_addr = tup->uport.mapbase; + dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_sconfig.dst_maxburst = 16; + tup->tx_dma_chan = dma_chan; + tup->tx_dma_buf_virt = dma_buf; + tup->tx_dma_buf_phys = dma_phys; + } + + ret = dmaengine_slave_config(dma_chan, &dma_sconfig); + if (ret < 0) { + dev_err(tup->uport.dev, + "Dma slave config failed, err = %d\n", ret); + tegra_uart_dma_channel_free(tup, dma_to_memory); + return ret; + } + + return 0; +} + +static int tegra_uart_startup(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + int ret; + + if (!tup->use_tx_pio) { + ret = tegra_uart_dma_channel_allocate(tup, false); + if (ret < 0) { + dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", + ret); + return ret; + } + } + + if (!tup->use_rx_pio) { + ret = tegra_uart_dma_channel_allocate(tup, true); + if (ret < 0) { + dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", + ret); + goto fail_rx_dma; + } + } + + ret = tegra_uart_hw_init(tup); + if (ret < 0) { + dev_err(u->dev, "Uart HW init failed, err = %d\n", ret); + goto fail_hw_init; + } + + ret = request_irq(u->irq, tegra_uart_isr, 0, + dev_name(u->dev), tup); + if (ret < 0) { + dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq); + goto fail_request_irq; + } + return 0; + +fail_request_irq: + /* tup->uart_clk is already enabled in tegra_uart_hw_init */ + clk_disable_unprepare(tup->uart_clk); +fail_hw_init: + if (!tup->use_rx_pio) + tegra_uart_dma_channel_free(tup, true); +fail_rx_dma: + if (!tup->use_tx_pio) + tegra_uart_dma_channel_free(tup, false); + return ret; +} + +/* + * Flush any TX data submitted for DMA and PIO. Called when the + * TX circular buffer is reset. 
+ */ +static void tegra_uart_flush_buffer(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + + tup->tx_bytes = 0; + if (tup->tx_dma_chan) + dmaengine_terminate_all(tup->tx_dma_chan); +} + +static void tegra_uart_shutdown(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + + tegra_uart_hw_deinit(tup); + free_irq(u->irq, tup); +} + +static void tegra_uart_enable_ms(struct uart_port *u) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + + if (tup->enable_modem_interrupt) { + tup->ier_shadow |= UART_IER_MSI; + tegra_uart_write(tup, tup->ier_shadow, UART_IER); + } +} + +static void tegra_uart_set_termios(struct uart_port *u, + struct ktermios *termios, + const struct ktermios *oldtermios) +{ + struct tegra_uart_port *tup = to_tegra_uport(u); + unsigned int baud; + unsigned long flags; + unsigned int lcr; + unsigned char char_bits; + struct clk *parent_clk = clk_get_parent(tup->uart_clk); + unsigned long parent_clk_rate = clk_get_rate(parent_clk); + int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF; + int ret; + + max_divider *= 16; + spin_lock_irqsave(&u->lock, flags); + + /* Changing configuration, it is safe to stop any rx now */ + if (tup->rts_active) + set_rts(tup, false); + + /* Clear all interrupts as configuration is going to be changed */ + tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER); + tegra_uart_read(tup, UART_IER); + tegra_uart_write(tup, 0, UART_IER); + tegra_uart_read(tup, UART_IER); + + /* Parity */ + lcr = tup->lcr_shadow; + lcr &= ~UART_LCR_PARITY; + + /* CMSPAR isn't supported by this driver */ + termios->c_cflag &= ~CMSPAR; + + if ((termios->c_cflag & PARENB) == PARENB) { + if (termios->c_cflag & PARODD) { + lcr |= UART_LCR_PARITY; + lcr &= ~UART_LCR_EPAR; + lcr &= ~UART_LCR_SPAR; + } else { + lcr |= UART_LCR_PARITY; + lcr |= UART_LCR_EPAR; + lcr &= ~UART_LCR_SPAR; + } + } + + char_bits = tty_get_char_size(termios->c_cflag); + lcr &= ~UART_LCR_WLEN8; + lcr |= UART_LCR_WLEN(char_bits); + + /* Stop bits */ + if (termios->c_cflag & CSTOPB) + lcr |= UART_LCR_STOP; + else + lcr &= ~UART_LCR_STOP; + + tegra_uart_write(tup, lcr, UART_LCR); + tup->lcr_shadow = lcr; + tup->symb_bit = tty_get_frame_size(termios->c_cflag); + + /* Baud rate. 
*/ + baud = uart_get_baud_rate(u, termios, oldtermios, + parent_clk_rate/max_divider, + parent_clk_rate/16); + spin_unlock_irqrestore(&u->lock, flags); + ret = tegra_set_baudrate(tup, baud); + if (ret < 0) { + dev_err(tup->uport.dev, "Failed to set baud rate\n"); + return; + } + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + spin_lock_irqsave(&u->lock, flags); + + /* Flow control */ + if (termios->c_cflag & CRTSCTS) { + tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN; + tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN; + tegra_uart_write(tup, tup->mcr_shadow, UART_MCR); + /* if top layer has asked to set rts active then do so here */ + if (tup->rts_active) + set_rts(tup, true); + } else { + tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN; + tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN; + tegra_uart_write(tup, tup->mcr_shadow, UART_MCR); + } + + /* update the port timeout based on new settings */ + uart_update_timeout(u, termios->c_cflag, baud); + + /* Make sure all writes have completed */ + tegra_uart_read(tup, UART_IER); + + /* Re-enable interrupt */ + tegra_uart_write(tup, tup->ier_shadow, UART_IER); + tegra_uart_read(tup, UART_IER); + + tup->uport.ignore_status_mask = 0; + /* Ignore all characters if CREAD is not set */ + if ((termios->c_cflag & CREAD) == 0) + tup->uport.ignore_status_mask |= UART_LSR_DR; + if (termios->c_iflag & IGNBRK) + tup->uport.ignore_status_mask |= UART_LSR_BI; + + spin_unlock_irqrestore(&u->lock, flags); +} + +static const char *tegra_uart_type(struct uart_port *u) +{ + return TEGRA_UART_TYPE; +} + +static const struct uart_ops tegra_uart_ops = { + .tx_empty = tegra_uart_tx_empty, + .set_mctrl = tegra_uart_set_mctrl, + .get_mctrl = tegra_uart_get_mctrl, + .stop_tx = tegra_uart_stop_tx, + .start_tx = tegra_uart_start_tx, + .stop_rx = tegra_uart_stop_rx, + .flush_buffer = tegra_uart_flush_buffer, + .enable_ms = tegra_uart_enable_ms, + .break_ctl = tegra_uart_break_ctl, + .startup = tegra_uart_startup, + .shutdown = tegra_uart_shutdown, + .set_termios = tegra_uart_set_termios, + .type = tegra_uart_type, + .request_port = tegra_uart_request_port, + .release_port = tegra_uart_release_port, +}; + +static struct uart_driver tegra_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "tegra_hsuart", + .dev_name = "ttyTHS", + .cons = NULL, + .nr = TEGRA_UART_MAXIMUM, +}; + +static int tegra_uart_parse_dt(struct platform_device *pdev, + struct tegra_uart_port *tup) +{ + struct device_node *np = pdev->dev.of_node; + int port; + int ret; + int index; + u32 pval; + int count; + int n_entries; + + port = of_alias_get_id(np, "serial"); + if (port < 0) { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port); + return port; + } + tup->uport.line = port; + + tup->enable_modem_interrupt = of_property_read_bool(np, + "nvidia,enable-modem-interrupt"); + + index = of_property_match_string(np, "dma-names", "rx"); + if (index < 0) { + tup->use_rx_pio = true; + dev_info(&pdev->dev, "RX in PIO mode\n"); + } + index = of_property_match_string(np, "dma-names", "tx"); + if (index < 0) { + tup->use_tx_pio = true; + dev_info(&pdev->dev, "TX in PIO mode\n"); + } + + n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates"); + if (n_entries > 0) { + tup->n_adjustable_baud_rates = n_entries / 3; + tup->baud_tolerance = + devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) * + sizeof(*tup->baud_tolerance), GFP_KERNEL); + if (!tup->baud_tolerance) + return -ENOMEM; + for (count = 0, index = 0; count < n_entries; count += 3, + index++) { + ret 
= + of_property_read_u32_index(np, + "nvidia,adjust-baud-rates", + count, &pval); + if (!ret) + tup->baud_tolerance[index].lower_range_baud = + pval; + ret = + of_property_read_u32_index(np, + "nvidia,adjust-baud-rates", + count + 1, &pval); + if (!ret) + tup->baud_tolerance[index].upper_range_baud = + pval; + ret = + of_property_read_u32_index(np, + "nvidia,adjust-baud-rates", + count + 2, &pval); + if (!ret) + tup->baud_tolerance[index].tolerance = + (s32)pval; + } + } else { + tup->n_adjustable_baud_rates = 0; + } + + return 0; +} + +static struct tegra_uart_chip_data tegra20_uart_chip_data = { + .tx_fifo_full_status = false, + .allow_txfifo_reset_fifo_mode = true, + .support_clk_src_div = false, + .fifo_mode_enable_status = false, + .uart_max_port = 5, + .max_dma_burst_bytes = 4, + .error_tolerance_low_range = -4, + .error_tolerance_high_range = 4, +}; + +static struct tegra_uart_chip_data tegra30_uart_chip_data = { + .tx_fifo_full_status = true, + .allow_txfifo_reset_fifo_mode = false, + .support_clk_src_div = true, + .fifo_mode_enable_status = false, + .uart_max_port = 5, + .max_dma_burst_bytes = 4, + .error_tolerance_low_range = -4, + .error_tolerance_high_range = 4, +}; + +static struct tegra_uart_chip_data tegra186_uart_chip_data = { + .tx_fifo_full_status = true, + .allow_txfifo_reset_fifo_mode = false, + .support_clk_src_div = true, + .fifo_mode_enable_status = true, + .uart_max_port = 8, + .max_dma_burst_bytes = 8, + .error_tolerance_low_range = 0, + .error_tolerance_high_range = 4, +}; + +static struct tegra_uart_chip_data tegra194_uart_chip_data = { + .tx_fifo_full_status = true, + .allow_txfifo_reset_fifo_mode = false, + .support_clk_src_div = true, + .fifo_mode_enable_status = true, + .uart_max_port = 8, + .max_dma_burst_bytes = 8, + .error_tolerance_low_range = -2, + .error_tolerance_high_range = 2, +}; + +static const struct of_device_id tegra_uart_of_match[] = { + { + .compatible = "nvidia,tegra30-hsuart", + .data = &tegra30_uart_chip_data, + }, { + .compatible = "nvidia,tegra20-hsuart", + .data = &tegra20_uart_chip_data, + }, { + .compatible = "nvidia,tegra186-hsuart", + .data = &tegra186_uart_chip_data, + }, { + .compatible = "nvidia,tegra194-hsuart", + .data = &tegra194_uart_chip_data, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, tegra_uart_of_match); + +static int tegra_uart_probe(struct platform_device *pdev) +{ + struct tegra_uart_port *tup; + struct uart_port *u; + struct resource *resource; + int ret; + const struct tegra_uart_chip_data *cdata; + + cdata = of_device_get_match_data(&pdev->dev); + if (!cdata) { + dev_err(&pdev->dev, "Error: No device match found\n"); + return -ENODEV; + } + + tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL); + if (!tup) { + dev_err(&pdev->dev, "Failed to allocate memory for tup\n"); + return -ENOMEM; + } + + ret = tegra_uart_parse_dt(pdev, tup); + if (ret < 0) + return ret; + + u = &tup->uport; + u->dev = &pdev->dev; + u->ops = &tegra_uart_ops; + u->type = PORT_TEGRA; + u->fifosize = 32; + tup->cdata = cdata; + + platform_set_drvdata(pdev, tup); + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!resource) { + dev_err(&pdev->dev, "No IO memory resource\n"); + return -ENODEV; + } + + u->mapbase = resource->start; + u->membase = devm_ioremap_resource(&pdev->dev, resource); + if (IS_ERR(u->membase)) + return PTR_ERR(u->membase); + + tup->uart_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(tup->uart_clk)) { + dev_err(&pdev->dev, "Couldn't get the clock\n"); + return PTR_ERR(tup->uart_clk); + } + + tup->rst = 
devm_reset_control_get_exclusive(&pdev->dev, "serial"); + if (IS_ERR(tup->rst)) { + dev_err(&pdev->dev, "Couldn't get the reset\n"); + return PTR_ERR(tup->rst); + } + + u->iotype = UPIO_MEM32; + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + u->irq = ret; + u->regshift = 2; + ret = uart_add_one_port(&tegra_uart_driver, u); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret); + return ret; + } + return ret; +} + +static int tegra_uart_remove(struct platform_device *pdev) +{ + struct tegra_uart_port *tup = platform_get_drvdata(pdev); + struct uart_port *u = &tup->uport; + + uart_remove_one_port(&tegra_uart_driver, u); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int tegra_uart_suspend(struct device *dev) +{ + struct tegra_uart_port *tup = dev_get_drvdata(dev); + struct uart_port *u = &tup->uport; + + return uart_suspend_port(&tegra_uart_driver, u); +} + +static int tegra_uart_resume(struct device *dev) +{ + struct tegra_uart_port *tup = dev_get_drvdata(dev); + struct uart_port *u = &tup->uport; + + return uart_resume_port(&tegra_uart_driver, u); +} +#endif + +static const struct dev_pm_ops tegra_uart_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume) +}; + +static struct platform_driver tegra_uart_platform_driver = { + .probe = tegra_uart_probe, + .remove = tegra_uart_remove, + .driver = { + .name = "serial-tegra", + .of_match_table = tegra_uart_of_match, + .pm = &tegra_uart_pm_ops, + }, +}; + +static int __init tegra_uart_init(void) +{ + int ret; + struct device_node *node; + const struct of_device_id *match = NULL; + const struct tegra_uart_chip_data *cdata = NULL; + + node = of_find_matching_node(NULL, tegra_uart_of_match); + if (node) + match = of_match_node(tegra_uart_of_match, node); + of_node_put(node); + if (match) + cdata = match->data; + if (cdata) + tegra_uart_driver.nr = cdata->uart_max_port; + + ret = uart_register_driver(&tegra_uart_driver); + if (ret < 0) { + pr_err("Could not register %s driver\n", + tegra_uart_driver.driver_name); + return ret; + } + + ret = platform_driver_register(&tegra_uart_platform_driver); + if (ret < 0) { + pr_err("Uart platform driver register failed, e = %d\n", ret); + uart_unregister_driver(&tegra_uart_driver); + return ret; + } + return 0; +} + +static void __exit tegra_uart_exit(void) +{ + pr_info("Unloading tegra uart driver\n"); + platform_driver_unregister(&tegra_uart_platform_driver); + uart_unregister_driver(&tegra_uart_driver); +} + +module_init(tegra_uart_init); +module_exit(tegra_uart_exit); + +MODULE_ALIAS("platform:serial-tegra"); +MODULE_DESCRIPTION("High speed UART driver for tegra chipset"); +MODULE_AUTHOR("Laxman Dewangan "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c new file mode 100644 index 000000000..f0ed30d0a --- /dev/null +++ b/drivers/tty/serial/serial_core.c @@ -0,0 +1,3494 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver core for serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * Copyright 1999 ARM Limited + * Copyright (C) 2000-2001 Deep Blue Solutions Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for serial_state and serial_icounter_struct */ +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * This is used to lock changes in serial line configuration. 
+ */ +static DEFINE_MUTEX(port_mutex); + +/* + * lockdep: port->lock is initialized in two places, but we + * want only one lock-class: + */ +static struct lock_class_key port_lock_key; + +#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) + +/* + * Max time with active RTS before/after data is sent. + */ +#define RS485_MAX_RTS_DELAY 100 /* msecs */ + +static void uart_wait_until_sent(struct tty_struct *tty, int timeout); +static void uart_change_pm(struct uart_state *state, + enum uart_pm_state pm_state); + +static void uart_port_shutdown(struct tty_port *port); + +static int uart_dcd_enabled(struct uart_port *uport) +{ + return !!(uport->status & UPSTAT_DCD_ENABLE); +} + +static inline struct uart_port *uart_port_ref(struct uart_state *state) +{ + if (atomic_add_unless(&state->refcount, 1, 0)) + return state->uart_port; + return NULL; +} + +static inline void uart_port_deref(struct uart_port *uport) +{ + if (atomic_dec_and_test(&uport->state->refcount)) + wake_up(&uport->state->remove_wait); +} + +#define uart_port_lock(state, flags) \ + ({ \ + struct uart_port *__uport = uart_port_ref(state); \ + if (__uport) \ + spin_lock_irqsave(&__uport->lock, flags); \ + __uport; \ + }) + +#define uart_port_unlock(uport, flags) \ + ({ \ + struct uart_port *__uport = uport; \ + if (__uport) { \ + spin_unlock_irqrestore(&__uport->lock, flags); \ + uart_port_deref(__uport); \ + } \ + }) + +static inline struct uart_port *uart_port_check(struct uart_state *state) +{ + lockdep_assert_held(&state->port.mutex); + return state->uart_port; +} + +/** + * uart_write_wakeup - schedule write processing + * @port: port to be processed + * + * This routine is used by the interrupt handler to schedule processing in the + * software interrupt portion of the driver. A driver is expected to call this + * function when the number of characters in the transmit buffer have dropped + * below a threshold. + * + * Locking: @port->lock should be held + */ +void uart_write_wakeup(struct uart_port *port) +{ + struct uart_state *state = port->state; + /* + * This means you called this function _after_ the port was + * closed. No cookie for you. 
+ */ + BUG_ON(!state); + tty_port_tty_wakeup(&state->port); +} +EXPORT_SYMBOL(uart_write_wakeup); + +static void uart_stop(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + unsigned long flags; + + port = uart_port_lock(state, flags); + if (port) + port->ops->stop_tx(port); + uart_port_unlock(port, flags); +} + +static void __uart_start(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port = state->uart_port; + + if (port && !uart_tx_stopped(port)) + port->ops->start_tx(port); +} + +static void uart_start(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + unsigned long flags; + + port = uart_port_lock(state, flags); + __uart_start(tty); + uart_port_unlock(port, flags); +} + +static void +uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) +{ + unsigned long flags; + unsigned int old; + + spin_lock_irqsave(&port->lock, flags); + old = port->mctrl; + port->mctrl = (old & ~clear) | set; + if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); + spin_unlock_irqrestore(&port->lock, flags); +} + +#define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0) +#define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear) + +static void uart_port_dtr_rts(struct uart_port *uport, int raise) +{ + if (raise) + uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS); + else + uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS); +} + +/* Caller holds port mutex */ +static void uart_change_line_settings(struct tty_struct *tty, struct uart_state *state, + const struct ktermios *old_termios) +{ + struct uart_port *uport = uart_port_check(state); + struct ktermios *termios; + int hw_stopped; + + /* + * If we have no tty, termios, or the port does not exist, + * then we can't set the parameters for this port. + */ + if (!tty || uport->type == PORT_UNKNOWN) + return; + + termios = &tty->termios; + uport->ops->set_termios(uport, termios, old_termios); + + /* + * Set modem status enables based on termios cflag + */ + spin_lock_irq(&uport->lock); + if (termios->c_cflag & CRTSCTS) + uport->status |= UPSTAT_CTS_ENABLE; + else + uport->status &= ~UPSTAT_CTS_ENABLE; + + if (termios->c_cflag & CLOCAL) + uport->status &= ~UPSTAT_DCD_ENABLE; + else + uport->status |= UPSTAT_DCD_ENABLE; + + /* reset sw-assisted CTS flow control based on (possibly) new mode */ + hw_stopped = uport->hw_stopped; + uport->hw_stopped = uart_softcts_mode(uport) && + !(uport->ops->get_mctrl(uport) & TIOCM_CTS); + if (uport->hw_stopped) { + if (!hw_stopped) + uport->ops->stop_tx(uport); + } else { + if (hw_stopped) + __uart_start(tty); + } + spin_unlock_irq(&uport->lock); +} + +/* + * Startup the port. This will be called once per open. All calls + * will be serialised by the per-port mutex. + */ +static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, + int init_hw) +{ + struct uart_port *uport = uart_port_check(state); + unsigned long flags; + unsigned long page; + int retval = 0; + + if (uport->type == PORT_UNKNOWN) + return 1; + + /* + * Make sure the device is in D0 state. + */ + uart_change_pm(state, UART_PM_STATE_ON); + + /* + * Initialise and allocate the transmit and temporary + * buffer. 
+ */ + page = get_zeroed_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + uart_port_lock(state, flags); + if (!state->xmit.buf) { + state->xmit.buf = (unsigned char *) page; + uart_circ_clear(&state->xmit); + uart_port_unlock(uport, flags); + } else { + uart_port_unlock(uport, flags); + /* + * Do not free() the page under the port lock, see + * uart_shutdown(). + */ + free_page(page); + } + + retval = uport->ops->startup(uport); + if (retval == 0) { + if (uart_console(uport) && uport->cons->cflag) { + tty->termios.c_cflag = uport->cons->cflag; + tty->termios.c_ispeed = uport->cons->ispeed; + tty->termios.c_ospeed = uport->cons->ospeed; + uport->cons->cflag = 0; + uport->cons->ispeed = 0; + uport->cons->ospeed = 0; + } + /* + * Initialise the hardware port settings. + */ + uart_change_line_settings(tty, state, NULL); + + /* + * Setup the RTS and DTR signals once the + * port is open and ready to respond. + */ + if (init_hw && C_BAUD(tty)) + uart_port_dtr_rts(uport, 1); + } + + /* + * This is to allow setserial on this port. People may want to set + * port/irq/type and then reconfigure the port properly if it failed + * now. + */ + if (retval && capable(CAP_SYS_ADMIN)) + return 1; + + return retval; +} + +static int uart_startup(struct tty_struct *tty, struct uart_state *state, + int init_hw) +{ + struct tty_port *port = &state->port; + int retval; + + if (tty_port_initialized(port)) + return 0; + + retval = uart_port_startup(tty, state, init_hw); + if (retval) + set_bit(TTY_IO_ERROR, &tty->flags); + + return retval; +} + +/* + * This routine will shutdown a serial port; interrupts are disabled, and + * DTR is dropped if the hangup on close termio flag is on. Calls to + * uart_shutdown are serialised by the per-port semaphore. + * + * uport == NULL if uart_port has already been removed + */ +static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) +{ + struct uart_port *uport = uart_port_check(state); + struct tty_port *port = &state->port; + unsigned long flags; + char *xmit_buf = NULL; + + /* + * Set the TTY IO error marker + */ + if (tty) + set_bit(TTY_IO_ERROR, &tty->flags); + + if (tty_port_initialized(port)) { + tty_port_set_initialized(port, 0); + + /* + * Turn off DTR and RTS early. + */ + if (uport && uart_console(uport) && tty) { + uport->cons->cflag = tty->termios.c_cflag; + uport->cons->ispeed = tty->termios.c_ispeed; + uport->cons->ospeed = tty->termios.c_ospeed; + } + + if (!tty || C_HUPCL(tty)) + uart_port_dtr_rts(uport, 0); + + uart_port_shutdown(port); + } + + /* + * It's possible for shutdown to be called after suspend if we get + * a DCD drop (hangup) at just the right time. Clear suspended bit so + * we don't try to resume a port that has been shutdown. + */ + tty_port_set_suspended(port, 0); + + /* + * Do not free() the transmit buffer page under the port lock since + * this can create various circular locking scenarios. For instance, + * console driver may need to allocate/free a debug object, which + * can endup in printk() recursion. + */ + uart_port_lock(state, flags); + xmit_buf = state->xmit.buf; + state->xmit.buf = NULL; + uart_port_unlock(uport, flags); + + free_page((unsigned long)xmit_buf); +} + +/** + * uart_update_timeout - update per-port frame timing information + * @port: uart_port structure describing the port + * @cflag: termios cflag value + * @baud: speed of the port + * + * Set the @port frame timing information from which the FIFO timeout value is + * derived. 
The @cflag value should reflect the actual hardware settings as + * number of bits, parity, stop bits and baud rate is taken into account here. + * + * Locking: caller is expected to take @port->lock + */ +void +uart_update_timeout(struct uart_port *port, unsigned int cflag, + unsigned int baud) +{ + unsigned int size = tty_get_frame_size(cflag); + u64 frame_time; + + frame_time = (u64)size * NSEC_PER_SEC; + port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud); +} +EXPORT_SYMBOL(uart_update_timeout); + +/** + * uart_get_baud_rate - return baud rate for a particular port + * @port: uart_port structure describing the port in question. + * @termios: desired termios settings + * @old: old termios (or %NULL) + * @min: minimum acceptable baud rate + * @max: maximum acceptable baud rate + * + * Decode the termios structure into a numeric baud rate, taking account of the + * magic 38400 baud rate (with spd_* flags), and mapping the %B0 rate to 9600 + * baud. + * + * If the new baud rate is invalid, try the @old termios setting. If it's still + * invalid, we try 9600 baud. + * + * The @termios structure is updated to reflect the baud rate we're actually + * going to be using. Don't do this for the case where B0 is requested ("hang + * up"). + * + * Locking: caller dependent + */ +unsigned int +uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old, unsigned int min, unsigned int max) +{ + unsigned int try; + unsigned int baud; + unsigned int altbaud; + int hung_up = 0; + upf_t flags = port->flags & UPF_SPD_MASK; + + switch (flags) { + case UPF_SPD_HI: + altbaud = 57600; + break; + case UPF_SPD_VHI: + altbaud = 115200; + break; + case UPF_SPD_SHI: + altbaud = 230400; + break; + case UPF_SPD_WARP: + altbaud = 460800; + break; + default: + altbaud = 38400; + break; + } + + for (try = 0; try < 2; try++) { + baud = tty_termios_baud_rate(termios); + + /* + * The spd_hi, spd_vhi, spd_shi, spd_warp kludge... + * Die! Die! Die! + */ + if (try == 0 && baud == 38400) + baud = altbaud; + + /* + * Special case: B0 rate. + */ + if (baud == 0) { + hung_up = 1; + baud = 9600; + } + + if (baud >= min && baud <= max) + return baud; + + /* + * Oops, the quotient was zero. Try again with + * the old baud rate if possible. + */ + termios->c_cflag &= ~CBAUD; + if (old) { + baud = tty_termios_baud_rate(old); + if (!hung_up) + tty_termios_encode_baud_rate(termios, + baud, baud); + old = NULL; + continue; + } + + /* + * As a last resort, if the range cannot be met then clip to + * the nearest chip supported rate. + */ + if (!hung_up) { + if (baud <= min) + tty_termios_encode_baud_rate(termios, + min + 1, min + 1); + else + tty_termios_encode_baud_rate(termios, + max - 1, max - 1); + } + } + /* Should never happen */ + WARN_ON(1); + return 0; +} +EXPORT_SYMBOL(uart_get_baud_rate); + +/** + * uart_get_divisor - return uart clock divisor + * @port: uart_port structure describing the port + * @baud: desired baud rate + * + * Calculate the divisor (baud_base / baud) for the specified @baud, + * appropriately rounded. + * + * If 38400 baud and custom divisor is selected, return the custom divisor + * instead. + * + * Locking: caller dependent + */ +unsigned int +uart_get_divisor(struct uart_port *port, unsigned int baud) +{ + unsigned int quot; + + /* + * Old custom speed handling. 
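The arithmetic in uart_update_timeout() and uart_get_divisor() is easiest to check with concrete numbers. The following stand-alone sketch is not part of the patch; it only reproduces the two formulas in user space, with an 8N1 frame, 115200 baud and the classic 1.8432 MHz UART clock chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

int main(void)
{
	unsigned int bits_per_frame = 10;	/* 8N1: start + 8 data + stop */
	unsigned int baud = 115200;
	unsigned int uartclk = 1843200;		/* 1.8432 MHz reference clock */

	/* uart_update_timeout(): nanoseconds per character, rounded up */
	uint64_t frame_time = (bits_per_frame * NSEC_PER_SEC + baud - 1) / baud;

	/* uart_get_divisor() without the 38400/custom-divisor special case */
	unsigned int quot = (uartclk + 8 * baud) / (16 * baud);

	printf("frame_time = %llu ns, divisor = %u\n",
	       (unsigned long long)frame_time, quot);
	return 0;
}

With these numbers a character takes roughly 86.8 us on the wire and the divisor comes out as 1, the fastest rate a 1.8432 MHz clock can drive a 16x oversampling UART.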
+ */ + if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) + quot = port->custom_divisor; + else + quot = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud); + + return quot; +} +EXPORT_SYMBOL(uart_get_divisor); + +static int uart_put_char(struct tty_struct *tty, unsigned char c) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + struct circ_buf *circ; + unsigned long flags; + int ret = 0; + + circ = &state->xmit; + port = uart_port_lock(state, flags); + if (!circ->buf) { + uart_port_unlock(port, flags); + return 0; + } + + if (port && uart_circ_chars_free(circ) != 0) { + circ->buf[circ->head] = c; + circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); + ret = 1; + } + uart_port_unlock(port, flags); + return ret; +} + +static void uart_flush_chars(struct tty_struct *tty) +{ + uart_start(tty); +} + +static int uart_write(struct tty_struct *tty, + const unsigned char *buf, int count) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + struct circ_buf *circ; + unsigned long flags; + int c, ret = 0; + + /* + * This means you called this function _after_ the port was + * closed. No cookie for you. + */ + if (!state) { + WARN_ON(1); + return -EL3HLT; + } + + port = uart_port_lock(state, flags); + circ = &state->xmit; + if (!circ->buf) { + uart_port_unlock(port, flags); + return 0; + } + + while (port) { + c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); + if (count < c) + c = count; + if (c <= 0) + break; + memcpy(circ->buf + circ->head, buf, c); + circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1); + buf += c; + count -= c; + ret += c; + } + + __uart_start(tty); + uart_port_unlock(port, flags); + return ret; +} + +static unsigned int uart_write_room(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + unsigned long flags; + unsigned int ret; + + port = uart_port_lock(state, flags); + ret = uart_circ_chars_free(&state->xmit); + uart_port_unlock(port, flags); + return ret; +} + +static unsigned int uart_chars_in_buffer(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + unsigned long flags; + unsigned int ret; + + port = uart_port_lock(state, flags); + ret = uart_circ_chars_pending(&state->xmit); + uart_port_unlock(port, flags); + return ret; +} + +static void uart_flush_buffer(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + unsigned long flags; + + /* + * This means you called this function _after_ the port was + * closed. No cookie for you. + */ + if (!state) { + WARN_ON(1); + return; + } + + pr_debug("uart_flush_buffer(%d) called\n", tty->index); + + port = uart_port_lock(state, flags); + if (!port) + return; + uart_circ_clear(&state->xmit); + if (port->ops->flush_buffer) + port->ops->flush_buffer(port); + uart_port_unlock(port, flags); + tty_port_tty_wakeup(&state->port); +} + +/* + * This function performs low-level write of high-priority XON/XOFF + * character and accounting for it. + * + * Requires uart_port to implement .serial_out(). 
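uart_write() and uart_put_char() above rely on state->xmit being a power-of-two circular buffer, so head and tail wrap with a cheap mask instead of a modulo and each copy is split at the end of the array (that is what CIRC_SPACE_TO_END computes). A self-contained sketch of that indexing scheme, using an illustrative 4 KiB buffer and hypothetical names rather than the kernel's circ_buf helpers:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096	/* must be a power of two, like UART_XMIT_SIZE */

struct xmit_ring {
	char buf[BUF_SIZE];
	unsigned int head;	/* producer index */
	unsigned int tail;	/* consumer index */
};

/* one slot is kept free so head == tail always means "empty" */
static unsigned int ring_space(const struct xmit_ring *r)
{
	return (r->tail - r->head - 1) & (BUF_SIZE - 1);
}

static unsigned int ring_put(struct xmit_ring *r, const char *s, unsigned int n)
{
	unsigned int done = 0;

	while (n) {
		unsigned int space = ring_space(r);
		unsigned int to_end = BUF_SIZE - r->head;	/* contiguous run */
		unsigned int chunk = n;

		if (chunk > space)
			chunk = space;
		if (chunk > to_end)
			chunk = to_end;
		if (!chunk)
			break;

		memcpy(r->buf + r->head, s, chunk);
		r->head = (r->head + chunk) & (BUF_SIZE - 1);
		s += chunk;
		n -= chunk;
		done += chunk;
	}
	return done;
}

int main(void)
{
	struct xmit_ring r = { .head = 0, .tail = 0 };

	printf("queued %u bytes, space left %u\n",
	       ring_put(&r, "hello", 5), ring_space(&r));
	return 0;
}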
+ */ +void uart_xchar_out(struct uart_port *uport, int offset) +{ + serial_port_out(uport, offset, uport->x_char); + uport->icount.tx++; + uport->x_char = 0; +} +EXPORT_SYMBOL_GPL(uart_xchar_out); + +/* + * This function is used to send a high-priority XON/XOFF character to + * the device + */ +static void uart_send_xchar(struct tty_struct *tty, char ch) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + unsigned long flags; + + port = uart_port_ref(state); + if (!port) + return; + + if (port->ops->send_xchar) + port->ops->send_xchar(port, ch); + else { + spin_lock_irqsave(&port->lock, flags); + port->x_char = ch; + if (ch) + port->ops->start_tx(port); + spin_unlock_irqrestore(&port->lock, flags); + } + uart_port_deref(port); +} + +static void uart_throttle(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + upstat_t mask = UPSTAT_SYNC_FIFO; + struct uart_port *port; + + port = uart_port_ref(state); + if (!port) + return; + + if (I_IXOFF(tty)) + mask |= UPSTAT_AUTOXOFF; + if (C_CRTSCTS(tty)) + mask |= UPSTAT_AUTORTS; + + if (port->status & mask) { + port->ops->throttle(port); + mask &= ~port->status; + } + + if (mask & UPSTAT_AUTORTS) + uart_clear_mctrl(port, TIOCM_RTS); + + if (mask & UPSTAT_AUTOXOFF) + uart_send_xchar(tty, STOP_CHAR(tty)); + + uart_port_deref(port); +} + +static void uart_unthrottle(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + upstat_t mask = UPSTAT_SYNC_FIFO; + struct uart_port *port; + + port = uart_port_ref(state); + if (!port) + return; + + if (I_IXOFF(tty)) + mask |= UPSTAT_AUTOXOFF; + if (C_CRTSCTS(tty)) + mask |= UPSTAT_AUTORTS; + + if (port->status & mask) { + port->ops->unthrottle(port); + mask &= ~port->status; + } + + if (mask & UPSTAT_AUTORTS) + uart_set_mctrl(port, TIOCM_RTS); + + if (mask & UPSTAT_AUTOXOFF) + uart_send_xchar(tty, START_CHAR(tty)); + + uart_port_deref(port); +} + +static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport; + int ret = -ENODEV; + + /* + * Ensure the state we copy is consistent and no hardware changes + * occur as we go + */ + mutex_lock(&port->mutex); + uport = uart_port_check(state); + if (!uport) + goto out; + + retinfo->type = uport->type; + retinfo->line = uport->line; + retinfo->port = uport->iobase; + if (HIGH_BITS_OFFSET) + retinfo->port_high = (long) uport->iobase >> HIGH_BITS_OFFSET; + retinfo->irq = uport->irq; + retinfo->flags = (__force int)uport->flags; + retinfo->xmit_fifo_size = uport->fifosize; + retinfo->baud_base = uport->uartclk / 16; + retinfo->close_delay = jiffies_to_msecs(port->close_delay) / 10; + retinfo->closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ? + ASYNC_CLOSING_WAIT_NONE : + jiffies_to_msecs(port->closing_wait) / 10; + retinfo->custom_divisor = uport->custom_divisor; + retinfo->hub6 = uport->hub6; + retinfo->io_type = uport->iotype; + retinfo->iomem_reg_shift = uport->regshift; + retinfo->iomem_base = (void *)(unsigned long)uport->mapbase; + + ret = 0; +out: + mutex_unlock(&port->mutex); + return ret; +} + +static int uart_get_info_user(struct tty_struct *tty, + struct serial_struct *ss) +{ + struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; + + return uart_get_info(port, ss) < 0 ? 
-EIO : 0; +} + +static int uart_set_info(struct tty_struct *tty, struct tty_port *port, + struct uart_state *state, + struct serial_struct *new_info) +{ + struct uart_port *uport = uart_port_check(state); + unsigned long new_port; + unsigned int change_irq, change_port, closing_wait; + unsigned int old_custom_divisor, close_delay; + upf_t old_flags, new_flags; + int retval = 0; + + if (!uport) + return -EIO; + + new_port = new_info->port; + if (HIGH_BITS_OFFSET) + new_port += (unsigned long) new_info->port_high << HIGH_BITS_OFFSET; + + new_info->irq = irq_canonicalize(new_info->irq); + close_delay = msecs_to_jiffies(new_info->close_delay * 10); + closing_wait = new_info->closing_wait == ASYNC_CLOSING_WAIT_NONE ? + ASYNC_CLOSING_WAIT_NONE : + msecs_to_jiffies(new_info->closing_wait * 10); + + + change_irq = !(uport->flags & UPF_FIXED_PORT) + && new_info->irq != uport->irq; + + /* + * Since changing the 'type' of the port changes its resource + * allocations, we should treat type changes the same as + * IO port changes. + */ + change_port = !(uport->flags & UPF_FIXED_PORT) + && (new_port != uport->iobase || + (unsigned long)new_info->iomem_base != uport->mapbase || + new_info->hub6 != uport->hub6 || + new_info->io_type != uport->iotype || + new_info->iomem_reg_shift != uport->regshift || + new_info->type != uport->type); + + old_flags = uport->flags; + new_flags = (__force upf_t)new_info->flags; + old_custom_divisor = uport->custom_divisor; + + if (!capable(CAP_SYS_ADMIN)) { + retval = -EPERM; + if (change_irq || change_port || + (new_info->baud_base != uport->uartclk / 16) || + (close_delay != port->close_delay) || + (closing_wait != port->closing_wait) || + (new_info->xmit_fifo_size && + new_info->xmit_fifo_size != uport->fifosize) || + (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0)) + goto exit; + uport->flags = ((uport->flags & ~UPF_USR_MASK) | + (new_flags & UPF_USR_MASK)); + uport->custom_divisor = new_info->custom_divisor; + goto check_and_exit; + } + + if (change_irq || change_port) { + retval = security_locked_down(LOCKDOWN_TIOCSSERIAL); + if (retval) + goto exit; + } + + /* + * Ask the low level driver to verify the settings. + */ + if (uport->ops->verify_port) + retval = uport->ops->verify_port(uport, new_info); + + if ((new_info->irq >= nr_irqs) || (new_info->irq < 0) || + (new_info->baud_base < 9600)) + retval = -EINVAL; + + if (retval) + goto exit; + + if (change_port || change_irq) { + retval = -EBUSY; + + /* + * Make sure that we are the sole user of this port. + */ + if (tty_port_users(port) > 1) + goto exit; + + /* + * We need to shutdown the serial port at the old + * port/type/irq combination. 
+ */ + uart_shutdown(tty, state); + } + + if (change_port) { + unsigned long old_iobase, old_mapbase; + unsigned int old_type, old_iotype, old_hub6, old_shift; + + old_iobase = uport->iobase; + old_mapbase = uport->mapbase; + old_type = uport->type; + old_hub6 = uport->hub6; + old_iotype = uport->iotype; + old_shift = uport->regshift; + + /* + * Free and release old regions + */ + if (old_type != PORT_UNKNOWN && uport->ops->release_port) + uport->ops->release_port(uport); + + uport->iobase = new_port; + uport->type = new_info->type; + uport->hub6 = new_info->hub6; + uport->iotype = new_info->io_type; + uport->regshift = new_info->iomem_reg_shift; + uport->mapbase = (unsigned long)new_info->iomem_base; + + /* + * Claim and map the new regions + */ + if (uport->type != PORT_UNKNOWN && uport->ops->request_port) { + retval = uport->ops->request_port(uport); + } else { + /* Always success - Jean II */ + retval = 0; + } + + /* + * If we fail to request resources for the + * new port, try to restore the old settings. + */ + if (retval) { + uport->iobase = old_iobase; + uport->type = old_type; + uport->hub6 = old_hub6; + uport->iotype = old_iotype; + uport->regshift = old_shift; + uport->mapbase = old_mapbase; + + if (old_type != PORT_UNKNOWN) { + retval = uport->ops->request_port(uport); + /* + * If we failed to restore the old settings, + * we fail like this. + */ + if (retval) + uport->type = PORT_UNKNOWN; + + /* + * We failed anyway. + */ + retval = -EBUSY; + } + + /* Added to return the correct error -Ram Gupta */ + goto exit; + } + } + + if (change_irq) + uport->irq = new_info->irq; + if (!(uport->flags & UPF_FIXED_PORT)) + uport->uartclk = new_info->baud_base * 16; + uport->flags = (uport->flags & ~UPF_CHANGE_MASK) | + (new_flags & UPF_CHANGE_MASK); + uport->custom_divisor = new_info->custom_divisor; + port->close_delay = close_delay; + port->closing_wait = closing_wait; + if (new_info->xmit_fifo_size) + uport->fifosize = new_info->xmit_fifo_size; + + check_and_exit: + retval = 0; + if (uport->type == PORT_UNKNOWN) + goto exit; + if (tty_port_initialized(port)) { + if (((old_flags ^ uport->flags) & UPF_SPD_MASK) || + old_custom_divisor != uport->custom_divisor) { + /* + * If they're setting up a custom divisor or speed, + * instead of clearing it, then bitch about it. + */ + if (uport->flags & UPF_SPD_MASK) { + dev_notice_ratelimited(uport->dev, + "%s sets custom speed on %s. This is deprecated.\n", + current->comm, + tty_name(port->tty)); + } + uart_change_line_settings(tty, state, NULL); + } + } else { + retval = uart_startup(tty, state, 1); + if (retval == 0) + tty_port_set_initialized(port, true); + if (retval > 0) + retval = 0; + } + exit: + return retval; +} + +static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss) +{ + struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; + int retval; + + down_write(&tty->termios_rwsem); + /* + * This semaphore protects port->count. It is also + * very useful to prevent opens. Also, take the + * port configuration semaphore to make sure that a + * module insertion/removal doesn't change anything + * under us. 
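uart_set_info() above is the backend of the TIOCSSERIAL ioctl; its read-only counterpart, TIOCGSERIAL, lands in uart_get_info(). A minimal user-space sketch that dumps a few of the fields negotiated here (/dev/ttyS0 is only an example path, and error handling is kept to the bare minimum):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_struct ss;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCGSERIAL, &ss) < 0) {
		perror("TIOCGSERIAL");
		return 1;
	}

	/* close_delay and closing_wait are reported in centiseconds */
	printf("type=%d irq=%d baud_base=%d close_delay=%u closing_wait=%u\n",
	       ss.type, ss.irq, ss.baud_base,
	       (unsigned int)ss.close_delay, (unsigned int)ss.closing_wait);

	close(fd);
	return 0;
}

Writing the structure back with TIOCSSERIAL goes through exactly the permission and resource checks implemented above; without CAP_SYS_ADMIN only the UPF_USR_MASK flags and the custom divisor may be changed.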
+ */ + mutex_lock(&port->mutex); + retval = uart_set_info(tty, port, state, ss); + mutex_unlock(&port->mutex); + up_write(&tty->termios_rwsem); + return retval; +} + +/** + * uart_get_lsr_info - get line status register info + * @tty: tty associated with the UART + * @state: UART being queried + * @value: returned modem value + */ +static int uart_get_lsr_info(struct tty_struct *tty, + struct uart_state *state, unsigned int __user *value) +{ + struct uart_port *uport = uart_port_check(state); + unsigned int result; + + result = uport->ops->tx_empty(uport); + + /* + * If we're about to load something into the transmit + * register, we'll pretend the transmitter isn't empty to + * avoid a race condition (depending on when the transmit + * interrupt happens). + */ + if (uport->x_char || + ((uart_circ_chars_pending(&state->xmit) > 0) && + !uart_tx_stopped(uport))) + result &= ~TIOCSER_TEMT; + + return put_user(result, value); +} + +static int uart_tiocmget(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; + struct uart_port *uport; + int result = -EIO; + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + if (!uport) + goto out; + + if (!tty_io_error(tty)) { + result = uport->mctrl; + spin_lock_irq(&uport->lock); + result |= uport->ops->get_mctrl(uport); + spin_unlock_irq(&uport->lock); + } +out: + mutex_unlock(&port->mutex); + return result; +} + +static int +uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) +{ + struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; + struct uart_port *uport; + int ret = -EIO; + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + if (!uport) + goto out; + + if (!tty_io_error(tty)) { + uart_update_mctrl(uport, set, clear); + ret = 0; + } +out: + mutex_unlock(&port->mutex); + return ret; +} + +static int uart_break_ctl(struct tty_struct *tty, int break_state) +{ + struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; + struct uart_port *uport; + int ret = -EIO; + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + if (!uport) + goto out; + + if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl) + uport->ops->break_ctl(uport, break_state); + ret = 0; +out: + mutex_unlock(&port->mutex); + return ret; +} + +static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state) +{ + struct tty_port *port = &state->port; + struct uart_port *uport; + int flags, ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + /* + * Take the per-port semaphore. This prevents count from + * changing, and hence any extra opens of the port while + * we're auto-configuring. + */ + if (mutex_lock_interruptible(&port->mutex)) + return -ERESTARTSYS; + + uport = uart_port_check(state); + if (!uport) { + ret = -EIO; + goto out; + } + + ret = -EBUSY; + if (tty_port_users(port) == 1) { + uart_shutdown(tty, state); + + /* + * If we already have a port type configured, + * we must release its resources. + */ + if (uport->type != PORT_UNKNOWN && uport->ops->release_port) + uport->ops->release_port(uport); + + flags = UART_CONFIG_TYPE; + if (uport->flags & UPF_AUTO_IRQ) + flags |= UART_CONFIG_IRQ; + + /* + * This will claim the ports resources if + * a port is found. 
+ */ + uport->ops->config_port(uport, flags); + + ret = uart_startup(tty, state, 1); + if (ret == 0) + tty_port_set_initialized(port, true); + if (ret > 0) + ret = 0; + } +out: + mutex_unlock(&port->mutex); + return ret; +} + +static void uart_enable_ms(struct uart_port *uport) +{ + /* + * Force modem status interrupts on + */ + if (uport->ops->enable_ms) + uport->ops->enable_ms(uport); +} + +/* + * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change + * - mask passed in arg for lines of interest + * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) + * Caller should use TIOCGICOUNT to see which one it was + * + * FIXME: This wants extracting into a common all driver implementation + * of TIOCMWAIT using tty_port. + */ +static int uart_wait_modem_status(struct uart_state *state, unsigned long arg) +{ + struct uart_port *uport; + struct tty_port *port = &state->port; + DECLARE_WAITQUEUE(wait, current); + struct uart_icount cprev, cnow; + int ret; + + /* + * note the counters on entry + */ + uport = uart_port_ref(state); + if (!uport) + return -EIO; + spin_lock_irq(&uport->lock); + memcpy(&cprev, &uport->icount, sizeof(struct uart_icount)); + uart_enable_ms(uport); + spin_unlock_irq(&uport->lock); + + add_wait_queue(&port->delta_msr_wait, &wait); + for (;;) { + spin_lock_irq(&uport->lock); + memcpy(&cnow, &uport->icount, sizeof(struct uart_icount)); + spin_unlock_irq(&uport->lock); + + set_current_state(TASK_INTERRUPTIBLE); + + if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || + ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || + ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || + ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { + ret = 0; + break; + } + + schedule(); + + /* see if a signal did it */ + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + cprev = cnow; + } + __set_current_state(TASK_RUNNING); + remove_wait_queue(&port->delta_msr_wait, &wait); + uart_port_deref(uport); + + return ret; +} + +/* + * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) + * Return: write counters to the user passed counter struct + * NB: both 1->0 and 0->1 transitions are counted except for + * RI where only 0->1 is counted. + */ +static int uart_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) +{ + struct uart_state *state = tty->driver_data; + struct uart_icount cnow; + struct uart_port *uport; + + uport = uart_port_ref(state); + if (!uport) + return -EIO; + spin_lock_irq(&uport->lock); + memcpy(&cnow, &uport->icount, sizeof(struct uart_icount)); + spin_unlock_irq(&uport->lock); + uart_port_deref(uport); + + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; + icount->rng = cnow.rng; + icount->dcd = cnow.dcd; + icount->rx = cnow.rx; + icount->tx = cnow.tx; + icount->frame = cnow.frame; + icount->overrun = cnow.overrun; + icount->parity = cnow.parity; + icount->brk = cnow.brk; + icount->buf_overrun = cnow.buf_overrun; + + return 0; +} + +#define SER_RS485_LEGACY_FLAGS (SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | \ + SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX | \ + SER_RS485_TERMINATE_BUS) + +static int uart_check_rs485_flags(struct uart_port *port, struct serial_rs485 *rs485) +{ + u32 flags = rs485->flags; + + /* Don't return -EINVAL for unsupported legacy flags */ + flags &= ~SER_RS485_LEGACY_FLAGS; + + /* + * For any bit outside of the legacy ones that is not supported by + * the driver, return -EINVAL. + */ + if (flags & ~port->rs485_supported.flags) + return -EINVAL; + + /* Asking for address w/o addressing mode? 
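uart_wait_modem_status() and uart_get_icount() above back the TIOCMIWAIT and TIOCGICOUNT ioctls. A small user-space sketch that sleeps until DCD or CTS changes and then reads the per-line counters (/dev/ttyS0 is an illustrative path; a real program would also retry on EINTR, since the wait is interruptible):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_icounter_struct ic;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* blocks until one of the masked modem inputs toggles */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) < 0) {
		perror("TIOCMIWAIT");
		return 1;
	}

	/* then check which counter moved */
	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("dcd=%d cts=%d dsr=%d rng=%d\n",
		       ic.dcd, ic.cts, ic.dsr, ic.rng);

	close(fd);
	return 0;
}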
*/ + if (!(rs485->flags & SER_RS485_ADDRB) && + (rs485->flags & (SER_RS485_ADDR_RECV|SER_RS485_ADDR_DEST))) + return -EINVAL; + + /* Address given but not enabled? */ + if (!(rs485->flags & SER_RS485_ADDR_RECV) && rs485->addr_recv) + return -EINVAL; + if (!(rs485->flags & SER_RS485_ADDR_DEST) && rs485->addr_dest) + return -EINVAL; + + return 0; +} + +static void uart_sanitize_serial_rs485_delays(struct uart_port *port, + struct serial_rs485 *rs485) +{ + if (!port->rs485_supported.delay_rts_before_send) { + if (rs485->delay_rts_before_send) { + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay before sending not supported\n", + port->name, port->line); + } + rs485->delay_rts_before_send = 0; + } else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY) { + rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY; + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay before sending clamped to %u ms\n", + port->name, port->line, rs485->delay_rts_before_send); + } + + if (!port->rs485_supported.delay_rts_after_send) { + if (rs485->delay_rts_after_send) { + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay after sending not supported\n", + port->name, port->line); + } + rs485->delay_rts_after_send = 0; + } else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY) { + rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY; + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay after sending clamped to %u ms\n", + port->name, port->line, rs485->delay_rts_after_send); + } +} + +static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs485 *rs485) +{ + u32 supported_flags = port->rs485_supported.flags; + + if (!(rs485->flags & SER_RS485_ENABLED)) { + memset(rs485, 0, sizeof(*rs485)); + return; + } + + rs485->flags &= supported_flags; + + /* Pick sane settings if the user hasn't */ + if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == + !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) { + if (supported_flags & SER_RS485_RTS_ON_SEND) { + rs485->flags |= SER_RS485_RTS_ON_SEND; + rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; + + dev_warn_ratelimited(port->dev, + "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n", + port->name, port->line); + } else { + rs485->flags |= SER_RS485_RTS_AFTER_SEND; + rs485->flags &= ~SER_RS485_RTS_ON_SEND; + + dev_warn_ratelimited(port->dev, + "%s (%d): invalid RTS setting, using RTS_AFTER_SEND instead\n", + port->name, port->line); + } + } + + uart_sanitize_serial_rs485_delays(port, rs485); + + /* Return clean padding area to userspace */ + memset(rs485->padding0, 0, sizeof(rs485->padding0)); + memset(rs485->padding1, 0, sizeof(rs485->padding1)); +} + +static void uart_set_rs485_termination(struct uart_port *port, + const struct serial_rs485 *rs485) +{ + if (!(rs485->flags & SER_RS485_ENABLED)) + return; + + gpiod_set_value_cansleep(port->rs485_term_gpio, + !!(rs485->flags & SER_RS485_TERMINATE_BUS)); +} + +static int uart_rs485_config(struct uart_port *port) +{ + struct serial_rs485 *rs485 = &port->rs485; + unsigned long flags; + int ret; + + if (!(rs485->flags & SER_RS485_ENABLED)) + return 0; + + uart_sanitize_serial_rs485(port, rs485); + uart_set_rs485_termination(port, rs485); + + spin_lock_irqsave(&port->lock, flags); + ret = port->rs485_config(port, NULL, rs485); + spin_unlock_irqrestore(&port->lock, flags); + if (ret) + memset(rs485, 0, sizeof(*rs485)); + + return ret; +} + +static int uart_get_rs485_config(struct uart_port *port, + struct serial_rs485 __user *rs485) +{ + unsigned long flags; + struct serial_rs485 aux; + + 
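The struct serial_rs485 validation and sanitization around here is what the TIOCSRS485 handler a little further down feeds user-supplied settings through. A hedged user-space sketch of enabling RS485 on a port whose driver supports it (/dev/ttyS1 and the 1 ms delays are illustrative values, nothing more):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485.delay_rts_before_send = 1;	/* ms, clamped to RS485_MAX_RTS_DELAY */
	rs485.delay_rts_after_send = 1;		/* ms */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
		perror("TIOCSRS485");	/* ENOTTY if the port has no RS485 support */
		return 1;
	}

	/* read back what the core actually kept after sanitization */
	if (ioctl(fd, TIOCGRS485, &rs485) == 0)
		printf("flags=0x%x before=%u after=%u\n", rs485.flags,
		       rs485.delay_rts_before_send, rs485.delay_rts_after_send);

	close(fd);
	return 0;
}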
spin_lock_irqsave(&port->lock, flags); + aux = port->rs485; + spin_unlock_irqrestore(&port->lock, flags); + + if (copy_to_user(rs485, &aux, sizeof(aux))) + return -EFAULT; + + return 0; +} + +static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port, + struct serial_rs485 __user *rs485_user) +{ + struct serial_rs485 rs485; + int ret; + unsigned long flags; + + if (!(port->rs485_supported.flags & SER_RS485_ENABLED)) + return -ENOTTY; + + if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user))) + return -EFAULT; + + ret = uart_check_rs485_flags(port, &rs485); + if (ret) + return ret; + uart_sanitize_serial_rs485(port, &rs485); + uart_set_rs485_termination(port, &rs485); + + spin_lock_irqsave(&port->lock, flags); + ret = port->rs485_config(port, &tty->termios, &rs485); + if (!ret) { + port->rs485 = rs485; + + /* Reset RTS and other mctrl lines when disabling RS485 */ + if (!(rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); + } + spin_unlock_irqrestore(&port->lock, flags); + if (ret) + return ret; + + if (copy_to_user(rs485_user, &port->rs485, sizeof(port->rs485))) + return -EFAULT; + + return 0; +} + +static int uart_get_iso7816_config(struct uart_port *port, + struct serial_iso7816 __user *iso7816) +{ + unsigned long flags; + struct serial_iso7816 aux; + + if (!port->iso7816_config) + return -ENOTTY; + + spin_lock_irqsave(&port->lock, flags); + aux = port->iso7816; + spin_unlock_irqrestore(&port->lock, flags); + + if (copy_to_user(iso7816, &aux, sizeof(aux))) + return -EFAULT; + + return 0; +} + +static int uart_set_iso7816_config(struct uart_port *port, + struct serial_iso7816 __user *iso7816_user) +{ + struct serial_iso7816 iso7816; + int i, ret; + unsigned long flags; + + if (!port->iso7816_config) + return -ENOTTY; + + if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user))) + return -EFAULT; + + /* + * There are 5 words reserved for future use. Check that userspace + * doesn't put stuff in there to prevent breakages in the future. + */ + for (i = 0; i < 5; i++) + if (iso7816.reserved[i]) + return -EINVAL; + + spin_lock_irqsave(&port->lock, flags); + ret = port->iso7816_config(port, &iso7816); + spin_unlock_irqrestore(&port->lock, flags); + if (ret) + return ret; + + if (copy_to_user(iso7816_user, &port->iso7816, sizeof(port->iso7816))) + return -EFAULT; + + return 0; +} + +/* + * Called via sys_ioctl. We can use spin_lock_irq() here. + */ +static int +uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) +{ + struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; + struct uart_port *uport; + void __user *uarg = (void __user *)arg; + int ret = -ENOIOCTLCMD; + + + /* + * These ioctls don't rely on the hardware to be present. + */ + switch (cmd) { + case TIOCSERCONFIG: + down_write(&tty->termios_rwsem); + ret = uart_do_autoconfig(tty, state); + up_write(&tty->termios_rwsem); + break; + } + + if (ret != -ENOIOCTLCMD) + goto out; + + if (tty_io_error(tty)) { + ret = -EIO; + goto out; + } + + /* + * The following should only be used when hardware is present. 
+ */ + switch (cmd) { + case TIOCMIWAIT: + ret = uart_wait_modem_status(state, arg); + break; + } + + if (ret != -ENOIOCTLCMD) + goto out; + + /* rs485_config requires more locking than others */ + if (cmd == TIOCSRS485) + down_write(&tty->termios_rwsem); + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + + if (!uport || tty_io_error(tty)) { + ret = -EIO; + goto out_up; + } + + /* + * All these rely on hardware being present and need to be + * protected against the tty being hung up. + */ + + switch (cmd) { + case TIOCSERGETLSR: /* Get line status register */ + ret = uart_get_lsr_info(tty, state, uarg); + break; + + case TIOCGRS485: + ret = uart_get_rs485_config(uport, uarg); + break; + + case TIOCSRS485: + ret = uart_set_rs485_config(tty, uport, uarg); + break; + + case TIOCSISO7816: + ret = uart_set_iso7816_config(state->uart_port, uarg); + break; + + case TIOCGISO7816: + ret = uart_get_iso7816_config(state->uart_port, uarg); + break; + default: + if (uport->ops->ioctl) + ret = uport->ops->ioctl(uport, cmd, arg); + break; + } +out_up: + mutex_unlock(&port->mutex); + if (cmd == TIOCSRS485) + up_write(&tty->termios_rwsem); +out: + return ret; +} + +static void uart_set_ldisc(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *uport; + struct tty_port *port = &state->port; + + if (!tty_port_initialized(port)) + return; + + mutex_lock(&state->port.mutex); + uport = uart_port_check(state); + if (uport && uport->ops->set_ldisc) + uport->ops->set_ldisc(uport, &tty->termios); + mutex_unlock(&state->port.mutex); +} + +static void uart_set_termios(struct tty_struct *tty, + const struct ktermios *old_termios) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *uport; + unsigned int cflag = tty->termios.c_cflag; + unsigned int iflag_mask = IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK; + bool sw_changed = false; + + mutex_lock(&state->port.mutex); + uport = uart_port_check(state); + if (!uport) + goto out; + + /* + * Drivers doing software flow control also need to know + * about changes to these input settings. + */ + if (uport->flags & UPF_SOFT_FLOW) { + iflag_mask |= IXANY|IXON|IXOFF; + sw_changed = + tty->termios.c_cc[VSTART] != old_termios->c_cc[VSTART] || + tty->termios.c_cc[VSTOP] != old_termios->c_cc[VSTOP]; + } + + /* + * These are the bits that are used to setup various + * flags in the low level driver. 
We can ignore the Bfoo + * bits in c_cflag; c_[io]speed will always be set + * appropriately by set_termios() in tty_ioctl.c + */ + if ((cflag ^ old_termios->c_cflag) == 0 && + tty->termios.c_ospeed == old_termios->c_ospeed && + tty->termios.c_ispeed == old_termios->c_ispeed && + ((tty->termios.c_iflag ^ old_termios->c_iflag) & iflag_mask) == 0 && + !sw_changed) { + goto out; + } + + uart_change_line_settings(tty, state, old_termios); + /* reload cflag from termios; port driver may have overridden flags */ + cflag = tty->termios.c_cflag; + + /* Handle transition to B0 status */ + if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) + uart_clear_mctrl(uport, TIOCM_RTS | TIOCM_DTR); + /* Handle transition away from B0 status */ + else if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { + unsigned int mask = TIOCM_DTR; + + if (!(cflag & CRTSCTS) || !tty_throttled(tty)) + mask |= TIOCM_RTS; + uart_set_mctrl(uport, mask); + } +out: + mutex_unlock(&state->port.mutex); +} + +/* + * Calls to uart_close() are serialised via the tty_lock in + * drivers/tty/tty_io.c:tty_release() + * drivers/tty/tty_io.c:do_tty_hangup() + */ +static void uart_close(struct tty_struct *tty, struct file *filp) +{ + struct uart_state *state = tty->driver_data; + + if (!state) { + struct uart_driver *drv = tty->driver->driver_state; + struct tty_port *port; + + state = drv->state + tty->index; + port = &state->port; + spin_lock_irq(&port->lock); + --port->count; + spin_unlock_irq(&port->lock); + return; + } + + pr_debug("uart_close(%d) called\n", tty->index); + + tty_port_close(tty->port, tty, filp); +} + +static void uart_tty_port_shutdown(struct tty_port *port) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport = uart_port_check(state); + char *buf; + + /* + * At this point, we stop accepting input. To do this, we + * disable the receive line status interrupts. + */ + if (WARN(!uport, "detached port still initialized!\n")) + return; + + spin_lock_irq(&uport->lock); + uport->ops->stop_rx(uport); + spin_unlock_irq(&uport->lock); + + uart_port_shutdown(port); + + /* + * It's possible for shutdown to be called after suspend if we get + * a DCD drop (hangup) at just the right time. Clear suspended bit so + * we don't try to resume a port that has been shutdown. + */ + tty_port_set_suspended(port, 0); + + /* + * Free the transmit buffer. + */ + spin_lock_irq(&uport->lock); + buf = state->xmit.buf; + state->xmit.buf = NULL; + spin_unlock_irq(&uport->lock); + + free_page((unsigned long)buf); + + uart_change_pm(state, UART_PM_STATE_OFF); +} + +static void uart_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct uart_state *state = tty->driver_data; + struct uart_port *port; + unsigned long char_time, expire, fifo_timeout; + + port = uart_port_ref(state); + if (!port) + return; + + if (port->type == PORT_UNKNOWN || port->fifosize == 0) { + uart_port_deref(port); + return; + } + + /* + * Set the check interval to be 1/5 of the estimated time to + * send a single character, and make it at least 1. The check + * interval should also be less than the timeout. + * + * Note: we have to use pretty tight timings here to satisfy + * the NIST-PCTS. + */ + char_time = max(nsecs_to_jiffies(port->frame_time / 5), 1UL); + + if (timeout && timeout < char_time) + char_time = timeout; + + if (!uart_cts_enabled(port)) { + /* + * If the transmitter hasn't cleared in twice the approximate + * amount of time to send the entire FIFO, it probably won't + * ever clear. 
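The CBAUD handling in uart_set_termios() above is also what makes the old "set the speed to B0, then restore it" trick drop and re-raise DTR/RTS from user space. A short sketch, assuming /dev/ttyS0 and ignoring most error handling:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <termios.h>

int main(void)
{
	struct termios tio, saved;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0 || tcgetattr(fd, &saved) < 0) {
		perror("open/tcgetattr");
		return 1;
	}

	tio = saved;
	cfsetispeed(&tio, B0);
	cfsetospeed(&tio, B0);
	tcsetattr(fd, TCSANOW, &tio);	/* B0: the core clears DTR and RTS */

	sleep(1);			/* keep the line dropped briefly */

	tcsetattr(fd, TCSANOW, &saved);	/* leaving B0: DTR (and RTS) raised again */

	close(fd);
	return 0;
}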
This assumes the UART isn't doing flow + * control, which is currently the case. Hence, if it ever + * takes longer than FIFO timeout, this is probably due to a + * UART bug of some kind. So, we clamp the timeout parameter at + * 2 * FIFO timeout. + */ + fifo_timeout = uart_fifo_timeout(port); + if (timeout == 0 || timeout > 2 * fifo_timeout) + timeout = 2 * fifo_timeout; + } + + expire = jiffies + timeout; + + pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n", + port->line, jiffies, expire); + + /* + * Check whether the transmitter is empty every 'char_time'. + * 'timeout' / 'expire' give us the maximum amount of time + * we wait. + */ + while (!port->ops->tx_empty(port)) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, expire)) + break; + } + uart_port_deref(port); +} + +/* + * Calls to uart_hangup() are serialised by the tty_lock in + * drivers/tty/tty_io.c:do_tty_hangup() + * This runs from a workqueue and can sleep for a _short_ time only. + */ +static void uart_hangup(struct tty_struct *tty) +{ + struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; + struct uart_port *uport; + unsigned long flags; + + pr_debug("uart_hangup(%d)\n", tty->index); + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + WARN(!uport, "hangup of detached port!\n"); + + if (tty_port_active(port)) { + uart_flush_buffer(tty); + uart_shutdown(tty, state); + spin_lock_irqsave(&port->lock, flags); + port->count = 0; + spin_unlock_irqrestore(&port->lock, flags); + tty_port_set_active(port, 0); + tty_port_tty_set(port, NULL); + if (uport && !uart_console(uport)) + uart_change_pm(state, UART_PM_STATE_OFF); + wake_up_interruptible(&port->open_wait); + wake_up_interruptible(&port->delta_msr_wait); + } + mutex_unlock(&port->mutex); +} + +/* uport == NULL if uart_port has already been removed */ +static void uart_port_shutdown(struct tty_port *port) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport = uart_port_check(state); + + /* + * clear delta_msr_wait queue to avoid mem leaks: we may free + * the irq here so the queue might never be woken up. Note + * that we won't end up waiting on delta_msr_wait again since + * any outstanding file descriptors should be pointing at + * hung_up_tty_fops now. + */ + wake_up_interruptible(&port->delta_msr_wait); + + if (uport) { + /* Free the IRQ and disable the port. */ + uport->ops->shutdown(uport); + + /* Ensure that the IRQ handler isn't running on another CPU. 
*/ + synchronize_irq(uport->irq); + } +} + +static int uart_carrier_raised(struct tty_port *port) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport; + int mctrl; + + uport = uart_port_ref(state); + /* + * Should never observe uport == NULL since checks for hangup should + * abort the tty_port_block_til_ready() loop before checking for carrier + * raised -- but report carrier raised if it does anyway so open will + * continue and not sleep + */ + if (WARN_ON(!uport)) + return 1; + spin_lock_irq(&uport->lock); + uart_enable_ms(uport); + mctrl = uport->ops->get_mctrl(uport); + spin_unlock_irq(&uport->lock); + uart_port_deref(uport); + if (mctrl & TIOCM_CAR) + return 1; + return 0; +} + +static void uart_dtr_rts(struct tty_port *port, int raise) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport; + + uport = uart_port_ref(state); + if (!uport) + return; + uart_port_dtr_rts(uport, raise); + uart_port_deref(uport); +} + +static int uart_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct uart_driver *drv = driver->driver_state; + struct uart_state *state = drv->state + tty->index; + + tty->driver_data = state; + + return tty_standard_install(driver, tty); +} + +/* + * Calls to uart_open are serialised by the tty_lock in + * drivers/tty/tty_io.c:tty_open() + * Note that if this fails, then uart_close() _will_ be called. + * + * In time, we want to scrap the "opening nonpresent ports" + * behaviour and implement an alternative way for setserial + * to set base addresses/ports/types. This will allow us to + * get rid of a certain amount of extra tests. + */ +static int uart_open(struct tty_struct *tty, struct file *filp) +{ + struct uart_state *state = tty->driver_data; + int retval; + + retval = tty_port_open(&state->port, tty, filp); + if (retval > 0) + retval = 0; + + return retval; +} + +static int uart_port_activate(struct tty_port *port, struct tty_struct *tty) +{ + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport; + int ret; + + uport = uart_port_check(state); + if (!uport || uport->flags & UPF_DEAD) + return -ENXIO; + + /* + * Start up the serial port. + */ + ret = uart_startup(tty, state, 0); + if (ret > 0) + tty_port_set_active(port, 1); + + return ret; +} + +static const char *uart_type(struct uart_port *port) +{ + const char *str = NULL; + + if (port->ops->type) + str = port->ops->type(port); + + if (!str) + str = "unknown"; + + return str; +} + +#ifdef CONFIG_PROC_FS + +static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) +{ + struct uart_state *state = drv->state + i; + struct tty_port *port = &state->port; + enum uart_pm_state pm_state; + struct uart_port *uport; + char stat_buf[32]; + unsigned int status; + int mmio; + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + if (!uport) + goto out; + + mmio = uport->iotype >= UPIO_MEM; + seq_printf(m, "%d: uart:%s %s%08llX irq:%d", + uport->line, uart_type(uport), + mmio ? "mmio:0x" : "port:", + mmio ? 
(unsigned long long)uport->mapbase + : (unsigned long long)uport->iobase, + uport->irq); + + if (uport->type == PORT_UNKNOWN) { + seq_putc(m, '\n'); + goto out; + } + + if (capable(CAP_SYS_ADMIN)) { + pm_state = state->pm_state; + if (pm_state != UART_PM_STATE_ON) + uart_change_pm(state, UART_PM_STATE_ON); + spin_lock_irq(&uport->lock); + status = uport->ops->get_mctrl(uport); + spin_unlock_irq(&uport->lock); + if (pm_state != UART_PM_STATE_ON) + uart_change_pm(state, pm_state); + + seq_printf(m, " tx:%d rx:%d", + uport->icount.tx, uport->icount.rx); + if (uport->icount.frame) + seq_printf(m, " fe:%d", uport->icount.frame); + if (uport->icount.parity) + seq_printf(m, " pe:%d", uport->icount.parity); + if (uport->icount.brk) + seq_printf(m, " brk:%d", uport->icount.brk); + if (uport->icount.overrun) + seq_printf(m, " oe:%d", uport->icount.overrun); + if (uport->icount.buf_overrun) + seq_printf(m, " bo:%d", uport->icount.buf_overrun); + +#define INFOBIT(bit, str) \ + if (uport->mctrl & (bit)) \ + strncat(stat_buf, (str), sizeof(stat_buf) - \ + strlen(stat_buf) - 2) +#define STATBIT(bit, str) \ + if (status & (bit)) \ + strncat(stat_buf, (str), sizeof(stat_buf) - \ + strlen(stat_buf) - 2) + + stat_buf[0] = '\0'; + stat_buf[1] = '\0'; + INFOBIT(TIOCM_RTS, "|RTS"); + STATBIT(TIOCM_CTS, "|CTS"); + INFOBIT(TIOCM_DTR, "|DTR"); + STATBIT(TIOCM_DSR, "|DSR"); + STATBIT(TIOCM_CAR, "|CD"); + STATBIT(TIOCM_RNG, "|RI"); + if (stat_buf[0]) + stat_buf[0] = ' '; + + seq_puts(m, stat_buf); + } + seq_putc(m, '\n'); +#undef STATBIT +#undef INFOBIT +out: + mutex_unlock(&port->mutex); +} + +static int uart_proc_show(struct seq_file *m, void *v) +{ + struct tty_driver *ttydrv = m->private; + struct uart_driver *drv = ttydrv->driver_state; + int i; + + seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", ""); + for (i = 0; i < drv->nr; i++) + uart_line_info(m, drv, i); + return 0; +} +#endif + +static void uart_port_spin_lock_init(struct uart_port *port) +{ + spin_lock_init(&port->lock); + lockdep_set_class(&port->lock, &port_lock_key); +} + +#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) +/** + * uart_console_write - write a console message to a serial port + * @port: the port to write the message + * @s: array of characters + * @count: number of characters in string to write + * @putchar: function to write character to port + */ +void uart_console_write(struct uart_port *port, const char *s, + unsigned int count, + void (*putchar)(struct uart_port *, unsigned char)) +{ + unsigned int i; + + for (i = 0; i < count; i++, s++) { + if (*s == '\n') + putchar(port, '\r'); + putchar(port, *s); + } +} +EXPORT_SYMBOL_GPL(uart_console_write); + +/** + * uart_get_console - get uart port for console + * @ports: ports to search in + * @nr: number of @ports + * @co: console to search for + * Returns: uart_port for the console @co + * + * Check whether an invalid uart number has been specified (as @co->index), and + * if so, search for the first available port that does have console support. 
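+ *
+ * A minimal sketch of how a console ->setup() hook might use this helper
+ * together with uart_parse_options()/uart_set_options() below (the
+ * my_ports[] array, MY_NR_PORTS and my_console_setup() are hypothetical
+ * driver names, not part of this file):
+ *
+ *	static struct uart_port my_ports[MY_NR_PORTS];
+ *
+ *	static int __init my_console_setup(struct console *co, char *options)
+ *	{
+ *		// falls back to the first usable port if co->index is bogus
+ *		struct uart_port *port = uart_get_console(my_ports, MY_NR_PORTS, co);
+ *		int baud = 9600, bits = 8, parity = 'n', flow = 'n';
+ *
+ *		if (options)
+ *			uart_parse_options(options, &baud, &parity, &bits, &flow);
+ *		return uart_set_options(port, co, baud, parity, bits, flow);
+ *	}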
+ */ +struct uart_port * __init +uart_get_console(struct uart_port *ports, int nr, struct console *co) +{ + int idx = co->index; + + if (idx < 0 || idx >= nr || (ports[idx].iobase == 0 && + ports[idx].membase == NULL)) + for (idx = 0; idx < nr; idx++) + if (ports[idx].iobase != 0 || + ports[idx].membase != NULL) + break; + + co->index = idx; + + return ports + idx; +} + +/** + * uart_parse_earlycon - Parse earlycon options + * @p: ptr to 2nd field (ie., just beyond '<name>,') + * @iotype: ptr for decoded iotype (out) + * @addr: ptr for decoded mapbase/iobase (out) + * @options: ptr for <options> field; %NULL if not present (out) + * + * Decodes earlycon kernel command line parameters of the form: + * * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options> + * * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options> + * + * The optional form: + * * earlycon=<name>,0x<addr>,<options> + * * console=<name>,0x<addr>,<options> + * + * is also accepted; the returned @iotype will be %UPIO_MEM. + * + * Returns: 0 on success or -%EINVAL on failure + */ +int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, + char **options) +{ + if (strncmp(p, "mmio,", 5) == 0) { + *iotype = UPIO_MEM; + p += 5; + } else if (strncmp(p, "mmio16,", 7) == 0) { + *iotype = UPIO_MEM16; + p += 7; + } else if (strncmp(p, "mmio32,", 7) == 0) { + *iotype = UPIO_MEM32; + p += 7; + } else if (strncmp(p, "mmio32be,", 9) == 0) { + *iotype = UPIO_MEM32BE; + p += 9; + } else if (strncmp(p, "mmio32native,", 13) == 0) { + *iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ? + UPIO_MEM32BE : UPIO_MEM32; + p += 13; + } else if (strncmp(p, "io,", 3) == 0) { + *iotype = UPIO_PORT; + p += 3; + } else if (strncmp(p, "0x", 2) == 0) { + *iotype = UPIO_MEM; + } else { + return -EINVAL; + } + + /* + * Before you replace it with kstrtoull(), think about options separator + * (',') it will not tolerate + */ + *addr = simple_strtoull(p, NULL, 0); + p = strchr(p, ','); + if (p) + p++; + + *options = p; + return 0; +} +EXPORT_SYMBOL_GPL(uart_parse_earlycon); + +/** + * uart_parse_options - Parse serial port baud/parity/bits/flow control. + * @options: pointer to option string + * @baud: pointer to an 'int' variable for the baud rate. + * @parity: pointer to an 'int' variable for the parity. + * @bits: pointer to an 'int' variable for the number of data bits. + * @flow: pointer to an 'int' variable for the flow control character. + * + * uart_parse_options() decodes a string containing the serial console + * options. The format of the string is <baud><parity><bits><flow>, + * eg: 115200n8r + */ +void +uart_parse_options(const char *options, int *baud, int *parity, + int *bits, int *flow) +{ + const char *s = options; + + *baud = simple_strtoul(s, NULL, 10); + while (*s >= '0' && *s <= '9') + s++; + if (*s) + *parity = *s++; + if (*s) + *bits = *s++ - '0'; + if (*s) + *flow = *s; +} +EXPORT_SYMBOL_GPL(uart_parse_options); + +/** + * uart_set_options - setup the serial console parameters + * @port: pointer to the serial ports uart_port structure + * @co: console pointer + * @baud: baud rate + * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even) + * @bits: number of data bits + * @flow: flow control character - 'r' (rts) + */ +int +uart_set_options(struct uart_port *port, struct console *co, + int baud, int parity, int bits, int flow) +{ + struct ktermios termios; + static struct ktermios dummy; + + /* + * Ensure that the serial-console lock is initialised early.
+ * + * Note that the console-enabled check is needed because of kgdboc, + * which can end up calling uart_set_options() for an already enabled + * console via tty_find_polling_driver() and uart_poll_init(). + */ + if (!uart_console_enabled(port) && !port->console_reinit) + uart_port_spin_lock_init(port); + + memset(&termios, 0, sizeof(struct ktermios)); + + termios.c_cflag |= CREAD | HUPCL | CLOCAL; + tty_termios_encode_baud_rate(&termios, baud, baud); + + if (bits == 7) + termios.c_cflag |= CS7; + else + termios.c_cflag |= CS8; + + switch (parity) { + case 'o': case 'O': + termios.c_cflag |= PARODD; + fallthrough; + case 'e': case 'E': + termios.c_cflag |= PARENB; + break; + } + + if (flow == 'r') + termios.c_cflag |= CRTSCTS; + + /* + * some uarts on other side don't support no flow control. + * So we set * DTR in host uart to make them happy + */ + port->mctrl |= TIOCM_DTR; + + port->ops->set_termios(port, &termios, &dummy); + /* + * Allow the setting of the UART parameters with a NULL console + * too: + */ + if (co) { + co->cflag = termios.c_cflag; + co->ispeed = termios.c_ispeed; + co->ospeed = termios.c_ospeed; + } + + return 0; +} +EXPORT_SYMBOL_GPL(uart_set_options); +#endif /* CONFIG_SERIAL_CORE_CONSOLE */ + +/** + * uart_change_pm - set power state of the port + * + * @state: port descriptor + * @pm_state: new state + * + * Locking: port->mutex has to be held + */ +static void uart_change_pm(struct uart_state *state, + enum uart_pm_state pm_state) +{ + struct uart_port *port = uart_port_check(state); + + if (state->pm_state != pm_state) { + if (port && port->ops->pm) + port->ops->pm(port, pm_state, state->pm_state); + state->pm_state = pm_state; + } +} + +struct uart_match { + struct uart_port *port; + struct uart_driver *driver; +}; + +static int serial_match_port(struct device *dev, void *data) +{ + struct uart_match *match = data; + struct tty_driver *tty_drv = match->driver->tty_driver; + dev_t devt = MKDEV(tty_drv->major, tty_drv->minor_start) + + match->port->line; + + return dev->devt == devt; /* Actually, only one tty per port */ +} + +int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) +{ + struct uart_state *state = drv->state + uport->line; + struct tty_port *port = &state->port; + struct device *tty_dev; + struct uart_match match = {uport, drv}; + + mutex_lock(&port->mutex); + + tty_dev = device_find_child(uport->dev, &match, serial_match_port); + if (tty_dev && device_may_wakeup(tty_dev)) { + enable_irq_wake(uport->irq); + put_device(tty_dev); + mutex_unlock(&port->mutex); + return 0; + } + put_device(tty_dev); + + /* + * Nothing to do if the console is not suspending + * except stop_rx to prevent any asynchronous data + * over RX line. However ensure that we will be + * able to Re-start_rx later. + */ + if (!console_suspend_enabled && uart_console(uport)) { + if (uport->ops->start_rx) { + spin_lock_irq(&uport->lock); + uport->ops->stop_rx(uport); + spin_unlock_irq(&uport->lock); + } + goto unlock; + } + + uport->suspended = 1; + + if (tty_port_initialized(port)) { + const struct uart_ops *ops = uport->ops; + int tries; + unsigned int mctrl; + + tty_port_set_suspended(port, 1); + tty_port_set_initialized(port, 0); + + spin_lock_irq(&uport->lock); + ops->stop_tx(uport); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, 0); + /* save mctrl so it can be restored on resume */ + mctrl = uport->mctrl; + uport->mctrl = 0; + ops->stop_rx(uport); + spin_unlock_irq(&uport->lock); + + /* + * Wait for the transmitter to empty. 
+ */ + for (tries = 3; !ops->tx_empty(uport) && tries; tries--) + msleep(10); + if (!tries) + dev_err(uport->dev, "%s: Unable to drain transmitter\n", + uport->name); + + ops->shutdown(uport); + uport->mctrl = mctrl; + } + + /* + * Disable the console device before suspending. + */ + if (uart_console(uport)) + console_stop(uport->cons); + + uart_change_pm(state, UART_PM_STATE_OFF); +unlock: + mutex_unlock(&port->mutex); + + return 0; +} +EXPORT_SYMBOL(uart_suspend_port); + +int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) +{ + struct uart_state *state = drv->state + uport->line; + struct tty_port *port = &state->port; + struct device *tty_dev; + struct uart_match match = {uport, drv}; + struct ktermios termios; + + mutex_lock(&port->mutex); + + tty_dev = device_find_child(uport->dev, &match, serial_match_port); + if (!uport->suspended && device_may_wakeup(tty_dev)) { + if (irqd_is_wakeup_set(irq_get_irq_data((uport->irq)))) + disable_irq_wake(uport->irq); + put_device(tty_dev); + mutex_unlock(&port->mutex); + return 0; + } + put_device(tty_dev); + uport->suspended = 0; + + /* + * Re-enable the console device after suspending. + */ + if (uart_console(uport)) { + /* + * First try to use the console cflag setting. + */ + memset(&termios, 0, sizeof(struct ktermios)); + termios.c_cflag = uport->cons->cflag; + termios.c_ispeed = uport->cons->ispeed; + termios.c_ospeed = uport->cons->ospeed; + + /* + * If that's unset, use the tty termios setting. + */ + if (port->tty && termios.c_cflag == 0) + termios = port->tty->termios; + + if (console_suspend_enabled) + uart_change_pm(state, UART_PM_STATE_ON); + uport->ops->set_termios(uport, &termios, NULL); + if (!console_suspend_enabled && uport->ops->start_rx) { + spin_lock_irq(&uport->lock); + uport->ops->start_rx(uport); + spin_unlock_irq(&uport->lock); + } + if (console_suspend_enabled) + console_start(uport->cons); + } + + if (tty_port_suspended(port)) { + const struct uart_ops *ops = uport->ops; + int ret; + + uart_change_pm(state, UART_PM_STATE_ON); + spin_lock_irq(&uport->lock); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, 0); + spin_unlock_irq(&uport->lock); + if (console_suspend_enabled || !uart_console(uport)) { + /* Protected by port mutex for now */ + struct tty_struct *tty = port->tty; + + ret = ops->startup(uport); + if (ret == 0) { + if (tty) + uart_change_line_settings(tty, state, NULL); + uart_rs485_config(uport); + spin_lock_irq(&uport->lock); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, uport->mctrl); + ops->start_tx(uport); + spin_unlock_irq(&uport->lock); + tty_port_set_initialized(port, 1); + } else { + /* + * Failed to resume - maybe hardware went away? + * Clear the "initialized" flag so we won't try + * to call the low level drivers shutdown method. 
+ */ + uart_shutdown(tty, state); + } + } + + tty_port_set_suspended(port, 0); + } + + mutex_unlock(&port->mutex); + + return 0; +} +EXPORT_SYMBOL(uart_resume_port); + +static inline void +uart_report_port(struct uart_driver *drv, struct uart_port *port) +{ + char address[64]; + + switch (port->iotype) { + case UPIO_PORT: + snprintf(address, sizeof(address), "I/O 0x%lx", port->iobase); + break; + case UPIO_HUB6: + snprintf(address, sizeof(address), + "I/O 0x%lx offset 0x%x", port->iobase, port->hub6); + break; + case UPIO_MEM: + case UPIO_MEM16: + case UPIO_MEM32: + case UPIO_MEM32BE: + case UPIO_AU: + case UPIO_TSI: + snprintf(address, sizeof(address), + "MMIO 0x%llx", (unsigned long long)port->mapbase); + break; + default: + strscpy(address, "*unknown*", sizeof(address)); + break; + } + + pr_info("%s%s%s at %s (irq = %d, base_baud = %d) is a %s\n", + port->dev ? dev_name(port->dev) : "", + port->dev ? ": " : "", + port->name, + address, port->irq, port->uartclk / 16, uart_type(port)); + + /* The magic multiplier feature is a bit obscure, so report it too. */ + if (port->flags & UPF_MAGIC_MULTIPLIER) + pr_info("%s%s%s extra baud rates supported: %d, %d", + port->dev ? dev_name(port->dev) : "", + port->dev ? ": " : "", + port->name, + port->uartclk / 8, port->uartclk / 4); +} + +static void +uart_configure_port(struct uart_driver *drv, struct uart_state *state, + struct uart_port *port) +{ + unsigned int flags; + + /* + * If there isn't a port here, don't do anything further. + */ + if (!port->iobase && !port->mapbase && !port->membase) + return; + + /* + * Now do the auto configuration stuff. Note that config_port + * is expected to claim the resources and map the port for us. + */ + flags = 0; + if (port->flags & UPF_AUTO_IRQ) + flags |= UART_CONFIG_IRQ; + if (port->flags & UPF_BOOT_AUTOCONF) { + if (!(port->flags & UPF_FIXED_TYPE)) { + port->type = PORT_UNKNOWN; + flags |= UART_CONFIG_TYPE; + } + port->ops->config_port(port, flags); + } + + if (port->type != PORT_UNKNOWN) { + unsigned long flags; + + uart_report_port(drv, port); + + /* Power up port for set_mctrl() */ + uart_change_pm(state, UART_PM_STATE_ON); + + /* + * Ensure that the modem control lines are de-activated. + * keep the DTR setting that is set in uart_set_options() + * We probably don't need a spinlock around this, but + */ + spin_lock_irqsave(&port->lock, flags); + port->mctrl &= TIOCM_DTR; + if (!(port->rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); + spin_unlock_irqrestore(&port->lock, flags); + + uart_rs485_config(port); + + /* + * If this driver supports console, and it hasn't been + * successfully registered yet, try to re-register it. + * It may be that the port was not available. + */ + if (port->cons && !(port->cons->flags & CON_ENABLED)) + register_console(port->cons); + + /* + * Power down all ports by default, except the + * console if we have one. 
+ */ + if (!uart_console(port)) + uart_change_pm(state, UART_PM_STATE_OFF); + } +} + +#ifdef CONFIG_CONSOLE_POLL + +static int uart_poll_init(struct tty_driver *driver, int line, char *options) +{ + struct uart_driver *drv = driver->driver_state; + struct uart_state *state = drv->state + line; + struct tty_port *tport; + struct uart_port *port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret = 0; + + tport = &state->port; + mutex_lock(&tport->mutex); + + port = uart_port_check(state); + if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) { + ret = -1; + goto out; + } + + if (port->ops->poll_init) { + /* + * We don't set initialized as we only initialized the hw, + * e.g. state->xmit is still uninitialized. + */ + if (!tty_port_initialized(tport)) + ret = port->ops->poll_init(port); + } + + if (!ret && options) { + uart_parse_options(options, &baud, &parity, &bits, &flow); + ret = uart_set_options(port, NULL, baud, parity, bits, flow); + } +out: + mutex_unlock(&tport->mutex); + return ret; +} + +static int uart_poll_get_char(struct tty_driver *driver, int line) +{ + struct uart_driver *drv = driver->driver_state; + struct uart_state *state = drv->state + line; + struct uart_port *port; + int ret = -1; + + port = uart_port_ref(state); + if (port) { + ret = port->ops->poll_get_char(port); + uart_port_deref(port); + } + + return ret; +} + +static void uart_poll_put_char(struct tty_driver *driver, int line, char ch) +{ + struct uart_driver *drv = driver->driver_state; + struct uart_state *state = drv->state + line; + struct uart_port *port; + + port = uart_port_ref(state); + if (!port) + return; + + if (ch == '\n') + port->ops->poll_put_char(port, '\r'); + port->ops->poll_put_char(port, ch); + uart_port_deref(port); +} +#endif + +static const struct tty_operations uart_ops = { + .install = uart_install, + .open = uart_open, + .close = uart_close, + .write = uart_write, + .put_char = uart_put_char, + .flush_chars = uart_flush_chars, + .write_room = uart_write_room, + .chars_in_buffer= uart_chars_in_buffer, + .flush_buffer = uart_flush_buffer, + .ioctl = uart_ioctl, + .throttle = uart_throttle, + .unthrottle = uart_unthrottle, + .send_xchar = uart_send_xchar, + .set_termios = uart_set_termios, + .set_ldisc = uart_set_ldisc, + .stop = uart_stop, + .start = uart_start, + .hangup = uart_hangup, + .break_ctl = uart_break_ctl, + .wait_until_sent= uart_wait_until_sent, +#ifdef CONFIG_PROC_FS + .proc_show = uart_proc_show, +#endif + .tiocmget = uart_tiocmget, + .tiocmset = uart_tiocmset, + .set_serial = uart_set_info_user, + .get_serial = uart_get_info_user, + .get_icount = uart_get_icount, +#ifdef CONFIG_CONSOLE_POLL + .poll_init = uart_poll_init, + .poll_get_char = uart_poll_get_char, + .poll_put_char = uart_poll_put_char, +#endif +}; + +static const struct tty_port_operations uart_port_ops = { + .carrier_raised = uart_carrier_raised, + .dtr_rts = uart_dtr_rts, + .activate = uart_port_activate, + .shutdown = uart_tty_port_shutdown, +}; + +/** + * uart_register_driver - register a driver with the uart core layer + * @drv: low level driver structure + * + * Register a uart driver with the core driver. We in turn register with the + * tty layer, and initialise the core driver per-port state. + * + * We have a proc file in /proc/tty/driver which is named after the normal + * driver. + * + * @drv->port should be %NULL, and the per-port structures should be registered + * using uart_add_one_port() after this call has succeeded. 
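+ *
+ * A hedged sketch of the usual registration sequence (the driver name, the
+ * ttyMY device name, the port count and the companion
+ * my_uart_platform_driver are illustrative assumptions, not taken from this
+ * patch):
+ *
+ *	static struct uart_driver my_uart_driver = {
+ *		.owner		= THIS_MODULE,
+ *		.driver_name	= "my_uart",
+ *		.dev_name	= "ttyMY",
+ *		.nr		= 4,	// per-port state allocated here
+ *	};
+ *
+ *	static int __init my_uart_init(void)
+ *	{
+ *		int ret = uart_register_driver(&my_uart_driver);
+ *
+ *		if (ret)
+ *			return ret;
+ *		// ports are attached later with uart_add_one_port()
+ *		return platform_driver_register(&my_uart_platform_driver);
+ *	}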
+ * + * Locking: none, Interrupts: enabled + */ +int uart_register_driver(struct uart_driver *drv) +{ + struct tty_driver *normal; + int i, retval = -ENOMEM; + + BUG_ON(drv->state); + + /* + * Maybe we should be using a slab cache for this, especially if + * we have a large number of ports to handle. + */ + drv->state = kcalloc(drv->nr, sizeof(struct uart_state), GFP_KERNEL); + if (!drv->state) + goto out; + + normal = tty_alloc_driver(drv->nr, TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV); + if (IS_ERR(normal)) { + retval = PTR_ERR(normal); + goto out_kfree; + } + + drv->tty_driver = normal; + + normal->driver_name = drv->driver_name; + normal->name = drv->dev_name; + normal->major = drv->major; + normal->minor_start = drv->minor; + normal->type = TTY_DRIVER_TYPE_SERIAL; + normal->subtype = SERIAL_TYPE_NORMAL; + normal->init_termios = tty_std_termios; + normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; + normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600; + normal->driver_state = drv; + tty_set_operations(normal, &uart_ops); + + /* + * Initialise the UART state(s). + */ + for (i = 0; i < drv->nr; i++) { + struct uart_state *state = drv->state + i; + struct tty_port *port = &state->port; + + tty_port_init(port); + port->ops = &uart_port_ops; + } + + retval = tty_register_driver(normal); + if (retval >= 0) + return retval; + + for (i = 0; i < drv->nr; i++) + tty_port_destroy(&drv->state[i].port); + tty_driver_kref_put(normal); +out_kfree: + kfree(drv->state); +out: + return retval; +} +EXPORT_SYMBOL(uart_register_driver); + +/** + * uart_unregister_driver - remove a driver from the uart core layer + * @drv: low level driver structure + * + * Remove all references to a driver from the core driver. The low level + * driver must have removed all its ports via the uart_remove_one_port() if it + * registered them with uart_add_one_port(). (I.e. @drv->port is %NULL.) 
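+ *
+ * Illustrative teardown, mirroring the registration sketch above (the
+ * my_uart_* names are assumptions, not part of this file):
+ *
+ *	static void __exit my_uart_exit(void)
+ *	{
+ *		// ->remove() has already torn down every port via
+ *		// uart_remove_one_port() by the time we get here
+ *		platform_driver_unregister(&my_uart_platform_driver);
+ *		uart_unregister_driver(&my_uart_driver);
+ *	}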
+ * + * Locking: none, Interrupts: enabled + */ +void uart_unregister_driver(struct uart_driver *drv) +{ + struct tty_driver *p = drv->tty_driver; + unsigned int i; + + tty_unregister_driver(p); + tty_driver_kref_put(p); + for (i = 0; i < drv->nr; i++) + tty_port_destroy(&drv->state[i].port); + kfree(drv->state); + drv->state = NULL; + drv->tty_driver = NULL; +} +EXPORT_SYMBOL(uart_unregister_driver); + +struct tty_driver *uart_console_device(struct console *co, int *index) +{ + struct uart_driver *p = co->data; + *index = co->index; + return p->tty_driver; +} +EXPORT_SYMBOL_GPL(uart_console_device); + +static ssize_t uartclk_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.baud_base * 16); +} + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.type); +} + +static ssize_t line_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.line); +} + +static ssize_t port_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + unsigned long ioaddr; + + uart_get_info(port, &tmp); + ioaddr = tmp.port; + if (HIGH_BITS_OFFSET) + ioaddr |= (unsigned long)tmp.port_high << HIGH_BITS_OFFSET; + return sprintf(buf, "0x%lX\n", ioaddr); +} + +static ssize_t irq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.irq); +} + +static ssize_t flags_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "0x%X\n", tmp.flags); +} + +static ssize_t xmit_fifo_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.xmit_fifo_size); +} + +static ssize_t close_delay_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.close_delay); +} + +static ssize_t closing_wait_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.closing_wait); +} + +static ssize_t custom_divisor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.custom_divisor); +} + +static ssize_t io_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.io_type); +} + +static ssize_t 
iomem_base_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "0x%lX\n", (unsigned long)tmp.iomem_base); +} + +static ssize_t iomem_reg_shift_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct serial_struct tmp; + struct tty_port *port = dev_get_drvdata(dev); + + uart_get_info(port, &tmp); + return sprintf(buf, "%d\n", tmp.iomem_reg_shift); +} + +static ssize_t console_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tty_port *port = dev_get_drvdata(dev); + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport; + bool console = false; + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + if (uport) + console = uart_console_enabled(uport); + mutex_unlock(&port->mutex); + + return sprintf(buf, "%c\n", console ? 'Y' : 'N'); +} + +static ssize_t console_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct tty_port *port = dev_get_drvdata(dev); + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport; + bool oldconsole, newconsole; + int ret; + + ret = kstrtobool(buf, &newconsole); + if (ret) + return ret; + + mutex_lock(&port->mutex); + uport = uart_port_check(state); + if (uport) { + oldconsole = uart_console_enabled(uport); + if (oldconsole && !newconsole) { + ret = unregister_console(uport->cons); + } else if (!oldconsole && newconsole) { + if (uart_console(uport)) { + uport->console_reinit = 1; + register_console(uport->cons); + } else { + ret = -ENOENT; + } + } + } else { + ret = -ENXIO; + } + mutex_unlock(&port->mutex); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_RO(uartclk); +static DEVICE_ATTR_RO(type); +static DEVICE_ATTR_RO(line); +static DEVICE_ATTR_RO(port); +static DEVICE_ATTR_RO(irq); +static DEVICE_ATTR_RO(flags); +static DEVICE_ATTR_RO(xmit_fifo_size); +static DEVICE_ATTR_RO(close_delay); +static DEVICE_ATTR_RO(closing_wait); +static DEVICE_ATTR_RO(custom_divisor); +static DEVICE_ATTR_RO(io_type); +static DEVICE_ATTR_RO(iomem_base); +static DEVICE_ATTR_RO(iomem_reg_shift); +static DEVICE_ATTR_RW(console); + +static struct attribute *tty_dev_attrs[] = { + &dev_attr_uartclk.attr, + &dev_attr_type.attr, + &dev_attr_line.attr, + &dev_attr_port.attr, + &dev_attr_irq.attr, + &dev_attr_flags.attr, + &dev_attr_xmit_fifo_size.attr, + &dev_attr_close_delay.attr, + &dev_attr_closing_wait.attr, + &dev_attr_custom_divisor.attr, + &dev_attr_io_type.attr, + &dev_attr_iomem_base.attr, + &dev_attr_iomem_reg_shift.attr, + &dev_attr_console.attr, + NULL +}; + +static const struct attribute_group tty_dev_attr_group = { + .attrs = tty_dev_attrs, +}; + +/** + * uart_add_one_port - attach a driver-defined port structure + * @drv: pointer to the uart low level driver structure for this port + * @uport: uart port structure to use for this port. + * + * Context: task context, might sleep + * + * This allows the driver @drv to register its own uart_port structure with the + * core driver. The main purpose is to allow the low level uart drivers to + * expand uart_port, rather than having yet more levels of structures. 
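+ *
+ * A rough probe-time sketch (platform bus, pdev->id as the line number and
+ * the my_uart_* names are illustrative assumptions):
+ *
+ *	static int my_uart_probe(struct platform_device *pdev)
+ *	{
+ *		struct uart_port *port;
+ *
+ *		port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ *		if (!port)
+ *			return -ENOMEM;
+ *
+ *		port->dev = &pdev->dev;
+ *		port->line = pdev->id;		// must be < my_uart_driver.nr
+ *		port->iotype = UPIO_MEM;
+ *		port->ops = &my_uart_pops;	// the driver's struct uart_ops
+ *		port->flags = UPF_BOOT_AUTOCONF;
+ *		// membase/mapbase/irq/uartclk setup omitted for brevity
+ *		return uart_add_one_port(&my_uart_driver, port);
+ *	}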
+ */ +int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) +{ + struct uart_state *state; + struct tty_port *port; + int ret = 0; + struct device *tty_dev; + int num_groups; + + if (uport->line >= drv->nr) + return -EINVAL; + + state = drv->state + uport->line; + port = &state->port; + + mutex_lock(&port_mutex); + mutex_lock(&port->mutex); + if (state->uart_port) { + ret = -EINVAL; + goto out; + } + + /* Link the port to the driver state table and vice versa */ + atomic_set(&state->refcount, 1); + init_waitqueue_head(&state->remove_wait); + state->uart_port = uport; + uport->state = state; + + state->pm_state = UART_PM_STATE_UNDEFINED; + uport->cons = drv->cons; + uport->minor = drv->tty_driver->minor_start + uport->line; + uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name, + drv->tty_driver->name_base + uport->line); + if (!uport->name) { + ret = -ENOMEM; + goto out; + } + + /* + * If this port is in use as a console then the spinlock is already + * initialised. + */ + if (!uart_console_enabled(uport)) + uart_port_spin_lock_init(uport); + + if (uport->cons && uport->dev) + of_console_check(uport->dev->of_node, uport->cons->name, uport->line); + + tty_port_link_device(port, drv->tty_driver, uport->line); + uart_configure_port(drv, state, uport); + + port->console = uart_console(uport); + + num_groups = 2; + if (uport->attr_group) + num_groups++; + + uport->tty_groups = kcalloc(num_groups, sizeof(*uport->tty_groups), + GFP_KERNEL); + if (!uport->tty_groups) { + ret = -ENOMEM; + goto out; + } + uport->tty_groups[0] = &tty_dev_attr_group; + if (uport->attr_group) + uport->tty_groups[1] = uport->attr_group; + + /* + * Register the port whether it's detected or not. This allows + * setserial to be used to alter this port's parameters. + */ + tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver, + uport->line, uport->dev, port, uport->tty_groups); + if (!IS_ERR(tty_dev)) { + device_set_wakeup_capable(tty_dev, 1); + } else { + dev_err(uport->dev, "Cannot register tty device on line %d\n", + uport->line); + } + + /* + * Ensure UPF_DEAD is not set. + */ + uport->flags &= ~UPF_DEAD; + + out: + mutex_unlock(&port->mutex); + mutex_unlock(&port_mutex); + + return ret; +} +EXPORT_SYMBOL(uart_add_one_port); + +/** + * uart_remove_one_port - detach a driver defined port structure + * @drv: pointer to the uart low level driver structure for this port + * @uport: uart port structure for this port + * + * Context: task context, might sleep + * + * This unhooks (and hangs up) the specified port structure from the core + * driver. No further calls will be made to the low-level code for this port. + */ +int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport) +{ + struct uart_state *state = drv->state + uport->line; + struct tty_port *port = &state->port; + struct uart_port *uart_port; + struct tty_struct *tty; + int ret = 0; + + mutex_lock(&port_mutex); + + /* + * Mark the port "dead" - this prevents any opens from + * succeeding while we shut down the port. 
+ */ + mutex_lock(&port->mutex); + uart_port = uart_port_check(state); + if (uart_port != uport) + dev_alert(uport->dev, "Removing wrong port: %p != %p\n", + uart_port, uport); + + if (!uart_port) { + mutex_unlock(&port->mutex); + ret = -EINVAL; + goto out; + } + uport->flags |= UPF_DEAD; + mutex_unlock(&port->mutex); + + /* + * Remove the devices from the tty layer + */ + tty_port_unregister_device(port, drv->tty_driver, uport->line); + + tty = tty_port_tty_get(port); + if (tty) { + tty_vhangup(port->tty); + tty_kref_put(tty); + } + + /* + * If the port is used as a console, unregister it + */ + if (uart_console(uport)) + unregister_console(uport->cons); + + /* + * Free the port IO and memory resources, if any. + */ + if (uport->type != PORT_UNKNOWN && uport->ops->release_port) + uport->ops->release_port(uport); + kfree(uport->tty_groups); + kfree(uport->name); + + /* + * Indicate that there isn't a port here anymore. + */ + uport->type = PORT_UNKNOWN; + + mutex_lock(&port->mutex); + WARN_ON(atomic_dec_return(&state->refcount) < 0); + wait_event(state->remove_wait, !atomic_read(&state->refcount)); + state->uart_port = NULL; + mutex_unlock(&port->mutex); +out: + mutex_unlock(&port_mutex); + + return ret; +} +EXPORT_SYMBOL(uart_remove_one_port); + +/** + * uart_match_port - are the two ports equivalent? + * @port1: first port + * @port2: second port + * + * This utility function can be used to determine whether two uart_port + * structures describe the same port. + */ +bool uart_match_port(const struct uart_port *port1, + const struct uart_port *port2) +{ + if (port1->iotype != port2->iotype) + return false; + + switch (port1->iotype) { + case UPIO_PORT: + return port1->iobase == port2->iobase; + case UPIO_HUB6: + return port1->iobase == port2->iobase && + port1->hub6 == port2->hub6; + case UPIO_MEM: + case UPIO_MEM16: + case UPIO_MEM32: + case UPIO_MEM32BE: + case UPIO_AU: + case UPIO_TSI: + return port1->mapbase == port2->mapbase; + } + + return false; +} +EXPORT_SYMBOL(uart_match_port); + +/** + * uart_handle_dcd_change - handle a change of carrier detect state + * @uport: uart_port structure for the open port + * @status: new carrier detect status, nonzero if active + * + * Caller must hold uport->lock. + */ +void uart_handle_dcd_change(struct uart_port *uport, unsigned int status) +{ + struct tty_port *port = &uport->state->port; + struct tty_struct *tty = port->tty; + struct tty_ldisc *ld; + + lockdep_assert_held_once(&uport->lock); + + if (tty) { + ld = tty_ldisc_ref(tty); + if (ld) { + if (ld->ops->dcd_change) + ld->ops->dcd_change(tty, status); + tty_ldisc_deref(ld); + } + } + + uport->icount.dcd++; + + if (uart_dcd_enabled(uport)) { + if (status) + wake_up_interruptible(&port->open_wait); + else if (tty) + tty_hangup(tty); + } +} +EXPORT_SYMBOL_GPL(uart_handle_dcd_change); + +/** + * uart_handle_cts_change - handle a change of clear-to-send state + * @uport: uart_port structure for the open port + * @status: new clear to send status, nonzero if active + * + * Caller must hold uport->lock. 
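+ *
+ * Typically called from the driver's modem-status interrupt path, roughly
+ * as in the hypothetical handler below (my_uart_read_msr() and MY_MSR_CTS
+ * are made-up stand-ins for the device's status register access):
+ *
+ *	static irqreturn_t my_uart_modem_irq(int irq, void *dev_id)
+ *	{
+ *		struct uart_port *port = dev_id;
+ *		unsigned int msr;
+ *
+ *		spin_lock(&port->lock);
+ *		msr = my_uart_read_msr(port);
+ *		uart_handle_cts_change(port, msr & MY_MSR_CTS);
+ *		spin_unlock(&port->lock);
+ *
+ *		return IRQ_HANDLED;
+ *	}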
+ */ +void uart_handle_cts_change(struct uart_port *uport, unsigned int status) +{ + lockdep_assert_held_once(&uport->lock); + + uport->icount.cts++; + + if (uart_softcts_mode(uport)) { + if (uport->hw_stopped) { + if (status) { + uport->hw_stopped = 0; + uport->ops->start_tx(uport); + uart_write_wakeup(uport); + } + } else { + if (!status) { + uport->hw_stopped = 1; + uport->ops->stop_tx(uport); + } + } + + } +} +EXPORT_SYMBOL_GPL(uart_handle_cts_change); + +/** + * uart_insert_char - push a char to the uart layer + * + * User is responsible to call tty_flip_buffer_push when they are done with + * insertion. + * + * @port: corresponding port + * @status: state of the serial port RX buffer (LSR for 8250) + * @overrun: mask of overrun bits in @status + * @ch: character to push + * @flag: flag for the character (see TTY_NORMAL and friends) + */ +void uart_insert_char(struct uart_port *port, unsigned int status, + unsigned int overrun, unsigned int ch, unsigned int flag) +{ + struct tty_port *tport = &port->state->port; + + if ((status & port->ignore_status_mask & ~overrun) == 0) + if (tty_insert_flip_char(tport, ch, flag) == 0) + ++port->icount.buf_overrun; + + /* + * Overrun is special. Since it's reported immediately, + * it doesn't affect the current character. + */ + if (status & ~port->ignore_status_mask & overrun) + if (tty_insert_flip_char(tport, 0, TTY_OVERRUN) == 0) + ++port->icount.buf_overrun; +} +EXPORT_SYMBOL_GPL(uart_insert_char); + +#ifdef CONFIG_MAGIC_SYSRQ_SERIAL +static const char sysrq_toggle_seq[] = CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE; + +static void uart_sysrq_on(struct work_struct *w) +{ + int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq); + + sysrq_toggle_support(1); + pr_info("SysRq is enabled by magic sequence '%*pE' on serial\n", + sysrq_toggle_seq_len, sysrq_toggle_seq); +} +static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on); + +/** + * uart_try_toggle_sysrq - Enables SysRq from serial line + * @port: uart_port structure where char(s) after BREAK met + * @ch: new character in the sequence after received BREAK + * + * Enables magic SysRq when the required sequence is met on port + * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE). + * + * Returns: %false if @ch is out of enabling sequence and should be + * handled some other way, %true if @ch was consumed. + */ +bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) +{ + int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq); + + if (!sysrq_toggle_seq_len) + return false; + + BUILD_BUG_ON(ARRAY_SIZE(sysrq_toggle_seq) >= U8_MAX); + if (sysrq_toggle_seq[port->sysrq_seq] != ch) { + port->sysrq_seq = 0; + return false; + } + + if (++port->sysrq_seq < sysrq_toggle_seq_len) { + port->sysrq = jiffies + SYSRQ_TIMEOUT; + return true; + } + + schedule_work(&sysrq_enable_work); + + port->sysrq = 0; + return true; +} +EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq); +#endif + +/** + * uart_get_rs485_mode() - retrieve rs485 properties for given uart + * @port: uart device's target port + * + * This function implements the device tree binding described in + * Documentation/devicetree/bindings/serial/rs485.txt. 
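+ *
+ * A hedged sketch of a probe-time call site (my_uart_probe_rs485() and the
+ * supported-flags mask are arbitrary examples; a real driver advertises
+ * whatever its hardware can actually do):
+ *
+ *	static int my_uart_probe_rs485(struct uart_port *port)
+ *	{
+ *		port->rs485_supported.flags = SER_RS485_ENABLED |
+ *					      SER_RS485_RTS_ON_SEND |
+ *					      SER_RS485_RTS_AFTER_SEND;
+ *		// picks up the rs485-* firmware properties, if any
+ *		return uart_get_rs485_mode(port);
+ *	}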
+ */ +int uart_get_rs485_mode(struct uart_port *port) +{ + struct serial_rs485 *rs485conf = &port->rs485; + struct device *dev = port->dev; + u32 rs485_delay[2]; + int ret; + + if (!(port->rs485_supported.flags & SER_RS485_ENABLED)) + return 0; + + ret = device_property_read_u32_array(dev, "rs485-rts-delay", + rs485_delay, 2); + if (!ret) { + rs485conf->delay_rts_before_send = rs485_delay[0]; + rs485conf->delay_rts_after_send = rs485_delay[1]; + } else { + rs485conf->delay_rts_before_send = 0; + rs485conf->delay_rts_after_send = 0; + } + + uart_sanitize_serial_rs485_delays(port, rs485conf); + + /* + * Clear full-duplex and enabled flags, set RTS polarity to active high + * to get to a defined state with the following properties: + */ + rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED | + SER_RS485_TERMINATE_BUS | + SER_RS485_RTS_AFTER_SEND); + rs485conf->flags |= SER_RS485_RTS_ON_SEND; + + if (device_property_read_bool(dev, "rs485-rx-during-tx")) + rs485conf->flags |= SER_RS485_RX_DURING_TX; + + if (device_property_read_bool(dev, "linux,rs485-enabled-at-boot-time")) + rs485conf->flags |= SER_RS485_ENABLED; + + if (device_property_read_bool(dev, "rs485-rts-active-low")) { + rs485conf->flags &= ~SER_RS485_RTS_ON_SEND; + rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; + } + + /* + * Disabling termination by default is the safe choice: Else if many + * bus participants enable it, no communication is possible at all. + * Works fine for short cables and users may enable for longer cables. + */ + port->rs485_term_gpio = devm_gpiod_get_optional(dev, "rs485-term", + GPIOD_OUT_LOW); + if (IS_ERR(port->rs485_term_gpio)) { + ret = PTR_ERR(port->rs485_term_gpio); + port->rs485_term_gpio = NULL; + return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n"); + } + if (port->rs485_term_gpio) + port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS; + + return 0; +} +EXPORT_SYMBOL_GPL(uart_get_rs485_mode); + +/* Compile-time assertions for serial_rs485 layout */ +static_assert(offsetof(struct serial_rs485, padding) == + (offsetof(struct serial_rs485, delay_rts_after_send) + sizeof(__u32))); +static_assert(offsetof(struct serial_rs485, padding1) == + offsetof(struct serial_rs485, padding[1])); +static_assert((offsetof(struct serial_rs485, padding[4]) + sizeof(__u32)) == + sizeof(struct serial_rs485)); + +MODULE_DESCRIPTION("Serial driver core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c new file mode 100644 index 000000000..7d5aaa8d4 --- /dev/null +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Helpers for controlling modem lines via GPIO + * + * Copyright (C) 2014 Paratronic S.A. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "serial_mctrl_gpio.h" + +struct mctrl_gpios { + struct uart_port *port; + struct gpio_desc *gpio[UART_GPIO_MAX]; + int irq[UART_GPIO_MAX]; + unsigned int mctrl_prev; + bool mctrl_on; +}; + +static const struct { + const char *name; + unsigned int mctrl; + enum gpiod_flags flags; +} mctrl_gpios_desc[UART_GPIO_MAX] = { + { "cts", TIOCM_CTS, GPIOD_IN, }, + { "dsr", TIOCM_DSR, GPIOD_IN, }, + { "dcd", TIOCM_CD, GPIOD_IN, }, + { "rng", TIOCM_RNG, GPIOD_IN, }, + { "rts", TIOCM_RTS, GPIOD_OUT_LOW, }, + { "dtr", TIOCM_DTR, GPIOD_OUT_LOW, }, +}; + +static bool mctrl_gpio_flags_is_dir_out(unsigned int idx) +{ + return mctrl_gpios_desc[idx].flags & GPIOD_FLAGS_BIT_DIR_OUT; +} + +/** + * mctrl_gpio_set - set gpios according to mctrl state + * @gpios: gpios to set + * @mctrl: state to set + * + * Set the gpios according to the mctrl state. + */ +void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl) +{ + enum mctrl_gpio_idx i; + struct gpio_desc *desc_array[UART_GPIO_MAX]; + DECLARE_BITMAP(values, UART_GPIO_MAX); + unsigned int count = 0; + + if (gpios == NULL) + return; + + for (i = 0; i < UART_GPIO_MAX; i++) + if (gpios->gpio[i] && mctrl_gpio_flags_is_dir_out(i)) { + desc_array[count] = gpios->gpio[i]; + __assign_bit(count, values, + mctrl & mctrl_gpios_desc[i].mctrl); + count++; + } + gpiod_set_array_value(count, desc_array, NULL, values); +} +EXPORT_SYMBOL_GPL(mctrl_gpio_set); + +/** + * mctrl_gpio_to_gpiod - obtain gpio_desc of modem line index + * @gpios: gpios to look into + * @gidx: index of the modem line + * Returns: the gpio_desc structure associated to the modem line index + */ +struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, + enum mctrl_gpio_idx gidx) +{ + if (gpios == NULL) + return NULL; + + return gpios->gpio[gidx]; +} +EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod); + +/** + * mctrl_gpio_get - update mctrl with the gpios values. + * @gpios: gpios to get the info from + * @mctrl: mctrl to set + * Returns: modified mctrl (the same value as in @mctrl) + * + * Update mctrl with the gpios values. 
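+ *
+ * A small sketch of the usual call site, a driver's ->get_mctrl() hook
+ * (struct my_uart_port and its gpios member are assumed driver-private
+ * definitions, not part of this file):
+ *
+ *	struct my_uart_port {
+ *		struct uart_port port;
+ *		struct mctrl_gpios *gpios;
+ *	};
+ *
+ *	static unsigned int my_uart_get_mctrl(struct uart_port *port)
+ *	{
+ *		struct my_uart_port *s =
+ *			container_of(port, struct my_uart_port, port);
+ *		// lines the UART itself cannot report are assumed asserted
+ *		unsigned int mctrl = TIOCM_CAR | TIOCM_DSR;
+ *
+ *		return mctrl_gpio_get(s->gpios, &mctrl);
+ *	}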
+ */ +unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl) +{ + enum mctrl_gpio_idx i; + + if (gpios == NULL) + return *mctrl; + + for (i = 0; i < UART_GPIO_MAX; i++) { + if (gpios->gpio[i] && !mctrl_gpio_flags_is_dir_out(i)) { + if (gpiod_get_value(gpios->gpio[i])) + *mctrl |= mctrl_gpios_desc[i].mctrl; + else + *mctrl &= ~mctrl_gpios_desc[i].mctrl; + } + } + + return *mctrl; +} +EXPORT_SYMBOL_GPL(mctrl_gpio_get); + +unsigned int +mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl) +{ + enum mctrl_gpio_idx i; + + if (gpios == NULL) + return *mctrl; + + for (i = 0; i < UART_GPIO_MAX; i++) { + if (gpios->gpio[i] && mctrl_gpio_flags_is_dir_out(i)) { + if (gpiod_get_value(gpios->gpio[i])) + *mctrl |= mctrl_gpios_desc[i].mctrl; + else + *mctrl &= ~mctrl_gpios_desc[i].mctrl; + } + } + + return *mctrl; +} +EXPORT_SYMBOL_GPL(mctrl_gpio_get_outputs); + +struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx) +{ + struct mctrl_gpios *gpios; + enum mctrl_gpio_idx i; + + gpios = devm_kzalloc(dev, sizeof(*gpios), GFP_KERNEL); + if (!gpios) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < UART_GPIO_MAX; i++) { + char *gpio_str; + bool present; + + /* Check if GPIO property exists and continue if not */ + gpio_str = kasprintf(GFP_KERNEL, "%s-gpios", + mctrl_gpios_desc[i].name); + if (!gpio_str) + continue; + + present = device_property_present(dev, gpio_str); + kfree(gpio_str); + if (!present) + continue; + + gpios->gpio[i] = + devm_gpiod_get_index_optional(dev, + mctrl_gpios_desc[i].name, + idx, + mctrl_gpios_desc[i].flags); + + if (IS_ERR(gpios->gpio[i])) + return ERR_CAST(gpios->gpio[i]); + } + + return gpios; +} +EXPORT_SYMBOL_GPL(mctrl_gpio_init_noauto); + +#define MCTRL_ANY_DELTA (TIOCM_RI | TIOCM_DSR | TIOCM_CD | TIOCM_CTS) +static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context) +{ + struct mctrl_gpios *gpios = context; + struct uart_port *port = gpios->port; + u32 mctrl = gpios->mctrl_prev; + u32 mctrl_diff; + unsigned long flags; + + mctrl_gpio_get(gpios, &mctrl); + + spin_lock_irqsave(&port->lock, flags); + + mctrl_diff = mctrl ^ gpios->mctrl_prev; + gpios->mctrl_prev = mctrl; + + if (mctrl_diff & MCTRL_ANY_DELTA && port->state != NULL) { + if ((mctrl_diff & mctrl) & TIOCM_RI) + port->icount.rng++; + + if ((mctrl_diff & mctrl) & TIOCM_DSR) + port->icount.dsr++; + + if (mctrl_diff & TIOCM_CD) + uart_handle_dcd_change(port, mctrl & TIOCM_CD); + + if (mctrl_diff & TIOCM_CTS) + uart_handle_cts_change(port, mctrl & TIOCM_CTS); + + wake_up_interruptible(&port->state->port.delta_msr_wait); + } + + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_HANDLED; +} + +/** + * mctrl_gpio_init - initialize uart gpios + * @port: port to initialize gpios for + * @idx: index of the gpio in the @port's device + * + * This will get the {cts,rts,...}-gpios from device tree if they are present + * and request them, set direction etc, and return an allocated structure. + * `devm_*` functions are used, so there's no need to call mctrl_gpio_free(). + * As this sets up the irq handling, make sure to not handle changes to the + * gpio input lines in your driver, too. 
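+ *
+ * Probe-time usage sketch (s is the assumed driver-private structure from
+ * the mctrl_gpio_get() example above; 0 is the gpio index of the only port):
+ *
+ *	s->gpios = mctrl_gpio_init(&s->port, 0);
+ *	if (IS_ERR(s->gpios))
+ *		return PTR_ERR(s->gpios);
+ *
+ * The driver's ->enable_ms() hook would then simply call
+ * mctrl_gpio_enable_ms(s->gpios).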
+ */ +struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx) +{ + struct mctrl_gpios *gpios; + enum mctrl_gpio_idx i; + + gpios = mctrl_gpio_init_noauto(port->dev, idx); + if (IS_ERR(gpios)) + return gpios; + + gpios->port = port; + + for (i = 0; i < UART_GPIO_MAX; ++i) { + int ret; + + if (!gpios->gpio[i] || mctrl_gpio_flags_is_dir_out(i)) + continue; + + ret = gpiod_to_irq(gpios->gpio[i]); + if (ret < 0) { + dev_err(port->dev, + "failed to find corresponding irq for %s (idx=%d, err=%d)\n", + mctrl_gpios_desc[i].name, idx, ret); + return ERR_PTR(ret); + } + gpios->irq[i] = ret; + + /* irqs should only be enabled in .enable_ms */ + irq_set_status_flags(gpios->irq[i], IRQ_NOAUTOEN); + + ret = devm_request_irq(port->dev, gpios->irq[i], + mctrl_gpio_irq_handle, + IRQ_TYPE_EDGE_BOTH, dev_name(port->dev), + gpios); + if (ret) { + /* alternatively implement polling */ + dev_err(port->dev, + "failed to request irq for %s (idx=%d, err=%d)\n", + mctrl_gpios_desc[i].name, idx, ret); + return ERR_PTR(ret); + } + } + + return gpios; +} +EXPORT_SYMBOL_GPL(mctrl_gpio_init); + +/** + * mctrl_gpio_free - explicitly free uart gpios + * @dev: uart port's device + * @gpios: gpios structure to be freed + * + * This will free the requested gpios in mctrl_gpio_init(). As `devm_*` + * functions are used, there's generally no need to call this function. + */ +void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios) +{ + enum mctrl_gpio_idx i; + + if (gpios == NULL) + return; + + for (i = 0; i < UART_GPIO_MAX; i++) { + if (gpios->irq[i]) + devm_free_irq(gpios->port->dev, gpios->irq[i], gpios); + + if (gpios->gpio[i]) + devm_gpiod_put(dev, gpios->gpio[i]); + } + devm_kfree(dev, gpios); +} +EXPORT_SYMBOL_GPL(mctrl_gpio_free); + +/** + * mctrl_gpio_enable_ms - enable irqs and handling of changes to the ms lines + * @gpios: gpios to enable + */ +void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) +{ + enum mctrl_gpio_idx i; + + if (gpios == NULL) + return; + + /* .enable_ms may be called multiple times */ + if (gpios->mctrl_on) + return; + + gpios->mctrl_on = true; + + /* get initial status of modem lines GPIOs */ + mctrl_gpio_get(gpios, &gpios->mctrl_prev); + + for (i = 0; i < UART_GPIO_MAX; ++i) { + if (!gpios->irq[i]) + continue; + + enable_irq(gpios->irq[i]); + } +} +EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms); + +/** + * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines + * @gpios: gpios to disable + */ +void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) +{ + enum mctrl_gpio_idx i; + + if (gpios == NULL) + return; + + if (!gpios->mctrl_on) + return; + + gpios->mctrl_on = false; + + for (i = 0; i < UART_GPIO_MAX; ++i) { + if (!gpios->irq[i]) + continue; + + disable_irq(gpios->irq[i]); + } +} +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms); + +void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios) +{ + enum mctrl_gpio_idx i; + + if (!gpios) + return; + + if (!gpios->mctrl_on) + return; + + for (i = 0; i < UART_GPIO_MAX; ++i) { + if (!gpios->irq[i]) + continue; + + enable_irq_wake(gpios->irq[i]); + } +} +EXPORT_SYMBOL_GPL(mctrl_gpio_enable_irq_wake); + +void mctrl_gpio_disable_irq_wake(struct mctrl_gpios *gpios) +{ + enum mctrl_gpio_idx i; + + if (!gpios) + return; + + if (!gpios->mctrl_on) + return; + + for (i = 0; i < UART_GPIO_MAX; ++i) { + if (!gpios->irq[i]) + continue; + + disable_irq_wake(gpios->irq[i]); + } +} +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_irq_wake); + +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/serial_mctrl_gpio.h 
b/drivers/tty/serial/serial_mctrl_gpio.h new file mode 100644 index 000000000..fc76910fb --- /dev/null +++ b/drivers/tty/serial/serial_mctrl_gpio.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Helpers for controlling modem lines via GPIO + * + * Copyright (C) 2014 Paratronic S.A. + */ + +#ifndef __SERIAL_MCTRL_GPIO__ +#define __SERIAL_MCTRL_GPIO__ + +#include +#include +#include + +struct uart_port; + +enum mctrl_gpio_idx { + UART_GPIO_CTS, + UART_GPIO_DSR, + UART_GPIO_DCD, + UART_GPIO_RNG, + UART_GPIO_RI = UART_GPIO_RNG, + UART_GPIO_RTS, + UART_GPIO_DTR, + UART_GPIO_MAX, +}; + +/* + * Opaque descriptor for modem lines controlled by GPIOs + */ +struct mctrl_gpios; + +#ifdef CONFIG_GPIOLIB + +/* + * Set state of the modem control output lines via GPIOs. + */ +void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl); + +/* + * Get state of the modem control input lines from GPIOs. + * The mctrl flags are updated and returned. + */ +unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl); + +/* + * Get state of the modem control output lines from GPIOs. + * The mctrl flags are updated and returned. + */ +unsigned int +mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl); + +/* + * Returns the associated struct gpio_desc to the modem line gidx + */ +struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, + enum mctrl_gpio_idx gidx); + +/* + * Request and set direction of modem control line GPIOs and set up irq + * handling. + * devm_* functions are used, so there's no need to call mctrl_gpio_free(). + * Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on + * allocation error. + */ +struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx); + +/* + * Request and set direction of modem control line GPIOs. + * devm_* functions are used, so there's no need to call mctrl_gpio_free(). + * Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on + * allocation error. + */ +struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, + unsigned int idx); + +/* + * Free the mctrl_gpios structure. + * Normally, this function will not be called, as the GPIOs will + * be disposed of by the resource management code. + */ +void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios); + +/* + * Enable gpio interrupts to report status line changes. + */ +void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios); + +/* + * Disable gpio interrupts to report status line changes. + */ +void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios); + +/* + * Enable gpio wakeup interrupts to enable wake up source. + */ +void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios); + +/* + * Disable gpio wakeup interrupts to enable wake up source. 
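+ *
+ * A hedged sketch of the intended pairing (my_uart_resume() and the s
+ * pointer are illustrative driver-private assumptions):
+ *
+ *	static int my_uart_resume(struct device *dev)
+ *	{
+ *		struct my_uart_port *s = dev_get_drvdata(dev);
+ *
+ *		if (device_may_wakeup(dev))
+ *			mctrl_gpio_disable_irq_wake(s->gpios);
+ *		return 0;
+ *	}
+ *
+ * with mctrl_gpio_enable_irq_wake() called symmetrically from the matching
+ * suspend callback.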
+ */ +void mctrl_gpio_disable_irq_wake(struct mctrl_gpios *gpios); + +#else /* GPIOLIB */ + +static inline +void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl) +{ +} + +static inline +unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl) +{ + return *mctrl; +} + +static inline unsigned int +mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl) +{ + return *mctrl; +} + +static inline +struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, + enum mctrl_gpio_idx gidx) +{ + return NULL; +} + +static inline +struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx) +{ + return NULL; +} + +static inline +struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx) +{ + return NULL; +} + +static inline +void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios) +{ +} + +static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) +{ +} + +static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) +{ +} + +static inline void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios) +{ +} + +static inline void mctrl_gpio_disable_irq_wake(struct mctrl_gpios *gpios) +{ +} + +#endif /* GPIOLIB */ + +#endif diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c new file mode 100644 index 000000000..e12f1dc18 --- /dev/null +++ b/drivers/tty/serial/serial_txx9.c @@ -0,0 +1,1291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Derived from many drivers using generic_serial interface, + * especially serial_tx3912.c by Steven J. Hill and r39xx_serial.c + * (was in Linux/VR tree) by Jim Pick. + * + * Copyright (C) 1999 Harald Koerfgen + * Copyright (C) 2000 Jim Pick + * Copyright (C) 2001 Steven J. Hill (sjhill@realitydiluted.com) + * Copyright (C) 2000-2002 Toshiba Corporation + * + * Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define PASS_LIMIT 256 + +#if !defined(CONFIG_SERIAL_TXX9_STDSERIAL) +/* "ttyS" is used for standard serial driver */ +#define TXX9_TTY_NAME "ttyTX" +#define TXX9_TTY_MINOR_START 196 +#define TXX9_TTY_MAJOR 204 +#else +/* acts like standard serial driver */ +#define TXX9_TTY_NAME "ttyS" +#define TXX9_TTY_MINOR_START 64 +#define TXX9_TTY_MAJOR TTY_MAJOR +#endif + +/* flag aliases */ +#define UPF_TXX9_HAVE_CTS_LINE UPF_BUGGY_UART +#define UPF_TXX9_USE_SCLK UPF_MAGIC_MULTIPLIER + +#ifdef CONFIG_PCI +/* support for Toshiba TC86C001 SIO */ +#define ENABLE_SERIAL_TXX9_PCI +#endif + +/* + * Number of serial ports + */ +#define UART_NR CONFIG_SERIAL_TXX9_NR_UARTS + +#define TXX9_REGION_SIZE 0x24 + +/* TXX9 Serial Registers */ +#define TXX9_SILCR 0x00 +#define TXX9_SIDICR 0x04 +#define TXX9_SIDISR 0x08 +#define TXX9_SICISR 0x0c +#define TXX9_SIFCR 0x10 +#define TXX9_SIFLCR 0x14 +#define TXX9_SIBGR 0x18 +#define TXX9_SITFIFO 0x1c +#define TXX9_SIRFIFO 0x20 + +/* SILCR : Line Control */ +#define TXX9_SILCR_SCS_MASK 0x00000060 +#define TXX9_SILCR_SCS_IMCLK 0x00000000 +#define TXX9_SILCR_SCS_IMCLK_BG 0x00000020 +#define TXX9_SILCR_SCS_SCLK 0x00000040 +#define TXX9_SILCR_SCS_SCLK_BG 0x00000060 +#define TXX9_SILCR_UEPS 0x00000010 +#define TXX9_SILCR_UPEN 0x00000008 +#define TXX9_SILCR_USBL_MASK 0x00000004 +#define TXX9_SILCR_USBL_1BIT 0x00000000 +#define TXX9_SILCR_USBL_2BIT 0x00000004 +#define TXX9_SILCR_UMODE_MASK 0x00000003 +#define TXX9_SILCR_UMODE_8BIT 0x00000000 +#define 
TXX9_SILCR_UMODE_7BIT 0x00000001 + +/* SIDICR : DMA/Int. Control */ +#define TXX9_SIDICR_TDE 0x00008000 +#define TXX9_SIDICR_RDE 0x00004000 +#define TXX9_SIDICR_TIE 0x00002000 +#define TXX9_SIDICR_RIE 0x00001000 +#define TXX9_SIDICR_SPIE 0x00000800 +#define TXX9_SIDICR_CTSAC 0x00000600 +#define TXX9_SIDICR_STIE_MASK 0x0000003f +#define TXX9_SIDICR_STIE_OERS 0x00000020 +#define TXX9_SIDICR_STIE_CTSS 0x00000010 +#define TXX9_SIDICR_STIE_RBRKD 0x00000008 +#define TXX9_SIDICR_STIE_TRDY 0x00000004 +#define TXX9_SIDICR_STIE_TXALS 0x00000002 +#define TXX9_SIDICR_STIE_UBRKD 0x00000001 + +/* SIDISR : DMA/Int. Status */ +#define TXX9_SIDISR_UBRK 0x00008000 +#define TXX9_SIDISR_UVALID 0x00004000 +#define TXX9_SIDISR_UFER 0x00002000 +#define TXX9_SIDISR_UPER 0x00001000 +#define TXX9_SIDISR_UOER 0x00000800 +#define TXX9_SIDISR_ERI 0x00000400 +#define TXX9_SIDISR_TOUT 0x00000200 +#define TXX9_SIDISR_TDIS 0x00000100 +#define TXX9_SIDISR_RDIS 0x00000080 +#define TXX9_SIDISR_STIS 0x00000040 +#define TXX9_SIDISR_RFDN_MASK 0x0000001f + +/* SICISR : Change Int. Status */ +#define TXX9_SICISR_OERS 0x00000020 +#define TXX9_SICISR_CTSS 0x00000010 +#define TXX9_SICISR_RBRKD 0x00000008 +#define TXX9_SICISR_TRDY 0x00000004 +#define TXX9_SICISR_TXALS 0x00000002 +#define TXX9_SICISR_UBRKD 0x00000001 + +/* SIFCR : FIFO Control */ +#define TXX9_SIFCR_SWRST 0x00008000 +#define TXX9_SIFCR_RDIL_MASK 0x00000180 +#define TXX9_SIFCR_RDIL_1 0x00000000 +#define TXX9_SIFCR_RDIL_4 0x00000080 +#define TXX9_SIFCR_RDIL_8 0x00000100 +#define TXX9_SIFCR_RDIL_12 0x00000180 +#define TXX9_SIFCR_RDIL_MAX 0x00000180 +#define TXX9_SIFCR_TDIL_MASK 0x00000018 +#define TXX9_SIFCR_TDIL_1 0x00000000 +#define TXX9_SIFCR_TDIL_4 0x00000001 +#define TXX9_SIFCR_TDIL_8 0x00000010 +#define TXX9_SIFCR_TDIL_MAX 0x00000010 +#define TXX9_SIFCR_TFRST 0x00000004 +#define TXX9_SIFCR_RFRST 0x00000002 +#define TXX9_SIFCR_FRSTE 0x00000001 +#define TXX9_SIO_TX_FIFO 8 +#define TXX9_SIO_RX_FIFO 16 + +/* SIFLCR : Flow Control */ +#define TXX9_SIFLCR_RCS 0x00001000 +#define TXX9_SIFLCR_TES 0x00000800 +#define TXX9_SIFLCR_RTSSC 0x00000200 +#define TXX9_SIFLCR_RSDE 0x00000100 +#define TXX9_SIFLCR_TSDE 0x00000080 +#define TXX9_SIFLCR_RTSTL_MASK 0x0000001e +#define TXX9_SIFLCR_RTSTL_MAX 0x0000001e +#define TXX9_SIFLCR_TBRK 0x00000001 + +/* SIBGR : Baudrate Control */ +#define TXX9_SIBGR_BCLK_MASK 0x00000300 +#define TXX9_SIBGR_BCLK_T0 0x00000000 +#define TXX9_SIBGR_BCLK_T2 0x00000100 +#define TXX9_SIBGR_BCLK_T4 0x00000200 +#define TXX9_SIBGR_BCLK_T6 0x00000300 +#define TXX9_SIBGR_BRD_MASK 0x000000ff + +static inline unsigned int sio_in(struct uart_port *up, int offset) +{ + switch (up->iotype) { + default: + return __raw_readl(up->membase + offset); + case UPIO_PORT: + return inl(up->iobase + offset); + } +} + +static inline void +sio_out(struct uart_port *up, int offset, int value) +{ + switch (up->iotype) { + default: + __raw_writel(value, up->membase + offset); + break; + case UPIO_PORT: + outl(value, up->iobase + offset); + break; + } +} + +static inline void +sio_mask(struct uart_port *up, int offset, unsigned int value) +{ + sio_out(up, offset, sio_in(up, offset) & ~value); +} +static inline void +sio_set(struct uart_port *up, int offset, unsigned int value) +{ + sio_out(up, offset, sio_in(up, offset) | value); +} + +static inline void +sio_quot_set(struct uart_port *up, int quot) +{ + quot >>= 1; + if (quot < 256) + sio_out(up, TXX9_SIBGR, quot | TXX9_SIBGR_BCLK_T0); + else if (quot < (256 << 2)) + sio_out(up, TXX9_SIBGR, (quot >> 2) | TXX9_SIBGR_BCLK_T2); + else 
if (quot < (256 << 4)) + sio_out(up, TXX9_SIBGR, (quot >> 4) | TXX9_SIBGR_BCLK_T4); + else if (quot < (256 << 6)) + sio_out(up, TXX9_SIBGR, (quot >> 6) | TXX9_SIBGR_BCLK_T6); + else + sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); +} + +static void serial_txx9_stop_tx(struct uart_port *up) +{ + sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE); +} + +static void serial_txx9_start_tx(struct uart_port *up) +{ + sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE); +} + +static void serial_txx9_stop_rx(struct uart_port *up) +{ + up->read_status_mask &= ~TXX9_SIDISR_RDIS; +} + +static void serial_txx9_initialize(struct uart_port *up) +{ + unsigned int tmout = 10000; + + sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); + /* TX4925 BUG WORKAROUND. Accessing SIOC register + * immediately after soft reset causes bus error. */ + udelay(1); + while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout) + udelay(1); + /* TX Int by FIFO Empty, RX Int by Receiving 1 char. */ + sio_set(up, TXX9_SIFCR, + TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1); + /* initial settings */ + sio_out(up, TXX9_SILCR, + TXX9_SILCR_UMODE_8BIT | TXX9_SILCR_USBL_1BIT | + ((up->flags & UPF_TXX9_USE_SCLK) ? + TXX9_SILCR_SCS_SCLK_BG : TXX9_SILCR_SCS_IMCLK_BG)); + sio_quot_set(up, uart_get_divisor(up, 9600)); + sio_out(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSTL_MAX /* 15 */); + sio_out(up, TXX9_SIDICR, 0); +} + +static inline void +receive_chars(struct uart_port *up, unsigned int *status) +{ + unsigned char ch; + unsigned int disr = *status; + int max_count = 256; + char flag; + unsigned int next_ignore_status_mask; + + do { + ch = sio_in(up, TXX9_SIRFIFO); + flag = TTY_NORMAL; + up->icount.rx++; + + /* mask out RFDN_MASK bit added by previous overrun */ + next_ignore_status_mask = + up->ignore_status_mask & ~TXX9_SIDISR_RFDN_MASK; + if (unlikely(disr & (TXX9_SIDISR_UBRK | TXX9_SIDISR_UPER | + TXX9_SIDISR_UFER | TXX9_SIDISR_UOER))) { + /* + * For statistics only + */ + if (disr & TXX9_SIDISR_UBRK) { + disr &= ~(TXX9_SIDISR_UFER | TXX9_SIDISR_UPER); + up->icount.brk++; + /* + * We do the SysRQ and SAK checking + * here because otherwise the break + * may get masked by ignore_status_mask + * or read_status_mask. + */ + if (uart_handle_break(up)) + goto ignore_char; + } else if (disr & TXX9_SIDISR_UPER) + up->icount.parity++; + else if (disr & TXX9_SIDISR_UFER) + up->icount.frame++; + if (disr & TXX9_SIDISR_UOER) { + up->icount.overrun++; + /* + * The receiver read buffer still holds + * a char which caused overrun. + * Ignore next char by adding RFDN_MASK + * to ignore_status_mask temporarily. + */ + next_ignore_status_mask |= + TXX9_SIDISR_RFDN_MASK; + } + + /* + * Mask off conditions which should be ignored.
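+ * read_status_mask and ignore_status_mask are built from the
+ * termios flags in serial_txx9_set_termios().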
+ */ + disr &= up->read_status_mask; + + if (disr & TXX9_SIDISR_UBRK) { + flag = TTY_BREAK; + } else if (disr & TXX9_SIDISR_UPER) + flag = TTY_PARITY; + else if (disr & TXX9_SIDISR_UFER) + flag = TTY_FRAME; + } + if (uart_handle_sysrq_char(up, ch)) + goto ignore_char; + + uart_insert_char(up, disr, TXX9_SIDISR_UOER, ch, flag); + + ignore_char: + up->ignore_status_mask = next_ignore_status_mask; + disr = sio_in(up, TXX9_SIDISR); + } while (!(disr & TXX9_SIDISR_UVALID) && (max_count-- > 0)); + + tty_flip_buffer_push(&up->state->port); + + *status = disr; +} + +static inline void transmit_chars(struct uart_port *up) +{ + struct circ_buf *xmit = &up->state->xmit; + int count; + + if (up->x_char) { + sio_out(up, TXX9_SITFIFO, up->x_char); + up->icount.tx++; + up->x_char = 0; + return; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(up)) { + serial_txx9_stop_tx(up); + return; + } + + count = TXX9_SIO_TX_FIFO; + do { + sio_out(up, TXX9_SITFIFO, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(up); + + if (uart_circ_empty(xmit)) + serial_txx9_stop_tx(up); +} + +static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id) +{ + int pass_counter = 0; + struct uart_port *up = dev_id; + unsigned int status; + + while (1) { + spin_lock(&up->lock); + status = sio_in(up, TXX9_SIDISR); + if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE)) + status &= ~TXX9_SIDISR_TDIS; + if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS | + TXX9_SIDISR_TOUT))) { + spin_unlock(&up->lock); + break; + } + + if (status & TXX9_SIDISR_RDIS) + receive_chars(up, &status); + if (status & TXX9_SIDISR_TDIS) + transmit_chars(up); + /* Clear TX/RX Int. Status */ + sio_mask(up, TXX9_SIDISR, + TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS | + TXX9_SIDISR_TOUT); + spin_unlock(&up->lock); + + if (pass_counter++ > PASS_LIMIT) + break; + } + + return pass_counter ? IRQ_HANDLED : IRQ_NONE; +} + +static unsigned int serial_txx9_tx_empty(struct uart_port *up) +{ + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(&up->lock, flags); + ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0; + spin_unlock_irqrestore(&up->lock, flags); + + return ret; +} + +static unsigned int serial_txx9_get_mctrl(struct uart_port *up) +{ + unsigned int ret; + + /* no modem control lines */ + ret = TIOCM_CAR | TIOCM_DSR; + ret |= (sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS; + ret |= (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 0 : TIOCM_CTS; + + return ret; +} + +static void serial_txx9_set_mctrl(struct uart_port *up, unsigned int mctrl) +{ + + if (mctrl & TIOCM_RTS) + sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); + else + sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); +} + +static void serial_txx9_break_ctl(struct uart_port *up, int break_state) +{ + unsigned long flags; + + spin_lock_irqsave(&up->lock, flags); + if (break_state == -1) + sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK); + else + sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK); + spin_unlock_irqrestore(&up->lock, flags); +} + +#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL) +/* + * Wait for transmitter & holding register to empty + */ +static void wait_for_xmitr(struct uart_port *up) +{ + unsigned int tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. 
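+ * (TXALS presumably means "transmitter all sent"; it is polled once per microsecond, up to 10000 times.)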
*/ + while (--tmout && + !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS)) + udelay(1); + + /* Wait up to 1s for flow control if necessary */ + if (up->flags & UPF_CONS_FLOW) { + tmout = 1000000; + while (--tmout && + (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS)) + udelay(1); + } +} +#endif + +#ifdef CONFIG_CONSOLE_POLL +/* + * Console polling routines for writing and reading from the uart while + * in an interrupt or debug context. + */ + +static int serial_txx9_get_poll_char(struct uart_port *up) +{ + unsigned int ier; + unsigned char c; + + /* + * First save the IER then disable the interrupts + */ + ier = sio_in(up, TXX9_SIDICR); + sio_out(up, TXX9_SIDICR, 0); + + while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID) + ; + + c = sio_in(up, TXX9_SIRFIFO); + + /* + * Finally, clear RX interrupt status + * and restore the IER + */ + sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS); + sio_out(up, TXX9_SIDICR, ier); + return c; +} + + +static void serial_txx9_put_poll_char(struct uart_port *up, unsigned char c) +{ + unsigned int ier; + + /* + * First save the IER then disable the interrupts + */ + ier = sio_in(up, TXX9_SIDICR); + sio_out(up, TXX9_SIDICR, 0); + + wait_for_xmitr(up); + /* + * Send the character out. + */ + sio_out(up, TXX9_SITFIFO, c); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up); + sio_out(up, TXX9_SIDICR, ier); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +static int serial_txx9_startup(struct uart_port *up) +{ + unsigned long flags; + int retval; + + /* + * Clear the FIFO buffers and disable them. + * (they will be reenabled in set_termios()) + */ + sio_set(up, TXX9_SIFCR, + TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); + /* clear reset */ + sio_mask(up, TXX9_SIFCR, + TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); + sio_out(up, TXX9_SIDICR, 0); + + /* + * Clear the interrupt registers. + */ + sio_out(up, TXX9_SIDISR, 0); + + retval = request_irq(up->irq, serial_txx9_interrupt, + IRQF_SHARED, "serial_txx9", up); + if (retval) + return retval; + + /* + * Now, initialize the UART + */ + spin_lock_irqsave(&up->lock, flags); + serial_txx9_set_mctrl(up, up->mctrl); + spin_unlock_irqrestore(&up->lock, flags); + + /* Enable RX/TX */ + sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE); + + /* + * Finally, enable interrupts. + */ + sio_set(up, TXX9_SIDICR, TXX9_SIDICR_RIE); + + return 0; +} + +static void serial_txx9_shutdown(struct uart_port *up) +{ + unsigned long flags; + + /* + * Disable interrupts from this port + */ + sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */ + + spin_lock_irqsave(&up->lock, flags); + serial_txx9_set_mctrl(up, up->mctrl); + spin_unlock_irqrestore(&up->lock, flags); + + /* + * Disable break condition + */ + sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK); + +#ifdef CONFIG_SERIAL_TXX9_CONSOLE + if (up->cons && up->line == up->cons->index) { + free_irq(up->irq, up); + return; + } +#endif + /* reset FIFOs */ + sio_set(up, TXX9_SIFCR, + TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); + /* clear reset */ + sio_mask(up, TXX9_SIFCR, + TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); + + /* Disable RX/TX */ + sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE); + + free_irq(up->irq, up); +} + +static void +serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int cval, fcr = 0; + unsigned long flags; + unsigned int baud, quot; + + /* + * We don't support modem control lines. 
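+ * CLOCAL is forced on and HUPCL/CMSPAR are cleared below, so the
+ * serial core never waits for carrier on this port.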
+ */ + termios->c_cflag &= ~(HUPCL | CMSPAR); + termios->c_cflag |= CLOCAL; + + cval = sio_in(up, TXX9_SILCR); + /* byte size and parity */ + cval &= ~TXX9_SILCR_UMODE_MASK; + switch (termios->c_cflag & CSIZE) { + case CS7: + cval |= TXX9_SILCR_UMODE_7BIT; + break; + default: + case CS5: /* not supported */ + case CS6: /* not supported */ + case CS8: + cval |= TXX9_SILCR_UMODE_8BIT; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + break; + } + + cval &= ~TXX9_SILCR_USBL_MASK; + if (termios->c_cflag & CSTOPB) + cval |= TXX9_SILCR_USBL_2BIT; + else + cval |= TXX9_SILCR_USBL_1BIT; + cval &= ~(TXX9_SILCR_UPEN | TXX9_SILCR_UEPS); + if (termios->c_cflag & PARENB) + cval |= TXX9_SILCR_UPEN; + if (!(termios->c_cflag & PARODD)) + cval |= TXX9_SILCR_UEPS; + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(up, termios, old, 0, up->uartclk/16/2); + quot = uart_get_divisor(up, baud); + + /* Set up FIFOs */ + /* TX Int by FIFO Empty, RX Int by Receiving 1 char. */ + fcr = TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1; + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + spin_lock_irqsave(&up->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(up, termios->c_cflag, baud); + + up->read_status_mask = TXX9_SIDISR_UOER | + TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS; + if (termios->c_iflag & INPCK) + up->read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + up->read_status_mask |= TXX9_SIDISR_UBRK; + + /* + * Characteres to ignore + */ + up->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + up->ignore_status_mask |= TXX9_SIDISR_UPER | TXX9_SIDISR_UFER; + if (termios->c_iflag & IGNBRK) { + up->ignore_status_mask |= TXX9_SIDISR_UBRK; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + up->ignore_status_mask |= TXX9_SIDISR_UOER; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + up->ignore_status_mask |= TXX9_SIDISR_RDIS; + + /* CTS flow control flag */ + if ((termios->c_cflag & CRTSCTS) && + (up->flags & UPF_TXX9_HAVE_CTS_LINE)) { + sio_set(up, TXX9_SIFLCR, + TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES); + } else { + sio_mask(up, TXX9_SIFLCR, + TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES); + } + + sio_out(up, TXX9_SILCR, cval); + sio_quot_set(up, quot); + sio_out(up, TXX9_SIFCR, fcr); + + serial_txx9_set_mctrl(up, up->mctrl); + spin_unlock_irqrestore(&up->lock, flags); +} + +static void +serial_txx9_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + /* + * If oldstate was -1 this is called from + * uart_configure_port(). In this case do not initialize the + * port now, because the port was already initialized (for + * non-console port) or should not be initialized here (for + * console port). If we initialized the port here we lose + * serial console settings. 
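+ * (state == 0 is the "powered on" state, so the port is only
+ * reinitialized on a genuine power-up transition.)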
+ */ + if (state == 0 && oldstate != -1) + serial_txx9_initialize(port); +} + +static int serial_txx9_request_resource(struct uart_port *up) +{ + unsigned int size = TXX9_REGION_SIZE; + int ret = 0; + + switch (up->iotype) { + default: + if (!up->mapbase) + break; + + if (!request_mem_region(up->mapbase, size, "serial_txx9")) { + ret = -EBUSY; + break; + } + + if (up->flags & UPF_IOREMAP) { + up->membase = ioremap(up->mapbase, size); + if (!up->membase) { + release_mem_region(up->mapbase, size); + ret = -ENOMEM; + } + } + break; + + case UPIO_PORT: + if (!request_region(up->iobase, size, "serial_txx9")) + ret = -EBUSY; + break; + } + return ret; +} + +static void serial_txx9_release_resource(struct uart_port *up) +{ + unsigned int size = TXX9_REGION_SIZE; + + switch (up->iotype) { + default: + if (!up->mapbase) + break; + + if (up->flags & UPF_IOREMAP) { + iounmap(up->membase); + up->membase = NULL; + } + + release_mem_region(up->mapbase, size); + break; + + case UPIO_PORT: + release_region(up->iobase, size); + break; + } +} + +static void serial_txx9_release_port(struct uart_port *up) +{ + serial_txx9_release_resource(up); +} + +static int serial_txx9_request_port(struct uart_port *up) +{ + return serial_txx9_request_resource(up); +} + +static void serial_txx9_config_port(struct uart_port *up, int uflags) +{ + int ret; + + /* + * Find the region that we can probe for. This in turn + * tells us whether we can probe for the type of port. + */ + ret = serial_txx9_request_resource(up); + if (ret < 0) + return; + up->type = PORT_TXX9; + up->fifosize = TXX9_SIO_TX_FIFO; + +#ifdef CONFIG_SERIAL_TXX9_CONSOLE + if (up->line == up->cons->index) + return; +#endif + serial_txx9_initialize(up); +} + +static const char * +serial_txx9_type(struct uart_port *port) +{ + return "txx9"; +} + +static const struct uart_ops serial_txx9_pops = { + .tx_empty = serial_txx9_tx_empty, + .set_mctrl = serial_txx9_set_mctrl, + .get_mctrl = serial_txx9_get_mctrl, + .stop_tx = serial_txx9_stop_tx, + .start_tx = serial_txx9_start_tx, + .stop_rx = serial_txx9_stop_rx, + .break_ctl = serial_txx9_break_ctl, + .startup = serial_txx9_startup, + .shutdown = serial_txx9_shutdown, + .set_termios = serial_txx9_set_termios, + .pm = serial_txx9_pm, + .type = serial_txx9_type, + .release_port = serial_txx9_release_port, + .request_port = serial_txx9_request_port, + .config_port = serial_txx9_config_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = serial_txx9_get_poll_char, + .poll_put_char = serial_txx9_put_poll_char, +#endif +}; + +static struct uart_port serial_txx9_ports[UART_NR]; + +static void __init serial_txx9_register_ports(struct uart_driver *drv, + struct device *dev) +{ + int i; + + for (i = 0; i < UART_NR; i++) { + struct uart_port *up = &serial_txx9_ports[i]; + + up->line = i; + up->ops = &serial_txx9_pops; + up->dev = dev; + if (up->iobase || up->mapbase) + uart_add_one_port(drv, up); + } +} + +#ifdef CONFIG_SERIAL_TXX9_CONSOLE + +static void serial_txx9_console_putchar(struct uart_port *up, unsigned char ch) +{ + wait_for_xmitr(up); + sio_out(up, TXX9_SITFIFO, ch); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * The console_lock must be held when we get here. 
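+ * Interrupts are masked in SIDICR for the duration of the write
+ * and restored afterwards.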
+ */ +static void +serial_txx9_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_port *up = &serial_txx9_ports[co->index]; + unsigned int ier, flcr; + + /* + * First save the IER then disable the interrupts + */ + ier = sio_in(up, TXX9_SIDICR); + sio_out(up, TXX9_SIDICR, 0); + /* + * Disable flow-control if enabled (and unnecessary) + */ + flcr = sio_in(up, TXX9_SIFLCR); + if (!(up->flags & UPF_CONS_FLOW) && (flcr & TXX9_SIFLCR_TES)) + sio_out(up, TXX9_SIFLCR, flcr & ~TXX9_SIFLCR_TES); + + uart_console_write(up, s, count, serial_txx9_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up); + sio_out(up, TXX9_SIFLCR, flcr); + sio_out(up, TXX9_SIDICR, ier); +} + +static int __init serial_txx9_console_setup(struct console *co, char *options) +{ + struct uart_port *up; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index >= UART_NR) + co->index = 0; + up = &serial_txx9_ports[co->index]; + if (!up->ops) + return -ENODEV; + + serial_txx9_initialize(up); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(up, co, baud, parity, bits, flow); +} + +static struct uart_driver serial_txx9_reg; +static struct console serial_txx9_console = { + .name = TXX9_TTY_NAME, + .write = serial_txx9_console_write, + .device = uart_console_device, + .setup = serial_txx9_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &serial_txx9_reg, +}; + +static int __init serial_txx9_console_init(void) +{ + register_console(&serial_txx9_console); + return 0; +} +console_initcall(serial_txx9_console_init); + +#define SERIAL_TXX9_CONSOLE &serial_txx9_console +#else +#define SERIAL_TXX9_CONSOLE NULL +#endif + +static struct uart_driver serial_txx9_reg = { + .owner = THIS_MODULE, + .driver_name = "serial_txx9", + .dev_name = TXX9_TTY_NAME, + .major = TXX9_TTY_MAJOR, + .minor = TXX9_TTY_MINOR_START, + .nr = UART_NR, + .cons = SERIAL_TXX9_CONSOLE, +}; + +int __init early_serial_txx9_setup(struct uart_port *port) +{ + if (port->line >= ARRAY_SIZE(serial_txx9_ports)) + return -ENODEV; + + serial_txx9_ports[port->line] = *port; + serial_txx9_ports[port->line].ops = &serial_txx9_pops; + serial_txx9_ports[port->line].flags |= + UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; + return 0; +} + +static DEFINE_MUTEX(serial_txx9_mutex); + +/** + * serial_txx9_register_port - register a serial port + * @port: serial port template + * + * Configure the serial port specified by the request. + * + * The port is then probed and if necessary the IRQ is autodetected. + * If this fails an error is returned. + * + * On success the port is ready to use and the line number is returned.
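+ * If a port matching @port is already registered, it is removed
+ * first and its slot reused; otherwise the first free slot is claimed.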
+ */ +static int serial_txx9_register_port(struct uart_port *port) +{ + int i; + struct uart_port *uart; + int ret = -ENOSPC; + + mutex_lock(&serial_txx9_mutex); + for (i = 0; i < UART_NR; i++) { + uart = &serial_txx9_ports[i]; + if (uart_match_port(uart, port)) { + uart_remove_one_port(&serial_txx9_reg, uart); + break; + } + } + if (i == UART_NR) { + /* Find unused port */ + for (i = 0; i < UART_NR; i++) { + uart = &serial_txx9_ports[i]; + if (!(uart->iobase || uart->mapbase)) + break; + } + } + if (i < UART_NR) { + uart->iobase = port->iobase; + uart->membase = port->membase; + uart->irq = port->irq; + uart->uartclk = port->uartclk; + uart->iotype = port->iotype; + uart->flags = port->flags + | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; + uart->mapbase = port->mapbase; + if (port->dev) + uart->dev = port->dev; + ret = uart_add_one_port(&serial_txx9_reg, uart); + if (ret == 0) + ret = uart->line; + } + mutex_unlock(&serial_txx9_mutex); + return ret; +} + +/** + * serial_txx9_unregister_port - remove a txx9 serial port at runtime + * @line: serial line number + * + * Remove one serial port. This may not be called from interrupt + * context. We hand the port back to the our control. + */ +static void serial_txx9_unregister_port(int line) +{ + struct uart_port *uart = &serial_txx9_ports[line]; + + mutex_lock(&serial_txx9_mutex); + uart_remove_one_port(&serial_txx9_reg, uart); + uart->flags = 0; + uart->type = PORT_UNKNOWN; + uart->iobase = 0; + uart->mapbase = 0; + uart->membase = NULL; + uart->dev = NULL; + mutex_unlock(&serial_txx9_mutex); +} + +/* + * Register a set of serial devices attached to a platform device. + */ +static int serial_txx9_probe(struct platform_device *dev) +{ + struct uart_port *p = dev_get_platdata(&dev->dev); + struct uart_port port; + int ret, i; + + memset(&port, 0, sizeof(struct uart_port)); + for (i = 0; p && p->uartclk != 0; p++, i++) { + port.iobase = p->iobase; + port.membase = p->membase; + port.irq = p->irq; + port.uartclk = p->uartclk; + port.iotype = p->iotype; + port.flags = p->flags; + port.mapbase = p->mapbase; + port.dev = &dev->dev; + port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_TXX9_CONSOLE); + ret = serial_txx9_register_port(&port); + if (ret < 0) { + dev_err(&dev->dev, "unable to register port at index %d " + "(IO%lx MEM%llx IRQ%d): %d\n", i, + p->iobase, (unsigned long long)p->mapbase, + p->irq, ret); + } + } + return 0; +} + +/* + * Remove serial ports registered against a platform device. 
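+ * Only ports whose ->dev matches this platform device are unregistered.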
+ */ +static int serial_txx9_remove(struct platform_device *dev) +{ + int i; + + for (i = 0; i < UART_NR; i++) { + struct uart_port *up = &serial_txx9_ports[i]; + + if (up->dev == &dev->dev) + serial_txx9_unregister_port(i); + } + return 0; +} + +#ifdef CONFIG_PM +static int serial_txx9_suspend(struct platform_device *dev, pm_message_t state) +{ + int i; + + for (i = 0; i < UART_NR; i++) { + struct uart_port *up = &serial_txx9_ports[i]; + + if (up->type != PORT_UNKNOWN && up->dev == &dev->dev) + uart_suspend_port(&serial_txx9_reg, up); + } + + return 0; +} + +static int serial_txx9_resume(struct platform_device *dev) +{ + int i; + + for (i = 0; i < UART_NR; i++) { + struct uart_port *up = &serial_txx9_ports[i]; + + if (up->type != PORT_UNKNOWN && up->dev == &dev->dev) + uart_resume_port(&serial_txx9_reg, up); + } + + return 0; +} +#endif + +static struct platform_driver serial_txx9_plat_driver = { + .probe = serial_txx9_probe, + .remove = serial_txx9_remove, +#ifdef CONFIG_PM + .suspend = serial_txx9_suspend, + .resume = serial_txx9_resume, +#endif + .driver = { + .name = "serial_txx9", + }, +}; + +#ifdef ENABLE_SERIAL_TXX9_PCI +/* + * Probe one serial board. Unfortunately, there is no rhyme nor reason + * to the arrangement of serial ports on a PCI card. + */ +static int +pciserial_txx9_init_one(struct pci_dev *dev, const struct pci_device_id *ent) +{ + struct uart_port port; + int line; + int rc; + + rc = pci_enable_device(dev); + if (rc) + return rc; + + memset(&port, 0, sizeof(port)); + port.ops = &serial_txx9_pops; + port.flags |= UPF_TXX9_HAVE_CTS_LINE; + port.uartclk = 66670000; + port.irq = dev->irq; + port.iotype = UPIO_PORT; + port.iobase = pci_resource_start(dev, 1); + port.dev = &dev->dev; + line = serial_txx9_register_port(&port); + if (line < 0) { + printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), line); + pci_disable_device(dev); + return line; + } + pci_set_drvdata(dev, &serial_txx9_ports[line]); + + return 0; +} + +static void pciserial_txx9_remove_one(struct pci_dev *dev) +{ + struct uart_port *up = pci_get_drvdata(dev); + + if (up) { + serial_txx9_unregister_port(up->line); + pci_disable_device(dev); + } +} + +#ifdef CONFIG_PM +static int pciserial_txx9_suspend_one(struct pci_dev *dev, pm_message_t state) +{ + struct uart_port *up = pci_get_drvdata(dev); + + if (up) + uart_suspend_port(&serial_txx9_reg, up); + pci_save_state(dev); + pci_set_power_state(dev, pci_choose_state(dev, state)); + return 0; +} + +static int pciserial_txx9_resume_one(struct pci_dev *dev) +{ + struct uart_port *up = pci_get_drvdata(dev); + + pci_set_power_state(dev, PCI_D0); + pci_restore_state(dev); + if (up) + uart_resume_port(&serial_txx9_reg, up); + return 0; +} +#endif + +static const struct pci_device_id serial_txx9_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC) }, + { 0, } +}; + +static struct pci_driver serial_txx9_pci_driver = { + .name = "serial_txx9", + .probe = pciserial_txx9_init_one, + .remove = pciserial_txx9_remove_one, +#ifdef CONFIG_PM + .suspend = pciserial_txx9_suspend_one, + .resume = pciserial_txx9_resume_one, +#endif + .id_table = serial_txx9_pci_tbl, +}; + +MODULE_DEVICE_TABLE(pci, serial_txx9_pci_tbl); +#endif /* ENABLE_SERIAL_TXX9_PCI */ + +static struct platform_device *serial_txx9_plat_devs; + +static int __init serial_txx9_init(void) +{ + int ret; + + ret = uart_register_driver(&serial_txx9_reg); + if (ret) + goto out; + + serial_txx9_plat_devs = platform_device_alloc("serial_txx9", -1); + if 
(!serial_txx9_plat_devs) { + ret = -ENOMEM; + goto unreg_uart_drv; + } + + ret = platform_device_add(serial_txx9_plat_devs); + if (ret) + goto put_dev; + + serial_txx9_register_ports(&serial_txx9_reg, + &serial_txx9_plat_devs->dev); + + ret = platform_driver_register(&serial_txx9_plat_driver); + if (ret) + goto del_dev; + +#ifdef ENABLE_SERIAL_TXX9_PCI + ret = pci_register_driver(&serial_txx9_pci_driver); + if (ret) { + platform_driver_unregister(&serial_txx9_plat_driver); + } +#endif + if (ret == 0) + goto out; + + del_dev: + platform_device_del(serial_txx9_plat_devs); + put_dev: + platform_device_put(serial_txx9_plat_devs); + unreg_uart_drv: + uart_unregister_driver(&serial_txx9_reg); + out: + return ret; +} + +static void __exit serial_txx9_exit(void) +{ + int i; + +#ifdef ENABLE_SERIAL_TXX9_PCI + pci_unregister_driver(&serial_txx9_pci_driver); +#endif + platform_driver_unregister(&serial_txx9_plat_driver); + platform_device_unregister(serial_txx9_plat_devs); + for (i = 0; i < UART_NR; i++) { + struct uart_port *up = &serial_txx9_ports[i]; + if (up->iobase || up->mapbase) + uart_remove_one_port(&serial_txx9_reg, up); + } + + uart_unregister_driver(&serial_txx9_reg); +} + +module_init(serial_txx9_init); +module_exit(serial_txx9_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TX39/49 serial driver"); + +MODULE_ALIAS_CHARDEV_MAJOR(TXX9_TTY_MAJOR); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c new file mode 100644 index 000000000..e67d3a886 --- /dev/null +++ b/drivers/tty/serial/sh-sci.c @@ -0,0 +1,3498 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) + * + * Copyright (C) 2002 - 2011 Paul Mundt + * Copyright (C) 2015 Glider bvba + * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). + * + * based off of the old drivers/char/sh-sci.c by: + * + * Copyright (C) 1999, 2000 Niibe Yutaka + * Copyright (C) 2000 Sugioka Toshinobu + * Modified to support multiple serial ports. Stuart Menefy (May 2000). + * Modified to support SecureEdge. David McCullough (2002) + * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003). + * Removed SH7300 support (Jul 2007). 
+ */ +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SUPERH +#include +#include +#endif + +#include "serial_mctrl_gpio.h" +#include "sh-sci.h" + +/* Offsets into the sci_port->irqs array */ +enum { + SCIx_ERI_IRQ, + SCIx_RXI_IRQ, + SCIx_TXI_IRQ, + SCIx_BRI_IRQ, + SCIx_DRI_IRQ, + SCIx_TEI_IRQ, + SCIx_NR_IRQS, + + SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */ +}; + +#define SCIx_IRQ_IS_MUXED(port) \ + ((port)->irqs[SCIx_ERI_IRQ] == \ + (port)->irqs[SCIx_RXI_IRQ]) || \ + ((port)->irqs[SCIx_ERI_IRQ] && \ + ((port)->irqs[SCIx_RXI_IRQ] < 0)) + +enum SCI_CLKS { + SCI_FCK, /* Functional Clock */ + SCI_SCK, /* Optional External Clock */ + SCI_BRG_INT, /* Optional BRG Internal Clock Source */ + SCI_SCIF_CLK, /* Optional BRG External Clock Source */ + SCI_NUM_CLKS +}; + +/* Bit x set means sampling rate x + 1 is supported */ +#define SCI_SR(x) BIT((x) - 1) +#define SCI_SR_RANGE(x, y) GENMASK((y) - 1, (x) - 1) + +#define SCI_SR_SCIFAB SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \ + SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \ + SCI_SR(19) | SCI_SR(27) + +#define min_sr(_port) ffs((_port)->sampling_rate_mask) +#define max_sr(_port) fls((_port)->sampling_rate_mask) + +/* Iterate over all supported sampling rates, from high to low */ +#define for_each_sr(_sr, _port) \ + for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--) \ + if ((_port)->sampling_rate_mask & SCI_SR((_sr))) + +struct plat_sci_reg { + u8 offset, size; +}; + +struct sci_port_params { + const struct plat_sci_reg regs[SCIx_NR_REGS]; + unsigned int fifosize; + unsigned int overrun_reg; + unsigned int overrun_mask; + unsigned int sampling_rate_mask; + unsigned int error_mask; + unsigned int error_clear; +}; + +struct sci_port { + struct uart_port port; + + /* Platform configuration */ + const struct sci_port_params *params; + const struct plat_sci_port *cfg; + unsigned int sampling_rate_mask; + resource_size_t reg_size; + struct mctrl_gpios *gpios; + + /* Clocks */ + struct clk *clks[SCI_NUM_CLKS]; + unsigned long clk_rates[SCI_NUM_CLKS]; + + int irqs[SCIx_NR_IRQS]; + char *irqstr[SCIx_NR_IRQS]; + + struct dma_chan *chan_tx; + struct dma_chan *chan_rx; + +#ifdef CONFIG_SERIAL_SH_SCI_DMA + struct dma_chan *chan_tx_saved; + struct dma_chan *chan_rx_saved; + dma_cookie_t cookie_tx; + dma_cookie_t cookie_rx[2]; + dma_cookie_t active_rx; + dma_addr_t tx_dma_addr; + unsigned int tx_dma_len; + struct scatterlist sg_rx[2]; + void *rx_buf[2]; + size_t buf_len_rx; + struct work_struct work_tx; + struct hrtimer rx_timer; + unsigned int rx_timeout; /* microseconds */ +#endif + unsigned int rx_frame; + int rx_trigger; + struct timer_list rx_fifo_timer; + int rx_fifo_timeout; + u16 hscif_tot; + + bool has_rtscts; + bool autorts; +}; + +#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS + +static struct sci_port sci_ports[SCI_NPORTS]; +static unsigned long sci_ports_in_use; +static struct uart_driver sci_uart_driver; + +static inline struct sci_port * +to_sci_port(struct uart_port *uart) +{ + return container_of(uart, struct sci_port, port); +} + +static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = { + /* + * Common SCI definitions, dependent on the port's regshift + * value. 
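+ * Each entry below tabulates the register offsets and access sizes for
+ * one SCIx_*_REGTYPE, together with its FIFO size, overrun detection
+ * register/mask and error masks.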
+ */ + [SCIx_SCI_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x01, 8 }, + [SCSCR] = { 0x02, 8 }, + [SCxTDR] = { 0x03, 8 }, + [SCxSR] = { 0x04, 8 }, + [SCxRDR] = { 0x05, 8 }, + }, + .fifosize = 1, + .overrun_reg = SCxSR, + .overrun_mask = SCI_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER, + .error_clear = SCI_ERROR_CLEAR & ~SCI_ORER, + }, + + /* + * Common definitions for legacy IrDA ports. + */ + [SCIx_IRDA_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x02, 8 }, + [SCSCR] = { 0x04, 8 }, + [SCxTDR] = { 0x06, 8 }, + [SCxSR] = { 0x08, 16 }, + [SCxRDR] = { 0x0a, 8 }, + [SCFCR] = { 0x0c, 8 }, + [SCFDR] = { 0x0e, 16 }, + }, + .fifosize = 1, + .overrun_reg = SCxSR, + .overrun_mask = SCI_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER, + .error_clear = SCI_ERROR_CLEAR & ~SCI_ORER, + }, + + /* + * Common SCIFA definitions. + */ + [SCIx_SCIFA_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x20, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x24, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCPCR] = { 0x30, 16 }, + [SCPDR] = { 0x34, 16 }, + }, + .fifosize = 64, + .overrun_reg = SCxSR, + .overrun_mask = SCIFA_ORER, + .sampling_rate_mask = SCI_SR_SCIFAB, + .error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER, + .error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER, + }, + + /* + * Common SCIFB definitions. + */ + [SCIx_SCIFB_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x40, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x60, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCTFDR] = { 0x38, 16 }, + [SCRFDR] = { 0x3c, 16 }, + [SCPCR] = { 0x30, 16 }, + [SCPDR] = { 0x34, 16 }, + }, + .fifosize = 256, + .overrun_reg = SCxSR, + .overrun_mask = SCIFA_ORER, + .sampling_rate_mask = SCI_SR_SCIFAB, + .error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER, + .error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER, + }, + + /* + * Common SH-2(A) SCIF definitions for ports with FIFO data + * count registers. + */ + [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + }, + .fifosize = 16, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * The "SCIFA" that is in RZ/A2, RZ/G2L and RZ/T. + * It looks like a normal SCIF with FIFO data, but with a + * compressed address space. Also, the break out of interrupts + * are different: ERI/BRI, RXI, TXI, TEI, DRI. + */ + [SCIx_RZ_SCIFA_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x02, 8 }, + [SCSCR] = { 0x04, 16 }, + [SCxTDR] = { 0x06, 8 }, + [SCxSR] = { 0x08, 16 }, + [SCxRDR] = { 0x0A, 8 }, + [SCFCR] = { 0x0C, 16 }, + [SCFDR] = { 0x0E, 16 }, + [SCSPTR] = { 0x10, 16 }, + [SCLSR] = { 0x12, 16 }, + [SEMR] = { 0x14, 8 }, + }, + .fifosize = 16, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * Common SH-3 SCIF definitions. 
+ */ + [SCIx_SH3_SCIF_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x02, 8 }, + [SCSCR] = { 0x04, 8 }, + [SCxTDR] = { 0x06, 8 }, + [SCxSR] = { 0x08, 16 }, + [SCxRDR] = { 0x0a, 8 }, + [SCFCR] = { 0x0c, 8 }, + [SCFDR] = { 0x0e, 16 }, + }, + .fifosize = 16, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * Common SH-4(A) SCIF(B) definitions. + */ + [SCIx_SH4_SCIF_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + }, + .fifosize = 16, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * Common SCIF definitions for ports with a Baud Rate Generator for + * External Clock (BRG). + */ + [SCIx_SH4_SCIF_BRG_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + [SCDL] = { 0x30, 16 }, + [SCCKS] = { 0x34, 16 }, + }, + .fifosize = 16, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * Common HSCIF definitions. + */ + [SCIx_HSCIF_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + [HSSRR] = { 0x40, 16 }, + [SCDL] = { 0x30, 16 }, + [SCCKS] = { 0x34, 16 }, + [HSRTRGR] = { 0x54, 16 }, + [HSTTRGR] = { 0x58, 16 }, + }, + .fifosize = 128, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR_RANGE(8, 32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR + * register. + */ + [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCLSR] = { 0x24, 16 }, + }, + .fifosize = 16, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * Common SH-4(A) SCIF(B) definitions for ports with FIFO data + * count registers. 
+ */ + [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */ + [SCRFDR] = { 0x20, 16 }, + [SCSPTR] = { 0x24, 16 }, + [SCLSR] = { 0x28, 16 }, + }, + .fifosize = 16, + .overrun_reg = SCLSR, + .overrun_mask = SCLSR_ORER, + .sampling_rate_mask = SCI_SR(32), + .error_mask = SCIF_DEFAULT_ERROR_MASK, + .error_clear = SCIF_ERROR_CLEAR, + }, + + /* + * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR + * registers. + */ + [SCIx_SH7705_SCIF_REGTYPE] = { + .regs = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x20, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x24, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + }, + .fifosize = 64, + .overrun_reg = SCxSR, + .overrun_mask = SCIFA_ORER, + .sampling_rate_mask = SCI_SR(16), + .error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER, + .error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER, + }, +}; + +#define sci_getreg(up, offset) (&to_sci_port(up)->params->regs[offset]) + +/* + * The "offset" here is rather misleading, in that it refers to an enum + * value relative to the port mapping rather than the fixed offset + * itself, which needs to be manually retrieved from the platform's + * register map for the given port. + */ +static unsigned int sci_serial_in(struct uart_port *p, int offset) +{ + const struct plat_sci_reg *reg = sci_getreg(p, offset); + + if (reg->size == 8) + return ioread8(p->membase + (reg->offset << p->regshift)); + else if (reg->size == 16) + return ioread16(p->membase + (reg->offset << p->regshift)); + else + WARN(1, "Invalid register access\n"); + + return 0; +} + +static void sci_serial_out(struct uart_port *p, int offset, int value) +{ + const struct plat_sci_reg *reg = sci_getreg(p, offset); + + if (reg->size == 8) + iowrite8(value, p->membase + (reg->offset << p->regshift)); + else if (reg->size == 16) + iowrite16(value, p->membase + (reg->offset << p->regshift)); + else + WARN(1, "Invalid register access\n"); +} + +static void sci_port_enable(struct sci_port *sci_port) +{ + unsigned int i; + + if (!sci_port->port.dev) + return; + + pm_runtime_get_sync(sci_port->port.dev); + + for (i = 0; i < SCI_NUM_CLKS; i++) { + clk_prepare_enable(sci_port->clks[i]); + sci_port->clk_rates[i] = clk_get_rate(sci_port->clks[i]); + } + sci_port->port.uartclk = sci_port->clk_rates[SCI_FCK]; +} + +static void sci_port_disable(struct sci_port *sci_port) +{ + unsigned int i; + + if (!sci_port->port.dev) + return; + + for (i = SCI_NUM_CLKS; i-- > 0; ) + clk_disable_unprepare(sci_port->clks[i]); + + pm_runtime_put_sync(sci_port->port.dev); +} + +static inline unsigned long port_rx_irq_mask(struct uart_port *port) +{ + /* + * Not all ports (such as SCIFA) will support REIE. Rather than + * special-casing the port type, we check the port initialization + * IRQ enable mask to see whether the IRQ is desired at all. If + * it's unset, it's logically inferred that there's no point in + * testing for it. 
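+ * The result is therefore SCSCR_RIE, plus SCSCR_REIE only when the
+ * platform configuration asked for it.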
+ */ + return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE); +} + +static void sci_start_tx(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + unsigned short ctrl; + +#ifdef CONFIG_SERIAL_SH_SCI_DMA + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + u16 new, scr = serial_port_in(port, SCSCR); + if (s->chan_tx) + new = scr | SCSCR_TDRQE; + else + new = scr & ~SCSCR_TDRQE; + if (new != scr) + serial_port_out(port, SCSCR, new); + } + + if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && + dma_submit_error(s->cookie_tx)) { + s->cookie_tx = 0; + schedule_work(&s->work_tx); + } +#endif + + if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ + ctrl = serial_port_in(port, SCSCR); + serial_port_out(port, SCSCR, ctrl | SCSCR_TIE); + } +} + +static void sci_stop_tx(struct uart_port *port) +{ + unsigned short ctrl; + + /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ + ctrl = serial_port_in(port, SCSCR); + + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + ctrl &= ~SCSCR_TDRQE; + + ctrl &= ~SCSCR_TIE; + + serial_port_out(port, SCSCR, ctrl); + +#ifdef CONFIG_SERIAL_SH_SCI_DMA + if (to_sci_port(port)->chan_tx && + !dma_submit_error(to_sci_port(port)->cookie_tx)) { + dmaengine_terminate_async(to_sci_port(port)->chan_tx); + to_sci_port(port)->cookie_tx = -EINVAL; + } +#endif +} + +static void sci_start_rx(struct uart_port *port) +{ + unsigned short ctrl; + + ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port); + + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + ctrl &= ~SCSCR_RDRQE; + + serial_port_out(port, SCSCR, ctrl); +} + +static void sci_stop_rx(struct uart_port *port) +{ + unsigned short ctrl; + + ctrl = serial_port_in(port, SCSCR); + + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + ctrl &= ~SCSCR_RDRQE; + + ctrl &= ~port_rx_irq_mask(port); + + serial_port_out(port, SCSCR, ctrl); +} + +static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask) +{ + if (port->type == PORT_SCI) { + /* Just store the mask */ + serial_port_out(port, SCxSR, mask); + } else if (to_sci_port(port)->params->overrun_mask == SCIFA_ORER) { + /* SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 */ + /* Only clear the status bits we want to clear */ + serial_port_out(port, SCxSR, + serial_port_in(port, SCxSR) & mask); + } else { + /* Store the mask, clear parity/framing errors */ + serial_port_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC)); + } +} + +#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \ + defined(CONFIG_SERIAL_SH_SCI_EARLYCON) + +#ifdef CONFIG_CONSOLE_POLL +static int sci_poll_get_char(struct uart_port *port) +{ + unsigned short status; + int c; + + do { + status = serial_port_in(port, SCxSR); + if (status & SCxSR_ERRORS(port)) { + sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port)); + continue; + } + break; + } while (1); + + if (!(status & SCxSR_RDxF(port))) + return NO_POLL_CHAR; + + c = serial_port_in(port, SCxRDR); + + /* Dummy read */ + serial_port_in(port, SCxSR); + sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); + + return c; +} +#endif + +static void sci_poll_put_char(struct uart_port *port, unsigned char c) +{ + unsigned short status; + + do { + status = serial_port_in(port, SCxSR); + } while (!(status & SCxSR_TDxE(port))); + + serial_port_out(port, SCxTDR, c); + sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port)); +} +#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE 
|| + CONFIG_SERIAL_SH_SCI_EARLYCON */ + +static void sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + struct sci_port *s = to_sci_port(port); + + /* + * Use port-specific handler if provided. + */ + if (s->cfg->ops && s->cfg->ops->init_pins) { + s->cfg->ops->init_pins(port, cflag); + return; + } + + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + u16 data = serial_port_in(port, SCPDR); + u16 ctrl = serial_port_in(port, SCPCR); + + /* Enable RXD and TXD pin functions */ + ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC); + if (to_sci_port(port)->has_rtscts) { + /* RTS# is output, active low, unless autorts */ + if (!(port->mctrl & TIOCM_RTS)) { + ctrl |= SCPCR_RTSC; + data |= SCPDR_RTSD; + } else if (!s->autorts) { + ctrl |= SCPCR_RTSC; + data &= ~SCPDR_RTSD; + } else { + /* Enable RTS# pin function */ + ctrl &= ~SCPCR_RTSC; + } + /* Enable CTS# pin function */ + ctrl &= ~SCPCR_CTSC; + } + serial_port_out(port, SCPDR, data); + serial_port_out(port, SCPCR, ctrl); + } else if (sci_getreg(port, SCSPTR)->size) { + u16 status = serial_port_in(port, SCSPTR); + + /* RTS# is always output; and active low, unless autorts */ + status |= SCSPTR_RTSIO; + if (!(port->mctrl & TIOCM_RTS)) + status |= SCSPTR_RTSDT; + else if (!s->autorts) + status &= ~SCSPTR_RTSDT; + /* CTS# and SCK are inputs */ + status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO); + serial_port_out(port, SCSPTR, status); + } +} + +static int sci_txfill(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + unsigned int fifo_mask = (s->params->fifosize << 1) - 1; + const struct plat_sci_reg *reg; + + reg = sci_getreg(port, SCTFDR); + if (reg->size) + return serial_port_in(port, SCTFDR) & fifo_mask; + + reg = sci_getreg(port, SCFDR); + if (reg->size) + return serial_port_in(port, SCFDR) >> 8; + + return !(serial_port_in(port, SCxSR) & SCI_TDRE); +} + +static int sci_txroom(struct uart_port *port) +{ + return port->fifosize - sci_txfill(port); +} + +static int sci_rxfill(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + unsigned int fifo_mask = (s->params->fifosize << 1) - 1; + const struct plat_sci_reg *reg; + + reg = sci_getreg(port, SCRFDR); + if (reg->size) + return serial_port_in(port, SCRFDR) & fifo_mask; + + reg = sci_getreg(port, SCFDR); + if (reg->size) + return serial_port_in(port, SCFDR) & fifo_mask; + + return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; +} + +/* ********************************************************************** * + * the interrupt related routines * + * ********************************************************************** */ + +static void sci_transmit_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned int stopped = uart_tx_stopped(port); + unsigned short status; + unsigned short ctrl; + int count; + + status = serial_port_in(port, SCxSR); + if (!(status & SCxSR_TDxE(port))) { + ctrl = serial_port_in(port, SCSCR); + if (uart_circ_empty(xmit)) + ctrl &= ~SCSCR_TIE; + else + ctrl |= SCSCR_TIE; + serial_port_out(port, SCSCR, ctrl); + return; + } + + count = sci_txroom(port); + + do { + unsigned char c; + + if (port->x_char) { + c = port->x_char; + port->x_char = 0; + } else if (!uart_circ_empty(xmit) && !stopped) { + c = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + } else { + break; + } + + serial_port_out(port, SCxTDR, c); + + port->icount.tx++; + } while (--count > 0); + + sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port)); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + 
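+ /* Below the wake-up threshold: let writers refill the circular buffer */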
uart_write_wakeup(port); + if (uart_circ_empty(xmit)) + sci_stop_tx(port); + +} + +static void sci_receive_chars(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + int i, count, copied = 0; + unsigned short status; + unsigned char flag; + + status = serial_port_in(port, SCxSR); + if (!(status & SCxSR_RDxF(port))) + return; + + while (1) { + /* Don't copy more bytes than there is room for in the buffer */ + count = tty_buffer_request_room(tport, sci_rxfill(port)); + + /* If for any reason we can't copy more data, we're done! */ + if (count == 0) + break; + + if (port->type == PORT_SCI) { + char c = serial_port_in(port, SCxRDR); + if (uart_handle_sysrq_char(port, c)) + count = 0; + else + tty_insert_flip_char(tport, c, TTY_NORMAL); + } else { + for (i = 0; i < count; i++) { + char c; + + if (port->type == PORT_SCIF || + port->type == PORT_HSCIF) { + status = serial_port_in(port, SCxSR); + c = serial_port_in(port, SCxRDR); + } else { + c = serial_port_in(port, SCxRDR); + status = serial_port_in(port, SCxSR); + } + if (uart_handle_sysrq_char(port, c)) { + count--; i--; + continue; + } + + /* Store data and status */ + if (status & SCxSR_FER(port)) { + flag = TTY_FRAME; + port->icount.frame++; + } else if (status & SCxSR_PER(port)) { + flag = TTY_PARITY; + port->icount.parity++; + } else + flag = TTY_NORMAL; + + tty_insert_flip_char(tport, c, flag); + } + } + + serial_port_in(port, SCxSR); /* dummy read */ + sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); + + copied += count; + port->icount.rx += count; + } + + if (copied) { + /* Tell the rest of the system the news. New characters! */ + tty_flip_buffer_push(tport); + } else { + /* TTY buffers full; read from RX reg to prevent lockup */ + serial_port_in(port, SCxRDR); + serial_port_in(port, SCxSR); /* dummy read */ + sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); + } +} + +static int sci_handle_errors(struct uart_port *port) +{ + int copied = 0; + unsigned short status = serial_port_in(port, SCxSR); + struct tty_port *tport = &port->state->port; + struct sci_port *s = to_sci_port(port); + + /* Handle overruns */ + if (status & s->params->overrun_mask) { + port->icount.overrun++; + + /* overrun error */ + if (tty_insert_flip_char(tport, 0, TTY_OVERRUN)) + copied++; + } + + if (status & SCxSR_FER(port)) { + /* frame error */ + port->icount.frame++; + + if (tty_insert_flip_char(tport, 0, TTY_FRAME)) + copied++; + } + + if (status & SCxSR_PER(port)) { + /* parity error */ + port->icount.parity++; + + if (tty_insert_flip_char(tport, 0, TTY_PARITY)) + copied++; + } + + if (copied) + tty_flip_buffer_push(tport); + + return copied; +} + +static int sci_handle_fifo_overrun(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + struct sci_port *s = to_sci_port(port); + const struct plat_sci_reg *reg; + int copied = 0; + u16 status; + + reg = sci_getreg(port, s->params->overrun_reg); + if (!reg->size) + return 0; + + status = serial_port_in(port, s->params->overrun_reg); + if (status & s->params->overrun_mask) { + status &= ~s->params->overrun_mask; + serial_port_out(port, s->params->overrun_reg, status); + + port->icount.overrun++; + + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + tty_flip_buffer_push(tport); + copied++; + } + + return copied; +} + +static int sci_handle_breaks(struct uart_port *port) +{ + int copied = 0; + unsigned short status = serial_port_in(port, SCxSR); + struct tty_port *tport = &port->state->port; + + if (uart_handle_break(port)) + return 0; + + if (status & SCxSR_BRK(port)) { + 
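+ /* Count the break and queue a TTY_BREAK flag for the line discipline */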
port->icount.brk++; + + /* Notify of BREAK */ + if (tty_insert_flip_char(tport, 0, TTY_BREAK)) + copied++; + } + + if (copied) + tty_flip_buffer_push(tport); + + copied += sci_handle_fifo_overrun(port); + + return copied; +} + +static int scif_set_rtrg(struct uart_port *port, int rx_trig) +{ + unsigned int bits; + + if (rx_trig >= port->fifosize) + rx_trig = port->fifosize - 1; + if (rx_trig < 1) + rx_trig = 1; + + /* HSCIF can be set to an arbitrary level. */ + if (sci_getreg(port, HSRTRGR)->size) { + serial_port_out(port, HSRTRGR, rx_trig); + return rx_trig; + } + + switch (port->type) { + case PORT_SCIF: + if (rx_trig < 4) { + bits = 0; + rx_trig = 1; + } else if (rx_trig < 8) { + bits = SCFCR_RTRG0; + rx_trig = 4; + } else if (rx_trig < 14) { + bits = SCFCR_RTRG1; + rx_trig = 8; + } else { + bits = SCFCR_RTRG0 | SCFCR_RTRG1; + rx_trig = 14; + } + break; + case PORT_SCIFA: + case PORT_SCIFB: + if (rx_trig < 16) { + bits = 0; + rx_trig = 1; + } else if (rx_trig < 32) { + bits = SCFCR_RTRG0; + rx_trig = 16; + } else if (rx_trig < 48) { + bits = SCFCR_RTRG1; + rx_trig = 32; + } else { + bits = SCFCR_RTRG0 | SCFCR_RTRG1; + rx_trig = 48; + } + break; + default: + WARN(1, "unknown FIFO configuration"); + return 1; + } + + serial_port_out(port, SCFCR, + (serial_port_in(port, SCFCR) & + ~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits); + + return rx_trig; +} + +static int scif_rtrg_enabled(struct uart_port *port) +{ + if (sci_getreg(port, HSRTRGR)->size) + return serial_port_in(port, HSRTRGR) != 0; + else + return (serial_port_in(port, SCFCR) & + (SCFCR_RTRG0 | SCFCR_RTRG1)) != 0; +} + +static void rx_fifo_timer_fn(struct timer_list *t) +{ + struct sci_port *s = from_timer(s, t, rx_fifo_timer); + struct uart_port *port = &s->port; + + dev_dbg(port->dev, "Rx timed out\n"); + scif_set_rtrg(port, 1); +} + +static ssize_t rx_fifo_trigger_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct sci_port *sci = to_sci_port(port); + + return sprintf(buf, "%d\n", sci->rx_trigger); +} + +static ssize_t rx_fifo_trigger_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct sci_port *sci = to_sci_port(port); + int ret; + long r; + + ret = kstrtol(buf, 0, &r); + if (ret) + return ret; + + sci->rx_trigger = scif_set_rtrg(port, r); + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + scif_set_rtrg(port, 1); + + return count; +} + +static DEVICE_ATTR_RW(rx_fifo_trigger); + +static ssize_t rx_fifo_timeout_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct sci_port *sci = to_sci_port(port); + int v; + + if (port->type == PORT_HSCIF) + v = sci->hscif_tot >> HSSCR_TOT_SHIFT; + else + v = sci->rx_fifo_timeout; + + return sprintf(buf, "%d\n", v); +} + +static ssize_t rx_fifo_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct sci_port *sci = to_sci_port(port); + int ret; + long r; + + ret = kstrtol(buf, 0, &r); + if (ret) + return ret; + + if (port->type == PORT_HSCIF) { + if (r < 0 || r > 3) + return -EINVAL; + sci->hscif_tot = r << HSSCR_TOT_SHIFT; + } else { + sci->rx_fifo_timeout = r; + scif_set_rtrg(port, 1); + if (r > 0) + timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0); + } + + return count; +} + +static DEVICE_ATTR_RW(rx_fifo_timeout); + + +#ifdef 
CONFIG_SERIAL_SH_SCI_DMA +static void sci_dma_tx_complete(void *arg) +{ + struct sci_port *s = arg; + struct uart_port *port = &s->port; + struct circ_buf *xmit = &port->state->xmit; + unsigned long flags; + + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); + + spin_lock_irqsave(&port->lock, flags); + + xmit->tail += s->tx_dma_len; + xmit->tail &= UART_XMIT_SIZE - 1; + + port->icount.tx += s->tx_dma_len; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (!uart_circ_empty(xmit)) { + s->cookie_tx = 0; + schedule_work(&s->work_tx); + } else { + s->cookie_tx = -EINVAL; + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + u16 ctrl = serial_port_in(port, SCSCR); + serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE); + } + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* Locking: called with port lock held */ +static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count) +{ + struct uart_port *port = &s->port; + struct tty_port *tport = &port->state->port; + int copied; + + copied = tty_insert_flip_string(tport, buf, count); + if (copied < count) + port->icount.buf_overrun++; + + port->icount.rx += copied; + + return copied; +} + +static int sci_dma_rx_find_active(struct sci_port *s) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++) + if (s->active_rx == s->cookie_rx[i]) + return i; + + return -1; +} + +static void sci_dma_rx_chan_invalidate(struct sci_port *s) +{ + unsigned int i; + + s->chan_rx = NULL; + for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++) + s->cookie_rx[i] = -EINVAL; + s->active_rx = 0; +} + +static void sci_dma_rx_release(struct sci_port *s) +{ + struct dma_chan *chan = s->chan_rx_saved; + + s->chan_rx_saved = NULL; + sci_dma_rx_chan_invalidate(s); + dmaengine_terminate_sync(chan); + dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0], + sg_dma_address(&s->sg_rx[0])); + dma_release_channel(chan); +} + +static void start_hrtimer_us(struct hrtimer *hrt, unsigned long usec) +{ + long sec = usec / 1000000; + long nsec = (usec % 1000000) * 1000; + ktime_t t = ktime_set(sec, nsec); + + hrtimer_start(hrt, t, HRTIMER_MODE_REL); +} + +static void sci_dma_rx_reenable_irq(struct sci_port *s) +{ + struct uart_port *port = &s->port; + u16 scr; + + /* Direct new serial port interrupts back to CPU */ + scr = serial_port_in(port, SCSCR); + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + scr &= ~SCSCR_RDRQE; + enable_irq(s->irqs[SCIx_RXI_IRQ]); + } + serial_port_out(port, SCSCR, scr | SCSCR_RIE); +} + +static void sci_dma_rx_complete(void *arg) +{ + struct sci_port *s = arg; + struct dma_chan *chan = s->chan_rx; + struct uart_port *port = &s->port; + struct dma_async_tx_descriptor *desc; + unsigned long flags; + int active, count = 0; + + dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line, + s->active_rx); + + spin_lock_irqsave(&port->lock, flags); + + active = sci_dma_rx_find_active(s); + if (active >= 0) + count = sci_dma_rx_push(s, s->rx_buf[active], s->buf_len_rx); + + start_hrtimer_us(&s->rx_timer, s->rx_timeout); + + if (count) + tty_flip_buffer_push(&port->state->port); + + desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[active], 1, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + goto fail; + + desc->callback = sci_dma_rx_complete; + desc->callback_param = s; + s->cookie_rx[active] = dmaengine_submit(desc); + if (dma_submit_error(s->cookie_rx[active])) + goto fail; + + s->active_rx = s->cookie_rx[!active]; + + 
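+ /* Ping-pong: the just-drained half was resubmitted above, so the other descriptor now becomes the active one */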
dma_async_issue_pending(chan); + + spin_unlock_irqrestore(&port->lock, flags); + dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n", + __func__, s->cookie_rx[active], active, s->active_rx); + return; + +fail: + spin_unlock_irqrestore(&port->lock, flags); + dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); + /* Switch to PIO */ + spin_lock_irqsave(&port->lock, flags); + dmaengine_terminate_async(chan); + sci_dma_rx_chan_invalidate(s); + sci_dma_rx_reenable_irq(s); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void sci_dma_tx_release(struct sci_port *s) +{ + struct dma_chan *chan = s->chan_tx_saved; + + cancel_work_sync(&s->work_tx); + s->chan_tx_saved = s->chan_tx = NULL; + s->cookie_tx = -EINVAL; + dmaengine_terminate_sync(chan); + dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE, + DMA_TO_DEVICE); + dma_release_channel(chan); +} + +static int sci_dma_rx_submit(struct sci_port *s, bool port_lock_held) +{ + struct dma_chan *chan = s->chan_rx; + struct uart_port *port = &s->port; + unsigned long flags; + int i; + + for (i = 0; i < 2; i++) { + struct scatterlist *sg = &s->sg_rx[i]; + struct dma_async_tx_descriptor *desc; + + desc = dmaengine_prep_slave_sg(chan, + sg, 1, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + goto fail; + + desc->callback = sci_dma_rx_complete; + desc->callback_param = s; + s->cookie_rx[i] = dmaengine_submit(desc); + if (dma_submit_error(s->cookie_rx[i])) + goto fail; + + } + + s->active_rx = s->cookie_rx[0]; + + dma_async_issue_pending(chan); + return 0; + +fail: + /* Switch to PIO */ + if (!port_lock_held) + spin_lock_irqsave(&port->lock, flags); + if (i) + dmaengine_terminate_async(chan); + sci_dma_rx_chan_invalidate(s); + sci_start_rx(port); + if (!port_lock_held) + spin_unlock_irqrestore(&port->lock, flags); + return -EAGAIN; +} + +static void sci_dma_tx_work_fn(struct work_struct *work) +{ + struct sci_port *s = container_of(work, struct sci_port, work_tx); + struct dma_async_tx_descriptor *desc; + struct dma_chan *chan = s->chan_tx; + struct uart_port *port = &s->port; + struct circ_buf *xmit = &port->state->xmit; + unsigned long flags; + dma_addr_t buf; + int head, tail; + + /* + * DMA is idle now. + * Port xmit buffer is already mapped, and it is one page... Just adjust + * offsets and lengths. Since it is a circular buffer, we have to + * transmit till the end, and then the rest. Take the port lock to get a + * consistent xmit buffer state. 
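+	 *
+	 * As an illustration (sizes assumed; UART_XMIT_SIZE is normally one
+	 * 4096-byte page): with head = 100 and tail = 4000,
+	 * CIRC_CNT_TO_END() yields 96, so this pass maps only the 96 bytes
+	 * from offset 4000 up to the end of the buffer. The completion
+	 * callback then advances the tail past them and reschedules this
+	 * work, and the 100 bytes wrapped to the start of the buffer go out
+	 * in the following transfer.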
+ */ + spin_lock_irq(&port->lock); + head = xmit->head; + tail = xmit->tail; + buf = s->tx_dma_addr + tail; + s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE); + if (!s->tx_dma_len) { + /* Transmit buffer has been flushed */ + spin_unlock_irq(&port->lock); + return; + } + + desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + spin_unlock_irq(&port->lock); + dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n"); + goto switch_to_pio; + } + + dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len, + DMA_TO_DEVICE); + + desc->callback = sci_dma_tx_complete; + desc->callback_param = s; + s->cookie_tx = dmaengine_submit(desc); + if (dma_submit_error(s->cookie_tx)) { + spin_unlock_irq(&port->lock); + dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); + goto switch_to_pio; + } + + spin_unlock_irq(&port->lock); + dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", + __func__, xmit->buf, tail, head, s->cookie_tx); + + dma_async_issue_pending(chan); + return; + +switch_to_pio: + spin_lock_irqsave(&port->lock, flags); + s->chan_tx = NULL; + sci_start_tx(port); + spin_unlock_irqrestore(&port->lock, flags); + return; +} + +static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t) +{ + struct sci_port *s = container_of(t, struct sci_port, rx_timer); + struct dma_chan *chan = s->chan_rx; + struct uart_port *port = &s->port; + struct dma_tx_state state; + enum dma_status status; + unsigned long flags; + unsigned int read; + int active, count; + + dev_dbg(port->dev, "DMA Rx timed out\n"); + + spin_lock_irqsave(&port->lock, flags); + + active = sci_dma_rx_find_active(s); + if (active < 0) { + spin_unlock_irqrestore(&port->lock, flags); + return HRTIMER_NORESTART; + } + + status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state); + if (status == DMA_COMPLETE) { + spin_unlock_irqrestore(&port->lock, flags); + dev_dbg(port->dev, "Cookie %d #%d has already completed\n", + s->active_rx, active); + + /* Let packet complete handler take care of the packet */ + return HRTIMER_NORESTART; + } + + dmaengine_pause(chan); + + /* + * sometimes DMA transfer doesn't stop even if it is stopped and + * data keeps on coming until transaction is complete so check + * for DMA_COMPLETE again + * Let packet complete handler take care of the packet + */ + status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state); + if (status == DMA_COMPLETE) { + spin_unlock_irqrestore(&port->lock, flags); + dev_dbg(port->dev, "Transaction complete after DMA engine was stopped"); + return HRTIMER_NORESTART; + } + + /* Handle incomplete DMA receive */ + dmaengine_terminate_async(s->chan_rx); + read = sg_dma_len(&s->sg_rx[active]) - state.residue; + + if (read) { + count = sci_dma_rx_push(s, s->rx_buf[active], read); + if (count) + tty_flip_buffer_push(&port->state->port); + } + + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + sci_dma_rx_submit(s, true); + + sci_dma_rx_reenable_irq(s); + + spin_unlock_irqrestore(&port->lock, flags); + + return HRTIMER_NORESTART; +} + +static struct dma_chan *sci_request_dma_chan(struct uart_port *port, + enum dma_transfer_direction dir) +{ + struct dma_chan *chan; + struct dma_slave_config cfg; + int ret; + + chan = dma_request_slave_channel(port->dev, + dir == DMA_MEM_TO_DEV ? 
"tx" : "rx"); + if (!chan) { + dev_dbg(port->dev, "dma_request_slave_channel failed\n"); + return NULL; + } + + memset(&cfg, 0, sizeof(cfg)); + cfg.direction = dir; + if (dir == DMA_MEM_TO_DEV) { + cfg.dst_addr = port->mapbase + + (sci_getreg(port, SCxTDR)->offset << port->regshift); + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + } else { + cfg.src_addr = port->mapbase + + (sci_getreg(port, SCxRDR)->offset << port->regshift); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + } + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) { + dev_warn(port->dev, "dmaengine_slave_config failed %d\n", ret); + dma_release_channel(chan); + return NULL; + } + + return chan; +} + +static void sci_request_dma(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + struct dma_chan *chan; + + dev_dbg(port->dev, "%s: port %d\n", __func__, port->line); + + /* + * DMA on console may interfere with Kernel log messages which use + * plain putchar(). So, simply don't use it with a console. + */ + if (uart_console(port)) + return; + + if (!port->dev->of_node) + return; + + s->cookie_tx = -EINVAL; + + /* + * Don't request a dma channel if no channel was specified + * in the device tree. + */ + if (!of_find_property(port->dev->of_node, "dmas", NULL)) + return; + + chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV); + dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); + if (chan) { + /* UART circular tx buffer is an aligned page. */ + s->tx_dma_addr = dma_map_single(chan->device->dev, + port->state->xmit.buf, + UART_XMIT_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) { + dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n"); + dma_release_channel(chan); + } else { + dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n", + __func__, UART_XMIT_SIZE, + port->state->xmit.buf, &s->tx_dma_addr); + + INIT_WORK(&s->work_tx, sci_dma_tx_work_fn); + s->chan_tx_saved = s->chan_tx = chan; + } + } + + chan = sci_request_dma_chan(port, DMA_DEV_TO_MEM); + dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); + if (chan) { + unsigned int i; + dma_addr_t dma; + void *buf; + + s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize); + buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2, + &dma, GFP_KERNEL); + if (!buf) { + dev_warn(port->dev, + "Failed to allocate Rx dma buffer, using PIO\n"); + dma_release_channel(chan); + return; + } + + for (i = 0; i < 2; i++) { + struct scatterlist *sg = &s->sg_rx[i]; + + sg_init_table(sg, 1); + s->rx_buf[i] = buf; + sg_dma_address(sg) = dma; + sg_dma_len(sg) = s->buf_len_rx; + + buf += s->buf_len_rx; + dma += s->buf_len_rx; + } + + hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + s->rx_timer.function = sci_dma_rx_timer_fn; + + s->chan_rx_saved = s->chan_rx = chan; + + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + sci_dma_rx_submit(s, false); + } +} + +static void sci_free_dma(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + + if (s->chan_tx_saved) + sci_dma_tx_release(s); + if (s->chan_rx_saved) + sci_dma_rx_release(s); +} + +static void sci_flush_buffer(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + + /* + * In uart_flush_buffer(), the xmit circular buffer has just been + * cleared, so we have to reset tx_dma_len accordingly, and stop any + * pending transfers + */ + s->tx_dma_len = 0; + if (s->chan_tx) { + dmaengine_terminate_async(s->chan_tx); + s->cookie_tx = -EINVAL; + } +} +#else /* !CONFIG_SERIAL_SH_SCI_DMA */ +static inline 
void sci_request_dma(struct uart_port *port) +{ +} + +static inline void sci_free_dma(struct uart_port *port) +{ +} + +#define sci_flush_buffer NULL +#endif /* !CONFIG_SERIAL_SH_SCI_DMA */ + +static irqreturn_t sci_rx_interrupt(int irq, void *ptr) +{ + struct uart_port *port = ptr; + struct sci_port *s = to_sci_port(port); + +#ifdef CONFIG_SERIAL_SH_SCI_DMA + if (s->chan_rx) { + u16 scr = serial_port_in(port, SCSCR); + u16 ssr = serial_port_in(port, SCxSR); + + /* Disable future Rx interrupts */ + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + disable_irq_nosync(irq); + scr |= SCSCR_RDRQE; + } else { + if (sci_dma_rx_submit(s, false) < 0) + goto handle_pio; + + scr &= ~SCSCR_RIE; + } + serial_port_out(port, SCSCR, scr); + /* Clear current interrupt */ + serial_port_out(port, SCxSR, + ssr & ~(SCIF_DR | SCxSR_RDxF(port))); + dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u us\n", + jiffies, s->rx_timeout); + start_hrtimer_us(&s->rx_timer, s->rx_timeout); + + return IRQ_HANDLED; + } + +handle_pio: +#endif + + if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) { + if (!scif_rtrg_enabled(port)) + scif_set_rtrg(port, s->rx_trigger); + + mod_timer(&s->rx_fifo_timer, jiffies + DIV_ROUND_UP( + s->rx_frame * HZ * s->rx_fifo_timeout, 1000000)); + } + + /* I think sci_receive_chars has to be called irrespective + * of whether the I_IXOFF is set, otherwise, how is the interrupt + * to be disabled? + */ + sci_receive_chars(port); + + return IRQ_HANDLED; +} + +static irqreturn_t sci_tx_interrupt(int irq, void *ptr) +{ + struct uart_port *port = ptr; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + sci_transmit_chars(port); + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t sci_br_interrupt(int irq, void *ptr) +{ + struct uart_port *port = ptr; + + /* Handle BREAKs */ + sci_handle_breaks(port); + + /* drop invalid character received before break was detected */ + serial_port_in(port, SCxRDR); + + sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port)); + + return IRQ_HANDLED; +} + +static irqreturn_t sci_er_interrupt(int irq, void *ptr) +{ + struct uart_port *port = ptr; + struct sci_port *s = to_sci_port(port); + + if (s->irqs[SCIx_ERI_IRQ] == s->irqs[SCIx_BRI_IRQ]) { + /* Break and Error interrupts are muxed */ + unsigned short ssr_status = serial_port_in(port, SCxSR); + + /* Break Interrupt */ + if (ssr_status & SCxSR_BRK(port)) + sci_br_interrupt(irq, ptr); + + /* Break only? 
*/ + if (!(ssr_status & SCxSR_ERRORS(port))) + return IRQ_HANDLED; + } + + /* Handle errors */ + if (port->type == PORT_SCI) { + if (sci_handle_errors(port)) { + /* discard character in rx buffer */ + serial_port_in(port, SCxSR); + sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); + } + } else { + sci_handle_fifo_overrun(port); + if (!s->chan_rx) + sci_receive_chars(port); + } + + sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port)); + + /* Kick the transmission */ + if (!s->chan_tx) + sci_tx_interrupt(irq, ptr); + + return IRQ_HANDLED; +} + +static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) +{ + unsigned short ssr_status, scr_status, err_enabled, orer_status = 0; + struct uart_port *port = ptr; + struct sci_port *s = to_sci_port(port); + irqreturn_t ret = IRQ_NONE; + + ssr_status = serial_port_in(port, SCxSR); + scr_status = serial_port_in(port, SCSCR); + if (s->params->overrun_reg == SCxSR) + orer_status = ssr_status; + else if (sci_getreg(port, s->params->overrun_reg)->size) + orer_status = serial_port_in(port, s->params->overrun_reg); + + err_enabled = scr_status & port_rx_irq_mask(port); + + /* Tx Interrupt */ + if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) && + !s->chan_tx) + ret = sci_tx_interrupt(irq, ptr); + + /* + * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / + * DR flags + */ + if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && + (scr_status & SCSCR_RIE)) + ret = sci_rx_interrupt(irq, ptr); + + /* Error Interrupt */ + if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) + ret = sci_er_interrupt(irq, ptr); + + /* Break Interrupt */ + if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] && + (ssr_status & SCxSR_BRK(port)) && err_enabled) + ret = sci_br_interrupt(irq, ptr); + + /* Overrun Interrupt */ + if (orer_status & s->params->overrun_mask) { + sci_handle_fifo_overrun(port); + ret = IRQ_HANDLED; + } + + return ret; +} + +static const struct sci_irq_desc { + const char *desc; + irq_handler_t handler; +} sci_irq_desc[] = { + /* + * Split out handlers, the default case. + */ + [SCIx_ERI_IRQ] = { + .desc = "rx err", + .handler = sci_er_interrupt, + }, + + [SCIx_RXI_IRQ] = { + .desc = "rx full", + .handler = sci_rx_interrupt, + }, + + [SCIx_TXI_IRQ] = { + .desc = "tx empty", + .handler = sci_tx_interrupt, + }, + + [SCIx_BRI_IRQ] = { + .desc = "break", + .handler = sci_br_interrupt, + }, + + [SCIx_DRI_IRQ] = { + .desc = "rx ready", + .handler = sci_rx_interrupt, + }, + + [SCIx_TEI_IRQ] = { + .desc = "tx end", + .handler = sci_tx_interrupt, + }, + + /* + * Special muxed handler. + */ + [SCIx_MUX_IRQ] = { + .desc = "mux", + .handler = sci_mpxed_interrupt, + }, +}; + +static int sci_request_irq(struct sci_port *port) +{ + struct uart_port *up = &port->port; + int i, j, w, ret = 0; + + for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) { + const struct sci_irq_desc *desc; + int irq; + + /* Check if already registered (muxed) */ + for (w = 0; w < i; w++) + if (port->irqs[w] == port->irqs[i]) + w = i + 1; + if (w > i) + continue; + + if (SCIx_IRQ_IS_MUXED(port)) { + i = SCIx_MUX_IRQ; + irq = up->irq; + } else { + irq = port->irqs[i]; + + /* + * Certain port types won't support all of the + * available interrupt sources. 
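+			 * Those sources were filled in as negative values by
+			 * sci_init_single() (platform_get_irq_optional()) and
+			 * are simply skipped below rather than treated as an
+			 * error.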
+ */ + if (unlikely(irq < 0)) + continue; + } + + desc = sci_irq_desc + i; + port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s", + dev_name(up->dev), desc->desc); + if (!port->irqstr[j]) { + ret = -ENOMEM; + goto out_nomem; + } + + ret = request_irq(irq, desc->handler, up->irqflags, + port->irqstr[j], port); + if (unlikely(ret)) { + dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc); + goto out_noirq; + } + } + + return 0; + +out_noirq: + while (--i >= 0) + free_irq(port->irqs[i], port); + +out_nomem: + while (--j >= 0) + kfree(port->irqstr[j]); + + return ret; +} + +static void sci_free_irq(struct sci_port *port) +{ + int i, j; + + /* + * Intentionally in reverse order so we iterate over the muxed + * IRQ first. + */ + for (i = 0; i < SCIx_NR_IRQS; i++) { + int irq = port->irqs[i]; + + /* + * Certain port types won't support all of the available + * interrupt sources. + */ + if (unlikely(irq < 0)) + continue; + + /* Check if already freed (irq was muxed) */ + for (j = 0; j < i; j++) + if (port->irqs[j] == irq) + j = i + 1; + if (j > i) + continue; + + free_irq(port->irqs[i], port); + kfree(port->irqstr[i]); + + if (SCIx_IRQ_IS_MUXED(port)) { + /* If there's only one IRQ, we're done. */ + return; + } + } +} + +static unsigned int sci_tx_empty(struct uart_port *port) +{ + unsigned short status = serial_port_in(port, SCxSR); + unsigned short in_tx_fifo = sci_txfill(port); + + return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; +} + +static void sci_set_rts(struct uart_port *port, bool state) +{ + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + u16 data = serial_port_in(port, SCPDR); + + /* Active low */ + if (state) + data &= ~SCPDR_RTSD; + else + data |= SCPDR_RTSD; + serial_port_out(port, SCPDR, data); + + /* RTS# is output */ + serial_port_out(port, SCPCR, + serial_port_in(port, SCPCR) | SCPCR_RTSC); + } else if (sci_getreg(port, SCSPTR)->size) { + u16 ctrl = serial_port_in(port, SCSPTR); + + /* Active low */ + if (state) + ctrl &= ~SCSPTR_RTSDT; + else + ctrl |= SCSPTR_RTSDT; + serial_port_out(port, SCSPTR, ctrl); + } +} + +static bool sci_get_cts(struct uart_port *port) +{ + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + /* Active low */ + return !(serial_port_in(port, SCPDR) & SCPDR_CTSD); + } else if (sci_getreg(port, SCSPTR)->size) { + /* Active low */ + return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT); + } + + return true; +} + +/* + * Modem control is a bit of a mixed bag for SCI(F) ports. Generally + * CTS/RTS is supported in hardware by at least one port and controlled + * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently + * handled via the ->init_pins() op, which is a bit of a one-way street, + * lacking any ability to defer pin control -- this will later be + * converted over to the GPIO framework). + * + * Other modes (such as loopback) are supported generically on certain + * port types, but not others. For these it's sufficient to test for the + * existence of the support register and simply ignore the port type. + */ +static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct sci_port *s = to_sci_port(port); + + if (mctrl & TIOCM_LOOP) { + const struct plat_sci_reg *reg; + + /* + * Standard loopback mode for SCFCR ports. 
+ */ + reg = sci_getreg(port, SCFCR); + if (reg->size) + serial_port_out(port, SCFCR, + serial_port_in(port, SCFCR) | + SCFCR_LOOP); + } + + mctrl_gpio_set(s->gpios, mctrl); + + if (!s->has_rtscts) + return; + + if (!(mctrl & TIOCM_RTS)) { + /* Disable Auto RTS */ + serial_port_out(port, SCFCR, + serial_port_in(port, SCFCR) & ~SCFCR_MCE); + + /* Clear RTS */ + sci_set_rts(port, 0); + } else if (s->autorts) { + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + /* Enable RTS# pin function */ + serial_port_out(port, SCPCR, + serial_port_in(port, SCPCR) & ~SCPCR_RTSC); + } + + /* Enable Auto RTS */ + serial_port_out(port, SCFCR, + serial_port_in(port, SCFCR) | SCFCR_MCE); + } else { + /* Set RTS */ + sci_set_rts(port, 1); + } +} + +static unsigned int sci_get_mctrl(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + struct mctrl_gpios *gpios = s->gpios; + unsigned int mctrl = 0; + + mctrl_gpio_get(gpios, &mctrl); + + /* + * CTS/RTS is handled in hardware when supported, while nothing + * else is wired up. + */ + if (s->autorts) { + if (sci_get_cts(port)) + mctrl |= TIOCM_CTS; + } else if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS)) { + mctrl |= TIOCM_CTS; + } + if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)) + mctrl |= TIOCM_DSR; + if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)) + mctrl |= TIOCM_CAR; + + return mctrl; +} + +static void sci_enable_ms(struct uart_port *port) +{ + mctrl_gpio_enable_ms(to_sci_port(port)->gpios); +} + +static void sci_break_ctl(struct uart_port *port, int break_state) +{ + unsigned short scscr, scsptr; + unsigned long flags; + + /* check whether the port has SCSPTR */ + if (!sci_getreg(port, SCSPTR)->size) { + /* + * Not supported by hardware. Most parts couple break and rx + * interrupts together, with break detection always enabled. 
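+		 * On parts that do provide SCSPTR, the code further down in
+		 * this function drives the break by hand: SPB2 is switched to
+		 * an output (SPB2IO) and pulled low by clearing SPB2DT, with
+		 * the transmitter disabled so the pin level reaches the line;
+		 * clearing the break reverses both settings and re-enables TE.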
+ */ + return; + } + + spin_lock_irqsave(&port->lock, flags); + scsptr = serial_port_in(port, SCSPTR); + scscr = serial_port_in(port, SCSCR); + + if (break_state == -1) { + scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT; + scscr &= ~SCSCR_TE; + } else { + scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO; + scscr |= SCSCR_TE; + } + + serial_port_out(port, SCSPTR, scsptr); + serial_port_out(port, SCSCR, scscr); + spin_unlock_irqrestore(&port->lock, flags); +} + +static int sci_startup(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + int ret; + + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); + + sci_request_dma(port); + + ret = sci_request_irq(s); + if (unlikely(ret < 0)) { + sci_free_dma(port); + return ret; + } + + return 0; +} + +static void sci_shutdown(struct uart_port *port) +{ + struct sci_port *s = to_sci_port(port); + unsigned long flags; + u16 scr; + + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); + + s->autorts = false; + mctrl_gpio_disable_ms(to_sci_port(port)->gpios); + + spin_lock_irqsave(&port->lock, flags); + sci_stop_rx(port); + sci_stop_tx(port); + /* + * Stop RX and TX, disable related interrupts, keep clock source + * and HSCIF TOT bits + */ + scr = serial_port_in(port, SCSCR); + serial_port_out(port, SCSCR, scr & + (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot)); + spin_unlock_irqrestore(&port->lock, flags); + +#ifdef CONFIG_SERIAL_SH_SCI_DMA + if (s->chan_rx_saved) { + dev_dbg(port->dev, "%s(%d) deleting rx_timer\n", __func__, + port->line); + hrtimer_cancel(&s->rx_timer); + } +#endif + + if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) + del_timer_sync(&s->rx_fifo_timer); + sci_free_irq(s); + sci_free_dma(port); +} + +static int sci_sck_calc(struct sci_port *s, unsigned int bps, + unsigned int *srr) +{ + unsigned long freq = s->clk_rates[SCI_SCK]; + int err, min_err = INT_MAX; + unsigned int sr; + + if (s->port.type != PORT_HSCIF) + freq *= 2; + + for_each_sr(sr, s) { + err = DIV_ROUND_CLOSEST(freq, sr) - bps; + if (abs(err) >= abs(min_err)) + continue; + + min_err = err; + *srr = sr - 1; + + if (!err) + break; + } + + dev_dbg(s->port.dev, "SCK: %u%+d bps using SR %u\n", bps, min_err, + *srr + 1); + return min_err; +} + +static int sci_brg_calc(struct sci_port *s, unsigned int bps, + unsigned long freq, unsigned int *dlr, + unsigned int *srr) +{ + int err, min_err = INT_MAX; + unsigned int sr, dl; + + if (s->port.type != PORT_HSCIF) + freq *= 2; + + for_each_sr(sr, s) { + dl = DIV_ROUND_CLOSEST(freq, sr * bps); + dl = clamp(dl, 1U, 65535U); + + err = DIV_ROUND_CLOSEST(freq, sr * dl) - bps; + if (abs(err) >= abs(min_err)) + continue; + + min_err = err; + *dlr = dl; + *srr = sr - 1; + + if (!err) + break; + } + + dev_dbg(s->port.dev, "BRG: %u%+d bps using DL %u SR %u\n", bps, + min_err, *dlr, *srr + 1); + return min_err; +} + +/* calculate sample rate, BRR, and clock select */ +static int sci_scbrr_calc(struct sci_port *s, unsigned int bps, + unsigned int *brr, unsigned int *srr, + unsigned int *cks) +{ + unsigned long freq = s->clk_rates[SCI_FCK]; + unsigned int sr, br, prediv, scrate, c; + int err, min_err = INT_MAX; + + if (s->port.type != PORT_HSCIF) + freq *= 2; + + /* + * Find the combination of sample rate and clock select with the + * smallest deviation from the desired baud rate. + * Prefer high sample rates to maximise the receive margin. 
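+	 *
+	 * As a worked illustration (clock figure assumed): with a 24 MHz
+	 * functional clock, doubled to 48 MHz for non-HSCIF parts, a target
+	 * of 115200 bps and a candidate of sr = 16, c = 0 (prediv = 32),
+	 * the bit rate register becomes
+	 * DIV_ROUND_CLOSEST(48000000, 32 * 115200) - 1 = 12 and the
+	 * residual error is
+	 * DIV_ROUND_CLOSEST(48000000, 13 * 32) - 115200 = +185 bps, roughly
+	 * 0.16%; a later candidate only wins if its error is strictly
+	 * smaller in magnitude.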
+ * + * M: Receive margin (%) + * N: Ratio of bit rate to clock (N = sampling rate) + * D: Clock duty (D = 0 to 1.0) + * L: Frame length (L = 9 to 12) + * F: Absolute value of clock frequency deviation + * + * M = |(0.5 - 1 / 2 * N) - ((L - 0.5) * F) - + * (|D - 0.5| / N * (1 + F))| + * NOTE: Usually, treat D for 0.5, F is 0 by this calculation. + */ + for_each_sr(sr, s) { + for (c = 0; c <= 3; c++) { + /* integerized formulas from HSCIF documentation */ + prediv = sr << (2 * c + 1); + + /* + * We need to calculate: + * + * br = freq / (prediv * bps) clamped to [1..256] + * err = freq / (br * prediv) - bps + * + * Watch out for overflow when calculating the desired + * sampling clock rate! + */ + if (bps > UINT_MAX / prediv) + break; + + scrate = prediv * bps; + br = DIV_ROUND_CLOSEST(freq, scrate); + br = clamp(br, 1U, 256U); + + err = DIV_ROUND_CLOSEST(freq, br * prediv) - bps; + if (abs(err) >= abs(min_err)) + continue; + + min_err = err; + *brr = br - 1; + *srr = sr - 1; + *cks = c; + + if (!err) + goto found; + } + } + +found: + dev_dbg(s->port.dev, "BRR: %u%+d bps using N %u SR %u cks %u\n", bps, + min_err, *brr, *srr + 1, *cks); + return min_err; +} + +static void sci_reset(struct uart_port *port) +{ + const struct plat_sci_reg *reg; + unsigned int status; + struct sci_port *s = to_sci_port(port); + + serial_port_out(port, SCSCR, s->hscif_tot); /* TE=0, RE=0, CKE1=0 */ + + reg = sci_getreg(port, SCFCR); + if (reg->size) + serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); + + sci_clear_SCxSR(port, + SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) & + SCxSR_BREAK_CLEAR(port)); + if (sci_getreg(port, SCLSR)->size) { + status = serial_port_in(port, SCLSR); + status &= ~(SCLSR_TO | SCLSR_ORER); + serial_port_out(port, SCLSR, status); + } + + if (s->rx_trigger > 1) { + if (s->rx_fifo_timeout) { + scif_set_rtrg(port, 1); + timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0); + } else { + if (port->type == PORT_SCIFA || + port->type == PORT_SCIFB) + scif_set_rtrg(port, 1); + else + scif_set_rtrg(port, s->rx_trigger); + } + } +} + +static void sci_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i, bits; + unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0; + unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0; + struct sci_port *s = to_sci_port(port); + const struct plat_sci_reg *reg; + int min_err = INT_MAX, err; + unsigned long max_freq = 0; + int best_clk = -1; + unsigned long flags; + + if ((termios->c_cflag & CSIZE) == CS7) { + smr_val |= SCSMR_CHR; + } else { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } + if (termios->c_cflag & PARENB) + smr_val |= SCSMR_PE; + if (termios->c_cflag & PARODD) + smr_val |= SCSMR_PE | SCSMR_ODD; + if (termios->c_cflag & CSTOPB) + smr_val |= SCSMR_STOP; + + /* + * earlyprintk comes here early on with port->uartclk set to zero. + * the clock framework is not up and running at this point so here + * we assume that 115200 is the maximum baud rate. please note that + * the baud rate is not programmed during earlyprintk - it is assumed + * that the previous boot loader has enabled required clocks and + * setup the baud rate generator hardware for us already. 
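+	 *
+	 * In that case the divider registers (SCBRR, SCDL, SCCKS) are left
+	 * exactly as the boot loader programmed them; the rate computed
+	 * below only feeds uart_update_timeout() and the Rx frame-time
+	 * estimate.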
+ */ + if (!port->uartclk) { + baud = uart_get_baud_rate(port, termios, old, 0, 115200); + goto done; + } + + for (i = 0; i < SCI_NUM_CLKS; i++) + max_freq = max(max_freq, s->clk_rates[i]); + + baud = uart_get_baud_rate(port, termios, old, 0, max_freq / min_sr(s)); + if (!baud) + goto done; + + /* + * There can be multiple sources for the sampling clock. Find the one + * that gives us the smallest deviation from the desired baud rate. + */ + + /* Optional Undivided External Clock */ + if (s->clk_rates[SCI_SCK] && port->type != PORT_SCIFA && + port->type != PORT_SCIFB) { + err = sci_sck_calc(s, baud, &srr1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_SCK; + scr_val = SCSCR_CKE1; + sccks = SCCKS_CKS; + min_err = err; + srr = srr1; + if (!err) + goto done; + } + } + + /* Optional BRG Frequency Divided External Clock */ + if (s->clk_rates[SCI_SCIF_CLK] && sci_getreg(port, SCDL)->size) { + err = sci_brg_calc(s, baud, s->clk_rates[SCI_SCIF_CLK], &dl1, + &srr1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_SCIF_CLK; + scr_val = SCSCR_CKE1; + sccks = 0; + min_err = err; + dl = dl1; + srr = srr1; + if (!err) + goto done; + } + } + + /* Optional BRG Frequency Divided Internal Clock */ + if (s->clk_rates[SCI_BRG_INT] && sci_getreg(port, SCDL)->size) { + err = sci_brg_calc(s, baud, s->clk_rates[SCI_BRG_INT], &dl1, + &srr1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_BRG_INT; + scr_val = SCSCR_CKE1; + sccks = SCCKS_XIN; + min_err = err; + dl = dl1; + srr = srr1; + if (!min_err) + goto done; + } + } + + /* Divided Functional Clock using standard Bit Rate Register */ + err = sci_scbrr_calc(s, baud, &brr1, &srr1, &cks1); + if (abs(err) < abs(min_err)) { + best_clk = SCI_FCK; + scr_val = 0; + min_err = err; + brr = brr1; + srr = srr1; + cks = cks1; + } + +done: + if (best_clk >= 0) + dev_dbg(port->dev, "Using clk %pC for %u%+d bps\n", + s->clks[best_clk], baud, min_err); + + sci_port_enable(s); + + /* + * Program the optional External Baud Rate Generator (BRG) first. + * It controls the mux to select (H)SCK or frequency divided clock. + */ + if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) { + serial_port_out(port, SCDL, dl); + serial_port_out(port, SCCKS, sccks); + } + + spin_lock_irqsave(&port->lock, flags); + + sci_reset(port); + + uart_update_timeout(port, termios->c_cflag, baud); + + /* byte size and parity */ + bits = tty_get_frame_size(termios->c_cflag); + + if (sci_getreg(port, SEMR)->size) + serial_port_out(port, SEMR, 0); + + if (best_clk >= 0) { + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + switch (srr + 1) { + case 5: smr_val |= SCSMR_SRC_5; break; + case 7: smr_val |= SCSMR_SRC_7; break; + case 11: smr_val |= SCSMR_SRC_11; break; + case 13: smr_val |= SCSMR_SRC_13; break; + case 16: smr_val |= SCSMR_SRC_16; break; + case 17: smr_val |= SCSMR_SRC_17; break; + case 19: smr_val |= SCSMR_SRC_19; break; + case 27: smr_val |= SCSMR_SRC_27; break; + } + smr_val |= cks; + serial_port_out(port, SCSCR, scr_val | s->hscif_tot); + serial_port_out(port, SCSMR, smr_val); + serial_port_out(port, SCBRR, brr); + if (sci_getreg(port, HSSRR)->size) { + unsigned int hssrr = srr | HSCIF_SRE; + /* Calculate deviation from intended rate at the + * center of the last stop bit in sampling clocks. 
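+			 * As an illustration (error figure assumed): for an
+			 * 8N1 frame (bits = 10, so last_stop = 19), a sampling
+			 * rate of 16 and a residual error of +1200 bps at
+			 * 115200 bps, the deviation is
+			 * DIV_ROUND_CLOSEST(1200 * 19 * 16, 2 * 115200) = 2
+			 * sampling clocks, so the sampling point is shifted by
+			 * clamp(2 / 2, -8, 7) = 1 via SRHP and SRDE is set;
+			 * smaller deviations leave the default sampling point
+			 * untouched.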
+ */ + int last_stop = bits * 2 - 1; + int deviation = DIV_ROUND_CLOSEST(min_err * last_stop * + (int)(srr + 1), + 2 * (int)baud); + + if (abs(deviation) >= 2) { + /* At least two sampling clocks off at the + * last stop bit; we can increase the error + * margin by shifting the sampling point. + */ + int shift = clamp(deviation / 2, -8, 7); + + hssrr |= (shift << HSCIF_SRHP_SHIFT) & + HSCIF_SRHP_MASK; + hssrr |= HSCIF_SRDE; + } + serial_port_out(port, HSSRR, hssrr); + } + + /* Wait one bit interval */ + udelay((1000000 + (baud - 1)) / baud); + } else { + /* Don't touch the bit rate configuration */ + scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0); + smr_val |= serial_port_in(port, SCSMR) & + (SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS); + serial_port_out(port, SCSCR, scr_val | s->hscif_tot); + serial_port_out(port, SCSMR, smr_val); + } + + sci_init_pins(port, termios->c_cflag); + + port->status &= ~UPSTAT_AUTOCTS; + s->autorts = false; + reg = sci_getreg(port, SCFCR); + if (reg->size) { + unsigned short ctrl = serial_port_in(port, SCFCR); + + if ((port->flags & UPF_HARD_FLOW) && + (termios->c_cflag & CRTSCTS)) { + /* There is no CTS interrupt to restart the hardware */ + port->status |= UPSTAT_AUTOCTS; + /* MCE is enabled when RTS is raised */ + s->autorts = true; + } + + /* + * As we've done a sci_reset() above, ensure we don't + * interfere with the FIFOs while toggling MCE. As the + * reset values could still be set, simply mask them out. + */ + ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST); + + serial_port_out(port, SCFCR, ctrl); + } + if (port->flags & UPF_HARD_FLOW) { + /* Refresh (Auto) RTS */ + sci_set_mctrl(port, port->mctrl); + } + + scr_val |= SCSCR_RE | SCSCR_TE | + (s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)); + serial_port_out(port, SCSCR, scr_val | s->hscif_tot); + if ((srr + 1 == 5) && + (port->type == PORT_SCIFA || port->type == PORT_SCIFB)) { + /* + * In asynchronous mode, when the sampling rate is 1/5, first + * received data may become invalid on some SCIFA and SCIFB. + * To avoid this problem wait more than 1 serial data time (1 + * bit time x serial data number) after setting SCSCR.RE = 1. + */ + udelay(DIV_ROUND_UP(10 * 1000000, baud)); + } + + /* Calculate delay for 2 DMA buffers (4 FIFO). */ + s->rx_frame = (10000 * bits) / (baud / 100); +#ifdef CONFIG_SERIAL_SH_SCI_DMA + s->rx_timeout = s->buf_len_rx * 2 * s->rx_frame; +#endif + + if ((termios->c_cflag & CREAD) != 0) + sci_start_rx(port); + + spin_unlock_irqrestore(&port->lock, flags); + + sci_port_disable(s); + + if (UART_ENABLE_MS(port, termios->c_cflag)) + sci_enable_ms(port); +} + +static void sci_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct sci_port *sci_port = to_sci_port(port); + + switch (state) { + case UART_PM_STATE_OFF: + sci_port_disable(sci_port); + break; + default: + sci_port_enable(sci_port); + break; + } +} + +static const char *sci_type(struct uart_port *port) +{ + switch (port->type) { + case PORT_IRDA: + return "irda"; + case PORT_SCI: + return "sci"; + case PORT_SCIF: + return "scif"; + case PORT_SCIFA: + return "scifa"; + case PORT_SCIFB: + return "scifb"; + case PORT_HSCIF: + return "hscif"; + } + + return NULL; +} + +static int sci_remap_port(struct uart_port *port) +{ + struct sci_port *sport = to_sci_port(port); + + /* + * Nothing to do if there's already an established membase. 
+ */ + if (port->membase) + return 0; + + if (port->dev->of_node || (port->flags & UPF_IOREMAP)) { + port->membase = ioremap(port->mapbase, sport->reg_size); + if (unlikely(!port->membase)) { + dev_err(port->dev, "can't remap port#%d\n", port->line); + return -ENXIO; + } + } else { + /* + * For the simple (and majority of) cases where we don't + * need to do any remapping, just cast the cookie + * directly. + */ + port->membase = (void __iomem *)(uintptr_t)port->mapbase; + } + + return 0; +} + +static void sci_release_port(struct uart_port *port) +{ + struct sci_port *sport = to_sci_port(port); + + if (port->dev->of_node || (port->flags & UPF_IOREMAP)) { + iounmap(port->membase); + port->membase = NULL; + } + + release_mem_region(port->mapbase, sport->reg_size); +} + +static int sci_request_port(struct uart_port *port) +{ + struct resource *res; + struct sci_port *sport = to_sci_port(port); + int ret; + + res = request_mem_region(port->mapbase, sport->reg_size, + dev_name(port->dev)); + if (unlikely(res == NULL)) { + dev_err(port->dev, "request_mem_region failed."); + return -EBUSY; + } + + ret = sci_remap_port(port); + if (unlikely(ret != 0)) { + release_resource(res); + return ret; + } + + return 0; +} + +static void sci_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + struct sci_port *sport = to_sci_port(port); + + port->type = sport->cfg->type; + sci_request_port(port); + } +} + +static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (ser->baud_base < 2400) + /* No paper tape reader for Mitch.. */ + return -EINVAL; + + return 0; +} + +static const struct uart_ops sci_uart_ops = { + .tx_empty = sci_tx_empty, + .set_mctrl = sci_set_mctrl, + .get_mctrl = sci_get_mctrl, + .start_tx = sci_start_tx, + .stop_tx = sci_stop_tx, + .stop_rx = sci_stop_rx, + .enable_ms = sci_enable_ms, + .break_ctl = sci_break_ctl, + .startup = sci_startup, + .shutdown = sci_shutdown, + .flush_buffer = sci_flush_buffer, + .set_termios = sci_set_termios, + .pm = sci_pm, + .type = sci_type, + .release_port = sci_release_port, + .request_port = sci_request_port, + .config_port = sci_config_port, + .verify_port = sci_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = sci_poll_get_char, + .poll_put_char = sci_poll_put_char, +#endif +}; + +static int sci_init_clocks(struct sci_port *sci_port, struct device *dev) +{ + const char *clk_names[] = { + [SCI_FCK] = "fck", + [SCI_SCK] = "sck", + [SCI_BRG_INT] = "brg_int", + [SCI_SCIF_CLK] = "scif_clk", + }; + struct clk *clk; + unsigned int i; + + if (sci_port->cfg->type == PORT_HSCIF) + clk_names[SCI_SCK] = "hsck"; + + for (i = 0; i < SCI_NUM_CLKS; i++) { + clk = devm_clk_get_optional(dev, clk_names[i]); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (!clk && i == SCI_FCK) { + /* + * Not all SH platforms declare a clock lookup entry + * for SCI devices, in which case we need to get the + * global "peripheral_clk" clock. 
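+			 * Only this functional clock is mandatory: if neither
+			 * "fck" nor "peripheral_clk" can be obtained the probe
+			 * fails, while the optional "sck" ("hsck" on HSCIF),
+			 * "brg_int" and "scif_clk" inputs may simply be absent.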
+ */ + clk = devm_clk_get(dev, "peripheral_clk"); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), + "failed to get %s\n", + clk_names[i]); + } + + if (!clk) + dev_dbg(dev, "failed to get %s\n", clk_names[i]); + else + dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i], + clk, clk_get_rate(clk)); + sci_port->clks[i] = clk; + } + return 0; +} + +static const struct sci_port_params * +sci_probe_regmap(const struct plat_sci_port *cfg) +{ + unsigned int regtype; + + if (cfg->regtype != SCIx_PROBE_REGTYPE) + return &sci_port_params[cfg->regtype]; + + switch (cfg->type) { + case PORT_SCI: + regtype = SCIx_SCI_REGTYPE; + break; + case PORT_IRDA: + regtype = SCIx_IRDA_REGTYPE; + break; + case PORT_SCIFA: + regtype = SCIx_SCIFA_REGTYPE; + break; + case PORT_SCIFB: + regtype = SCIx_SCIFB_REGTYPE; + break; + case PORT_SCIF: + /* + * The SH-4 is a bit of a misnomer here, although that's + * where this particular port layout originated. This + * configuration (or some slight variation thereof) + * remains the dominant model for all SCIFs. + */ + regtype = SCIx_SH4_SCIF_REGTYPE; + break; + case PORT_HSCIF: + regtype = SCIx_HSCIF_REGTYPE; + break; + default: + pr_err("Can't probe register map for given port\n"); + return NULL; + } + + return &sci_port_params[regtype]; +} + +static int sci_init_single(struct platform_device *dev, + struct sci_port *sci_port, unsigned int index, + const struct plat_sci_port *p, bool early) +{ + struct uart_port *port = &sci_port->port; + const struct resource *res; + unsigned int i; + int ret; + + sci_port->cfg = p; + + port->ops = &sci_uart_ops; + port->iotype = UPIO_MEM; + port->line = index; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SH_SCI_CONSOLE); + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (res == NULL) + return -ENOMEM; + + port->mapbase = res->start; + sci_port->reg_size = resource_size(res); + + for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) { + if (i) + sci_port->irqs[i] = platform_get_irq_optional(dev, i); + else + sci_port->irqs[i] = platform_get_irq(dev, i); + } + + /* + * The fourth interrupt on SCI port is transmit end interrupt, so + * shuffle the interrupts. + */ + if (p->type == PORT_SCI) + swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]); + + /* The SCI generates several interrupts. They can be muxed together or + * connected to different interrupt lines. In the muxed case only one + * interrupt resource is specified as there is only one interrupt ID. + * In the non-muxed case, up to 6 interrupt signals might be generated + * from the SCI, however those signals might have their own individual + * interrupt ID numbers, or muxed together with another interrupt. 
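+	 * When only the first interrupt resource exists, the code below
+	 * copies it into every remaining slot, so the whole port is serviced
+	 * through the single muxed handler (sci_mpxed_interrupt) registered
+	 * by sci_request_irq().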
+ */ + if (sci_port->irqs[0] < 0) + return -ENXIO; + + if (sci_port->irqs[1] < 0) + for (i = 1; i < ARRAY_SIZE(sci_port->irqs); i++) + sci_port->irqs[i] = sci_port->irqs[0]; + + sci_port->params = sci_probe_regmap(p); + if (unlikely(sci_port->params == NULL)) + return -EINVAL; + + switch (p->type) { + case PORT_SCIFB: + sci_port->rx_trigger = 48; + break; + case PORT_HSCIF: + sci_port->rx_trigger = 64; + break; + case PORT_SCIFA: + sci_port->rx_trigger = 32; + break; + case PORT_SCIF: + if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) + /* RX triggering not implemented for this IP */ + sci_port->rx_trigger = 1; + else + sci_port->rx_trigger = 8; + break; + default: + sci_port->rx_trigger = 1; + break; + } + + sci_port->rx_fifo_timeout = 0; + sci_port->hscif_tot = 0; + + /* SCIFA on sh7723 and sh7724 need a custom sampling rate that doesn't + * match the SoC datasheet, this should be investigated. Let platform + * data override the sampling rate for now. + */ + sci_port->sampling_rate_mask = p->sampling_rate + ? SCI_SR(p->sampling_rate) + : sci_port->params->sampling_rate_mask; + + if (!early) { + ret = sci_init_clocks(sci_port, &dev->dev); + if (ret < 0) + return ret; + + port->dev = &dev->dev; + + pm_runtime_enable(&dev->dev); + } + + port->type = p->type; + port->flags = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags; + port->fifosize = sci_port->params->fifosize; + + if (port->type == PORT_SCI && !dev->dev.of_node) { + if (sci_port->reg_size >= 0x20) + port->regshift = 2; + else + port->regshift = 1; + } + + /* + * The UART port needs an IRQ value, so we peg this to the RX IRQ + * for the multi-IRQ ports, which is where we are primarily + * concerned with the shutdown path synchronization. + * + * For the muxed case there's nothing more to do. + */ + port->irq = sci_port->irqs[SCIx_RXI_IRQ]; + port->irqflags = 0; + + port->serial_in = sci_serial_in; + port->serial_out = sci_serial_out; + + return 0; +} + +static void sci_cleanup_single(struct sci_port *port) +{ + pm_runtime_disable(port->port.dev); +} + +#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \ + defined(CONFIG_SERIAL_SH_SCI_EARLYCON) +static void serial_console_putchar(struct uart_port *port, unsigned char ch) +{ + sci_poll_put_char(port, ch); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... 
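+ * If an oops is in progress the port lock is only taken
+ * opportunistically with spin_trylock_irqsave(), and output while a
+ * sysrq character is being handled skips the lock entirely, so the
+ * final messages can still reach the console when the lock holder has
+ * crashed.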
+ */ +static void serial_console_write(struct console *co, const char *s, + unsigned count) +{ + struct sci_port *sci_port = &sci_ports[co->index]; + struct uart_port *port = &sci_port->port; + unsigned short bits, ctrl, ctrl_temp; + unsigned long flags; + int locked = 1; + + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + /* first save SCSCR then disable interrupts, keep clock source */ + ctrl = serial_port_in(port, SCSCR); + ctrl_temp = SCSCR_RE | SCSCR_TE | + (sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) | + (ctrl & (SCSCR_CKE1 | SCSCR_CKE0)); + serial_port_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot); + + uart_console_write(port, s, count, serial_console_putchar); + + /* wait until fifo is empty and last bit has been transmitted */ + bits = SCxSR_TDxE(port) | SCxSR_TEND(port); + while ((serial_port_in(port, SCxSR) & bits) != bits) + cpu_relax(); + + /* restore the SCSCR */ + serial_port_out(port, SCSCR, ctrl); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static int serial_console_setup(struct console *co, char *options) +{ + struct sci_port *sci_port; + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + /* + * Refuse to handle any bogus ports. + */ + if (co->index < 0 || co->index >= SCI_NPORTS) + return -ENODEV; + + sci_port = &sci_ports[co->index]; + port = &sci_port->port; + + /* + * Refuse to handle uninitialized ports. + */ + if (!port->ops) + return -ENODEV; + + ret = sci_remap_port(port); + if (unlikely(ret != 0)) + return ret; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console serial_console = { + .name = "ttySC", + .device = uart_console_device, + .write = serial_console_write, + .setup = serial_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sci_uart_driver, +}; + +#ifdef CONFIG_SUPERH +static struct console early_serial_console = { + .name = "early_ttySC", + .write = serial_console_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +static char early_serial_buf[32]; + +static int sci_probe_earlyprintk(struct platform_device *pdev) +{ + const struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev); + + if (early_serial_console.data) + return -EEXIST; + + early_serial_console.index = pdev->id; + + sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true); + + serial_console_setup(&early_serial_console, early_serial_buf); + + if (!strstr(early_serial_buf, "keep")) + early_serial_console.flags |= CON_BOOT; + + register_console(&early_serial_console); + return 0; +} +#endif + +#define SCI_CONSOLE (&serial_console) + +#else +static inline int sci_probe_earlyprintk(struct platform_device *pdev) +{ + return -EINVAL; +} + +#define SCI_CONSOLE NULL + +#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE || CONFIG_SERIAL_SH_SCI_EARLYCON */ + +static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized"; + +static DEFINE_MUTEX(sci_uart_registration_lock); +static struct uart_driver sci_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "sci", + .dev_name = "ttySC", + .major = SCI_MAJOR, + .minor = SCI_MINOR_START, + .nr = SCI_NPORTS, + .cons = SCI_CONSOLE, +}; + +static int sci_remove(struct platform_device *dev) +{ + struct sci_port *port = platform_get_drvdata(dev); + unsigned int type = port->port.type; /* 
uart_remove_... clears it */ + + sci_ports_in_use &= ~BIT(port->port.line); + uart_remove_one_port(&sci_uart_driver, &port->port); + + sci_cleanup_single(port); + + if (port->port.fifosize > 1) + device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger); + if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) + device_remove_file(&dev->dev, &dev_attr_rx_fifo_timeout); + + return 0; +} + + +#define SCI_OF_DATA(type, regtype) (void *)((type) << 16 | (regtype)) +#define SCI_OF_TYPE(data) ((unsigned long)(data) >> 16) +#define SCI_OF_REGTYPE(data) ((unsigned long)(data) & 0xffff) + +static const struct of_device_id of_sci_match[] = { + /* SoC-specific types */ + { + .compatible = "renesas,scif-r7s72100", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE), + }, + { + .compatible = "renesas,scif-r7s9210", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE), + }, + { + .compatible = "renesas,scif-r9a07g044", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE), + }, + /* Family-specific types */ + { + .compatible = "renesas,rcar-gen1-scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE), + }, { + .compatible = "renesas,rcar-gen2-scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE), + }, { + .compatible = "renesas,rcar-gen3-scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE), + }, { + .compatible = "renesas,rcar-gen4-scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE), + }, + /* Generic types */ + { + .compatible = "renesas,scif", + .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_REGTYPE), + }, { + .compatible = "renesas,scifa", + .data = SCI_OF_DATA(PORT_SCIFA, SCIx_SCIFA_REGTYPE), + }, { + .compatible = "renesas,scifb", + .data = SCI_OF_DATA(PORT_SCIFB, SCIx_SCIFB_REGTYPE), + }, { + .compatible = "renesas,hscif", + .data = SCI_OF_DATA(PORT_HSCIF, SCIx_HSCIF_REGTYPE), + }, { + .compatible = "renesas,sci", + .data = SCI_OF_DATA(PORT_SCI, SCIx_SCI_REGTYPE), + }, { + /* Terminator */ + }, +}; +MODULE_DEVICE_TABLE(of, of_sci_match); + +static void sci_reset_control_assert(void *data) +{ + reset_control_assert(data); +} + +static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, + unsigned int *dev_id) +{ + struct device_node *np = pdev->dev.of_node; + struct reset_control *rstc; + struct plat_sci_port *p; + struct sci_port *sp; + const void *data; + int id, ret; + + if (!IS_ENABLED(CONFIG_OF) || !np) + return ERR_PTR(-EINVAL); + + data = of_device_get_match_data(&pdev->dev); + + rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(rstc)) + return ERR_PTR(dev_err_probe(&pdev->dev, PTR_ERR(rstc), + "failed to get reset ctrl\n")); + + ret = reset_control_deassert(rstc); + if (ret) { + dev_err(&pdev->dev, "failed to deassert reset %d\n", ret); + return ERR_PTR(ret); + } + + ret = devm_add_action_or_reset(&pdev->dev, sci_reset_control_assert, rstc); + if (ret) { + dev_err(&pdev->dev, "failed to register assert devm action, %d\n", + ret); + return ERR_PTR(ret); + } + + p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + /* Get the line number from the aliases node. 
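+	 * If no "serialN" alias is present, the first index not yet marked
+	 * in sci_ports_in_use is used instead (ffz() below).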
*/ + id = of_alias_get_id(np, "serial"); + if (id < 0 && ~sci_ports_in_use) + id = ffz(sci_ports_in_use); + if (id < 0) { + dev_err(&pdev->dev, "failed to get alias id (%d)\n", id); + return ERR_PTR(-EINVAL); + } + if (id >= ARRAY_SIZE(sci_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", id); + return ERR_PTR(-EINVAL); + } + + sp = &sci_ports[id]; + *dev_id = id; + + p->type = SCI_OF_TYPE(data); + p->regtype = SCI_OF_REGTYPE(data); + + sp->has_rtscts = of_property_read_bool(np, "uart-has-rtscts"); + + return p; +} + +static int sci_probe_single(struct platform_device *dev, + unsigned int index, + struct plat_sci_port *p, + struct sci_port *sciport) +{ + int ret; + + /* Sanity check */ + if (unlikely(index >= SCI_NPORTS)) { + dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n", + index+1, SCI_NPORTS); + dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); + return -EINVAL; + } + BUILD_BUG_ON(SCI_NPORTS > sizeof(sci_ports_in_use) * 8); + if (sci_ports_in_use & BIT(index)) + return -EBUSY; + + mutex_lock(&sci_uart_registration_lock); + if (!sci_uart_driver.state) { + ret = uart_register_driver(&sci_uart_driver); + if (ret) { + mutex_unlock(&sci_uart_registration_lock); + return ret; + } + } + mutex_unlock(&sci_uart_registration_lock); + + ret = sci_init_single(dev, sciport, index, p, false); + if (ret) + return ret; + + sciport->gpios = mctrl_gpio_init(&sciport->port, 0); + if (IS_ERR(sciport->gpios)) + return PTR_ERR(sciport->gpios); + + if (sciport->has_rtscts) { + if (mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_CTS) || + mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_RTS)) { + dev_err(&dev->dev, "Conflicting RTS/CTS config\n"); + return -EINVAL; + } + sciport->port.flags |= UPF_HARD_FLOW; + } + + ret = uart_add_one_port(&sci_uart_driver, &sciport->port); + if (ret) { + sci_cleanup_single(sciport); + return ret; + } + + return 0; +} + +static int sci_probe(struct platform_device *dev) +{ + struct plat_sci_port *p; + struct sci_port *sp; + unsigned int dev_id; + int ret; + + /* + * If we've come here via earlyprintk initialization, head off to + * the special early probe. We don't have sufficient device state + * to make it beyond this yet. 
+ */ +#ifdef CONFIG_SUPERH + if (is_sh_early_platform_device(dev)) + return sci_probe_earlyprintk(dev); +#endif + + if (dev->dev.of_node) { + p = sci_parse_dt(dev, &dev_id); + if (IS_ERR(p)) + return PTR_ERR(p); + } else { + p = dev->dev.platform_data; + if (p == NULL) { + dev_err(&dev->dev, "no platform data supplied\n"); + return -EINVAL; + } + + dev_id = dev->id; + } + + sp = &sci_ports[dev_id]; + platform_set_drvdata(dev, sp); + + ret = sci_probe_single(dev, dev_id, p, sp); + if (ret) + return ret; + + if (sp->port.fifosize > 1) { + ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_trigger); + if (ret) + return ret; + } + if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB || + sp->port.type == PORT_HSCIF) { + ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_timeout); + if (ret) { + if (sp->port.fifosize > 1) { + device_remove_file(&dev->dev, + &dev_attr_rx_fifo_trigger); + } + return ret; + } + } + +#ifdef CONFIG_SH_STANDARD_BIOS + sh_bios_gdb_detach(); +#endif + + sci_ports_in_use |= BIT(dev_id); + return 0; +} + +static __maybe_unused int sci_suspend(struct device *dev) +{ + struct sci_port *sport = dev_get_drvdata(dev); + + if (sport) + uart_suspend_port(&sci_uart_driver, &sport->port); + + return 0; +} + +static __maybe_unused int sci_resume(struct device *dev) +{ + struct sci_port *sport = dev_get_drvdata(dev); + + if (sport) + uart_resume_port(&sci_uart_driver, &sport->port); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume); + +static struct platform_driver sci_driver = { + .probe = sci_probe, + .remove = sci_remove, + .driver = { + .name = "sh-sci", + .pm = &sci_dev_pm_ops, + .of_match_table = of_match_ptr(of_sci_match), + }, +}; + +static int __init sci_init(void) +{ + pr_info("%s\n", banner); + + return platform_driver_register(&sci_driver); +} + +static void __exit sci_exit(void) +{ + platform_driver_unregister(&sci_driver); + + if (sci_uart_driver.state) + uart_unregister_driver(&sci_uart_driver); +} + +#if defined(CONFIG_SUPERH) && defined(CONFIG_SERIAL_SH_SCI_CONSOLE) +sh_early_platform_init_buffer("earlyprintk", &sci_driver, + early_serial_buf, ARRAY_SIZE(early_serial_buf)); +#endif +#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON +static struct plat_sci_port port_cfg __initdata; + +static int __init early_console_setup(struct earlycon_device *device, + int type) +{ + if (!device->port.membase) + return -ENODEV; + + device->port.serial_in = sci_serial_in; + device->port.serial_out = sci_serial_out; + device->port.type = type; + memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port)); + port_cfg.type = type; + sci_ports[0].cfg = &port_cfg; + sci_ports[0].params = sci_probe_regmap(&port_cfg); + port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR); + sci_serial_out(&sci_ports[0].port, SCSCR, + SCSCR_RE | SCSCR_TE | port_cfg.scscr); + + device->con->write = serial_console_write; + return 0; +} +static int __init sci_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + return early_console_setup(device, PORT_SCI); +} +static int __init scif_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + return early_console_setup(device, PORT_SCIF); +} +static int __init rzscifa_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + port_cfg.regtype = SCIx_RZ_SCIFA_REGTYPE; + return early_console_setup(device, PORT_SCIF); +} + +static int __init scifa_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + return 
early_console_setup(device, PORT_SCIFA); +} +static int __init scifb_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + return early_console_setup(device, PORT_SCIFB); +} +static int __init hscif_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + return early_console_setup(device, PORT_HSCIF); +} + +OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup); +OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup); +OF_EARLYCON_DECLARE(scif, "renesas,scif-r7s9210", rzscifa_early_console_setup); +OF_EARLYCON_DECLARE(scif, "renesas,scif-r9a07g044", rzscifa_early_console_setup); +OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup); +OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup); +OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup); +#endif /* CONFIG_SERIAL_SH_SCI_EARLYCON */ + +module_init(sci_init); +module_exit(sci_exit); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:sh-sci"); +MODULE_AUTHOR("Paul Mundt"); +MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver"); diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h new file mode 100644 index 000000000..c0ae78632 --- /dev/null +++ b/drivers/tty/serial/sh-sci.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include + +#define SCI_MAJOR 204 +#define SCI_MINOR_START 8 + + +/* + * SCI register subset common for all port types. + * Not all registers will exist on all parts. + */ +enum { + SCSMR, /* Serial Mode Register */ + SCBRR, /* Bit Rate Register */ + SCSCR, /* Serial Control Register */ + SCxSR, /* Serial Status Register */ + SCFCR, /* FIFO Control Register */ + SCFDR, /* FIFO Data Count Register */ + SCxTDR, /* Transmit (FIFO) Data Register */ + SCxRDR, /* Receive (FIFO) Data Register */ + SCLSR, /* Line Status Register */ + SCTFDR, /* Transmit FIFO Data Count Register */ + SCRFDR, /* Receive FIFO Data Count Register */ + SCSPTR, /* Serial Port Register */ + HSSRR, /* Sampling Rate Register */ + SCPCR, /* Serial Port Control Register */ + SCPDR, /* Serial Port Data Register */ + SCDL, /* BRG Frequency Division Register */ + SCCKS, /* BRG Clock Select Register */ + HSRTRGR, /* Rx FIFO Data Count Trigger Register */ + HSTTRGR, /* Tx FIFO Data Count Trigger Register */ + SEMR, /* Serial extended mode register */ + + SCIx_NR_REGS, +}; + + +/* SCSMR (Serial Mode Register) */ +#define SCSMR_C_A BIT(7) /* Communication Mode */ +#define SCSMR_CSYNC BIT(7) /* - Clocked synchronous mode */ +#define SCSMR_ASYNC 0 /* - Asynchronous mode */ +#define SCSMR_CHR BIT(6) /* 7-bit Character Length */ +#define SCSMR_PE BIT(5) /* Parity Enable */ +#define SCSMR_ODD BIT(4) /* Odd Parity */ +#define SCSMR_STOP BIT(3) /* Stop Bit Length */ +#define SCSMR_CKS 0x0003 /* Clock Select */ + +/* Serial Mode Register, SCIFA/SCIFB only bits */ +#define SCSMR_CKEDG BIT(12) /* Transmit/Receive Clock Edge Select */ +#define SCSMR_SRC_MASK 0x0700 /* Sampling Control */ +#define SCSMR_SRC_16 0x0000 /* Sampling rate 1/16 */ +#define SCSMR_SRC_5 0x0100 /* Sampling rate 1/5 */ +#define SCSMR_SRC_7 0x0200 /* Sampling rate 1/7 */ +#define SCSMR_SRC_11 0x0300 /* Sampling rate 1/11 */ +#define SCSMR_SRC_13 0x0400 /* Sampling rate 1/13 */ +#define SCSMR_SRC_17 0x0500 /* Sampling rate 1/17 */ +#define SCSMR_SRC_19 0x0600 /* Sampling rate 1/19 */ +#define SCSMR_SRC_27 0x0700 /* Sampling rate 1/27 */ + +/* Serial Control Register, SCIFA/SCIFB only bits */ +#define SCSCR_TDRQE BIT(15) /* Tx Data Transfer 
Request Enable */ +#define SCSCR_RDRQE BIT(14) /* Rx Data Transfer Request Enable */ + +/* Serial Control Register, HSCIF-only bits */ +#define HSSCR_TOT_SHIFT 14 + +/* SCxSR (Serial Status Register) on SCI */ +#define SCI_TDRE BIT(7) /* Transmit Data Register Empty */ +#define SCI_RDRF BIT(6) /* Receive Data Register Full */ +#define SCI_ORER BIT(5) /* Overrun Error */ +#define SCI_FER BIT(4) /* Framing Error */ +#define SCI_PER BIT(3) /* Parity Error */ +#define SCI_TEND BIT(2) /* Transmit End */ +#define SCI_RESERVED 0x03 /* All reserved bits */ + +#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER) + +#define SCI_RDxF_CLEAR (u32)(~(SCI_RESERVED | SCI_RDRF)) +#define SCI_ERROR_CLEAR (u32)(~(SCI_RESERVED | SCI_PER | SCI_FER | SCI_ORER)) +#define SCI_TDxE_CLEAR (u32)(~(SCI_RESERVED | SCI_TEND | SCI_TDRE)) +#define SCI_BREAK_CLEAR (u32)(~(SCI_RESERVED | SCI_PER | SCI_FER | SCI_ORER)) + +/* SCxSR (Serial Status Register) on SCIF, SCIFA, SCIFB, HSCIF */ +#define SCIF_ER BIT(7) /* Receive Error */ +#define SCIF_TEND BIT(6) /* Transmission End */ +#define SCIF_TDFE BIT(5) /* Transmit FIFO Data Empty */ +#define SCIF_BRK BIT(4) /* Break Detect */ +#define SCIF_FER BIT(3) /* Framing Error */ +#define SCIF_PER BIT(2) /* Parity Error */ +#define SCIF_RDF BIT(1) /* Receive FIFO Data Full */ +#define SCIF_DR BIT(0) /* Receive Data Ready */ +/* SCIF only (optional) */ +#define SCIF_PERC 0xf000 /* Number of Parity Errors */ +#define SCIF_FERC 0x0f00 /* Number of Framing Errors */ +/*SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 only */ +#define SCIFA_ORER BIT(9) /* Overrun Error */ + +#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_BRK | SCIF_ER) + +#define SCIF_RDxF_CLEAR (u32)(~(SCIF_DR | SCIF_RDF)) +#define SCIF_ERROR_CLEAR (u32)(~(SCIF_PER | SCIF_FER | SCIF_ER)) +#define SCIF_TDxE_CLEAR (u32)(~(SCIF_TDFE)) +#define SCIF_BREAK_CLEAR (u32)(~(SCIF_PER | SCIF_FER | SCIF_BRK)) + +/* SCFCR (FIFO Control Register) */ +#define SCFCR_RTRG1 BIT(7) /* Receive FIFO Data Count Trigger */ +#define SCFCR_RTRG0 BIT(6) +#define SCFCR_TTRG1 BIT(5) /* Transmit FIFO Data Count Trigger */ +#define SCFCR_TTRG0 BIT(4) +#define SCFCR_MCE BIT(3) /* Modem Control Enable */ +#define SCFCR_TFRST BIT(2) /* Transmit FIFO Data Register Reset */ +#define SCFCR_RFRST BIT(1) /* Receive FIFO Data Register Reset */ +#define SCFCR_LOOP BIT(0) /* Loopback Test */ + +/* SCLSR (Line Status Register) on (H)SCIF */ +#define SCLSR_TO BIT(2) /* Timeout */ +#define SCLSR_ORER BIT(0) /* Overrun Error */ + +/* SCSPTR (Serial Port Register), optional */ +#define SCSPTR_RTSIO BIT(7) /* Serial Port RTS# Pin Input/Output */ +#define SCSPTR_RTSDT BIT(6) /* Serial Port RTS# Pin Data */ +#define SCSPTR_CTSIO BIT(5) /* Serial Port CTS# Pin Input/Output */ +#define SCSPTR_CTSDT BIT(4) /* Serial Port CTS# Pin Data */ +#define SCSPTR_SCKIO BIT(3) /* Serial Port Clock Pin Input/Output */ +#define SCSPTR_SCKDT BIT(2) /* Serial Port Clock Pin Data */ +#define SCSPTR_SPB2IO BIT(1) /* Serial Port Break Input/Output */ +#define SCSPTR_SPB2DT BIT(0) /* Serial Port Break Data */ + +/* HSSRR HSCIF */ +#define HSCIF_SRE BIT(15) /* Sampling Rate Register Enable */ +#define HSCIF_SRDE BIT(14) /* Sampling Point Register Enable */ + +#define HSCIF_SRHP_SHIFT 8 +#define HSCIF_SRHP_MASK 0x0f00 + +/* SCPCR (Serial Port Control Register), SCIFA/SCIFB only */ +#define SCPCR_RTSC BIT(4) /* Serial Port RTS# Pin / Output Pin */ +#define SCPCR_CTSC BIT(3) /* Serial Port CTS# Pin / Input Pin */ +#define SCPCR_SCKC BIT(2) /* Serial Port SCK Pin / Output Pin */ +#define 
SCPCR_RXDC BIT(1) /* Serial Port RXD Pin / Input Pin */ +#define SCPCR_TXDC BIT(0) /* Serial Port TXD Pin / Output Pin */ + +/* SCPDR (Serial Port Data Register), SCIFA/SCIFB only */ +#define SCPDR_RTSD BIT(4) /* Serial Port RTS# Output Pin Data */ +#define SCPDR_CTSD BIT(3) /* Serial Port CTS# Input Pin Data */ +#define SCPDR_SCKD BIT(2) /* Serial Port SCK Output Pin Data */ +#define SCPDR_RXDD BIT(1) /* Serial Port RXD Input Pin Data */ +#define SCPDR_TXDD BIT(0) /* Serial Port TXD Output Pin Data */ + +/* + * BRG Clock Select Register (Some SCIF and HSCIF) + * The Baud Rate Generator for external clock can provide a clock source for + * the sampling clock. It outputs either its frequency divided clock, or the + * (undivided) (H)SCK external clock. + */ +#define SCCKS_CKS BIT(15) /* Select (H)SCK (1) or divided SC_CLK (0) */ +#define SCCKS_XIN BIT(14) /* SC_CLK uses bus clock (1) or SCIF_CLK (0) */ + +#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) +#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_DR | SCIF_RDF) +#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE) +#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) +#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) +#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) + +#define SCxSR_ERRORS(port) (to_sci_port(port)->params->error_mask) + +#define SCxSR_RDxF_CLEAR(port) \ + (((port)->type == PORT_SCI) ? SCI_RDxF_CLEAR : SCIF_RDxF_CLEAR) +#define SCxSR_ERROR_CLEAR(port) \ + (to_sci_port(port)->params->error_clear) +#define SCxSR_TDxE_CLEAR(port) \ + (((port)->type == PORT_SCI) ? SCI_TDxE_CLEAR : SCIF_TDxE_CLEAR) +#define SCxSR_BREAK_CLEAR(port) \ + (((port)->type == PORT_SCI) ? SCI_BREAK_CLEAR : SCIF_BREAK_CLEAR) diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c new file mode 100644 index 000000000..2affd1351 --- /dev/null +++ b/drivers/tty/serial/sifive.c @@ -0,0 +1,1093 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SiFive UART driver + * Copyright (C) 2018 Paul Walmsley + * Copyright (C) 2018-2019 SiFive + * + * Based partially on: + * - drivers/tty/serial/pxa.c + * - drivers/tty/serial/amba-pl011.c + * - drivers/tty/serial/uartlite.c + * - drivers/tty/serial/omap-serial.c + * - drivers/pwm/pwm-sifive.c + * + * See the following sources for further documentation: + * - Chapter 19 "Universal Asynchronous Receiver/Transmitter (UART)" of + * SiFive FE310-G000 v2p3 + * - The tree/master/src/main/scala/devices/uart directory of + * https://github.com/sifive/sifive-blocks/ + * + * The SiFive UART design is not 8250-compatible. The following common + * features are not supported: + * - Word lengths other than 8 bits + * - Break handling + * - Parity + * - Flow control + * - Modem signals (DSR, RI, etc.) + * On the other hand, the design is free from the baggage of the 8250 + * programming model. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Register offsets + */ + +/* TXDATA */ +#define SIFIVE_SERIAL_TXDATA_OFFS 0x0 +#define SIFIVE_SERIAL_TXDATA_FULL_SHIFT 31 +#define SIFIVE_SERIAL_TXDATA_FULL_MASK (1 << SIFIVE_SERIAL_TXDATA_FULL_SHIFT) +#define SIFIVE_SERIAL_TXDATA_DATA_SHIFT 0 +#define SIFIVE_SERIAL_TXDATA_DATA_MASK (0xff << SIFIVE_SERIAL_TXDATA_DATA_SHIFT) + +/* RXDATA */ +#define SIFIVE_SERIAL_RXDATA_OFFS 0x4 +#define SIFIVE_SERIAL_RXDATA_EMPTY_SHIFT 31 +#define SIFIVE_SERIAL_RXDATA_EMPTY_MASK (1 << SIFIVE_SERIAL_RXDATA_EMPTY_SHIFT) +#define SIFIVE_SERIAL_RXDATA_DATA_SHIFT 0 +#define SIFIVE_SERIAL_RXDATA_DATA_MASK (0xff << SIFIVE_SERIAL_RXDATA_DATA_SHIFT) + +/* TXCTRL */ +#define SIFIVE_SERIAL_TXCTRL_OFFS 0x8 +#define SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT 16 +#define SIFIVE_SERIAL_TXCTRL_TXCNT_MASK (0x7 << SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT) +#define SIFIVE_SERIAL_TXCTRL_NSTOP_SHIFT 1 +#define SIFIVE_SERIAL_TXCTRL_NSTOP_MASK (1 << SIFIVE_SERIAL_TXCTRL_NSTOP_SHIFT) +#define SIFIVE_SERIAL_TXCTRL_TXEN_SHIFT 0 +#define SIFIVE_SERIAL_TXCTRL_TXEN_MASK (1 << SIFIVE_SERIAL_TXCTRL_TXEN_SHIFT) + +/* RXCTRL */ +#define SIFIVE_SERIAL_RXCTRL_OFFS 0xC +#define SIFIVE_SERIAL_RXCTRL_RXCNT_SHIFT 16 +#define SIFIVE_SERIAL_RXCTRL_RXCNT_MASK (0x7 << SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT) +#define SIFIVE_SERIAL_RXCTRL_RXEN_SHIFT 0 +#define SIFIVE_SERIAL_RXCTRL_RXEN_MASK (1 << SIFIVE_SERIAL_RXCTRL_RXEN_SHIFT) + +/* IE */ +#define SIFIVE_SERIAL_IE_OFFS 0x10 +#define SIFIVE_SERIAL_IE_RXWM_SHIFT 1 +#define SIFIVE_SERIAL_IE_RXWM_MASK (1 << SIFIVE_SERIAL_IE_RXWM_SHIFT) +#define SIFIVE_SERIAL_IE_TXWM_SHIFT 0 +#define SIFIVE_SERIAL_IE_TXWM_MASK (1 << SIFIVE_SERIAL_IE_TXWM_SHIFT) + +/* IP */ +#define SIFIVE_SERIAL_IP_OFFS 0x14 +#define SIFIVE_SERIAL_IP_RXWM_SHIFT 1 +#define SIFIVE_SERIAL_IP_RXWM_MASK (1 << SIFIVE_SERIAL_IP_RXWM_SHIFT) +#define SIFIVE_SERIAL_IP_TXWM_SHIFT 0 +#define SIFIVE_SERIAL_IP_TXWM_MASK (1 << SIFIVE_SERIAL_IP_TXWM_SHIFT) + +/* DIV */ +#define SIFIVE_SERIAL_DIV_OFFS 0x18 +#define SIFIVE_SERIAL_DIV_DIV_SHIFT 0 +#define SIFIVE_SERIAL_DIV_DIV_MASK (0xffff << SIFIVE_SERIAL_IP_DIV_SHIFT) + +/* + * Config macros + */ + +/* + * SIFIVE_SERIAL_MAX_PORTS: maximum number of UARTs on a device that can + * host a serial console + */ +#define SIFIVE_SERIAL_MAX_PORTS 8 + +/* + * SIFIVE_DEFAULT_BAUD_RATE: default baud rate that the driver should + * configure itself to use + */ +#define SIFIVE_DEFAULT_BAUD_RATE 115200 + +/* SIFIVE_SERIAL_NAME: our driver's name that we pass to the operating system */ +#define SIFIVE_SERIAL_NAME "sifive-serial" + +/* SIFIVE_TTY_PREFIX: tty name prefix for SiFive serial ports */ +#define SIFIVE_TTY_PREFIX "ttySIF" + +/* SIFIVE_TX_FIFO_DEPTH: depth of the TX FIFO (in bytes) */ +#define SIFIVE_TX_FIFO_DEPTH 8 + +/* SIFIVE_RX_FIFO_DEPTH: depth of the TX FIFO (in bytes) */ +#define SIFIVE_RX_FIFO_DEPTH 8 + +#if (SIFIVE_TX_FIFO_DEPTH != SIFIVE_RX_FIFO_DEPTH) +#error Driver does not support configurations with different TX, RX FIFO sizes +#endif + +/* + * + */ + +/** + * struct sifive_serial_port - driver-specific data extension to struct uart_port + * @port: struct uart_port embedded in this struct + * @dev: struct device * + * @ier: shadowed copy of the interrupt enable register + * @baud_rate: UART serial line rate (e.g., 115200 baud) + * @clk: reference to this device's clock + * @clk_notifier: clock rate change notifier for upstream clock changes + * + * 
Configuration data specific to this SiFive UART. + */ +struct sifive_serial_port { + struct uart_port port; + struct device *dev; + unsigned char ier; + unsigned long baud_rate; + struct clk *clk; + struct notifier_block clk_notifier; +}; + +/* + * Structure container-of macros + */ + +#define port_to_sifive_serial_port(p) (container_of((p), \ + struct sifive_serial_port, \ + port)) + +#define notifier_to_sifive_serial_port(nb) (container_of((nb), \ + struct sifive_serial_port, \ + clk_notifier)) + +/* + * Forward declarations + */ +static void sifive_serial_stop_tx(struct uart_port *port); + +/* + * Internal functions + */ + +/** + * __ssp_early_writel() - write to a SiFive serial port register (early) + * @port: pointer to a struct uart_port record + * @offs: register address offset from the IP block base address + * @v: value to write to the register + * + * Given a pointer @port to a struct uart_port record, write the value + * @v to the IP block register address offset @offs. This function is + * intended for early console use. + * + * Context: Intended to be used only by the earlyconsole code. + */ +static void __ssp_early_writel(u32 v, u16 offs, struct uart_port *port) +{ + writel_relaxed(v, port->membase + offs); +} + +/** + * __ssp_early_readl() - read from a SiFive serial port register (early) + * @port: pointer to a struct uart_port record + * @offs: register address offset from the IP block base address + * + * Given a pointer @port to a struct uart_port record, read the + * contents of the IP block register located at offset @offs from the + * IP block base and return it. This function is intended for early + * console use. + * + * Context: Intended to be called only by the earlyconsole code or by + * __ssp_readl() or __ssp_writel() (in this driver) + * + * Returns: the register value read from the UART. + */ +static u32 __ssp_early_readl(struct uart_port *port, u16 offs) +{ + return readl_relaxed(port->membase + offs); +} + +/** + * __ssp_writel() - write to a SiFive serial port register + * @v: value to write to the register + * @offs: register address offset from the IP block base address + * @ssp: pointer to a struct sifive_serial_port record + * + * Write the value @v to the IP block register located at offset @offs from the + * IP block base, given a pointer @ssp to a struct sifive_serial_port record. + * + * Context: Any context. + */ +static void __ssp_writel(u32 v, u16 offs, struct sifive_serial_port *ssp) +{ + __ssp_early_writel(v, offs, &ssp->port); +} + +/** + * __ssp_readl() - read from a SiFive serial port register + * @ssp: pointer to a struct sifive_serial_port record + * @offs: register address offset from the IP block base address + * + * Read the contents of the IP block register located at offset @offs from the + * IP block base, given a pointer @ssp to a struct sifive_serial_port record. + * + * Context: Any context. + * + * Returns: the value of the UART register + */ +static u32 __ssp_readl(struct sifive_serial_port *ssp, u16 offs) +{ + return __ssp_early_readl(&ssp->port, offs); +} + +/** + * sifive_serial_is_txfifo_full() - is the TXFIFO full? + * @ssp: pointer to a struct sifive_serial_port + * + * Read the transmit FIFO "full" bit, returning a non-zero value if the + * TX FIFO is full, or zero if space remains. Intended to be used to prevent + * writes to the TX FIFO when it's full. + * + * Returns: SIFIVE_SERIAL_TXDATA_FULL_MASK (non-zero) if the transmit FIFO + * is full, or 0 if space remains. 
+ */ +static int sifive_serial_is_txfifo_full(struct sifive_serial_port *ssp) +{ + return __ssp_readl(ssp, SIFIVE_SERIAL_TXDATA_OFFS) & + SIFIVE_SERIAL_TXDATA_FULL_MASK; +} + +/** + * __ssp_transmit_char() - enqueue a byte to transmit onto the TX FIFO + * @ssp: pointer to a struct sifive_serial_port + * @ch: character to transmit + * + * Enqueue a byte @ch onto the transmit FIFO, given a pointer @ssp to the + * struct sifive_serial_port * to transmit on. Caller should first check to + * ensure that the TXFIFO has space; see sifive_serial_is_txfifo_full(). + * + * Context: Any context. + */ +static void __ssp_transmit_char(struct sifive_serial_port *ssp, int ch) +{ + __ssp_writel(ch, SIFIVE_SERIAL_TXDATA_OFFS, ssp); +} + +/** + * __ssp_transmit_chars() - enqueue multiple bytes onto the TX FIFO + * @ssp: pointer to a struct sifive_serial_port + * + * Transfer up to a TX FIFO size's worth of characters from the Linux serial + * transmit buffer to the SiFive UART TX FIFO. + * + * Context: Any context. Expects @ssp->port.lock to be held by caller. + */ +static void __ssp_transmit_chars(struct sifive_serial_port *ssp) +{ + struct circ_buf *xmit = &ssp->port.state->xmit; + int count; + + if (ssp->port.x_char) { + __ssp_transmit_char(ssp, ssp->port.x_char); + ssp->port.icount.tx++; + ssp->port.x_char = 0; + return; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(&ssp->port)) { + sifive_serial_stop_tx(&ssp->port); + return; + } + count = SIFIVE_TX_FIFO_DEPTH; + do { + __ssp_transmit_char(ssp, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + ssp->port.icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&ssp->port); + + if (uart_circ_empty(xmit)) + sifive_serial_stop_tx(&ssp->port); +} + +/** + * __ssp_enable_txwm() - enable transmit watermark interrupts + * @ssp: pointer to a struct sifive_serial_port + * + * Enable interrupt generation when the transmit FIFO watermark is reached + * on the SiFive UART referred to by @ssp. + */ +static void __ssp_enable_txwm(struct sifive_serial_port *ssp) +{ + if (ssp->ier & SIFIVE_SERIAL_IE_TXWM_MASK) + return; + + ssp->ier |= SIFIVE_SERIAL_IE_TXWM_MASK; + __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp); +} + +/** + * __ssp_enable_rxwm() - enable receive watermark interrupts + * @ssp: pointer to a struct sifive_serial_port + * + * Enable interrupt generation when the receive FIFO watermark is reached + * on the SiFive UART referred to by @ssp. + */ +static void __ssp_enable_rxwm(struct sifive_serial_port *ssp) +{ + if (ssp->ier & SIFIVE_SERIAL_IE_RXWM_MASK) + return; + + ssp->ier |= SIFIVE_SERIAL_IE_RXWM_MASK; + __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp); +} + +/** + * __ssp_disable_txwm() - disable transmit watermark interrupts + * @ssp: pointer to a struct sifive_serial_port + * + * Disable interrupt generation when the transmit FIFO watermark is reached + * on the UART referred to by @ssp. + */ +static void __ssp_disable_txwm(struct sifive_serial_port *ssp) +{ + if (!(ssp->ier & SIFIVE_SERIAL_IE_TXWM_MASK)) + return; + + ssp->ier &= ~SIFIVE_SERIAL_IE_TXWM_MASK; + __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp); +} + +/** + * __ssp_disable_rxwm() - disable receive watermark interrupts + * @ssp: pointer to a struct sifive_serial_port + * + * Disable interrupt generation when the receive FIFO watermark is reached + * on the UART referred to by @ssp. 
+ */ +static void __ssp_disable_rxwm(struct sifive_serial_port *ssp) +{ + if (!(ssp->ier & SIFIVE_SERIAL_IE_RXWM_MASK)) + return; + + ssp->ier &= ~SIFIVE_SERIAL_IE_RXWM_MASK; + __ssp_writel(ssp->ier, SIFIVE_SERIAL_IE_OFFS, ssp); +} + +/** + * __ssp_receive_char() - receive a byte from the UART + * @ssp: pointer to a struct sifive_serial_port + * @is_empty: char pointer to return whether the RX FIFO is empty + * + * Try to read a byte from the SiFive UART RX FIFO, referenced by + * @ssp, and to return it. Also returns the RX FIFO empty bit in + * the char pointed to by @ch. The caller must pass the byte back to the + * Linux serial layer if needed. + * + * Returns: the byte read from the UART RX FIFO. + */ +static char __ssp_receive_char(struct sifive_serial_port *ssp, char *is_empty) +{ + u32 v; + u8 ch; + + v = __ssp_readl(ssp, SIFIVE_SERIAL_RXDATA_OFFS); + + if (!is_empty) + WARN_ON(1); + else + *is_empty = (v & SIFIVE_SERIAL_RXDATA_EMPTY_MASK) >> + SIFIVE_SERIAL_RXDATA_EMPTY_SHIFT; + + ch = (v & SIFIVE_SERIAL_RXDATA_DATA_MASK) >> + SIFIVE_SERIAL_RXDATA_DATA_SHIFT; + + return ch; +} + +/** + * __ssp_receive_chars() - receive multiple bytes from the UART + * @ssp: pointer to a struct sifive_serial_port + * + * Receive up to an RX FIFO's worth of bytes from the SiFive UART referred + * to by @ssp and pass them up to the Linux serial layer. + * + * Context: Expects ssp->port.lock to be held by caller. + */ +static void __ssp_receive_chars(struct sifive_serial_port *ssp) +{ + unsigned char ch; + char is_empty; + int c; + + for (c = SIFIVE_RX_FIFO_DEPTH; c > 0; --c) { + ch = __ssp_receive_char(ssp, &is_empty); + if (is_empty) + break; + + ssp->port.icount.rx++; + uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL); + } + + tty_flip_buffer_push(&ssp->port.state->port); +} + +/** + * __ssp_update_div() - calculate the divisor setting by the line rate + * @ssp: pointer to a struct sifive_serial_port + * + * Calculate the appropriate value of the clock divisor for the UART + * and target line rate referred to by @ssp and write it into the + * hardware. + */ +static void __ssp_update_div(struct sifive_serial_port *ssp) +{ + u16 div; + + div = DIV_ROUND_UP(ssp->port.uartclk, ssp->baud_rate) - 1; + + __ssp_writel(div, SIFIVE_SERIAL_DIV_OFFS, ssp); +} + +/** + * __ssp_update_baud_rate() - set the UART "baud rate" + * @ssp: pointer to a struct sifive_serial_port + * @rate: new target bit rate + * + * Calculate the UART divisor value for the target bit rate @rate for the + * SiFive UART described by @ssp and program it into the UART. There may + * be some error between the target bit rate and the actual bit rate implemented + * by the UART due to clock ratio granularity. + */ +static void __ssp_update_baud_rate(struct sifive_serial_port *ssp, + unsigned int rate) +{ + if (ssp->baud_rate == rate) + return; + + ssp->baud_rate = rate; + __ssp_update_div(ssp); +} + +/** + * __ssp_set_stop_bits() - set the number of stop bits + * @ssp: pointer to a struct sifive_serial_port + * @nstop: 1 or 2 (stop bits) + * + * Program the SiFive UART referred to by @ssp to use @nstop stop bits. 
+ */ +static void __ssp_set_stop_bits(struct sifive_serial_port *ssp, char nstop) +{ + u32 v; + + if (nstop < 1 || nstop > 2) { + WARN_ON(1); + return; + } + + v = __ssp_readl(ssp, SIFIVE_SERIAL_TXCTRL_OFFS); + v &= ~SIFIVE_SERIAL_TXCTRL_NSTOP_MASK; + v |= (nstop - 1) << SIFIVE_SERIAL_TXCTRL_NSTOP_SHIFT; + __ssp_writel(v, SIFIVE_SERIAL_TXCTRL_OFFS, ssp); +} + +/** + * __ssp_wait_for_xmitr() - wait for an empty slot on the TX FIFO + * @ssp: pointer to a struct sifive_serial_port + * + * Delay while the UART TX FIFO referred to by @ssp is marked as full. + * + * Context: Any context. + */ +static void __maybe_unused __ssp_wait_for_xmitr(struct sifive_serial_port *ssp) +{ + while (sifive_serial_is_txfifo_full(ssp)) + udelay(1); /* XXX Could probably be more intelligent here */ +} + +/* + * Linux serial API functions + */ + +static void sifive_serial_stop_tx(struct uart_port *port) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + __ssp_disable_txwm(ssp); +} + +static void sifive_serial_stop_rx(struct uart_port *port) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + __ssp_disable_rxwm(ssp); +} + +static void sifive_serial_start_tx(struct uart_port *port) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + __ssp_enable_txwm(ssp); +} + +static irqreturn_t sifive_serial_irq(int irq, void *dev_id) +{ + struct sifive_serial_port *ssp = dev_id; + u32 ip; + + spin_lock(&ssp->port.lock); + + ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS); + if (!ip) { + spin_unlock(&ssp->port.lock); + return IRQ_NONE; + } + + if (ip & SIFIVE_SERIAL_IP_RXWM_MASK) + __ssp_receive_chars(ssp); + if (ip & SIFIVE_SERIAL_IP_TXWM_MASK) + __ssp_transmit_chars(ssp); + + spin_unlock(&ssp->port.lock); + + return IRQ_HANDLED; +} + +static unsigned int sifive_serial_tx_empty(struct uart_port *port) +{ + return TIOCSER_TEMT; +} + +static unsigned int sifive_serial_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR; +} + +static void sifive_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* IP block does not support these signals */ +} + +static void sifive_serial_break_ctl(struct uart_port *port, int break_state) +{ + /* IP block does not support sending a break */ +} + +static int sifive_serial_startup(struct uart_port *port) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + __ssp_enable_rxwm(ssp); + + return 0; +} + +static void sifive_serial_shutdown(struct uart_port *port) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + __ssp_disable_rxwm(ssp); + __ssp_disable_txwm(ssp); +} + +/** + * sifive_serial_clk_notifier() - clock post-rate-change notifier + * @nb: pointer to the struct notifier_block, from the notifier code + * @event: event mask from the notifier code + * @data: pointer to the struct clk_notifier_data from the notifier code + * + * On the V0 SoC, the UART IP block is derived from the CPU clock source + * after a synchronous divide-by-two divider, so any CPU clock rate change + * requires the UART baud rate to be updated. This presumably corrupts any + * serial word currently being transmitted or received. In order to avoid + * corrupting the output data stream, we drain the transmit queue before + * allowing the clock's rate to be changed. 
+ */ +static int sifive_serial_clk_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct clk_notifier_data *cnd = data; + struct sifive_serial_port *ssp = notifier_to_sifive_serial_port(nb); + + if (event == PRE_RATE_CHANGE) { + /* + * The TX watermark is always set to 1 by this driver, which + * means that the TX busy bit will lower when there are 0 bytes + * left in the TX queue -- in other words, when the TX FIFO is + * empty. + */ + __ssp_wait_for_xmitr(ssp); + /* + * On the cycle the TX FIFO goes empty there is still a full + * UART frame left to be transmitted in the shift register. + * The UART provides no way for software to directly determine + * when that last frame has been transmitted, so we just sleep + * here instead. As we're not tracking the number of stop bits + * they're just worst cased here. The rest of the serial + * framing parameters aren't configurable by software. + */ + udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate)); + } + + if (event == POST_RATE_CHANGE && ssp->port.uartclk != cnd->new_rate) { + ssp->port.uartclk = cnd->new_rate; + __ssp_update_div(ssp); + } + + return NOTIFY_OK; +} + +static void sifive_serial_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + unsigned long flags; + u32 v, old_v; + int rate; + char nstop; + + if ((termios->c_cflag & CSIZE) != CS8) { + dev_err_once(ssp->port.dev, "only 8-bit words supported\n"); + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } + if (termios->c_iflag & (INPCK | PARMRK)) + dev_err_once(ssp->port.dev, "parity checking not supported\n"); + if (termios->c_iflag & BRKINT) + dev_err_once(ssp->port.dev, "BREAK detection not supported\n"); + termios->c_iflag &= ~(INPCK|PARMRK|BRKINT); + + /* Set number of stop bits */ + nstop = (termios->c_cflag & CSTOPB) ? 2 : 1; + __ssp_set_stop_bits(ssp, nstop); + + /* Set line rate */ + rate = uart_get_baud_rate(port, termios, old, 0, + ssp->port.uartclk / 16); + __ssp_update_baud_rate(ssp, rate); + + spin_lock_irqsave(&ssp->port.lock, flags); + + /* Update the per-port timeout */ + uart_update_timeout(port, termios->c_cflag, rate); + + ssp->port.read_status_mask = 0; + + /* Ignore all characters if CREAD is not set */ + v = __ssp_readl(ssp, SIFIVE_SERIAL_RXCTRL_OFFS); + old_v = v; + if ((termios->c_cflag & CREAD) == 0) + v &= SIFIVE_SERIAL_RXCTRL_RXEN_MASK; + else + v |= SIFIVE_SERIAL_RXCTRL_RXEN_MASK; + if (v != old_v) + __ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp); + + spin_unlock_irqrestore(&ssp->port.lock, flags); +} + +static void sifive_serial_release_port(struct uart_port *port) +{ +} + +static int sifive_serial_request_port(struct uart_port *port) +{ + return 0; +} + +static void sifive_serial_config_port(struct uart_port *port, int flags) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + ssp->port.type = PORT_SIFIVE_V0; +} + +static int sifive_serial_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + return -EINVAL; +} + +static const char *sifive_serial_type(struct uart_port *port) +{ + return port->type == PORT_SIFIVE_V0 ? 
"SiFive UART v0" : NULL; +} + +#ifdef CONFIG_CONSOLE_POLL +static int sifive_serial_poll_get_char(struct uart_port *port) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + char is_empty, ch; + + ch = __ssp_receive_char(ssp, &is_empty); + if (is_empty) + return NO_POLL_CHAR; + + return ch; +} + +static void sifive_serial_poll_put_char(struct uart_port *port, + unsigned char c) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + __ssp_wait_for_xmitr(ssp); + __ssp_transmit_char(ssp, c); +} +#endif /* CONFIG_CONSOLE_POLL */ + +/* + * Early console support + */ + +#ifdef CONFIG_SERIAL_EARLYCON +static void early_sifive_serial_putc(struct uart_port *port, unsigned char c) +{ + while (__ssp_early_readl(port, SIFIVE_SERIAL_TXDATA_OFFS) & + SIFIVE_SERIAL_TXDATA_FULL_MASK) + cpu_relax(); + + __ssp_early_writel(c, SIFIVE_SERIAL_TXDATA_OFFS, port); +} + +static void early_sifive_serial_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + struct uart_port *port = &dev->port; + + uart_console_write(port, s, n, early_sifive_serial_putc); +} + +static int __init early_sifive_serial_setup(struct earlycon_device *dev, + const char *options) +{ + struct uart_port *port = &dev->port; + + if (!port->membase) + return -ENODEV; + + dev->con->write = early_sifive_serial_write; + + return 0; +} + +OF_EARLYCON_DECLARE(sifive, "sifive,uart0", early_sifive_serial_setup); +OF_EARLYCON_DECLARE(sifive, "sifive,fu540-c000-uart0", + early_sifive_serial_setup); +#endif /* CONFIG_SERIAL_EARLYCON */ + +/* + * Linux console interface + */ + +#ifdef CONFIG_SERIAL_SIFIVE_CONSOLE + +static struct sifive_serial_port *sifive_serial_console_ports[SIFIVE_SERIAL_MAX_PORTS]; + +static void sifive_serial_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + + __ssp_wait_for_xmitr(ssp); + __ssp_transmit_char(ssp, ch); +} + +static void sifive_serial_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct sifive_serial_port *ssp = sifive_serial_console_ports[co->index]; + unsigned long flags; + unsigned int ier; + int locked = 1; + + if (!ssp) + return; + + local_irq_save(flags); + if (ssp->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&ssp->port.lock); + else + spin_lock(&ssp->port.lock); + + ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS); + __ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp); + + uart_console_write(&ssp->port, s, count, sifive_serial_console_putchar); + + __ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp); + + if (locked) + spin_unlock(&ssp->port.lock); + local_irq_restore(flags); +} + +static int sifive_serial_console_setup(struct console *co, char *options) +{ + struct sifive_serial_port *ssp; + int baud = SIFIVE_DEFAULT_BAUD_RATE; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= SIFIVE_SERIAL_MAX_PORTS) + return -ENODEV; + + ssp = sifive_serial_console_ports[co->index]; + if (!ssp) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&ssp->port, co, baud, parity, bits, flow); +} + +static struct uart_driver sifive_serial_uart_driver; + +static struct console sifive_serial_console = { + .name = SIFIVE_TTY_PREFIX, + .write = sifive_serial_console_write, + .device = uart_console_device, + .setup = sifive_serial_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = 
&sifive_serial_uart_driver, +}; + +static int __init sifive_console_init(void) +{ + register_console(&sifive_serial_console); + return 0; +} + +console_initcall(sifive_console_init); + +static void __ssp_add_console_port(struct sifive_serial_port *ssp) +{ + sifive_serial_console_ports[ssp->port.line] = ssp; +} + +static void __ssp_remove_console_port(struct sifive_serial_port *ssp) +{ + sifive_serial_console_ports[ssp->port.line] = NULL; +} + +#define SIFIVE_SERIAL_CONSOLE (&sifive_serial_console) + +#else + +#define SIFIVE_SERIAL_CONSOLE NULL + +static void __ssp_add_console_port(struct sifive_serial_port *ssp) +{} +static void __ssp_remove_console_port(struct sifive_serial_port *ssp) +{} + +#endif + +static const struct uart_ops sifive_serial_uops = { + .tx_empty = sifive_serial_tx_empty, + .set_mctrl = sifive_serial_set_mctrl, + .get_mctrl = sifive_serial_get_mctrl, + .stop_tx = sifive_serial_stop_tx, + .start_tx = sifive_serial_start_tx, + .stop_rx = sifive_serial_stop_rx, + .break_ctl = sifive_serial_break_ctl, + .startup = sifive_serial_startup, + .shutdown = sifive_serial_shutdown, + .set_termios = sifive_serial_set_termios, + .type = sifive_serial_type, + .release_port = sifive_serial_release_port, + .request_port = sifive_serial_request_port, + .config_port = sifive_serial_config_port, + .verify_port = sifive_serial_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = sifive_serial_poll_get_char, + .poll_put_char = sifive_serial_poll_put_char, +#endif +}; + +static struct uart_driver sifive_serial_uart_driver = { + .owner = THIS_MODULE, + .driver_name = SIFIVE_SERIAL_NAME, + .dev_name = SIFIVE_TTY_PREFIX, + .nr = SIFIVE_SERIAL_MAX_PORTS, + .cons = SIFIVE_SERIAL_CONSOLE, +}; + +static int sifive_serial_probe(struct platform_device *pdev) +{ + struct sifive_serial_port *ssp; + struct resource *mem; + struct clk *clk; + void __iomem *base; + int irq, id, r; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EPROBE_DEFER; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "could not acquire device memory\n"); + return PTR_ERR(base); + } + + clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "unable to find controller clock\n"); + return PTR_ERR(clk); + } + + id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (id < 0) { + dev_err(&pdev->dev, "missing aliases entry\n"); + return id; + } + +#ifdef CONFIG_SERIAL_SIFIVE_CONSOLE + if (id > SIFIVE_SERIAL_MAX_PORTS) { + dev_err(&pdev->dev, "too many UARTs (%d)\n", id); + return -EINVAL; + } +#endif + + ssp = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); + if (!ssp) + return -ENOMEM; + + ssp->port.dev = &pdev->dev; + ssp->port.type = PORT_SIFIVE_V0; + ssp->port.iotype = UPIO_MEM; + ssp->port.irq = irq; + ssp->port.fifosize = SIFIVE_TX_FIFO_DEPTH; + ssp->port.ops = &sifive_serial_uops; + ssp->port.line = id; + ssp->port.mapbase = mem->start; + ssp->port.membase = base; + ssp->dev = &pdev->dev; + ssp->clk = clk; + ssp->clk_notifier.notifier_call = sifive_serial_clk_notifier; + + r = clk_notifier_register(ssp->clk, &ssp->clk_notifier); + if (r) { + dev_err(&pdev->dev, "could not register clock notifier: %d\n", + r); + goto probe_out1; + } + + /* Set up clock divider */ + ssp->port.uartclk = clk_get_rate(ssp->clk); + ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE; + __ssp_update_div(ssp); + + platform_set_drvdata(pdev, ssp); + + /* Enable transmits and set the watermark level to 1 */ + 
__ssp_writel((1 << SIFIVE_SERIAL_TXCTRL_TXCNT_SHIFT) | + SIFIVE_SERIAL_TXCTRL_TXEN_MASK, + SIFIVE_SERIAL_TXCTRL_OFFS, ssp); + + /* Enable receives and set the watermark level to 0 */ + __ssp_writel((0 << SIFIVE_SERIAL_RXCTRL_RXCNT_SHIFT) | + SIFIVE_SERIAL_RXCTRL_RXEN_MASK, + SIFIVE_SERIAL_RXCTRL_OFFS, ssp); + + r = request_irq(ssp->port.irq, sifive_serial_irq, ssp->port.irqflags, + dev_name(&pdev->dev), ssp); + if (r) { + dev_err(&pdev->dev, "could not attach interrupt: %d\n", r); + goto probe_out2; + } + + __ssp_add_console_port(ssp); + + r = uart_add_one_port(&sifive_serial_uart_driver, &ssp->port); + if (r != 0) { + dev_err(&pdev->dev, "could not add uart: %d\n", r); + goto probe_out3; + } + + return 0; + +probe_out3: + __ssp_remove_console_port(ssp); + free_irq(ssp->port.irq, ssp); +probe_out2: + clk_notifier_unregister(ssp->clk, &ssp->clk_notifier); +probe_out1: + return r; +} + +static int sifive_serial_remove(struct platform_device *dev) +{ + struct sifive_serial_port *ssp = platform_get_drvdata(dev); + + __ssp_remove_console_port(ssp); + uart_remove_one_port(&sifive_serial_uart_driver, &ssp->port); + free_irq(ssp->port.irq, ssp); + clk_notifier_unregister(ssp->clk, &ssp->clk_notifier); + + return 0; +} + +static const struct of_device_id sifive_serial_of_match[] = { + { .compatible = "sifive,fu540-c000-uart0" }, + { .compatible = "sifive,uart0" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sifive_serial_of_match); + +static struct platform_driver sifive_serial_platform_driver = { + .probe = sifive_serial_probe, + .remove = sifive_serial_remove, + .driver = { + .name = SIFIVE_SERIAL_NAME, + .of_match_table = of_match_ptr(sifive_serial_of_match), + }, +}; + +static int __init sifive_serial_init(void) +{ + int r; + + r = uart_register_driver(&sifive_serial_uart_driver); + if (r) + goto init_out1; + + r = platform_driver_register(&sifive_serial_platform_driver); + if (r) + goto init_out2; + + return 0; + +init_out2: + uart_unregister_driver(&sifive_serial_uart_driver); +init_out1: + return r; +} + +static void __exit sifive_serial_exit(void) +{ + platform_driver_unregister(&sifive_serial_platform_driver); + uart_unregister_driver(&sifive_serial_uart_driver); +} + +module_init(sifive_serial_init); +module_exit(sifive_serial_exit); + +MODULE_DESCRIPTION("SiFive UART serial driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Paul Walmsley "); diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c new file mode 100644 index 000000000..9c7f71993 --- /dev/null +++ b/drivers/tty/serial/sprd_serial.c @@ -0,0 +1,1297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2015 Spreadtrum Communications Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* device name */ +#define UART_NR_MAX 8 +#define SPRD_TTY_NAME "ttyS" +#define SPRD_FIFO_SIZE 128 +#define SPRD_DEF_RATE 26000000 +#define SPRD_BAUD_IO_LIMIT 3000000 +#define SPRD_TIMEOUT 256000 + +/* the offset of serial registers and BITs for them */ +/* data registers */ +#define SPRD_TXD 0x0000 +#define SPRD_RXD 0x0004 + +/* line status register and its BITs */ +#define SPRD_LSR 0x0008 +#define SPRD_LSR_OE BIT(4) +#define SPRD_LSR_FE BIT(3) +#define SPRD_LSR_PE BIT(2) +#define SPRD_LSR_BI BIT(7) +#define SPRD_LSR_TX_OVER BIT(15) + +/* data number in TX and RX fifo */ +#define SPRD_STS1 0x000C +#define SPRD_RX_FIFO_CNT_MASK GENMASK(7, 0) +#define SPRD_TX_FIFO_CNT_MASK GENMASK(15, 8) + +/* interrupt enable register and its BITs */ +#define SPRD_IEN 0x0010 +#define SPRD_IEN_RX_FULL BIT(0) +#define SPRD_IEN_TX_EMPTY BIT(1) +#define SPRD_IEN_BREAK_DETECT BIT(7) +#define SPRD_IEN_TIMEOUT BIT(13) + +/* interrupt clear register */ +#define SPRD_ICLR 0x0014 +#define SPRD_ICLR_TIMEOUT BIT(13) + +/* line control register */ +#define SPRD_LCR 0x0018 +#define SPRD_LCR_STOP_1BIT 0x10 +#define SPRD_LCR_STOP_2BIT 0x30 +#define SPRD_LCR_DATA_LEN (BIT(2) | BIT(3)) +#define SPRD_LCR_DATA_LEN5 0x0 +#define SPRD_LCR_DATA_LEN6 0x4 +#define SPRD_LCR_DATA_LEN7 0x8 +#define SPRD_LCR_DATA_LEN8 0xc +#define SPRD_LCR_PARITY (BIT(0) | BIT(1)) +#define SPRD_LCR_PARITY_EN 0x2 +#define SPRD_LCR_EVEN_PAR 0x0 +#define SPRD_LCR_ODD_PAR 0x1 + +/* control register 1 */ +#define SPRD_CTL1 0x001C +#define SPRD_DMA_EN BIT(15) +#define SPRD_LOOPBACK_EN BIT(14) +#define RX_HW_FLOW_CTL_THLD BIT(6) +#define RX_HW_FLOW_CTL_EN BIT(7) +#define TX_HW_FLOW_CTL_EN BIT(8) +#define RX_TOUT_THLD_DEF 0x3E00 +#define RX_HFC_THLD_DEF 0x40 + +/* fifo threshold register */ +#define SPRD_CTL2 0x0020 +#define THLD_TX_EMPTY 0x40 +#define THLD_TX_EMPTY_SHIFT 8 +#define THLD_RX_FULL 0x40 +#define THLD_RX_FULL_MASK GENMASK(6, 0) + +/* config baud rate register */ +#define SPRD_CLKD0 0x0024 +#define SPRD_CLKD0_MASK GENMASK(15, 0) +#define SPRD_CLKD1 0x0028 +#define SPRD_CLKD1_MASK GENMASK(20, 16) +#define SPRD_CLKD1_SHIFT 16 + +/* interrupt mask status register */ +#define SPRD_IMSR 0x002C +#define SPRD_IMSR_RX_FIFO_FULL BIT(0) +#define SPRD_IMSR_TX_FIFO_EMPTY BIT(1) +#define SPRD_IMSR_BREAK_DETECT BIT(7) +#define SPRD_IMSR_TIMEOUT BIT(13) +#define SPRD_DEFAULT_SOURCE_CLK 26000000 + +#define SPRD_RX_DMA_STEP 1 +#define SPRD_RX_FIFO_FULL 1 +#define SPRD_TX_FIFO_FULL 0x20 +#define SPRD_UART_RX_SIZE (UART_XMIT_SIZE / 4) + +struct sprd_uart_dma { + struct dma_chan *chn; + unsigned char *virt; + dma_addr_t phys_addr; + dma_cookie_t cookie; + u32 trans_len; + bool enable; +}; + +struct sprd_uart_port { + struct uart_port port; + char name[16]; + struct clk *clk; + struct sprd_uart_dma tx_dma; + struct sprd_uart_dma rx_dma; + dma_addr_t pos; + unsigned char *rx_buf_tail; +}; + +static struct sprd_uart_port *sprd_port[UART_NR_MAX]; +static int sprd_ports_num; + +static int sprd_start_dma_rx(struct uart_port *port); +static int sprd_tx_dma_config(struct uart_port *port); + +static inline unsigned int serial_in(struct uart_port *port, + unsigned int offset) +{ + return readl_relaxed(port->membase + offset); +} + +static inline void serial_out(struct uart_port *port, unsigned int offset, + int value) +{ + writel_relaxed(value, port->membase + offset); +} + +static unsigned int 
sprd_tx_empty(struct uart_port *port) +{ + if (serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK) + return 0; + else + return TIOCSER_TEMT; +} + +static unsigned int sprd_get_mctrl(struct uart_port *port) +{ + return TIOCM_DSR | TIOCM_CTS; +} + +static void sprd_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 val = serial_in(port, SPRD_CTL1); + + if (mctrl & TIOCM_LOOP) + val |= SPRD_LOOPBACK_EN; + else + val &= ~SPRD_LOOPBACK_EN; + + serial_out(port, SPRD_CTL1, val); +} + +static void sprd_stop_rx(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + unsigned int ien, iclr; + + if (sp->rx_dma.enable) + dmaengine_terminate_all(sp->rx_dma.chn); + + iclr = serial_in(port, SPRD_ICLR); + ien = serial_in(port, SPRD_IEN); + + ien &= ~(SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT); + iclr |= SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT; + + serial_out(port, SPRD_IEN, ien); + serial_out(port, SPRD_ICLR, iclr); +} + +static void sprd_uart_dma_enable(struct uart_port *port, bool enable) +{ + u32 val = serial_in(port, SPRD_CTL1); + + if (enable) + val |= SPRD_DMA_EN; + else + val &= ~SPRD_DMA_EN; + + serial_out(port, SPRD_CTL1, val); +} + +static void sprd_stop_tx_dma(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct circ_buf *xmit = &port->state->xmit; + struct dma_tx_state state; + u32 trans_len; + + dmaengine_pause(sp->tx_dma.chn); + + dmaengine_tx_status(sp->tx_dma.chn, sp->tx_dma.cookie, &state); + if (state.residue) { + trans_len = state.residue - sp->tx_dma.phys_addr; + xmit->tail = (xmit->tail + trans_len) & (UART_XMIT_SIZE - 1); + port->icount.tx += trans_len; + dma_unmap_single(port->dev, sp->tx_dma.phys_addr, + sp->tx_dma.trans_len, DMA_TO_DEVICE); + } + + dmaengine_terminate_all(sp->tx_dma.chn); + sp->tx_dma.trans_len = 0; +} + +static int sprd_tx_buf_remap(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct circ_buf *xmit = &port->state->xmit; + + sp->tx_dma.trans_len = + CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + + sp->tx_dma.phys_addr = dma_map_single(port->dev, + (void *)&(xmit->buf[xmit->tail]), + sp->tx_dma.trans_len, + DMA_TO_DEVICE); + return dma_mapping_error(port->dev, sp->tx_dma.phys_addr); +} + +static void sprd_complete_tx_dma(void *data) +{ + struct uart_port *port = (struct uart_port *)data; + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct circ_buf *xmit = &port->state->xmit; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + dma_unmap_single(port->dev, sp->tx_dma.phys_addr, + sp->tx_dma.trans_len, DMA_TO_DEVICE); + + xmit->tail = (xmit->tail + sp->tx_dma.trans_len) & (UART_XMIT_SIZE - 1); + port->icount.tx += sp->tx_dma.trans_len; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit) || sprd_tx_buf_remap(port) || + sprd_tx_dma_config(port)) + sp->tx_dma.trans_len = 0; + + spin_unlock_irqrestore(&port->lock, flags); +} + +static int sprd_uart_dma_submit(struct uart_port *port, + struct sprd_uart_dma *ud, u32 trans_len, + enum dma_transfer_direction direction, + dma_async_tx_callback callback) +{ + struct dma_async_tx_descriptor *dma_des; + unsigned long flags; + + flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, + SPRD_DMA_NO_TRG, + SPRD_DMA_FRAG_REQ, + SPRD_DMA_TRANS_INT); + + dma_des = dmaengine_prep_slave_single(ud->chn, ud->phys_addr, trans_len, + 
direction, flags); + if (!dma_des) + return -ENODEV; + + dma_des->callback = callback; + dma_des->callback_param = port; + + ud->cookie = dmaengine_submit(dma_des); + if (dma_submit_error(ud->cookie)) + return dma_submit_error(ud->cookie); + + dma_async_issue_pending(ud->chn); + + return 0; +} + +static int sprd_tx_dma_config(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + u32 burst = sp->tx_dma.trans_len > SPRD_TX_FIFO_FULL ? + SPRD_TX_FIFO_FULL : sp->tx_dma.trans_len; + int ret; + struct dma_slave_config cfg = { + .dst_addr = port->mapbase + SPRD_TXD, + .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .src_maxburst = burst, + }; + + ret = dmaengine_slave_config(sp->tx_dma.chn, &cfg); + if (ret < 0) + return ret; + + return sprd_uart_dma_submit(port, &sp->tx_dma, sp->tx_dma.trans_len, + DMA_MEM_TO_DEV, sprd_complete_tx_dma); +} + +static void sprd_start_tx_dma(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + serial_out(port, SPRD_TXD, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + sprd_stop_tx_dma(port); + return; + } + + if (sp->tx_dma.trans_len) + return; + + if (sprd_tx_buf_remap(port) || sprd_tx_dma_config(port)) + sp->tx_dma.trans_len = 0; +} + +static void sprd_rx_full_thld(struct uart_port *port, u32 thld) +{ + u32 val = serial_in(port, SPRD_CTL2); + + val &= ~THLD_RX_FULL_MASK; + val |= thld & THLD_RX_FULL_MASK; + serial_out(port, SPRD_CTL2, val); +} + +static int sprd_rx_alloc_buf(struct sprd_uart_port *sp) +{ + sp->rx_dma.virt = dma_alloc_coherent(sp->port.dev, SPRD_UART_RX_SIZE, + &sp->rx_dma.phys_addr, GFP_KERNEL); + if (!sp->rx_dma.virt) + return -ENOMEM; + + return 0; +} + +static void sprd_rx_free_buf(struct sprd_uart_port *sp) +{ + if (sp->rx_dma.virt) + dma_free_coherent(sp->port.dev, SPRD_UART_RX_SIZE, + sp->rx_dma.virt, sp->rx_dma.phys_addr); + sp->rx_dma.virt = NULL; +} + +static int sprd_rx_dma_config(struct uart_port *port, u32 burst) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct dma_slave_config cfg = { + .src_addr = port->mapbase + SPRD_RXD, + .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .src_maxburst = burst, + }; + + return dmaengine_slave_config(sp->rx_dma.chn, &cfg); +} + +static void sprd_uart_dma_rx(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct tty_port *tty = &port->state->port; + + port->icount.rx += sp->rx_dma.trans_len; + tty_insert_flip_string(tty, sp->rx_buf_tail, sp->rx_dma.trans_len); + tty_flip_buffer_push(tty); +} + +static void sprd_uart_dma_irq(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct dma_tx_state state; + enum dma_status status; + + status = dmaengine_tx_status(sp->rx_dma.chn, + sp->rx_dma.cookie, &state); + if (status == DMA_ERROR) + sprd_stop_rx(port); + + if (!state.residue && sp->pos == sp->rx_dma.phys_addr) + return; + + if (!state.residue) { + sp->rx_dma.trans_len = SPRD_UART_RX_SIZE + + sp->rx_dma.phys_addr - sp->pos; + sp->pos = sp->rx_dma.phys_addr; + } else { + sp->rx_dma.trans_len = state.residue - sp->pos; + sp->pos = state.residue; + } + + sprd_uart_dma_rx(port); + sp->rx_buf_tail 
+= sp->rx_dma.trans_len; +} + +static void sprd_complete_rx_dma(void *data) +{ + struct uart_port *port = (struct uart_port *)data; + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + struct dma_tx_state state; + enum dma_status status; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + status = dmaengine_tx_status(sp->rx_dma.chn, + sp->rx_dma.cookie, &state); + if (status != DMA_COMPLETE) { + sprd_stop_rx(port); + spin_unlock_irqrestore(&port->lock, flags); + return; + } + + if (sp->pos != sp->rx_dma.phys_addr) { + sp->rx_dma.trans_len = SPRD_UART_RX_SIZE + + sp->rx_dma.phys_addr - sp->pos; + sprd_uart_dma_rx(port); + sp->rx_buf_tail += sp->rx_dma.trans_len; + } + + if (sprd_start_dma_rx(port)) + sprd_stop_rx(port); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static int sprd_start_dma_rx(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + int ret; + + if (!sp->rx_dma.enable) + return 0; + + sp->pos = sp->rx_dma.phys_addr; + sp->rx_buf_tail = sp->rx_dma.virt; + sprd_rx_full_thld(port, SPRD_RX_FIFO_FULL); + ret = sprd_rx_dma_config(port, SPRD_RX_DMA_STEP); + if (ret) + return ret; + + return sprd_uart_dma_submit(port, &sp->rx_dma, SPRD_UART_RX_SIZE, + DMA_DEV_TO_MEM, sprd_complete_rx_dma); +} + +static void sprd_release_dma(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + + sprd_uart_dma_enable(port, false); + + if (sp->rx_dma.enable) + dma_release_channel(sp->rx_dma.chn); + + if (sp->tx_dma.enable) + dma_release_channel(sp->tx_dma.chn); + + sp->tx_dma.enable = false; + sp->rx_dma.enable = false; +} + +static void sprd_request_dma(struct uart_port *port) +{ + struct sprd_uart_port *sp = + container_of(port, struct sprd_uart_port, port); + + sp->tx_dma.enable = true; + sp->rx_dma.enable = true; + + sp->tx_dma.chn = dma_request_chan(port->dev, "tx"); + if (IS_ERR(sp->tx_dma.chn)) { + dev_err(port->dev, "request TX DMA channel failed, ret = %ld\n", + PTR_ERR(sp->tx_dma.chn)); + sp->tx_dma.enable = false; + } + + sp->rx_dma.chn = dma_request_chan(port->dev, "rx"); + if (IS_ERR(sp->rx_dma.chn)) { + dev_err(port->dev, "request RX DMA channel failed, ret = %ld\n", + PTR_ERR(sp->rx_dma.chn)); + sp->rx_dma.enable = false; + } +} + +static void sprd_stop_tx(struct uart_port *port) +{ + struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, + port); + unsigned int ien, iclr; + + if (sp->tx_dma.enable) { + sprd_stop_tx_dma(port); + return; + } + + iclr = serial_in(port, SPRD_ICLR); + ien = serial_in(port, SPRD_IEN); + + iclr |= SPRD_IEN_TX_EMPTY; + ien &= ~SPRD_IEN_TX_EMPTY; + + serial_out(port, SPRD_IEN, ien); + serial_out(port, SPRD_ICLR, iclr); +} + +static void sprd_start_tx(struct uart_port *port) +{ + struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, + port); + unsigned int ien; + + if (sp->tx_dma.enable) { + sprd_start_tx_dma(port); + return; + } + + ien = serial_in(port, SPRD_IEN); + if (!(ien & SPRD_IEN_TX_EMPTY)) { + ien |= SPRD_IEN_TX_EMPTY; + serial_out(port, SPRD_IEN, ien); + } +} + +/* The Sprd serial does not support this function. 
*/ +static void sprd_break_ctl(struct uart_port *port, int break_state) +{ + /* nothing to do */ +} + +static int handle_lsr_errors(struct uart_port *port, + unsigned int *flag, + unsigned int *lsr) +{ + int ret = 0; + + /* statistics */ + if (*lsr & SPRD_LSR_BI) { + *lsr &= ~(SPRD_LSR_FE | SPRD_LSR_PE); + port->icount.brk++; + ret = uart_handle_break(port); + if (ret) + return ret; + } else if (*lsr & SPRD_LSR_PE) + port->icount.parity++; + else if (*lsr & SPRD_LSR_FE) + port->icount.frame++; + if (*lsr & SPRD_LSR_OE) + port->icount.overrun++; + + /* mask off conditions which should be ignored */ + *lsr &= port->read_status_mask; + if (*lsr & SPRD_LSR_BI) + *flag = TTY_BREAK; + else if (*lsr & SPRD_LSR_PE) + *flag = TTY_PARITY; + else if (*lsr & SPRD_LSR_FE) + *flag = TTY_FRAME; + + return ret; +} + +static inline void sprd_rx(struct uart_port *port) +{ + struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, + port); + struct tty_port *tty = &port->state->port; + unsigned int ch, flag, lsr, max_count = SPRD_TIMEOUT; + + if (sp->rx_dma.enable) { + sprd_uart_dma_irq(port); + return; + } + + while ((serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK) && + max_count--) { + lsr = serial_in(port, SPRD_LSR); + ch = serial_in(port, SPRD_RXD); + flag = TTY_NORMAL; + port->icount.rx++; + + if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE | + SPRD_LSR_FE | SPRD_LSR_OE)) + if (handle_lsr_errors(port, &flag, &lsr)) + continue; + if (uart_handle_sysrq_char(port, ch)) + continue; + + uart_insert_char(port, lsr, SPRD_LSR_OE, ch, flag); + } + + tty_flip_buffer_push(tty); +} + +static inline void sprd_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + int count; + + if (port->x_char) { + serial_out(port, SPRD_TXD, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + sprd_stop_tx(port); + return; + } + + count = THLD_TX_EMPTY; + do { + serial_out(port, SPRD_TXD, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + sprd_stop_tx(port); +} + +/* this handles the interrupt from one port */ +static irqreturn_t sprd_handle_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned int ims; + + spin_lock(&port->lock); + + ims = serial_in(port, SPRD_IMSR); + + if (!ims) { + spin_unlock(&port->lock); + return IRQ_NONE; + } + + if (ims & SPRD_IMSR_TIMEOUT) + serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT); + + if (ims & SPRD_IMSR_BREAK_DETECT) + serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT); + + if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT | + SPRD_IMSR_TIMEOUT)) + sprd_rx(port); + + if (ims & SPRD_IMSR_TX_FIFO_EMPTY) + sprd_tx(port); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static void sprd_uart_dma_startup(struct uart_port *port, + struct sprd_uart_port *sp) +{ + int ret; + + sprd_request_dma(port); + if (!(sp->rx_dma.enable || sp->tx_dma.enable)) + return; + + ret = sprd_start_dma_rx(port); + if (ret) { + sp->rx_dma.enable = false; + dma_release_channel(sp->rx_dma.chn); + dev_warn(port->dev, "fail to start RX dma mode\n"); + } + + sprd_uart_dma_enable(port, true); +} + +static int sprd_startup(struct uart_port *port) +{ + int ret = 0; + unsigned int ien, fc; + unsigned int timeout; + struct sprd_uart_port *sp; + unsigned long flags; 
+ + serial_out(port, SPRD_CTL2, + THLD_TX_EMPTY << THLD_TX_EMPTY_SHIFT | THLD_RX_FULL); + + /* clear rx fifo */ + timeout = SPRD_TIMEOUT; + while (timeout-- && serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK) + serial_in(port, SPRD_RXD); + + /* clear tx fifo */ + timeout = SPRD_TIMEOUT; + while (timeout-- && serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK) + cpu_relax(); + + /* clear interrupt */ + serial_out(port, SPRD_IEN, 0); + serial_out(port, SPRD_ICLR, ~0); + + /* allocate irq */ + sp = container_of(port, struct sprd_uart_port, port); + snprintf(sp->name, sizeof(sp->name), "sprd_serial%d", port->line); + + sprd_uart_dma_startup(port, sp); + + ret = devm_request_irq(port->dev, port->irq, sprd_handle_irq, + IRQF_SHARED, sp->name, port); + if (ret) { + dev_err(port->dev, "fail to request serial irq %d, ret=%d\n", + port->irq, ret); + return ret; + } + fc = serial_in(port, SPRD_CTL1); + fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF; + serial_out(port, SPRD_CTL1, fc); + + /* enable interrupt */ + spin_lock_irqsave(&port->lock, flags); + ien = serial_in(port, SPRD_IEN); + ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT; + if (!sp->rx_dma.enable) + ien |= SPRD_IEN_RX_FULL; + serial_out(port, SPRD_IEN, ien); + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void sprd_shutdown(struct uart_port *port) +{ + sprd_release_dma(port); + serial_out(port, SPRD_IEN, 0); + serial_out(port, SPRD_ICLR, ~0); + devm_free_irq(port->dev, port->irq, port); +} + +static void sprd_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud, quot; + unsigned int lcr = 0, fc; + unsigned long flags; + + /* ask the core to calculate the divisor for us */ + baud = uart_get_baud_rate(port, termios, old, 0, SPRD_BAUD_IO_LIMIT); + + quot = port->uartclk / baud; + + /* set data length */ + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr |= SPRD_LCR_DATA_LEN5; + break; + case CS6: + lcr |= SPRD_LCR_DATA_LEN6; + break; + case CS7: + lcr |= SPRD_LCR_DATA_LEN7; + break; + case CS8: + default: + lcr |= SPRD_LCR_DATA_LEN8; + break; + } + + /* calculate stop bits */ + lcr &= ~(SPRD_LCR_STOP_1BIT | SPRD_LCR_STOP_2BIT); + if (termios->c_cflag & CSTOPB) + lcr |= SPRD_LCR_STOP_2BIT; + else + lcr |= SPRD_LCR_STOP_1BIT; + + /* calculate parity */ + lcr &= ~SPRD_LCR_PARITY; + termios->c_cflag &= ~CMSPAR; /* no support mark/space */ + if (termios->c_cflag & PARENB) { + lcr |= SPRD_LCR_PARITY_EN; + if (termios->c_cflag & PARODD) + lcr |= SPRD_LCR_ODD_PAR; + else + lcr |= SPRD_LCR_EVEN_PAR; + } + + spin_lock_irqsave(&port->lock, flags); + + /* update the per-port timeout */ + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = SPRD_LSR_OE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= SPRD_LSR_FE | SPRD_LSR_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= SPRD_LSR_BI; + + /* characters to ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= SPRD_LSR_PE | SPRD_LSR_FE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= SPRD_LSR_BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
+ */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= SPRD_LSR_OE; + } + + /* flow control */ + fc = serial_in(port, SPRD_CTL1); + fc &= ~(RX_HW_FLOW_CTL_THLD | RX_HW_FLOW_CTL_EN | TX_HW_FLOW_CTL_EN); + if (termios->c_cflag & CRTSCTS) { + fc |= RX_HW_FLOW_CTL_THLD; + fc |= RX_HW_FLOW_CTL_EN; + fc |= TX_HW_FLOW_CTL_EN; + } + + /* clock divider bit0~bit15 */ + serial_out(port, SPRD_CLKD0, quot & SPRD_CLKD0_MASK); + + /* clock divider bit16~bit20 */ + serial_out(port, SPRD_CLKD1, + (quot & SPRD_CLKD1_MASK) >> SPRD_CLKD1_SHIFT); + serial_out(port, SPRD_LCR, lcr); + fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF; + serial_out(port, SPRD_CTL1, fc); + + spin_unlock_irqrestore(&port->lock, flags); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static const char *sprd_type(struct uart_port *port) +{ + return "SPX"; +} + +static void sprd_release_port(struct uart_port *port) +{ + /* nothing to do */ +} + +static int sprd_request_port(struct uart_port *port) +{ + return 0; +} + +static void sprd_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_SPRD; +} + +static int sprd_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (ser->type != PORT_SPRD) + return -EINVAL; + if (port->irq != ser->irq) + return -EINVAL; + if (port->iotype != ser->io_type) + return -EINVAL; + return 0; +} + +static void sprd_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct sprd_uart_port *sup = + container_of(port, struct sprd_uart_port, port); + + switch (state) { + case UART_PM_STATE_ON: + clk_prepare_enable(sup->clk); + break; + case UART_PM_STATE_OFF: + clk_disable_unprepare(sup->clk); + break; + } +} + +#ifdef CONFIG_CONSOLE_POLL +static int sprd_poll_init(struct uart_port *port) +{ + if (port->state->pm_state != UART_PM_STATE_ON) { + sprd_pm(port, UART_PM_STATE_ON, 0); + port->state->pm_state = UART_PM_STATE_ON; + } + + return 0; +} + +static int sprd_poll_get_char(struct uart_port *port) +{ + while (!(serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK)) + cpu_relax(); + + return serial_in(port, SPRD_RXD); +} + +static void sprd_poll_put_char(struct uart_port *port, unsigned char ch) +{ + while (serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK) + cpu_relax(); + + serial_out(port, SPRD_TXD, ch); +} +#endif + +static const struct uart_ops serial_sprd_ops = { + .tx_empty = sprd_tx_empty, + .get_mctrl = sprd_get_mctrl, + .set_mctrl = sprd_set_mctrl, + .stop_tx = sprd_stop_tx, + .start_tx = sprd_start_tx, + .stop_rx = sprd_stop_rx, + .break_ctl = sprd_break_ctl, + .startup = sprd_startup, + .shutdown = sprd_shutdown, + .set_termios = sprd_set_termios, + .type = sprd_type, + .release_port = sprd_release_port, + .request_port = sprd_request_port, + .config_port = sprd_config_port, + .verify_port = sprd_verify_port, + .pm = sprd_pm, +#ifdef CONFIG_CONSOLE_POLL + .poll_init = sprd_poll_init, + .poll_get_char = sprd_poll_get_char, + .poll_put_char = sprd_poll_put_char, +#endif +}; + +#ifdef CONFIG_SERIAL_SPRD_CONSOLE +static void wait_for_xmitr(struct uart_port *port) +{ + unsigned int status, tmout = 10000; + + /* wait up to 10ms for the character(s) to be sent */ + do { + status = serial_in(port, SPRD_STS1); + if (--tmout == 0) + break; + udelay(1); + } while (status & SPRD_TX_FIFO_CNT_MASK); +} + +static void sprd_console_putchar(struct uart_port *port, unsigned char ch) +{ + wait_for_xmitr(port); + serial_out(port, SPRD_TXD, ch); +} + 
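
[Editorial aside, not part of the patch] For readers following the divisor programming in sprd_set_termios() above, the short stand-alone sketch below reproduces the same arithmetic outside the driver: the integer divisor uartclk / baud is split across the two clock-divider registers, low 16 bits into SPRD_CLKD0 and bits 16..20 into SPRD_CLKD1. The mask/shift constants and the 26 MHz source clock are illustrative assumptions inferred from the "bit0~bit15" / "bit16~bit20" comments, not the driver's actual macro definitions.

#include <stdio.h>

/*
 * Stand-alone sketch (not driver code) of the baud divisor split done in
 * sprd_set_termios(). The mask and shift values below are assumptions
 * matching the register comments; the real SPRD_CLKD* macros live earlier
 * in sprd_serial.c.
 */
#define EX_CLKD0_MASK	0x0000ffffu	/* divisor bits 0..15  */
#define EX_CLKD1_MASK	0x001f0000u	/* divisor bits 16..20 */
#define EX_CLKD1_SHIFT	16

static void split_divisor(unsigned int uartclk, unsigned int baud)
{
	unsigned int quot = uartclk / baud;	/* same integer divide as the driver */

	printf("baud %u: quot %u, CLKD0 0x%04x, CLKD1 0x%02x\n",
	       baud, quot, quot & EX_CLKD0_MASK,
	       (quot & EX_CLKD1_MASK) >> EX_CLKD1_SHIFT);
}

int main(void)
{
	split_divisor(26000000, 115200);	/* assumed 26 MHz source clock */
	split_divisor(26000000, 9600);
	return 0;
}
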
+static void sprd_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = &sprd_port[co->index]->port; + int locked = 1; + unsigned long flags; + + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + uart_console_write(port, s, count, sprd_console_putchar); + + /* wait for transmitter to become empty */ + wait_for_xmitr(port); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static int sprd_console_setup(struct console *co, char *options) +{ + struct sprd_uart_port *sprd_uart_port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= UART_NR_MAX || co->index < 0) + co->index = 0; + + sprd_uart_port = sprd_port[co->index]; + if (!sprd_uart_port || !sprd_uart_port->port.membase) { + pr_info("serial port %d not yet initialized\n", co->index); + return -ENODEV; + } + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&sprd_uart_port->port, co, baud, + parity, bits, flow); +} + +static struct uart_driver sprd_uart_driver; +static struct console sprd_console = { + .name = SPRD_TTY_NAME, + .write = sprd_console_write, + .device = uart_console_device, + .setup = sprd_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sprd_uart_driver, +}; + +static int __init sprd_serial_console_init(void) +{ + register_console(&sprd_console); + return 0; +} +console_initcall(sprd_serial_console_init); + +#define SPRD_CONSOLE (&sprd_console) + +/* Support for earlycon */ +static void sprd_putc(struct uart_port *port, unsigned char c) +{ + unsigned int timeout = SPRD_TIMEOUT; + + while (timeout-- && + !(readl(port->membase + SPRD_LSR) & SPRD_LSR_TX_OVER)) + cpu_relax(); + + writeb(c, port->membase + SPRD_TXD); +} + +static void sprd_early_write(struct console *con, const char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, sprd_putc); +} + +static int __init sprd_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = sprd_early_write; + return 0; +} +OF_EARLYCON_DECLARE(sprd_serial, "sprd,sc9836-uart", + sprd_early_console_setup); + +#else /* !CONFIG_SERIAL_SPRD_CONSOLE */ +#define SPRD_CONSOLE NULL +#endif + +static struct uart_driver sprd_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "sprd_serial", + .dev_name = SPRD_TTY_NAME, + .major = 0, + .minor = 0, + .nr = UART_NR_MAX, + .cons = SPRD_CONSOLE, +}; + +static int sprd_remove(struct platform_device *dev) +{ + struct sprd_uart_port *sup = platform_get_drvdata(dev); + + if (sup) { + uart_remove_one_port(&sprd_uart_driver, &sup->port); + sprd_port[sup->port.line] = NULL; + sprd_rx_free_buf(sup); + sprd_ports_num--; + } + + if (!sprd_ports_num) + uart_unregister_driver(&sprd_uart_driver); + + return 0; +} + +static bool sprd_uart_is_console(struct uart_port *uport) +{ + struct console *cons = sprd_uart_driver.cons; + + if ((cons && cons->index >= 0 && cons->index == uport->line) || + of_console_check(uport->dev->of_node, SPRD_TTY_NAME, uport->line)) + return true; + + return false; +} + +static int sprd_clk_init(struct uart_port *uport) +{ + struct clk *clk_uart, *clk_parent; + struct sprd_uart_port *u = container_of(uport, struct sprd_uart_port, port); + + clk_uart = devm_clk_get(uport->dev, "uart"); + if 
(IS_ERR(clk_uart)) { + dev_warn(uport->dev, "uart%d can't get uart clock\n", + uport->line); + clk_uart = NULL; + } + + clk_parent = devm_clk_get(uport->dev, "source"); + if (IS_ERR(clk_parent)) { + dev_warn(uport->dev, "uart%d can't get source clock\n", + uport->line); + clk_parent = NULL; + } + + if (!clk_uart || clk_set_parent(clk_uart, clk_parent)) + uport->uartclk = SPRD_DEFAULT_SOURCE_CLK; + else + uport->uartclk = clk_get_rate(clk_uart); + + u->clk = devm_clk_get(uport->dev, "enable"); + if (IS_ERR(u->clk)) { + if (PTR_ERR(u->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + dev_warn(uport->dev, "uart%d can't get enable clock\n", + uport->line); + + /* To keep console alive even if the error occurred */ + if (!sprd_uart_is_console(uport)) + return PTR_ERR(u->clk); + + u->clk = NULL; + } + + return 0; +} + +static int sprd_probe(struct platform_device *pdev) +{ + struct resource *res; + struct uart_port *up; + struct sprd_uart_port *sport; + int irq; + int index; + int ret; + + index = of_alias_get_id(pdev->dev.of_node, "serial"); + if (index < 0 || index >= UART_NR_MAX) { + dev_err(&pdev->dev, "got a wrong serial alias id %d\n", index); + return -EINVAL; + } + + sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); + if (!sport) + return -ENOMEM; + + up = &sport->port; + up->dev = &pdev->dev; + up->line = index; + up->type = PORT_SPRD; + up->iotype = UPIO_MEM; + up->uartclk = SPRD_DEF_RATE; + up->fifosize = SPRD_FIFO_SIZE; + up->ops = &serial_sprd_ops; + up->flags = UPF_BOOT_AUTOCONF; + up->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SPRD_CONSOLE); + + ret = sprd_clk_init(up); + if (ret) + return ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + up->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(up->membase)) + return PTR_ERR(up->membase); + + up->mapbase = res->start; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + up->irq = irq; + + /* + * Allocate one dma buffer to prepare for receive transfer, in case + * memory allocation failure at runtime. 
+ */ + ret = sprd_rx_alloc_buf(sport); + if (ret) + return ret; + + if (!sprd_ports_num) { + ret = uart_register_driver(&sprd_uart_driver); + if (ret < 0) { + pr_err("Failed to register SPRD-UART driver\n"); + goto free_rx_buf; + } + } + + sprd_ports_num++; + sprd_port[index] = sport; + + ret = uart_add_one_port(&sprd_uart_driver, up); + if (ret) + goto clean_port; + + platform_set_drvdata(pdev, up); + + return 0; + +clean_port: + sprd_port[index] = NULL; + if (--sprd_ports_num == 0) + uart_unregister_driver(&sprd_uart_driver); +free_rx_buf: + sprd_rx_free_buf(sport); + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int sprd_suspend(struct device *dev) +{ + struct sprd_uart_port *sup = dev_get_drvdata(dev); + + uart_suspend_port(&sprd_uart_driver, &sup->port); + + return 0; +} + +static int sprd_resume(struct device *dev) +{ + struct sprd_uart_port *sup = dev_get_drvdata(dev); + + uart_resume_port(&sprd_uart_driver, &sup->port); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sprd_pm_ops, sprd_suspend, sprd_resume); + +static const struct of_device_id serial_ids[] = { + {.compatible = "sprd,sc9836-uart",}, + {} +}; +MODULE_DEVICE_TABLE(of, serial_ids); + +static struct platform_driver sprd_platform_driver = { + .probe = sprd_probe, + .remove = sprd_remove, + .driver = { + .name = "sprd_serial", + .of_match_table = of_match_ptr(serial_ids), + .pm = &sprd_pm_ops, + }, +}; + +module_platform_driver(sprd_platform_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Spreadtrum SoC serial driver series"); diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c new file mode 100644 index 000000000..fcecea689 --- /dev/null +++ b/drivers/tty/serial/st-asc.c @@ -0,0 +1,1009 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * st-asc.c: ST Asynchronous serial controller (ASC) driver + * + * Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "st-asc" +#define ASC_SERIAL_NAME "ttyAS" +#define ASC_FIFO_SIZE 16 +#define ASC_MAX_PORTS 8 + +/* Pinctrl states */ +#define DEFAULT 0 +#define NO_HW_FLOWCTRL 1 + +struct asc_port { + struct uart_port port; + struct gpio_desc *rts; + struct clk *clk; + struct pinctrl *pinctrl; + struct pinctrl_state *states[2]; + unsigned int hw_flow_control:1; + unsigned int force_m1:1; +}; + +static struct asc_port asc_ports[ASC_MAX_PORTS]; +static struct uart_driver asc_uart_driver; + +/*---- UART Register definitions ------------------------------*/ + +/* Register offsets */ + +#define ASC_BAUDRATE 0x00 +#define ASC_TXBUF 0x04 +#define ASC_RXBUF 0x08 +#define ASC_CTL 0x0C +#define ASC_INTEN 0x10 +#define ASC_STA 0x14 +#define ASC_GUARDTIME 0x18 +#define ASC_TIMEOUT 0x1C +#define ASC_TXRESET 0x20 +#define ASC_RXRESET 0x24 +#define ASC_RETRIES 0x28 + +/* ASC_RXBUF */ +#define ASC_RXBUF_PE 0x100 +#define ASC_RXBUF_FE 0x200 +/* + * Some of status comes from higher bits of the character and some come from + * the status register. Combining both of them in to single status using dummy + * bits. 
+ */ +#define ASC_RXBUF_DUMMY_RX 0x10000 +#define ASC_RXBUF_DUMMY_BE 0x20000 +#define ASC_RXBUF_DUMMY_OE 0x40000 + +/* ASC_CTL */ + +#define ASC_CTL_MODE_MSK 0x0007 +#define ASC_CTL_MODE_8BIT 0x0001 +#define ASC_CTL_MODE_7BIT_PAR 0x0003 +#define ASC_CTL_MODE_9BIT 0x0004 +#define ASC_CTL_MODE_8BIT_WKUP 0x0005 +#define ASC_CTL_MODE_8BIT_PAR 0x0007 +#define ASC_CTL_STOP_MSK 0x0018 +#define ASC_CTL_STOP_HALFBIT 0x0000 +#define ASC_CTL_STOP_1BIT 0x0008 +#define ASC_CTL_STOP_1_HALFBIT 0x0010 +#define ASC_CTL_STOP_2BIT 0x0018 +#define ASC_CTL_PARITYODD 0x0020 +#define ASC_CTL_LOOPBACK 0x0040 +#define ASC_CTL_RUN 0x0080 +#define ASC_CTL_RXENABLE 0x0100 +#define ASC_CTL_SCENABLE 0x0200 +#define ASC_CTL_FIFOENABLE 0x0400 +#define ASC_CTL_CTSENABLE 0x0800 +#define ASC_CTL_BAUDMODE 0x1000 + +/* ASC_GUARDTIME */ + +#define ASC_GUARDTIME_MSK 0x00FF + +/* ASC_INTEN */ + +#define ASC_INTEN_RBE 0x0001 +#define ASC_INTEN_TE 0x0002 +#define ASC_INTEN_THE 0x0004 +#define ASC_INTEN_PE 0x0008 +#define ASC_INTEN_FE 0x0010 +#define ASC_INTEN_OE 0x0020 +#define ASC_INTEN_TNE 0x0040 +#define ASC_INTEN_TOI 0x0080 +#define ASC_INTEN_RHF 0x0100 + +/* ASC_RETRIES */ + +#define ASC_RETRIES_MSK 0x00FF + +/* ASC_RXBUF */ + +#define ASC_RXBUF_MSK 0x03FF + +/* ASC_STA */ + +#define ASC_STA_RBF 0x0001 +#define ASC_STA_TE 0x0002 +#define ASC_STA_THE 0x0004 +#define ASC_STA_PE 0x0008 +#define ASC_STA_FE 0x0010 +#define ASC_STA_OE 0x0020 +#define ASC_STA_TNE 0x0040 +#define ASC_STA_TOI 0x0080 +#define ASC_STA_RHF 0x0100 +#define ASC_STA_TF 0x0200 +#define ASC_STA_NKD 0x0400 + +/* ASC_TIMEOUT */ + +#define ASC_TIMEOUT_MSK 0x00FF + +/* ASC_TXBUF */ + +#define ASC_TXBUF_MSK 0x01FF + +/*---- Inline function definitions ---------------------------*/ + +static inline struct asc_port *to_asc_port(struct uart_port *port) +{ + return container_of(port, struct asc_port, port); +} + +static inline u32 asc_in(struct uart_port *port, u32 offset) +{ +#ifdef readl_relaxed + return readl_relaxed(port->membase + offset); +#else + return readl(port->membase + offset); +#endif +} + +static inline void asc_out(struct uart_port *port, u32 offset, u32 value) +{ +#ifdef writel_relaxed + writel_relaxed(value, port->membase + offset); +#else + writel(value, port->membase + offset); +#endif +} + +/* + * Some simple utility functions to enable and disable interrupts. + * Note that these need to be called with interrupts disabled. 
+ */ +static inline void asc_disable_tx_interrupts(struct uart_port *port) +{ + u32 intenable = asc_in(port, ASC_INTEN) & ~ASC_INTEN_THE; + asc_out(port, ASC_INTEN, intenable); + (void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */ +} + +static inline void asc_enable_tx_interrupts(struct uart_port *port) +{ + u32 intenable = asc_in(port, ASC_INTEN) | ASC_INTEN_THE; + asc_out(port, ASC_INTEN, intenable); +} + +static inline void asc_disable_rx_interrupts(struct uart_port *port) +{ + u32 intenable = asc_in(port, ASC_INTEN) & ~ASC_INTEN_RBE; + asc_out(port, ASC_INTEN, intenable); + (void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */ +} + +static inline void asc_enable_rx_interrupts(struct uart_port *port) +{ + u32 intenable = asc_in(port, ASC_INTEN) | ASC_INTEN_RBE; + asc_out(port, ASC_INTEN, intenable); +} + +static inline u32 asc_txfifo_is_empty(struct uart_port *port) +{ + return asc_in(port, ASC_STA) & ASC_STA_TE; +} + +static inline u32 asc_txfifo_is_half_empty(struct uart_port *port) +{ + return asc_in(port, ASC_STA) & ASC_STA_THE; +} + +static inline const char *asc_port_name(struct uart_port *port) +{ + return to_platform_device(port->dev)->name; +} + +/*----------------------------------------------------------------------*/ + +/* + * This section contains code to support the use of the ASC as a + * generic serial port. + */ + +static inline unsigned asc_hw_txroom(struct uart_port *port) +{ + u32 status = asc_in(port, ASC_STA); + + if (status & ASC_STA_THE) + return port->fifosize / 2; + else if (!(status & ASC_STA_TF)) + return 1; + + return 0; +} + +/* + * Start transmitting chars. + * This is called from both interrupt and task level. + * Either way interrupts are disabled. + */ +static void asc_transmit_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + int txroom; + unsigned char c; + + txroom = asc_hw_txroom(port); + + if ((txroom != 0) && port->x_char) { + c = port->x_char; + port->x_char = 0; + asc_out(port, ASC_TXBUF, c); + port->icount.tx++; + txroom = asc_hw_txroom(port); + } + + if (uart_tx_stopped(port)) { + /* + * We should try and stop the hardware here, but I + * don't think the ASC has any way to do that. + */ + asc_disable_tx_interrupts(port); + return; + } + + if (uart_circ_empty(xmit)) { + asc_disable_tx_interrupts(port); + return; + } + + if (txroom == 0) + return; + + do { + c = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + asc_out(port, ASC_TXBUF, c); + port->icount.tx++; + txroom--; + } while ((txroom > 0) && (!uart_circ_empty(xmit))); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + asc_disable_tx_interrupts(port); +} + +static void asc_receive_chars(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + unsigned long status, mode; + unsigned long c = 0; + char flag; + bool ignore_pe = false; + + /* + * Datasheet states: If the MODE field selects an 8-bit frame then + * this [parity error] bit is undefined. Software should ignore this + * bit when reading 8-bit frames. 
+ */ + mode = asc_in(port, ASC_CTL) & ASC_CTL_MODE_MSK; + if (mode == ASC_CTL_MODE_8BIT || mode == ASC_CTL_MODE_8BIT_PAR) + ignore_pe = true; + + if (irqd_is_wakeup_set(irq_get_irq_data(port->irq))) + pm_wakeup_event(tport->tty->dev, 0); + + while ((status = asc_in(port, ASC_STA)) & ASC_STA_RBF) { + c = asc_in(port, ASC_RXBUF) | ASC_RXBUF_DUMMY_RX; + flag = TTY_NORMAL; + port->icount.rx++; + + if (status & ASC_STA_OE || c & ASC_RXBUF_FE || + (c & ASC_RXBUF_PE && !ignore_pe)) { + + if (c & ASC_RXBUF_FE) { + if (c == (ASC_RXBUF_FE | ASC_RXBUF_DUMMY_RX)) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + c |= ASC_RXBUF_DUMMY_BE; + } else { + port->icount.frame++; + } + } else if (c & ASC_RXBUF_PE) { + port->icount.parity++; + } + /* + * Reading any data from the RX FIFO clears the + * overflow error condition. + */ + if (status & ASC_STA_OE) { + port->icount.overrun++; + c |= ASC_RXBUF_DUMMY_OE; + } + + c &= port->read_status_mask; + + if (c & ASC_RXBUF_DUMMY_BE) + flag = TTY_BREAK; + else if (c & ASC_RXBUF_PE) + flag = TTY_PARITY; + else if (c & ASC_RXBUF_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(port, c & 0xff)) + continue; + + uart_insert_char(port, c, ASC_RXBUF_DUMMY_OE, c & 0xff, flag); + } + + /* Tell the rest of the system the news. New characters! */ + tty_flip_buffer_push(tport); +} + +static irqreturn_t asc_interrupt(int irq, void *ptr) +{ + struct uart_port *port = ptr; + u32 status; + + spin_lock(&port->lock); + + status = asc_in(port, ASC_STA); + + if (status & ASC_STA_RBF) { + /* Receive FIFO not empty */ + asc_receive_chars(port); + } + + if ((status & ASC_STA_THE) && + (asc_in(port, ASC_INTEN) & ASC_INTEN_THE)) { + /* Transmitter FIFO at least half empty */ + asc_transmit_chars(port); + } + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +/*----------------------------------------------------------------------*/ + +/* + * UART Functions + */ + +static unsigned int asc_tx_empty(struct uart_port *port) +{ + return asc_txfifo_is_empty(port) ? TIOCSER_TEMT : 0; +} + +static void asc_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct asc_port *ascport = to_asc_port(port); + + /* + * This routine is used for seting signals of: DTR, DCD, CTS and RTS. + * We use ASC's hardware for CTS/RTS when hardware flow-control is + * enabled, however if the RTS line is required for another purpose, + * commonly controlled using HUP from userspace, then we need to toggle + * it manually, using GPIO. + * + * Some boards also have DTR and DCD implemented using PIO pins, code to + * do this should be hooked in here. + */ + + if (!ascport->rts) + return; + + /* If HW flow-control is enabled, we can't fiddle with the RTS line */ + if (asc_in(port, ASC_CTL) & ASC_CTL_CTSENABLE) + return; + + gpiod_set_value(ascport->rts, mctrl & TIOCM_RTS); +} + +static unsigned int asc_get_mctrl(struct uart_port *port) +{ + /* + * This routine is used for geting signals of: DTR, DCD, DSR, RI, + * and CTS/RTS + */ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +/* There are probably characters waiting to be transmitted. 
*/ +static void asc_start_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (!uart_circ_empty(xmit)) + asc_enable_tx_interrupts(port); +} + +/* Transmit stop */ +static void asc_stop_tx(struct uart_port *port) +{ + asc_disable_tx_interrupts(port); +} + +/* Receive stop */ +static void asc_stop_rx(struct uart_port *port) +{ + asc_disable_rx_interrupts(port); +} + +/* Handle breaks - ignored by us */ +static void asc_break_ctl(struct uart_port *port, int break_state) +{ + /* Nothing here yet .. */ +} + +/* + * Enable port for reception. + */ +static int asc_startup(struct uart_port *port) +{ + if (request_irq(port->irq, asc_interrupt, 0, + asc_port_name(port), port)) { + dev_err(port->dev, "cannot allocate irq.\n"); + return -ENODEV; + } + + asc_transmit_chars(port); + asc_enable_rx_interrupts(port); + + return 0; +} + +static void asc_shutdown(struct uart_port *port) +{ + asc_disable_tx_interrupts(port); + asc_disable_rx_interrupts(port); + free_irq(port->irq, port); +} + +static void asc_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct asc_port *ascport = to_asc_port(port); + unsigned long flags; + u32 ctl; + + switch (state) { + case UART_PM_STATE_ON: + clk_prepare_enable(ascport->clk); + break; + case UART_PM_STATE_OFF: + /* + * Disable the ASC baud rate generator, which is as close as + * we can come to turning it off. Note this is not called with + * the port spinlock held. + */ + spin_lock_irqsave(&port->lock, flags); + ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN; + asc_out(port, ASC_CTL, ctl); + spin_unlock_irqrestore(&port->lock, flags); + clk_disable_unprepare(ascport->clk); + break; + } +} + +static void asc_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct asc_port *ascport = to_asc_port(port); + struct gpio_desc *gpiod; + unsigned int baud; + u32 ctrl_val; + tcflag_t cflag; + unsigned long flags; + + /* Update termios to reflect hardware capabilities */ + termios->c_cflag &= ~(CMSPAR | + (ascport->hw_flow_control ? 0 : CRTSCTS)); + + port->uartclk = clk_get_rate(ascport->clk); + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + cflag = termios->c_cflag; + + spin_lock_irqsave(&port->lock, flags); + + /* read control register */ + ctrl_val = asc_in(port, ASC_CTL); + + /* stop serial port and reset value */ + asc_out(port, ASC_CTL, (ctrl_val & ~ASC_CTL_RUN)); + ctrl_val = ASC_CTL_RXENABLE | ASC_CTL_FIFOENABLE; + + /* reset fifo rx & tx */ + asc_out(port, ASC_TXRESET, 1); + asc_out(port, ASC_RXRESET, 1); + + /* set character length */ + if ((cflag & CSIZE) == CS7) { + ctrl_val |= ASC_CTL_MODE_7BIT_PAR; + cflag |= PARENB; + } else { + ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR : + ASC_CTL_MODE_8BIT; + cflag &= ~CSIZE; + cflag |= CS8; + } + termios->c_cflag = cflag; + + /* set stop bit */ + ctrl_val |= (cflag & CSTOPB) ? 
ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT; + + /* odd parity */ + if (cflag & PARODD) + ctrl_val |= ASC_CTL_PARITYODD; + + /* hardware flow control */ + if ((cflag & CRTSCTS)) { + ctrl_val |= ASC_CTL_CTSENABLE; + + /* If flow-control selected, stop handling RTS manually */ + if (ascport->rts) { + devm_gpiod_put(port->dev, ascport->rts); + ascport->rts = NULL; + + pinctrl_select_state(ascport->pinctrl, + ascport->states[DEFAULT]); + } + } else { + /* If flow-control disabled, it's safe to handle RTS manually */ + if (!ascport->rts && ascport->states[NO_HW_FLOWCTRL]) { + pinctrl_select_state(ascport->pinctrl, + ascport->states[NO_HW_FLOWCTRL]); + + gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW); + if (!IS_ERR(gpiod)) { + gpiod_set_consumer_name(gpiod, + port->dev->of_node->name); + ascport->rts = gpiod; + } + } + } + + if ((baud < 19200) && !ascport->force_m1) { + asc_out(port, ASC_BAUDRATE, (port->uartclk / (16 * baud))); + } else { + /* + * MODE 1: recommended for high bit rates (above 19.2K) + * + * baudrate * 16 * 2^16 + * ASCBaudRate = ------------------------ + * inputclock + * + * To keep maths inside 64bits, we divide inputclock by 16. + */ + u64 dividend = (u64)baud * (1 << 16); + + do_div(dividend, port->uartclk / 16); + asc_out(port, ASC_BAUDRATE, dividend); + ctrl_val |= ASC_CTL_BAUDMODE; + } + + uart_update_timeout(port, cflag, baud); + + ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE; + if (termios->c_iflag & INPCK) + ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE; + + /* + * Characters to ignore + */ + ascport->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + ascport->port.ignore_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE; + if (termios->c_iflag & IGNBRK) { + ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_OE; + } + + /* + * Ignore all characters if CREAD is not set. + */ + if (!(termios->c_cflag & CREAD)) + ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_RX; + + /* Set the timeout */ + asc_out(port, ASC_TIMEOUT, 20); + + /* write final value and enable port */ + asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN)); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *asc_type(struct uart_port *port) +{ + return (port->type == PORT_ASC) ? DRIVER_NAME : NULL; +} + +static void asc_release_port(struct uart_port *port) +{ +} + +static int asc_request_port(struct uart_port *port) +{ + return 0; +} + +/* + * Called when the port is opened, and UPF_BOOT_AUTOCONF flag is set + * Set type field if successful + */ +static void asc_config_port(struct uart_port *port, int flags) +{ + if ((flags & UART_CONFIG_TYPE)) + port->type = PORT_ASC; +} + +static int +asc_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + /* No user changeable parameters */ + return -EINVAL; +} + +#ifdef CONFIG_CONSOLE_POLL +/* + * Console polling routines for writing and reading from the uart while + * in an interrupt or debug context (i.e. kgdb). 
+ */ + +static int asc_get_poll_char(struct uart_port *port) +{ + if (!(asc_in(port, ASC_STA) & ASC_STA_RBF)) + return NO_POLL_CHAR; + + return asc_in(port, ASC_RXBUF); +} + +static void asc_put_poll_char(struct uart_port *port, unsigned char c) +{ + while (!asc_txfifo_is_half_empty(port)) + cpu_relax(); + asc_out(port, ASC_TXBUF, c); +} + +#endif /* CONFIG_CONSOLE_POLL */ + +/*---------------------------------------------------------------------*/ + +static const struct uart_ops asc_uart_ops = { + .tx_empty = asc_tx_empty, + .set_mctrl = asc_set_mctrl, + .get_mctrl = asc_get_mctrl, + .start_tx = asc_start_tx, + .stop_tx = asc_stop_tx, + .stop_rx = asc_stop_rx, + .break_ctl = asc_break_ctl, + .startup = asc_startup, + .shutdown = asc_shutdown, + .set_termios = asc_set_termios, + .type = asc_type, + .release_port = asc_release_port, + .request_port = asc_request_port, + .config_port = asc_config_port, + .verify_port = asc_verify_port, + .pm = asc_pm, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = asc_get_poll_char, + .poll_put_char = asc_put_poll_char, +#endif /* CONFIG_CONSOLE_POLL */ +}; + +static int asc_init_port(struct asc_port *ascport, + struct platform_device *pdev) +{ + struct uart_port *port = &ascport->port; + struct resource *res; + int ret; + + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF; + port->ops = &asc_uart_ops; + port->fifosize = ASC_FIFO_SIZE; + port->dev = &pdev->dev; + port->irq = platform_get_irq(pdev, 0); + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ST_ASC_CONSOLE); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + port->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(port->membase)) + return PTR_ERR(port->membase); + port->mapbase = res->start; + + spin_lock_init(&port->lock); + + ascport->clk = devm_clk_get(&pdev->dev, NULL); + + if (WARN_ON(IS_ERR(ascport->clk))) + return -EINVAL; + /* ensure that clk rate is correct by enabling the clk */ + clk_prepare_enable(ascport->clk); + ascport->port.uartclk = clk_get_rate(ascport->clk); + WARN_ON(ascport->port.uartclk == 0); + clk_disable_unprepare(ascport->clk); + + ascport->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(ascport->pinctrl)) { + ret = PTR_ERR(ascport->pinctrl); + dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret); + return ret; + } + + ascport->states[DEFAULT] = + pinctrl_lookup_state(ascport->pinctrl, "default"); + if (IS_ERR(ascport->states[DEFAULT])) { + ret = PTR_ERR(ascport->states[DEFAULT]); + dev_err(&pdev->dev, + "Failed to look up Pinctrl state 'default': %d\n", ret); + return ret; + } + + /* "no-hw-flowctrl" state is optional */ + ascport->states[NO_HW_FLOWCTRL] = + pinctrl_lookup_state(ascport->pinctrl, "no-hw-flowctrl"); + if (IS_ERR(ascport->states[NO_HW_FLOWCTRL])) + ascport->states[NO_HW_FLOWCTRL] = NULL; + + return 0; +} + +static struct asc_port *asc_of_get_asc_port(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int id; + + if (!np) + return NULL; + + id = of_alias_get_id(np, "serial"); + if (id < 0) + id = of_alias_get_id(np, ASC_SERIAL_NAME); + + if (id < 0) + id = 0; + + if (WARN_ON(id >= ASC_MAX_PORTS)) + return NULL; + + asc_ports[id].hw_flow_control = of_property_read_bool(np, + "uart-has-rtscts"); + asc_ports[id].force_m1 = of_property_read_bool(np, "st,force_m1"); + asc_ports[id].port.line = id; + asc_ports[id].rts = NULL; + + return &asc_ports[id]; +} + +#ifdef CONFIG_OF +static const struct of_device_id asc_match[] = { + { .compatible = "st,asc", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, asc_match); 
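
[Editorial aside, not part of the patch] To make the MODE 1 formula in asc_set_termios() above concrete, here is a minimal stand-alone sketch of the same calculation: ASC_BAUDRATE = baud * 16 * 2^16 / inputclock, evaluated as (baud << 16) / (inputclock / 16) so the intermediate product stays within 64 bits, mirroring the do_div() sequence in the driver. The 200 MHz input clock is an arbitrary example and the helper name is hypothetical.

#include <stdio.h>

/*
 * Stand-alone sketch (not driver code) of the ASC MODE 1 baud-rate value:
 *
 *     ASC_BAUDRATE = baud * 16 * 2^16 / inputclock
 *
 * rearranged as (baud << 16) / (inputclock / 16), as asc_set_termios()
 * does with do_div(). Example values are arbitrary.
 */
static unsigned int asc_mode1_brr(unsigned int uartclk, unsigned int baud)
{
	unsigned long long dividend = (unsigned long long)baud << 16;

	return (unsigned int)(dividend / (uartclk / 16));
}

int main(void)
{
	printf("115200 baud: BRR = %u\n", asc_mode1_brr(200000000, 115200));
	printf("230400 baud: BRR = %u\n", asc_mode1_brr(200000000, 230400));
	return 0;
}
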
+#endif + +static int asc_serial_probe(struct platform_device *pdev) +{ + int ret; + struct asc_port *ascport; + + ascport = asc_of_get_asc_port(pdev); + if (!ascport) + return -ENODEV; + + ret = asc_init_port(ascport, pdev); + if (ret) + return ret; + + ret = uart_add_one_port(&asc_uart_driver, &ascport->port); + if (ret) + return ret; + + platform_set_drvdata(pdev, &ascport->port); + + return 0; +} + +static int asc_serial_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + + return uart_remove_one_port(&asc_uart_driver, port); +} + +#ifdef CONFIG_PM_SLEEP +static int asc_serial_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + + return uart_suspend_port(&asc_uart_driver, port); +} + +static int asc_serial_resume(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + + return uart_resume_port(&asc_uart_driver, port); +} + +#endif /* CONFIG_PM_SLEEP */ + +/*----------------------------------------------------------------------*/ + +#ifdef CONFIG_SERIAL_ST_ASC_CONSOLE +static void asc_console_putchar(struct uart_port *port, unsigned char ch) +{ + unsigned int timeout = 1000000; + + /* Wait for upto 1 second in case flow control is stopping us. */ + while (--timeout && !asc_txfifo_is_half_empty(port)) + udelay(1); + + asc_out(port, ASC_TXBUF, ch); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + */ + +static void asc_console_write(struct console *co, const char *s, unsigned count) +{ + struct uart_port *port = &asc_ports[co->index].port; + unsigned long flags; + unsigned long timeout = 1000000; + int locked = 1; + u32 intenable; + + if (port->sysrq) + locked = 0; /* asc_interrupt has already claimed the lock */ + else if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + /* + * Disable interrupts so we don't get the IRQ line bouncing + * up and down while interrupts are disabled. + */ + intenable = asc_in(port, ASC_INTEN); + asc_out(port, ASC_INTEN, 0); + (void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */ + + uart_console_write(port, s, count, asc_console_putchar); + + while (--timeout && !asc_txfifo_is_empty(port)) + udelay(1); + + asc_out(port, ASC_INTEN, intenable); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static int asc_console_setup(struct console *co, char *options) +{ + struct asc_port *ascport; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= ASC_MAX_PORTS) + return -ENODEV; + + ascport = &asc_ports[co->index]; + + /* + * This driver does not support early console initialization + * (use ARM early printk support instead), so we only expect + * this to be called during the uart port registration when the + * driver gets probed and the port should be mapped at that point. 
+ */ + if (ascport->port.mapbase == 0 || ascport->port.membase == NULL) + return -ENXIO; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&ascport->port, co, baud, parity, bits, flow); +} + +static struct console asc_console = { + .name = ASC_SERIAL_NAME, + .device = uart_console_device, + .write = asc_console_write, + .setup = asc_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &asc_uart_driver, +}; + +#define ASC_SERIAL_CONSOLE (&asc_console) + +#else +#define ASC_SERIAL_CONSOLE NULL +#endif /* CONFIG_SERIAL_ST_ASC_CONSOLE */ + +static struct uart_driver asc_uart_driver = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = ASC_SERIAL_NAME, + .major = 0, + .minor = 0, + .nr = ASC_MAX_PORTS, + .cons = ASC_SERIAL_CONSOLE, +}; + +static const struct dev_pm_ops asc_serial_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(asc_serial_suspend, asc_serial_resume) +}; + +static struct platform_driver asc_serial_driver = { + .probe = asc_serial_probe, + .remove = asc_serial_remove, + .driver = { + .name = DRIVER_NAME, + .pm = &asc_serial_pm_ops, + .of_match_table = of_match_ptr(asc_match), + }, +}; + +static int __init asc_init(void) +{ + int ret; + static const char banner[] __initconst = + KERN_INFO "STMicroelectronics ASC driver initialized\n"; + + printk(banner); + + ret = uart_register_driver(&asc_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&asc_serial_driver); + if (ret) + uart_unregister_driver(&asc_uart_driver); + + return ret; +} + +static void __exit asc_exit(void) +{ + platform_driver_unregister(&asc_serial_driver); + uart_unregister_driver(&asc_uart_driver); +} + +module_init(asc_init); +module_exit(asc_exit); + +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("STMicroelectronics (R&D) Limited"); +MODULE_DESCRIPTION("STMicroelectronics ASC serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c new file mode 100644 index 000000000..2a9c40588 --- /dev/null +++ b/drivers/tty/serial/stm32-usart.c @@ -0,0 +1,2113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Maxime Coquelin 2015 + * Copyright (C) STMicroelectronics SA 2017 + * Authors: Maxime Coquelin + * Gerald Baeza + * Erwan Le Ray + * + * Inspired by st-asc.c from STMicroelectronics (c) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "serial_mctrl_gpio.h" +#include "stm32-usart.h" + + +/* Register offsets */ +static struct stm32_usart_info __maybe_unused stm32f4_info = { + .ofs = { + .isr = 0x00, + .rdr = 0x04, + .tdr = 0x04, + .brr = 0x08, + .cr1 = 0x0c, + .cr2 = 0x10, + .cr3 = 0x14, + .gtpr = 0x18, + .rtor = UNDEF_REG, + .rqr = UNDEF_REG, + .icr = UNDEF_REG, + }, + .cfg = { + .uart_enable_bit = 13, + .has_7bits_data = false, + .fifosize = 1, + } +}; + +static struct stm32_usart_info __maybe_unused stm32f7_info = { + .ofs = { + .cr1 = 0x00, + .cr2 = 0x04, + .cr3 = 0x08, + .brr = 0x0c, + .gtpr = 0x10, + .rtor = 0x14, + .rqr = 0x18, + .isr = 0x1c, + .icr = 0x20, + .rdr = 0x24, + .tdr = 0x28, + }, + .cfg = { + .uart_enable_bit = 0, + .has_7bits_data = true, + .has_swap = true, + .fifosize = 1, + } +}; + +static struct stm32_usart_info __maybe_unused stm32h7_info = { + .ofs = { + .cr1 = 0x00, + .cr2 = 0x04, + .cr3 = 0x08, + .brr = 0x0c, + .gtpr = 
0x10, + .rtor = 0x14, + .rqr = 0x18, + .isr = 0x1c, + .icr = 0x20, + .rdr = 0x24, + .tdr = 0x28, + }, + .cfg = { + .uart_enable_bit = 0, + .has_7bits_data = true, + .has_swap = true, + .has_wakeup = true, + .has_fifo = true, + .fifosize = 16, + } +}; + +static void stm32_usart_stop_tx(struct uart_port *port); +static void stm32_usart_transmit_chars(struct uart_port *port); +static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch); + +static inline struct stm32_port *to_stm32_port(struct uart_port *port) +{ + return container_of(port, struct stm32_port, port); +} + +static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits) +{ + u32 val; + + val = readl_relaxed(port->membase + reg); + val |= bits; + writel_relaxed(val, port->membase + reg); +} + +static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits) +{ + u32 val; + + val = readl_relaxed(port->membase + reg); + val &= ~bits; + writel_relaxed(val, port->membase + reg); +} + +static unsigned int stm32_usart_tx_empty(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) + return TIOCSER_TEMT; + + return 0; +} + +static void stm32_usart_rs485_rts_enable(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + struct serial_rs485 *rs485conf = &port->rs485; + + if (stm32_port->hw_flow_control || + !(rs485conf->flags & SER_RS485_ENABLED)) + return; + + if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { + mctrl_gpio_set(stm32_port->gpios, + stm32_port->port.mctrl | TIOCM_RTS); + } else { + mctrl_gpio_set(stm32_port->gpios, + stm32_port->port.mctrl & ~TIOCM_RTS); + } +} + +static void stm32_usart_rs485_rts_disable(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + struct serial_rs485 *rs485conf = &port->rs485; + + if (stm32_port->hw_flow_control || + !(rs485conf->flags & SER_RS485_ENABLED)) + return; + + if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { + mctrl_gpio_set(stm32_port->gpios, + stm32_port->port.mctrl & ~TIOCM_RTS); + } else { + mctrl_gpio_set(stm32_port->gpios, + stm32_port->port.mctrl | TIOCM_RTS); + } +} + +static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, + u32 delay_DDE, u32 baud) +{ + u32 rs485_deat_dedt; + u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT); + bool over8; + + *cr3 |= USART_CR3_DEM; + over8 = *cr1 & USART_CR1_OVER8; + + *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); + + if (over8) + rs485_deat_dedt = delay_ADE * baud * 8; + else + rs485_deat_dedt = delay_ADE * baud * 16; + + rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); + rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? + rs485_deat_dedt_max : rs485_deat_dedt; + rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) & + USART_CR1_DEAT_MASK; + *cr1 |= rs485_deat_dedt; + + if (over8) + rs485_deat_dedt = delay_DDE * baud * 8; + else + rs485_deat_dedt = delay_DDE * baud * 16; + + rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); + rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 
+ rs485_deat_dedt_max : rs485_deat_dedt; + rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) & + USART_CR1_DEDT_MASK; + *cr1 |= rs485_deat_dedt; +} + +static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios, + struct serial_rs485 *rs485conf) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + u32 usartdiv, baud, cr1, cr3; + bool over8; + + stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); + + rs485conf->flags |= SER_RS485_RX_DURING_TX; + + if (rs485conf->flags & SER_RS485_ENABLED) { + cr1 = readl_relaxed(port->membase + ofs->cr1); + cr3 = readl_relaxed(port->membase + ofs->cr3); + usartdiv = readl_relaxed(port->membase + ofs->brr); + usartdiv = usartdiv & GENMASK(15, 0); + over8 = cr1 & USART_CR1_OVER8; + + if (over8) + usartdiv = usartdiv | (usartdiv & GENMASK(4, 0)) + << USART_BRR_04_R_SHIFT; + + baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv); + stm32_usart_config_reg_rs485(&cr1, &cr3, + rs485conf->delay_rts_before_send, + rs485conf->delay_rts_after_send, + baud); + + if (rs485conf->flags & SER_RS485_RTS_ON_SEND) + cr3 &= ~USART_CR3_DEP; + else + cr3 |= USART_CR3_DEP; + + writel_relaxed(cr3, port->membase + ofs->cr3); + writel_relaxed(cr1, port->membase + ofs->cr1); + } else { + stm32_usart_clr_bits(port, ofs->cr3, + USART_CR3_DEM | USART_CR3_DEP); + stm32_usart_clr_bits(port, ofs->cr1, + USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); + } + + stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); + + /* Adjust RTS polarity in case it's driven in software */ + if (stm32_usart_tx_empty(port)) + stm32_usart_rs485_rts_disable(port); + else + stm32_usart_rs485_rts_enable(port); + + return 0; +} + +static int stm32_usart_init_rs485(struct uart_port *port, + struct platform_device *pdev) +{ + struct serial_rs485 *rs485conf = &port->rs485; + + rs485conf->flags = 0; + rs485conf->delay_rts_before_send = 0; + rs485conf->delay_rts_after_send = 0; + + if (!pdev->dev.of_node) + return -ENODEV; + + return uart_get_rs485_mode(port); +} + +static bool stm32_usart_rx_dma_enabled(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + if (!stm32_port->rx_ch) + return false; + + return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR); +} + +/* Return true when data is pending (in pio mode), and false when no data is pending. 
*/ +static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + *sr = readl_relaxed(port->membase + ofs->isr); + /* Get pending characters in RDR or FIFO */ + if (*sr & USART_SR_RXNE) { + /* Get all pending characters from the RDR or the FIFO when using interrupts */ + if (!stm32_usart_rx_dma_enabled(port)) + return true; + + /* Handle only RX data errors when using DMA */ + if (*sr & USART_SR_ERR_MASK) + return true; + } + + return false; +} + +static unsigned long stm32_usart_get_char_pio(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + unsigned long c; + + c = readl_relaxed(port->membase + ofs->rdr); + /* Apply RDR data mask */ + c &= stm32_port->rdr_mask; + + return c; +} + +static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + unsigned long c; + unsigned int size = 0; + u32 sr; + char flag; + + while (stm32_usart_pending_rx_pio(port, &sr)) { + sr |= USART_SR_DUMMY_RX; + flag = TTY_NORMAL; + + /* + * Status bits has to be cleared before reading the RDR: + * In FIFO mode, reading the RDR will pop the next data + * (if any) along with its status bits into the SR. + * Not doing so leads to misalignement between RDR and SR, + * and clear status bits of the next rx data. + * + * Clear errors flags for stm32f7 and stm32h7 compatible + * devices. On stm32f4 compatible devices, the error bit is + * cleared by the sequence [read SR - read DR]. + */ + if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG) + writel_relaxed(sr & USART_SR_ERR_MASK, + port->membase + ofs->icr); + + c = stm32_usart_get_char_pio(port); + port->icount.rx++; + size++; + if (sr & USART_SR_ERR_MASK) { + if (sr & USART_SR_ORE) { + port->icount.overrun++; + } else if (sr & USART_SR_PE) { + port->icount.parity++; + } else if (sr & USART_SR_FE) { + /* Break detection if character is null */ + if (!c) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } else { + port->icount.frame++; + } + } + + sr &= port->read_status_mask; + + if (sr & USART_SR_PE) { + flag = TTY_PARITY; + } else if (sr & USART_SR_FE) { + if (!c) + flag = TTY_BREAK; + else + flag = TTY_FRAME; + } + } + + if (uart_prepare_sysrq_char(port, c)) + continue; + uart_insert_char(port, sr, USART_SR_ORE, c, flag); + } + + return size; +} + +static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + struct tty_port *ttyport = &stm32_port->port.state->port; + unsigned char *dma_start; + int dma_count, i; + + dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res); + + /* + * Apply rdr_mask on buffer in order to mask parity bit. + * This loop is useless in cs8 mode because DMA copies only + * 8 bits and already ignores parity bit. 
+ */ + if (!(stm32_port->rdr_mask == (BIT(8) - 1))) + for (i = 0; i < dma_size; i++) + *(dma_start + i) &= stm32_port->rdr_mask; + + dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size); + port->icount.rx += dma_count; + if (dma_count != dma_size) + port->icount.buf_overrun++; + stm32_port->last_res -= dma_count; + if (stm32_port->last_res == 0) + stm32_port->last_res = RX_BUF_L; +} + +static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + unsigned int dma_size, size = 0; + + /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */ + if (stm32_port->rx_dma_state.residue > stm32_port->last_res) { + /* Conditional first part: from last_res to end of DMA buffer */ + dma_size = stm32_port->last_res; + stm32_usart_push_buffer_dma(port, dma_size); + size = dma_size; + } + + dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue; + stm32_usart_push_buffer_dma(port, dma_size); + size += dma_size; + + return size; +} + +static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + enum dma_status rx_dma_status; + u32 sr; + unsigned int size = 0; + + if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) { + rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, + stm32_port->rx_ch->cookie, + &stm32_port->rx_dma_state); + if (rx_dma_status == DMA_IN_PROGRESS) { + /* Empty DMA buffer */ + size = stm32_usart_receive_chars_dma(port); + sr = readl_relaxed(port->membase + ofs->isr); + if (sr & USART_SR_ERR_MASK) { + /* Disable DMA request line */ + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); + + /* Switch to PIO mode to handle the errors */ + size += stm32_usart_receive_chars_pio(port); + + /* Switch back to DMA mode */ + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); + } + } else { + /* Disable RX DMA */ + dmaengine_terminate_async(stm32_port->rx_ch); + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); + /* Fall back to interrupt mode */ + dev_dbg(port->dev, "DMA error, fallback to irq mode\n"); + size = stm32_usart_receive_chars_pio(port); + } + } else { + size = stm32_usart_receive_chars_pio(port); + } + + return size; +} + +static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port) +{ + dmaengine_terminate_async(stm32_port->tx_ch); + stm32_port->tx_dma_busy = false; +} + +static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port) +{ + /* + * We cannot use the function "dmaengine_tx_status" to know the + * status of DMA. This function does not show if the "dma complete" + * callback of the DMA transaction has been called. So we prefer + * to use "tx_dma_busy" flag to prevent dual DMA transaction at the + * same time. 
+ */ + return stm32_port->tx_dma_busy; +} + +static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port) +{ + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT); +} + +static void stm32_usart_tx_dma_complete(void *arg) +{ + struct uart_port *port = arg; + struct stm32_port *stm32port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + unsigned long flags; + + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + stm32_usart_tx_dma_terminate(stm32port); + + /* Let's see if we have pending data to send */ + spin_lock_irqsave(&port->lock, flags); + stm32_usart_transmit_chars(port); + spin_unlock_irqrestore(&port->lock, flags); +} + +static void stm32_usart_tx_interrupt_enable(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + /* + * Enables TX FIFO threashold irq when FIFO is enabled, + * or TX empty irq when FIFO is disabled + */ + if (stm32_port->fifoen && stm32_port->txftcfg >= 0) + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE); + else + stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE); +} + +static void stm32_usart_tc_interrupt_enable(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE); +} + +static void stm32_usart_rx_dma_complete(void *arg) +{ + struct uart_port *port = arg; + struct tty_port *tport = &port->state->port; + unsigned int size; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + size = stm32_usart_receive_chars(port, false); + uart_unlock_and_check_sysrq_irqrestore(port, flags); + if (size) + tty_flip_buffer_push(tport); +} + +static void stm32_usart_tx_interrupt_disable(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + if (stm32_port->fifoen && stm32_port->txftcfg >= 0) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE); + else + stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE); +} + +static void stm32_usart_tc_interrupt_disable(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE); +} + +static void stm32_usart_transmit_chars_pio(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct circ_buf *xmit = &port->state->xmit; + + if (stm32_usart_tx_dma_enabled(stm32_port)) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + while (!uart_circ_empty(xmit)) { + /* Check that TDR is empty before filling FIFO */ + if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) + break; + writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + /* rely on TXE irq (mask or unmask) for sending remaining data */ + if (uart_circ_empty(xmit)) + stm32_usart_tx_interrupt_disable(port); + else + stm32_usart_tx_interrupt_enable(port); +} + +static void stm32_usart_transmit_chars_dma(struct uart_port *port) +{ + struct stm32_port *stm32port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = 
&stm32port->info->ofs; + struct circ_buf *xmit = &port->state->xmit; + struct dma_async_tx_descriptor *desc = NULL; + unsigned int count; + + if (stm32_usart_tx_dma_started(stm32port)) { + if (!stm32_usart_tx_dma_enabled(stm32port)) + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); + return; + } + + count = uart_circ_chars_pending(xmit); + + if (count > TX_BUF_L) + count = TX_BUF_L; + + if (xmit->tail < xmit->head) { + memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count); + } else { + size_t one = UART_XMIT_SIZE - xmit->tail; + size_t two; + + if (one > count) + one = count; + two = count - one; + + memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one); + if (two) + memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two); + } + + desc = dmaengine_prep_slave_single(stm32port->tx_ch, + stm32port->tx_dma_buf, + count, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT); + + if (!desc) + goto fallback_err; + + /* + * Set "tx_dma_busy" flag. This flag will be released when + * dmaengine_terminate_async will be called. This flag helps + * transmit_chars_dma not to start another DMA transaction + * if the callback of the previous is not yet called. + */ + stm32port->tx_dma_busy = true; + + desc->callback = stm32_usart_tx_dma_complete; + desc->callback_param = port; + + /* Push current DMA TX transaction in the pending queue */ + if (dma_submit_error(dmaengine_submit(desc))) { + /* dma no yet started, safe to free resources */ + stm32_usart_tx_dma_terminate(stm32port); + goto fallback_err; + } + + /* Issue pending DMA TX requests */ + dma_async_issue_pending(stm32port->tx_ch); + + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); + + xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + port->icount.tx += count; + return; + +fallback_err: + stm32_usart_transmit_chars_pio(port); +} + +static void stm32_usart_transmit_chars(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct circ_buf *xmit = &port->state->xmit; + u32 isr; + int ret; + + if (!stm32_port->hw_flow_control && + port->rs485.flags & SER_RS485_ENABLED && + (port->x_char || + !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) { + stm32_usart_tc_interrupt_disable(port); + stm32_usart_rs485_rts_enable(port); + } + + if (port->x_char) { + if (stm32_usart_tx_dma_started(stm32_port) && + stm32_usart_tx_dma_enabled(stm32_port)) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + /* Check that TDR is empty before filling FIFO */ + ret = + readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, + isr, + (isr & USART_SR_TXE), + 10, 1000); + if (ret) + dev_warn(port->dev, "1 character may be erased\n"); + + writel_relaxed(port->x_char, port->membase + ofs->tdr); + port->x_char = 0; + port->icount.tx++; + if (stm32_usart_tx_dma_started(stm32_port)) + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + stm32_usart_tx_interrupt_disable(port); + return; + } + + if (ofs->icr == UNDEF_REG) + stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC); + else + writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr); + + if (stm32_port->tx_ch) + stm32_usart_transmit_chars_dma(port); + else + stm32_usart_transmit_chars_pio(port); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) { + stm32_usart_tx_interrupt_disable(port); + if (!stm32_port->hw_flow_control && + port->rs485.flags & SER_RS485_ENABLED) { + 
stm32_port->txdone = true; + stm32_usart_tc_interrupt_enable(port); + } + } +} + +static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) +{ + struct uart_port *port = ptr; + struct tty_port *tport = &port->state->port; + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + u32 sr; + unsigned int size; + + sr = readl_relaxed(port->membase + ofs->isr); + + if (!stm32_port->hw_flow_control && + port->rs485.flags & SER_RS485_ENABLED && + (sr & USART_SR_TC)) { + stm32_usart_tc_interrupt_disable(port); + stm32_usart_rs485_rts_disable(port); + } + + if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) + writel_relaxed(USART_ICR_RTOCF, + port->membase + ofs->icr); + + if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) { + /* Clear wake up flag and disable wake up interrupt */ + writel_relaxed(USART_ICR_WUCF, + port->membase + ofs->icr); + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE); + if (irqd_is_wakeup_set(irq_get_irq_data(port->irq))) + pm_wakeup_event(tport->tty->dev, 0); + } + + /* + * rx errors in dma mode has to be handled ASAP to avoid overrun as the DMA request + * line has been masked by HW and rx data are stacking in FIFO. + */ + if (!stm32_port->throttled) { + if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) || + ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) { + spin_lock(&port->lock); + size = stm32_usart_receive_chars(port, false); + uart_unlock_and_check_sysrq(port); + if (size) + tty_flip_buffer_push(tport); + } + } + + if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) { + spin_lock(&port->lock); + stm32_usart_transmit_chars(port); + spin_unlock(&port->lock); + } + + /* Receiver timeout irq for DMA RX */ + if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) { + spin_lock(&port->lock); + size = stm32_usart_receive_chars(port, false); + uart_unlock_and_check_sysrq(port); + if (size) + tty_flip_buffer_push(tport); + } + + return IRQ_HANDLED; +} + +static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE); + else + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE); + + mctrl_gpio_set(stm32_port->gpios, mctrl); +} + +static unsigned int stm32_usart_get_mctrl(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + unsigned int ret; + + /* This routine is used to get signals of: DCD, DSR, RI, and CTS */ + ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; + + return mctrl_gpio_get(stm32_port->gpios, &ret); +} + +static void stm32_usart_enable_ms(struct uart_port *port) +{ + mctrl_gpio_enable_ms(to_stm32_port(port)->gpios); +} + +static void stm32_usart_disable_ms(struct uart_port *port) +{ + mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); +} + +/* Transmit stop */ +static void stm32_usart_stop_tx(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + stm32_usart_tx_interrupt_disable(port); + if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port)) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + stm32_usart_rs485_rts_disable(port); +} + +/* There are probably characters waiting to be transmitted. 
*/ +static void stm32_usart_start_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (uart_circ_empty(xmit) && !port->x_char) { + stm32_usart_rs485_rts_disable(port); + return; + } + + stm32_usart_rs485_rts_enable(port); + + stm32_usart_transmit_chars(port); +} + +/* Flush the transmit buffer. */ +static void stm32_usart_flush_buffer(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + if (stm32_port->tx_ch) { + stm32_usart_tx_dma_terminate(stm32_port); + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + } +} + +/* Throttle the remote when input buffer is about to overflow. */ +static void stm32_usart_throttle(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* + * Disable DMA request line if enabled, so the RX data gets queued into the FIFO. + * Hardware flow control is triggered when RX FIFO is full. + */ + if (stm32_usart_rx_dma_enabled(port)) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); + + stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); + if (stm32_port->cr3_irq) + stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); + + stm32_port->throttled = true; + spin_unlock_irqrestore(&port->lock, flags); +} + +/* Unthrottle the remote, the input buffer can now accept data. */ +static void stm32_usart_unthrottle(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq); + if (stm32_port->cr3_irq) + stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq); + + /* + * Switch back to DMA mode (re-enable DMA request line). + * Hardware flow control is stopped when FIFO is not full any more. + */ + if (stm32_port->rx_ch) + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); + + stm32_port->throttled = false; + spin_unlock_irqrestore(&port->lock, flags); +} + +/* Receive stop */ +static void stm32_usart_stop_rx(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + /* Disable DMA request line. 
*/ + if (stm32_port->rx_ch) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); + + stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); + if (stm32_port->cr3_irq) + stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); +} + +/* Handle breaks - ignored by us */ +static void stm32_usart_break_ctl(struct uart_port *port, int break_state) +{ +} + +static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct dma_async_tx_descriptor *desc; + int ret; + + stm32_port->last_res = RX_BUF_L; + /* Prepare a DMA cyclic transaction */ + desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch, + stm32_port->rx_dma_buf, + RX_BUF_L, RX_BUF_P, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(port->dev, "rx dma prep cyclic failed\n"); + return -ENODEV; + } + + desc->callback = stm32_usart_rx_dma_complete; + desc->callback_param = port; + + /* Push current DMA transaction in the pending queue */ + ret = dma_submit_error(dmaengine_submit(desc)); + if (ret) { + dmaengine_terminate_sync(stm32_port->rx_ch); + return ret; + } + + /* Issue pending DMA requests */ + dma_async_issue_pending(stm32_port->rx_ch); + + /* + * DMA request line not re-enabled at resume when port is throttled. + * It will be re-enabled by unthrottle ops. + */ + if (!stm32_port->throttled) + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); + + return 0; +} + +static int stm32_usart_startup(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + const char *name = to_platform_device(port->dev)->name; + u32 val; + int ret; + + ret = request_irq(port->irq, stm32_usart_interrupt, + IRQF_NO_SUSPEND, name, port); + if (ret) + return ret; + + if (stm32_port->swap) { + val = readl_relaxed(port->membase + ofs->cr2); + val |= USART_CR2_SWAP; + writel_relaxed(val, port->membase + ofs->cr2); + } + + /* RX FIFO Flush */ + if (ofs->rqr != UNDEF_REG) + writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr); + + if (stm32_port->rx_ch) { + ret = stm32_usart_start_rx_dma_cyclic(port); + if (ret) { + free_irq(port->irq, port); + return ret; + } + } + + /* RX enabling */ + val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit); + stm32_usart_set_bits(port, ofs->cr1, val); + + return 0; +} + +static void stm32_usart_shutdown(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + u32 val, isr; + int ret; + + if (stm32_usart_tx_dma_enabled(stm32_port)) + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + if (stm32_usart_tx_dma_started(stm32_port)) + stm32_usart_tx_dma_terminate(stm32_port); + + /* Disable modem control interrupts */ + stm32_usart_disable_ms(port); + + val = USART_CR1_TXEIE | USART_CR1_TE; + val |= stm32_port->cr1_irq | USART_CR1_RE; + val |= BIT(cfg->uart_enable_bit); + if (stm32_port->fifoen) + val |= USART_CR1_FIFOEN; + + ret = readl_relaxed_poll_timeout(port->membase + ofs->isr, + isr, (isr & USART_SR_TC), + 10, 100000); + + /* Send the TC error message only when ISR_TC is not set */ + if (ret) + dev_err(port->dev, "Transmission is not complete\n"); + + /* Disable RX DMA. 
*/ + if (stm32_port->rx_ch) + dmaengine_terminate_async(stm32_port->rx_ch); + + /* flush RX & TX FIFO */ + if (ofs->rqr != UNDEF_REG) + writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, + port->membase + ofs->rqr); + + stm32_usart_clr_bits(port, ofs->cr1, val); + + free_irq(port->irq, port); +} + +static void stm32_usart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + struct serial_rs485 *rs485conf = &port->rs485; + unsigned int baud, bits; + u32 usartdiv, mantissa, fraction, oversampling; + tcflag_t cflag = termios->c_cflag; + u32 cr1, cr2, cr3, isr; + unsigned long flags; + int ret; + + if (!stm32_port->hw_flow_control) + cflag &= ~CRTSCTS; + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8); + + spin_lock_irqsave(&port->lock, flags); + + ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, + isr, + (isr & USART_SR_TC), + 10, 100000); + + /* Send the TC error message only when ISR_TC is not set. */ + if (ret) + dev_err(port->dev, "Transmission is not complete\n"); + + /* Stop serial port and reset value */ + writel_relaxed(0, port->membase + ofs->cr1); + + /* flush RX & TX FIFO */ + if (ofs->rqr != UNDEF_REG) + writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, + port->membase + ofs->rqr); + + cr1 = USART_CR1_TE | USART_CR1_RE; + if (stm32_port->fifoen) + cr1 |= USART_CR1_FIFOEN; + cr2 = stm32_port->swap ? USART_CR2_SWAP : 0; + + /* Tx and RX FIFO configuration */ + cr3 = readl_relaxed(port->membase + ofs->cr3); + cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE; + if (stm32_port->fifoen) { + if (stm32_port->txftcfg >= 0) + cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT; + if (stm32_port->rxftcfg >= 0) + cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT; + } + + if (cflag & CSTOPB) + cr2 |= USART_CR2_STOP_2B; + + bits = tty_get_char_size(cflag); + stm32_port->rdr_mask = (BIT(bits) - 1); + + if (cflag & PARENB) { + bits++; + cr1 |= USART_CR1_PCE; + } + + /* + * Word length configuration: + * CS8 + parity, 9 bits word aka [M1:M0] = 0b01 + * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10 + * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00 + * M0 and M1 already cleared by cr1 initialization. + */ + if (bits == 9) { + cr1 |= USART_CR1_M0; + } else if ((bits == 7) && cfg->has_7bits_data) { + cr1 |= USART_CR1_M1; + } else if (bits != 8) { + dev_dbg(port->dev, "Unsupported data bits config: %u bits\n" + , bits); + cflag &= ~CSIZE; + cflag |= CS8; + termios->c_cflag = cflag; + bits = 8; + if (cflag & PARENB) { + bits++; + cr1 |= USART_CR1_M0; + } + } + + if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch || + (stm32_port->fifoen && + stm32_port->rxftcfg >= 0))) { + if (cflag & CSTOPB) + bits = bits + 3; /* 1 start bit + 2 stop bits */ + else + bits = bits + 2; /* 1 start bit + 1 stop bit */ + + /* RX timeout irq to occur after last stop bit + bits */ + stm32_port->cr1_irq = USART_CR1_RTOIE; + writel_relaxed(bits, port->membase + ofs->rtor); + cr2 |= USART_CR2_RTOEN; + /* + * Enable fifo threshold irq in two cases, either when there is no DMA, or when + * wake up over usart, from low power until the DMA gets re-enabled by resume. 
+ */ + stm32_port->cr3_irq = USART_CR3_RXFTIE; + } + + cr1 |= stm32_port->cr1_irq; + cr3 |= stm32_port->cr3_irq; + + if (cflag & PARODD) + cr1 |= USART_CR1_PS; + + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + if (cflag & CRTSCTS) { + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + cr3 |= USART_CR3_CTSE | USART_CR3_RTSE; + } + + usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud); + + /* + * The USART supports 16 or 8 times oversampling. + * By default we prefer 16 times oversampling, so that the receiver + * has a better tolerance to clock deviations. + * 8 times oversampling is only used to achieve higher speeds. + */ + if (usartdiv < 16) { + oversampling = 8; + cr1 |= USART_CR1_OVER8; + stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8); + } else { + oversampling = 16; + cr1 &= ~USART_CR1_OVER8; + stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8); + } + + mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT; + fraction = usartdiv % oversampling; + writel_relaxed(mantissa | fraction, port->membase + ofs->brr); + + uart_update_timeout(port, cflag, baud); + + port->read_status_mask = USART_SR_ORE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= USART_SR_PE | USART_SR_FE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= USART_SR_FE; + + /* Characters to ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask = USART_SR_PE | USART_SR_FE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= USART_SR_FE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= USART_SR_ORE; + } + + /* Ignore all characters if CREAD is not set */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= USART_SR_DUMMY_RX; + + if (stm32_port->rx_ch) { + /* + * Setup DMA to collect only valid data and enable error irqs. + * This also enables break reception when using DMA. + */ + cr1 |= USART_CR1_PEIE; + cr3 |= USART_CR3_EIE; + cr3 |= USART_CR3_DMAR; + cr3 |= USART_CR3_DDRE; + } + + if (rs485conf->flags & SER_RS485_ENABLED) { + stm32_usart_config_reg_rs485(&cr1, &cr3, + rs485conf->delay_rts_before_send, + rs485conf->delay_rts_after_send, + baud); + if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { + cr3 &= ~USART_CR3_DEP; + rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; + } else { + cr3 |= USART_CR3_DEP; + rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; + } + + } else { + cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP); + cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); + } + + /* Configure wake up from low power on start bit detection */ + if (stm32_port->wakeup_src) { + cr3 &= ~USART_CR3_WUS_MASK; + cr3 |= USART_CR3_WUS_START_BIT; + } + + writel_relaxed(cr3, port->membase + ofs->cr3); + writel_relaxed(cr2, port->membase + ofs->cr2); + writel_relaxed(cr1, port->membase + ofs->cr1); + + stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); + spin_unlock_irqrestore(&port->lock, flags); + + /* Handle modem control interrupts */ + if (UART_ENABLE_MS(port, termios->c_cflag)) + stm32_usart_enable_ms(port); + else + stm32_usart_disable_ms(port); +} + +static const char *stm32_usart_type(struct uart_port *port) +{ + return (port->type == PORT_STM32) ? 
DRIVER_NAME : NULL; +} + +static void stm32_usart_release_port(struct uart_port *port) +{ +} + +static int stm32_usart_request_port(struct uart_port *port) +{ + return 0; +} + +static void stm32_usart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_STM32; +} + +static int +stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + /* No user changeable parameters */ + return -EINVAL; +} + +static void stm32_usart_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct stm32_port *stm32port = container_of(port, + struct stm32_port, port); + const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + const struct stm32_usart_config *cfg = &stm32port->info->cfg; + unsigned long flags; + + switch (state) { + case UART_PM_STATE_ON: + pm_runtime_get_sync(port->dev); + break; + case UART_PM_STATE_OFF: + spin_lock_irqsave(&port->lock, flags); + stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); + spin_unlock_irqrestore(&port->lock, flags); + pm_runtime_put_sync(port->dev); + break; + } +} + +#if defined(CONFIG_CONSOLE_POLL) + + /* Callbacks for characters polling in debug context (i.e. KGDB). */ +static int stm32_usart_poll_init(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + + return clk_prepare_enable(stm32_port->clk); +} + +static int stm32_usart_poll_get_char(struct uart_port *port) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + + if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE)) + return NO_POLL_CHAR; + + return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask; +} + +static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch) +{ + stm32_usart_console_putchar(port, ch); +} +#endif /* CONFIG_CONSOLE_POLL */ + +static const struct uart_ops stm32_uart_ops = { + .tx_empty = stm32_usart_tx_empty, + .set_mctrl = stm32_usart_set_mctrl, + .get_mctrl = stm32_usart_get_mctrl, + .stop_tx = stm32_usart_stop_tx, + .start_tx = stm32_usart_start_tx, + .throttle = stm32_usart_throttle, + .unthrottle = stm32_usart_unthrottle, + .stop_rx = stm32_usart_stop_rx, + .enable_ms = stm32_usart_enable_ms, + .break_ctl = stm32_usart_break_ctl, + .startup = stm32_usart_startup, + .shutdown = stm32_usart_shutdown, + .flush_buffer = stm32_usart_flush_buffer, + .set_termios = stm32_usart_set_termios, + .pm = stm32_usart_pm, + .type = stm32_usart_type, + .release_port = stm32_usart_release_port, + .request_port = stm32_usart_request_port, + .config_port = stm32_usart_config_port, + .verify_port = stm32_usart_verify_port, +#if defined(CONFIG_CONSOLE_POLL) + .poll_init = stm32_usart_poll_init, + .poll_get_char = stm32_usart_poll_get_char, + .poll_put_char = stm32_usart_poll_put_char, +#endif /* CONFIG_CONSOLE_POLL */ +}; + +/* + * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG) + * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case, + * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE. + * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1. 
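+ * e.g. a requested threshold of 8 bytes matches array index 3, so the bitfield is programmed with 2.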
+ */ +static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 }; + +static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p, + int *ftcfg) +{ + u32 bytes, i; + + /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */ + if (of_property_read_u32(pdev->dev.of_node, p, &bytes)) + bytes = 8; + + for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) + if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes) + break; + if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg)) + i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1; + + dev_dbg(&pdev->dev, "%s set to %d bytes\n", p, + stm32h7_usart_fifo_thresh_cfg[i]); + + /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */ + if (i) + *ftcfg = i - 1; + else + *ftcfg = -EINVAL; +} + +static void stm32_usart_deinit_port(struct stm32_port *stm32port) +{ + clk_disable_unprepare(stm32port->clk); +} + +static const struct serial_rs485 stm32_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | + SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + +static int stm32_usart_init_port(struct stm32_port *stm32port, + struct platform_device *pdev) +{ + struct uart_port *port = &stm32port->port; + struct resource *res; + int ret, irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF; + port->ops = &stm32_uart_ops; + port->dev = &pdev->dev; + port->fifosize = stm32port->info->cfg.fifosize; + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE); + port->irq = irq; + port->rs485_config = stm32_usart_config_rs485; + port->rs485_supported = stm32_rs485_supported; + + ret = stm32_usart_init_rs485(port, pdev); + if (ret) + return ret; + + stm32port->wakeup_src = stm32port->info->cfg.has_wakeup && + of_property_read_bool(pdev->dev.of_node, "wakeup-source"); + + stm32port->swap = stm32port->info->cfg.has_swap && + of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"); + + stm32port->fifoen = stm32port->info->cfg.has_fifo; + if (stm32port->fifoen) { + stm32_usart_get_ftcfg(pdev, "rx-threshold", + &stm32port->rxftcfg); + stm32_usart_get_ftcfg(pdev, "tx-threshold", + &stm32port->txftcfg); + } + + port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(port->membase)) + return PTR_ERR(port->membase); + port->mapbase = res->start; + + spin_lock_init(&port->lock); + + stm32port->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(stm32port->clk)) + return PTR_ERR(stm32port->clk); + + /* Ensure that clk rate is correct by enabling the clk */ + ret = clk_prepare_enable(stm32port->clk); + if (ret) + return ret; + + stm32port->port.uartclk = clk_get_rate(stm32port->clk); + if (!stm32port->port.uartclk) { + ret = -EINVAL; + goto err_clk; + } + + stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0); + if (IS_ERR(stm32port->gpios)) { + ret = PTR_ERR(stm32port->gpios); + goto err_clk; + } + + /* + * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" + * properties should not be specified. 
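+ * Specifying both at the same time is rejected below as a conflicting configuration.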
+ */ + if (stm32port->hw_flow_control) { + if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) || + mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) { + dev_err(&pdev->dev, "Conflicting RTS/CTS config\n"); + ret = -EINVAL; + goto err_clk; + } + } + + return ret; + +err_clk: + clk_disable_unprepare(stm32port->clk); + + return ret; +} + +static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int id; + + if (!np) + return NULL; + + id = of_alias_get_id(np, "serial"); + if (id < 0) { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id); + return NULL; + } + + if (WARN_ON(id >= STM32_MAX_PORTS)) + return NULL; + + stm32_ports[id].hw_flow_control = + of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ || + of_property_read_bool (np, "uart-has-rtscts"); + stm32_ports[id].port.line = id; + stm32_ports[id].cr1_irq = USART_CR1_RXNEIE; + stm32_ports[id].cr3_irq = 0; + stm32_ports[id].last_res = RX_BUF_L; + return &stm32_ports[id]; +} + +#ifdef CONFIG_OF +static const struct of_device_id stm32_match[] = { + { .compatible = "st,stm32-uart", .data = &stm32f4_info}, + { .compatible = "st,stm32f7-uart", .data = &stm32f7_info}, + { .compatible = "st,stm32h7-uart", .data = &stm32h7_info}, + {}, +}; + +MODULE_DEVICE_TABLE(of, stm32_match); +#endif + +static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port, + struct platform_device *pdev) +{ + if (stm32port->rx_buf) + dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf, + stm32port->rx_dma_buf); +} + +static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, + struct platform_device *pdev) +{ + const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + struct uart_port *port = &stm32port->port; + struct device *dev = &pdev->dev; + struct dma_slave_config config; + int ret; + + stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L, + &stm32port->rx_dma_buf, + GFP_KERNEL); + if (!stm32port->rx_buf) + return -ENOMEM; + + /* Configure DMA channel */ + memset(&config, 0, sizeof(config)); + config.src_addr = port->mapbase + ofs->rdr; + config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + + ret = dmaengine_slave_config(stm32port->rx_ch, &config); + if (ret < 0) { + dev_err(dev, "rx dma channel config failed\n"); + stm32_usart_of_dma_rx_remove(stm32port, pdev); + return ret; + } + + return 0; +} + +static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port, + struct platform_device *pdev) +{ + if (stm32port->tx_buf) + dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf, + stm32port->tx_dma_buf); +} + +static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, + struct platform_device *pdev) +{ + const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; + struct uart_port *port = &stm32port->port; + struct device *dev = &pdev->dev; + struct dma_slave_config config; + int ret; + + stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L, + &stm32port->tx_dma_buf, + GFP_KERNEL); + if (!stm32port->tx_buf) + return -ENOMEM; + + /* Configure DMA channel */ + memset(&config, 0, sizeof(config)); + config.dst_addr = port->mapbase + ofs->tdr; + config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + + ret = dmaengine_slave_config(stm32port->tx_ch, &config); + if (ret < 0) { + dev_err(dev, "tx dma channel config failed\n"); + stm32_usart_of_dma_tx_remove(stm32port, pdev); + return ret; + } + + return 0; +} + +static int stm32_usart_serial_probe(struct platform_device *pdev) +{ + struct stm32_port *stm32port; + int 
ret; + + stm32port = stm32_usart_of_get_port(pdev); + if (!stm32port) + return -ENODEV; + + stm32port->info = of_device_get_match_data(&pdev->dev); + if (!stm32port->info) + return -EINVAL; + + stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); + if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + /* Fall back in interrupt mode for any non-deferral error */ + if (IS_ERR(stm32port->rx_ch)) + stm32port->rx_ch = NULL; + + stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); + if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_dma_rx; + } + /* Fall back in interrupt mode for any non-deferral error */ + if (IS_ERR(stm32port->tx_ch)) + stm32port->tx_ch = NULL; + + ret = stm32_usart_init_port(stm32port, pdev); + if (ret) + goto err_dma_tx; + + if (stm32port->wakeup_src) { + device_set_wakeup_capable(&pdev->dev, true); + ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); + if (ret) + goto err_deinit_port; + } + + if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { + /* Fall back in interrupt mode */ + dma_release_channel(stm32port->rx_ch); + stm32port->rx_ch = NULL; + } + + if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { + /* Fall back in interrupt mode */ + dma_release_channel(stm32port->tx_ch); + stm32port->tx_ch = NULL; + } + + if (!stm32port->rx_ch) + dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); + if (!stm32port->tx_ch) + dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n"); + + platform_set_drvdata(pdev, &stm32port->port); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port); + if (ret) + goto err_port; + + pm_runtime_put_sync(&pdev->dev); + + return 0; + +err_port: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + if (stm32port->tx_ch) + stm32_usart_of_dma_tx_remove(stm32port, pdev); + if (stm32port->rx_ch) + stm32_usart_of_dma_rx_remove(stm32port, pdev); + + if (stm32port->wakeup_src) + dev_pm_clear_wake_irq(&pdev->dev); + +err_deinit_port: + if (stm32port->wakeup_src) + device_set_wakeup_capable(&pdev->dev, false); + + stm32_usart_deinit_port(stm32port); + +err_dma_tx: + if (stm32port->tx_ch) + dma_release_channel(stm32port->tx_ch); + +err_dma_rx: + if (stm32port->rx_ch) + dma_release_channel(stm32port->rx_ch); + + return ret; +} + +static int stm32_usart_serial_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + u32 cr3; + + pm_runtime_get_sync(&pdev->dev); + uart_remove_one_port(&stm32_usart_driver, port); + + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE); + cr3 = readl_relaxed(port->membase + ofs->cr3); + cr3 &= ~USART_CR3_EIE; + cr3 &= ~USART_CR3_DMAR; + cr3 &= ~USART_CR3_DDRE; + writel_relaxed(cr3, port->membase + ofs->cr3); + + if (stm32_port->tx_ch) { + stm32_usart_of_dma_tx_remove(stm32_port, pdev); + dma_release_channel(stm32_port->tx_ch); + } + + if (stm32_port->rx_ch) { + stm32_usart_of_dma_rx_remove(stm32_port, pdev); + dma_release_channel(stm32_port->rx_ch); + } + + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + if (stm32_port->wakeup_src) { + 
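+ /* Undo the wake-up IRQ and wake-up capability set up at probe time */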
dev_pm_clear_wake_irq(&pdev->dev); + device_init_wakeup(&pdev->dev, false); + } + + stm32_usart_deinit_port(stm32_port); + + return 0; +} + +static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + u32 isr; + int ret; + + ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr, + (isr & USART_SR_TXE), 100, + STM32_USART_TIMEOUT_USEC); + if (ret != 0) { + dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret); + return; + } + writel_relaxed(ch, port->membase + ofs->tdr); +} + +#ifdef CONFIG_SERIAL_STM32_CONSOLE +static void stm32_usart_console_write(struct console *co, const char *s, + unsigned int cnt) +{ + struct uart_port *port = &stm32_ports[co->index].port; + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + const struct stm32_usart_config *cfg = &stm32_port->info->cfg; + unsigned long flags; + u32 old_cr1, new_cr1; + int locked = 1; + + if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + /* Save and disable interrupts, enable the transmitter */ + old_cr1 = readl_relaxed(port->membase + ofs->cr1); + new_cr1 = old_cr1 & ~USART_CR1_IE_MASK; + new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit); + writel_relaxed(new_cr1, port->membase + ofs->cr1); + + uart_console_write(port, s, cnt, stm32_usart_console_putchar); + + /* Restore interrupt state */ + writel_relaxed(old_cr1, port->membase + ofs->cr1); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static int stm32_usart_console_setup(struct console *co, char *options) +{ + struct stm32_port *stm32port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= STM32_MAX_PORTS) + return -ENODEV; + + stm32port = &stm32_ports[co->index]; + + /* + * This driver does not support early console initialization + * (use ARM early printk support instead), so we only expect + * this to be called during the uart port registration when the + * driver gets probed and the port should be mapped at that point. 
+ */ + if (stm32port->port.mapbase == 0 || !stm32port->port.membase) + return -ENXIO; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&stm32port->port, co, baud, parity, bits, flow); +} + +static struct console stm32_console = { + .name = STM32_SERIAL_NAME, + .device = uart_console_device, + .write = stm32_usart_console_write, + .setup = stm32_usart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &stm32_usart_driver, +}; + +#define STM32_SERIAL_CONSOLE (&stm32_console) + +#else +#define STM32_SERIAL_CONSOLE NULL +#endif /* CONFIG_SERIAL_STM32_CONSOLE */ + +#ifdef CONFIG_SERIAL_EARLYCON +static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct stm32_usart_info *info = port->private_data; + + while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE)) + cpu_relax(); + + writel_relaxed(ch, port->membase + info->ofs.tdr); +} + +static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count) +{ + struct earlycon_device *device = console->data; + struct uart_port *port = &device->port; + + uart_console_write(port, s, count, early_stm32_usart_console_putchar); +} + +static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options) +{ + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + device->port.private_data = &stm32h7_info; + device->con->write = early_stm32_serial_write; + return 0; +} + +static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options) +{ + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + device->port.private_data = &stm32f7_info; + device->con->write = early_stm32_serial_write; + return 0; +} + +static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options) +{ + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + device->port.private_data = &stm32f4_info; + device->con->write = early_stm32_serial_write; + return 0; +} + +OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup); +OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup); +OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup); +#endif /* CONFIG_SERIAL_EARLYCON */ + +static struct uart_driver stm32_usart_driver = { + .driver_name = DRIVER_NAME, + .dev_name = STM32_SERIAL_NAME, + .major = 0, + .minor = 0, + .nr = STM32_MAX_PORTS, + .cons = STM32_SERIAL_CONSOLE, +}; + +static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port, + bool enable) +{ + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; + struct tty_port *tport = &port->state->port; + int ret; + unsigned int size; + unsigned long flags; + + if (!stm32_port->wakeup_src || !tty_port_initialized(tport)) + return 0; + + /* + * Enable low-power wake-up and wake-up irq if argument is set to + * "enable", disable low-power wake-up and wake-up irq otherwise + */ + if (enable) { + stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM); + stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE); + mctrl_gpio_enable_irq_wake(stm32_port->gpios); + + /* + * When DMA is used for reception, it must be disabled before + * entering low-power mode and re-enabled when exiting from + * low-power mode. 
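+ * Data already received in the DMA buffer is pushed to the TTY layer before the channel is stopped.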
+ */ + if (stm32_port->rx_ch) { + spin_lock_irqsave(&port->lock, flags); + /* Avoid race with RX IRQ when DMAR is cleared */ + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); + /* Poll data from DMA RX buffer if any */ + size = stm32_usart_receive_chars(port, true); + dmaengine_terminate_async(stm32_port->rx_ch); + uart_unlock_and_check_sysrq_irqrestore(port, flags); + if (size) + tty_flip_buffer_push(tport); + } + + /* Poll data from RX FIFO if any */ + stm32_usart_receive_chars(port, false); + } else { + if (stm32_port->rx_ch) { + ret = stm32_usart_start_rx_dma_cyclic(port); + if (ret) + return ret; + } + mctrl_gpio_disable_irq_wake(stm32_port->gpios); + stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM); + stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE); + } + + return 0; +} + +static int __maybe_unused stm32_usart_serial_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + int ret; + + uart_suspend_port(&stm32_usart_driver, port); + + if (device_may_wakeup(dev) || device_wakeup_path(dev)) { + ret = stm32_usart_serial_en_wakeup(port, true); + if (ret) + return ret; + } + + /* + * When "no_console_suspend" is enabled, keep the pinctrl default state + * and rely on bootloader stage to restore this state upon resume. + * Otherwise, apply the idle or sleep states depending on wakeup + * capabilities. + */ + if (console_suspend_enabled || !uart_console(port)) { + if (device_may_wakeup(dev) || device_wakeup_path(dev)) + pinctrl_pm_select_idle_state(dev); + else + pinctrl_pm_select_sleep_state(dev); + } + + return 0; +} + +static int __maybe_unused stm32_usart_serial_resume(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + int ret; + + pinctrl_pm_select_default_state(dev); + + if (device_may_wakeup(dev) || device_wakeup_path(dev)) { + ret = stm32_usart_serial_en_wakeup(port, false); + if (ret) + return ret; + } + + return uart_resume_port(&stm32_usart_driver, port); +} + +static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct stm32_port *stm32port = container_of(port, + struct stm32_port, port); + + clk_disable_unprepare(stm32port->clk); + + return 0; +} + +static int __maybe_unused stm32_usart_runtime_resume(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct stm32_port *stm32port = container_of(port, + struct stm32_port, port); + + return clk_prepare_enable(stm32port->clk); +} + +static const struct dev_pm_ops stm32_serial_pm_ops = { + SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend, + stm32_usart_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend, + stm32_usart_serial_resume) +}; + +static struct platform_driver stm32_serial_driver = { + .probe = stm32_usart_serial_probe, + .remove = stm32_usart_serial_remove, + .driver = { + .name = DRIVER_NAME, + .pm = &stm32_serial_pm_ops, + .of_match_table = of_match_ptr(stm32_match), + }, +}; + +static int __init stm32_usart_init(void) +{ + static char banner[] __initdata = "STM32 USART driver initialized"; + int ret; + + pr_info("%s\n", banner); + + ret = uart_register_driver(&stm32_usart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&stm32_serial_driver); + if (ret) + uart_unregister_driver(&stm32_usart_driver); + + return ret; +} + +static void __exit stm32_usart_exit(void) +{ + platform_driver_unregister(&stm32_serial_driver); + uart_unregister_driver(&stm32_usart_driver); +} + +module_init(stm32_usart_init); 
+module_exit(stm32_usart_exit); + +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h new file mode 100644 index 000000000..0ec41a732 --- /dev/null +++ b/drivers/tty/serial/stm32-usart.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Maxime Coquelin 2015 + * Copyright (C) STMicroelectronics SA 2017 + * Authors: Maxime Coquelin + * Gerald Baeza + */ + +#define DRIVER_NAME "stm32-usart" + +struct stm32_usart_offsets { + u8 cr1; + u8 cr2; + u8 cr3; + u8 brr; + u8 gtpr; + u8 rtor; + u8 rqr; + u8 isr; + u8 icr; + u8 rdr; + u8 tdr; +}; + +struct stm32_usart_config { + u8 uart_enable_bit; /* USART_CR1_UE */ + bool has_7bits_data; + bool has_swap; + bool has_wakeup; + bool has_fifo; + int fifosize; +}; + +struct stm32_usart_info { + struct stm32_usart_offsets ofs; + struct stm32_usart_config cfg; +}; + +#define UNDEF_REG 0xff + +/* USART_SR (F4) / USART_ISR (F7) */ +#define USART_SR_PE BIT(0) +#define USART_SR_FE BIT(1) +#define USART_SR_NE BIT(2) /* F7 (NF for F4) */ +#define USART_SR_ORE BIT(3) +#define USART_SR_IDLE BIT(4) +#define USART_SR_RXNE BIT(5) +#define USART_SR_TC BIT(6) +#define USART_SR_TXE BIT(7) +#define USART_SR_CTSIF BIT(9) +#define USART_SR_CTS BIT(10) /* F7 */ +#define USART_SR_RTOF BIT(11) /* F7 */ +#define USART_SR_EOBF BIT(12) /* F7 */ +#define USART_SR_ABRE BIT(14) /* F7 */ +#define USART_SR_ABRF BIT(15) /* F7 */ +#define USART_SR_BUSY BIT(16) /* F7 */ +#define USART_SR_CMF BIT(17) /* F7 */ +#define USART_SR_SBKF BIT(18) /* F7 */ +#define USART_SR_WUF BIT(20) /* H7 */ +#define USART_SR_TEACK BIT(21) /* F7 */ +#define USART_SR_ERR_MASK (USART_SR_ORE | USART_SR_NE | USART_SR_FE |\ + USART_SR_PE) +/* Dummy bits */ +#define USART_SR_DUMMY_RX BIT(16) + +/* USART_DR */ +#define USART_DR_MASK GENMASK(8, 0) + +/* USART_BRR */ +#define USART_BRR_DIV_F_MASK GENMASK(3, 0) +#define USART_BRR_DIV_M_MASK GENMASK(15, 4) +#define USART_BRR_DIV_M_SHIFT 4 +#define USART_BRR_04_R_SHIFT 1 + +/* USART_CR1 */ +#define USART_CR1_SBK BIT(0) +#define USART_CR1_RWU BIT(1) /* F4 */ +#define USART_CR1_UESM BIT(1) /* H7 */ +#define USART_CR1_RE BIT(2) +#define USART_CR1_TE BIT(3) +#define USART_CR1_IDLEIE BIT(4) +#define USART_CR1_RXNEIE BIT(5) +#define USART_CR1_TCIE BIT(6) +#define USART_CR1_TXEIE BIT(7) +#define USART_CR1_PEIE BIT(8) +#define USART_CR1_PS BIT(9) +#define USART_CR1_PCE BIT(10) +#define USART_CR1_WAKE BIT(11) +#define USART_CR1_M0 BIT(12) /* F7 (CR1_M for F4) */ +#define USART_CR1_MME BIT(13) /* F7 */ +#define USART_CR1_CMIE BIT(14) /* F7 */ +#define USART_CR1_OVER8 BIT(15) +#define USART_CR1_DEDT_MASK GENMASK(20, 16) /* F7 */ +#define USART_CR1_DEAT_MASK GENMASK(25, 21) /* F7 */ +#define USART_CR1_RTOIE BIT(26) /* F7 */ +#define USART_CR1_EOBIE BIT(27) /* F7 */ +#define USART_CR1_M1 BIT(28) /* F7 */ +#define USART_CR1_IE_MASK (GENMASK(8, 4) | BIT(14) | BIT(26) | BIT(27)) +#define USART_CR1_FIFOEN BIT(29) /* H7 */ +#define USART_CR1_DEAT_SHIFT 21 +#define USART_CR1_DEDT_SHIFT 16 + +/* USART_CR2 */ +#define USART_CR2_ADD_MASK GENMASK(3, 0) /* F4 */ +#define USART_CR2_ADDM7 BIT(4) /* F7 */ +#define USART_CR2_LBCL BIT(8) +#define USART_CR2_CPHA BIT(9) +#define USART_CR2_CPOL BIT(10) +#define USART_CR2_CLKEN BIT(11) +#define USART_CR2_STOP_2B BIT(13) +#define USART_CR2_STOP_MASK GENMASK(13, 12) +#define USART_CR2_LINEN BIT(14) +#define USART_CR2_SWAP BIT(15) /* F7 */ +#define USART_CR2_RXINV 
BIT(16) /* F7 */ +#define USART_CR2_TXINV BIT(17) /* F7 */ +#define USART_CR2_DATAINV BIT(18) /* F7 */ +#define USART_CR2_MSBFIRST BIT(19) /* F7 */ +#define USART_CR2_ABREN BIT(20) /* F7 */ +#define USART_CR2_ABRMOD_MASK GENMASK(22, 21) /* F7 */ +#define USART_CR2_RTOEN BIT(23) /* F7 */ +#define USART_CR2_ADD_F7_MASK GENMASK(31, 24) /* F7 */ + +/* USART_CR3 */ +#define USART_CR3_EIE BIT(0) +#define USART_CR3_IREN BIT(1) +#define USART_CR3_IRLP BIT(2) +#define USART_CR3_HDSEL BIT(3) +#define USART_CR3_NACK BIT(4) +#define USART_CR3_SCEN BIT(5) +#define USART_CR3_DMAR BIT(6) +#define USART_CR3_DMAT BIT(7) +#define USART_CR3_RTSE BIT(8) +#define USART_CR3_CTSE BIT(9) +#define USART_CR3_CTSIE BIT(10) +#define USART_CR3_ONEBIT BIT(11) +#define USART_CR3_OVRDIS BIT(12) /* F7 */ +#define USART_CR3_DDRE BIT(13) /* F7 */ +#define USART_CR3_DEM BIT(14) /* F7 */ +#define USART_CR3_DEP BIT(15) /* F7 */ +#define USART_CR3_SCARCNT_MASK GENMASK(19, 17) /* F7 */ +#define USART_CR3_WUS_MASK GENMASK(21, 20) /* H7 */ +#define USART_CR3_WUS_START_BIT BIT(21) /* H7 */ +#define USART_CR3_WUFIE BIT(22) /* H7 */ +#define USART_CR3_TXFTIE BIT(23) /* H7 */ +#define USART_CR3_TCBGTIE BIT(24) /* H7 */ +#define USART_CR3_RXFTCFG_MASK GENMASK(27, 25) /* H7 */ +#define USART_CR3_RXFTCFG_SHIFT 25 /* H7 */ +#define USART_CR3_RXFTIE BIT(28) /* H7 */ +#define USART_CR3_TXFTCFG_MASK GENMASK(31, 29) /* H7 */ +#define USART_CR3_TXFTCFG_SHIFT 29 /* H7 */ + +/* USART_GTPR */ +#define USART_GTPR_PSC_MASK GENMASK(7, 0) +#define USART_GTPR_GT_MASK GENMASK(15, 8) + +/* USART_RTOR */ +#define USART_RTOR_RTO_MASK GENMASK(23, 0) /* F7 */ +#define USART_RTOR_BLEN_MASK GENMASK(31, 24) /* F7 */ + +/* USART_RQR */ +#define USART_RQR_ABRRQ BIT(0) /* F7 */ +#define USART_RQR_SBKRQ BIT(1) /* F7 */ +#define USART_RQR_MMRQ BIT(2) /* F7 */ +#define USART_RQR_RXFRQ BIT(3) /* F7 */ +#define USART_RQR_TXFRQ BIT(4) /* F7 */ + +/* USART_ICR */ +#define USART_ICR_PECF BIT(0) /* F7 */ +#define USART_ICR_FECF BIT(1) /* F7 */ +#define USART_ICR_ORECF BIT(3) /* F7 */ +#define USART_ICR_IDLECF BIT(4) /* F7 */ +#define USART_ICR_TCCF BIT(6) /* F7 */ +#define USART_ICR_CTSCF BIT(9) /* F7 */ +#define USART_ICR_RTOCF BIT(11) /* F7 */ +#define USART_ICR_EOBCF BIT(12) /* F7 */ +#define USART_ICR_CMCF BIT(17) /* F7 */ +#define USART_ICR_WUCF BIT(20) /* H7 */ + +#define STM32_SERIAL_NAME "ttySTM" +#define STM32_MAX_PORTS 8 + +#define RX_BUF_L 4096 /* dma rx buffer length */ +#define RX_BUF_P (RX_BUF_L / 2) /* dma rx buffer period */ +#define TX_BUF_L RX_BUF_L /* dma tx buffer length */ + +#define STM32_USART_TIMEOUT_USEC USEC_PER_SEC /* 1s timeout in µs */ + +struct stm32_port { + struct uart_port port; + struct clk *clk; + const struct stm32_usart_info *info; + struct dma_chan *rx_ch; /* dma rx channel */ + dma_addr_t rx_dma_buf; /* dma rx buffer bus address */ + unsigned char *rx_buf; /* dma rx buffer cpu address */ + struct dma_chan *tx_ch; /* dma tx channel */ + dma_addr_t tx_dma_buf; /* dma tx buffer bus address */ + unsigned char *tx_buf; /* dma tx buffer cpu address */ + u32 cr1_irq; /* USART_CR1_RXNEIE or RTOIE */ + u32 cr3_irq; /* USART_CR3_RXFTIE */ + int last_res; + bool tx_dma_busy; /* dma tx transaction in progress */ + bool throttled; /* port throttled */ + bool hw_flow_control; + bool swap; /* swap RX & TX pins */ + bool fifoen; + bool txdone; + int rxftcfg; /* RX FIFO threshold CFG */ + int txftcfg; /* TX FIFO threshold CFG */ + bool wakeup_src; + int rdr_mask; /* receive data register mask */ + struct mctrl_gpios *gpios; /* modem control gpios */ 
+ struct dma_tx_state rx_dma_state; +}; + +static struct stm32_port stm32_ports[STM32_MAX_PORTS]; +static struct uart_driver stm32_usart_driver; diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c new file mode 100644 index 000000000..2491551c2 --- /dev/null +++ b/drivers/tty/serial/suncore.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +/* suncore.c + * + * Common SUN serial routines. Based entirely + * upon drivers/sbus/char/sunserial.c which is: + * + * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) + * + * Adaptation to new UART layer is: + * + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +static int sunserial_current_minor = 64; + +int sunserial_register_minors(struct uart_driver *drv, int count) +{ + int err = 0; + + drv->minor = sunserial_current_minor; + drv->nr += count; + /* Register the driver on the first call */ + if (drv->nr == count) + err = uart_register_driver(drv); + if (err == 0) { + sunserial_current_minor += count; + drv->tty_driver->name_base = drv->minor - 64; + } + return err; +} +EXPORT_SYMBOL(sunserial_register_minors); + +void sunserial_unregister_minors(struct uart_driver *drv, int count) +{ + drv->nr -= count; + sunserial_current_minor -= count; + + if (drv->nr == 0) + uart_unregister_driver(drv); +} +EXPORT_SYMBOL(sunserial_unregister_minors); + +int sunserial_console_match(struct console *con, struct device_node *dp, + struct uart_driver *drv, int line, bool ignore_line) +{ + if (!con) + return 0; + + drv->cons = con; + + if (of_console_device != dp) + return 0; + + if (!ignore_line) { + int off = 0; + + if (of_console_options && + *of_console_options == 'b') + off = 1; + + if ((line & 1) != off) + return 0; + } + + if (!console_set_on_cmdline) { + con->index = line; + add_preferred_console(con->name, line, NULL); + } + return 1; +} +EXPORT_SYMBOL(sunserial_console_match); + +void sunserial_console_termios(struct console *con, struct device_node *uart_dp) +{ + const char *mode, *s; + char mode_prop[] = "ttyX-mode"; + int baud, bits, stop, cflag; + char parity; + + if (of_node_name_eq(uart_dp, "rsc") || + of_node_name_eq(uart_dp, "rsc-console") || + of_node_name_eq(uart_dp, "rsc-control")) { + mode = of_get_property(uart_dp, + "ssp-console-modes", NULL); + if (!mode) + mode = "115200,8,n,1,-"; + } else if (of_node_name_eq(uart_dp, "lom-console")) { + mode = "9600,8,n,1,-"; + } else { + struct device_node *dp; + char c; + + c = 'a'; + if (of_console_options) + c = *of_console_options; + + mode_prop[3] = c; + + dp = of_find_node_by_path("/options"); + mode = of_get_property(dp, mode_prop, NULL); + if (!mode) + mode = "9600,8,n,1,-"; + of_node_put(dp); + } + + cflag = CREAD | HUPCL | CLOCAL; + + s = mode; + baud = simple_strtoul(s, NULL, 0); + s = strchr(s, ','); + bits = simple_strtoul(++s, NULL, 0); + s = strchr(s, ','); + parity = *(++s); + s = strchr(s, ','); + stop = simple_strtoul(++s, NULL, 0); + s = strchr(s, ','); + /* XXX handshake is not handled here. 
*/ + + switch (baud) { + case 150: cflag |= B150; break; + case 300: cflag |= B300; break; + case 600: cflag |= B600; break; + case 1200: cflag |= B1200; break; + case 2400: cflag |= B2400; break; + case 4800: cflag |= B4800; break; + case 9600: cflag |= B9600; break; + case 19200: cflag |= B19200; break; + case 38400: cflag |= B38400; break; + case 57600: cflag |= B57600; break; + case 115200: cflag |= B115200; break; + case 230400: cflag |= B230400; break; + case 460800: cflag |= B460800; break; + default: baud = 9600; cflag |= B9600; break; + } + + switch (bits) { + case 5: cflag |= CS5; break; + case 6: cflag |= CS6; break; + case 7: cflag |= CS7; break; + case 8: cflag |= CS8; break; + default: cflag |= CS8; break; + } + + switch (parity) { + case 'o': cflag |= (PARENB | PARODD); break; + case 'e': cflag |= PARENB; break; + case 'n': default: break; + } + + switch (stop) { + case 2: cflag |= CSTOPB; break; + case 1: default: break; + } + + con->cflag = cflag; +} + +/* Sun serial MOUSE auto baud rate detection. */ +static struct mouse_baud_cflag { + int baud; + unsigned int cflag; +} mouse_baud_table[] = { + { 1200, B1200 }, + { 2400, B2400 }, + { 4800, B4800 }, + { 9600, B9600 }, + { -1, ~0 }, + { -1, ~0 }, +}; + +unsigned int suncore_mouse_baud_cflag_next(unsigned int cflag, int *new_baud) +{ + int i; + + for (i = 0; mouse_baud_table[i].baud != -1; i++) + if (mouse_baud_table[i].cflag == (cflag & CBAUD)) + break; + + i += 1; + if (mouse_baud_table[i].baud == -1) + i = 0; + + *new_baud = mouse_baud_table[i].baud; + return mouse_baud_table[i].cflag; +} + +EXPORT_SYMBOL(suncore_mouse_baud_cflag_next); + +/* Basically, when the baud rate is wrong the mouse spits out + * breaks to us. + */ +int suncore_mouse_baud_detection(unsigned char ch, int is_break) +{ + static int mouse_got_break = 0; + static int ctr = 0; + + if (is_break) { + /* Let a few normal bytes go by before we jump the gun + * and say we need to try another baud rate. + */ + if (mouse_got_break && ctr < 8) + return 1; + + /* Ok, we need to try another baud. */ + ctr = 0; + mouse_got_break = 1; + return 2; + } + if (mouse_got_break) { + ctr++; + if (ch == 0x87) { + /* Correct baud rate determined. */ + mouse_got_break = 0; + } + return 1; + } + return 0; +} + +EXPORT_SYMBOL(suncore_mouse_baud_detection); + +static int __init suncore_init(void) +{ + return 0; +} +device_initcall(suncore_init); + +#if 0 /* ..def MODULE ; never supported as such */ +MODULE_AUTHOR("Eddie C. Dost, David S. Miller"); +MODULE_DESCRIPTION("Sun serial common layer"); +MODULE_LICENSE("GPL"); +#endif diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c new file mode 100644 index 000000000..1938ba5e9 --- /dev/null +++ b/drivers/tty/serial/sunhv.c @@ -0,0 +1,652 @@ +// SPDX-License-Identifier: GPL-2.0 +/* sunhv.c: Serial driver for SUN4V hypervisor console. + * + * Copyright (C) 2006, 2007 David S. 
Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#define CON_BREAK ((long)-1) +#define CON_HUP ((long)-2) + +#define IGNORE_BREAK 0x1 +#define IGNORE_ALL 0x2 + +static char *con_write_page; +static char *con_read_page; + +static int hung_up = 0; + +static void transmit_chars_putchar(struct uart_port *port, struct circ_buf *xmit) +{ + while (!uart_circ_empty(xmit)) { + long status = sun4v_con_putchar(xmit->buf[xmit->tail]); + + if (status != HV_EOK) + break; + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } +} + +static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit) +{ + while (!uart_circ_empty(xmit)) { + unsigned long ra = __pa(xmit->buf + xmit->tail); + unsigned long len, status, sent; + + len = CIRC_CNT_TO_END(xmit->head, xmit->tail, + UART_XMIT_SIZE); + status = sun4v_con_write(ra, len, &sent); + if (status != HV_EOK) + break; + xmit->tail = (xmit->tail + sent) & (UART_XMIT_SIZE - 1); + port->icount.tx += sent; + } +} + +static int receive_chars_getchar(struct uart_port *port) +{ + int saw_console_brk = 0; + int limit = 10000; + + while (limit-- > 0) { + long status; + long c = sun4v_con_getchar(&status); + + if (status == HV_EWOULDBLOCK) + break; + + if (c == CON_BREAK) { + if (uart_handle_break(port)) + continue; + saw_console_brk = 1; + c = 0; + } + + if (c == CON_HUP) { + hung_up = 1; + uart_handle_dcd_change(port, 0); + } else if (hung_up) { + hung_up = 0; + uart_handle_dcd_change(port, 1); + } + + if (port->state == NULL) { + uart_handle_sysrq_char(port, c); + continue; + } + + port->icount.rx++; + + if (uart_handle_sysrq_char(port, c)) + continue; + + tty_insert_flip_char(&port->state->port, c, TTY_NORMAL); + } + + return saw_console_brk; +} + +static int receive_chars_read(struct uart_port *port) +{ + static int saw_console_brk; + int limit = 10000; + + while (limit-- > 0) { + unsigned long ra = __pa(con_read_page); + unsigned long bytes_read, i; + long stat = sun4v_con_read(ra, PAGE_SIZE, &bytes_read); + + if (stat != HV_EOK) { + bytes_read = 0; + + if (stat == CON_BREAK) { + if (saw_console_brk) + sun_do_break(); + + if (uart_handle_break(port)) + continue; + saw_console_brk = 1; + *con_read_page = 0; + bytes_read = 1; + } else if (stat == CON_HUP) { + hung_up = 1; + uart_handle_dcd_change(port, 0); + continue; + } else { + /* HV_EWOULDBLOCK, etc. 
*/ + break; + } + } + + if (hung_up) { + hung_up = 0; + uart_handle_dcd_change(port, 1); + } + + if (port->sysrq != 0 && *con_read_page) { + for (i = 0; i < bytes_read; i++) + uart_handle_sysrq_char(port, con_read_page[i]); + saw_console_brk = 0; + } + + if (port->state == NULL) + continue; + + port->icount.rx += bytes_read; + + tty_insert_flip_string(&port->state->port, con_read_page, + bytes_read); + } + + return saw_console_brk; +} + +struct sunhv_ops { + void (*transmit_chars)(struct uart_port *port, struct circ_buf *xmit); + int (*receive_chars)(struct uart_port *port); +}; + +static const struct sunhv_ops bychar_ops = { + .transmit_chars = transmit_chars_putchar, + .receive_chars = receive_chars_getchar, +}; + +static const struct sunhv_ops bywrite_ops = { + .transmit_chars = transmit_chars_write, + .receive_chars = receive_chars_read, +}; + +static const struct sunhv_ops *sunhv_ops = &bychar_ops; + +static struct tty_port *receive_chars(struct uart_port *port) +{ + struct tty_port *tport = NULL; + + if (port->state != NULL) /* Unopened serial console */ + tport = &port->state->port; + + if (sunhv_ops->receive_chars(port)) + sun_do_break(); + + return tport; +} + +static void transmit_chars(struct uart_port *port) +{ + struct circ_buf *xmit; + + if (!port->state) + return; + + xmit = &port->state->xmit; + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + return; + + sunhv_ops->transmit_chars(port, xmit); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static irqreturn_t sunhv_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct tty_port *tport; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + tport = receive_chars(port); + transmit_chars(port); + spin_unlock_irqrestore(&port->lock, flags); + + if (tport) + tty_flip_buffer_push(tport); + + return IRQ_HANDLED; +} + +/* port->lock is not held. */ +static unsigned int sunhv_tx_empty(struct uart_port *port) +{ + /* Transmitter is always empty for us. If the circ buffer + * is non-empty or there is an x_char pending, our caller + * will do the right thing and ignore what we return here. + */ + return TIOCSER_TEMT; +} + +/* port->lock held by caller. */ +static void sunhv_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + return; +} + +/* port->lock is held by caller and interrupts are disabled. */ +static unsigned int sunhv_get_mctrl(struct uart_port *port) +{ + return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS; +} + +/* port->lock held by caller. */ +static void sunhv_stop_tx(struct uart_port *port) +{ + return; +} + +/* port->lock held by caller. */ +static void sunhv_start_tx(struct uart_port *port) +{ + transmit_chars(port); +} + +/* port->lock is not held. */ +static void sunhv_send_xchar(struct uart_port *port, char ch) +{ + unsigned long flags; + int limit = 10000; + + if (ch == __DISABLED_CHAR) + return; + + spin_lock_irqsave(&port->lock, flags); + + while (limit-- > 0) { + long status = sun4v_con_putchar(ch); + if (status == HV_EOK) + break; + udelay(1); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* port->lock held by caller. */ +static void sunhv_stop_rx(struct uart_port *port) +{ +} + +/* port->lock is not held. 
*/ +static void sunhv_break_ctl(struct uart_port *port, int break_state) +{ + if (break_state) { + unsigned long flags; + int limit = 10000; + + spin_lock_irqsave(&port->lock, flags); + + while (limit-- > 0) { + long status = sun4v_con_putchar(CON_BREAK); + if (status == HV_EOK) + break; + udelay(1); + } + + spin_unlock_irqrestore(&port->lock, flags); + } +} + +/* port->lock is not held. */ +static int sunhv_startup(struct uart_port *port) +{ + return 0; +} + +/* port->lock is not held. */ +static void sunhv_shutdown(struct uart_port *port) +{ +} + +/* port->lock is not held. */ +static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000); + unsigned int quot = uart_get_divisor(port, baud); + unsigned int iflag, cflag; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + iflag = termios->c_iflag; + cflag = termios->c_cflag; + + port->ignore_status_mask = 0; + if (iflag & IGNBRK) + port->ignore_status_mask |= IGNORE_BREAK; + if ((cflag & CREAD) == 0) + port->ignore_status_mask |= IGNORE_ALL; + + /* XXX */ + uart_update_timeout(port, cflag, + (port->uartclk / (16 * quot))); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *sunhv_type(struct uart_port *port) +{ + return "SUN4V HCONS"; +} + +static void sunhv_release_port(struct uart_port *port) +{ +} + +static int sunhv_request_port(struct uart_port *port) +{ + return 0; +} + +static void sunhv_config_port(struct uart_port *port, int flags) +{ +} + +static int sunhv_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return -EINVAL; +} + +static const struct uart_ops sunhv_pops = { + .tx_empty = sunhv_tx_empty, + .set_mctrl = sunhv_set_mctrl, + .get_mctrl = sunhv_get_mctrl, + .stop_tx = sunhv_stop_tx, + .start_tx = sunhv_start_tx, + .send_xchar = sunhv_send_xchar, + .stop_rx = sunhv_stop_rx, + .break_ctl = sunhv_break_ctl, + .startup = sunhv_startup, + .shutdown = sunhv_shutdown, + .set_termios = sunhv_set_termios, + .type = sunhv_type, + .release_port = sunhv_release_port, + .request_port = sunhv_request_port, + .config_port = sunhv_config_port, + .verify_port = sunhv_verify_port, +}; + +static struct uart_driver sunhv_reg = { + .owner = THIS_MODULE, + .driver_name = "sunhv", + .dev_name = "ttyHV", + .major = TTY_MAJOR, +}; + +static struct uart_port *sunhv_port; + +void sunhv_migrate_hvcons_irq(int cpu) +{ + /* Migrate hvcons irq to param cpu */ + irq_force_affinity(sunhv_port->irq, cpumask_of(cpu)); +} + +/* Copy 's' into the con_write_page, decoding "\n" into + * "\r\n" along the way. We have to return two lengths + * because the caller needs to know how much to advance + * 's' and also how many bytes to output via con_write_page. 
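+ * The return value is the number of input characters consumed, while *page_bytes is the number of bytes to emit from con_write_page.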
+ */ +static int fill_con_write_page(const char *s, unsigned int n, + unsigned long *page_bytes) +{ + const char *orig_s = s; + char *p = con_write_page; + int left = PAGE_SIZE; + + while (n--) { + if (*s == '\n') { + if (left < 2) + break; + *p++ = '\r'; + left--; + } else if (left < 1) + break; + *p++ = *s++; + left--; + } + *page_bytes = p - con_write_page; + return s - orig_s; +} + +static void sunhv_console_write_paged(struct console *con, const char *s, unsigned n) +{ + struct uart_port *port = sunhv_port; + unsigned long flags; + int locked = 1; + + if (port->sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + while (n > 0) { + unsigned long ra = __pa(con_write_page); + unsigned long page_bytes; + unsigned int cpy = fill_con_write_page(s, n, + &page_bytes); + + n -= cpy; + s += cpy; + while (page_bytes > 0) { + unsigned long written; + int limit = 1000000; + + while (limit--) { + unsigned long stat; + + stat = sun4v_con_write(ra, page_bytes, + &written); + if (stat == HV_EOK) + break; + udelay(1); + } + if (limit < 0) + break; + page_bytes -= written; + ra += written; + } + } + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static inline void sunhv_console_putchar(struct uart_port *port, char c) +{ + int limit = 1000000; + + while (limit-- > 0) { + long status = sun4v_con_putchar(c); + if (status == HV_EOK) + break; + udelay(1); + } +} + +static void sunhv_console_write_bychar(struct console *con, const char *s, unsigned n) +{ + struct uart_port *port = sunhv_port; + unsigned long flags; + int i, locked = 1; + + if (port->sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + for (i = 0; i < n; i++) { + if (*s == '\n') + sunhv_console_putchar(port, '\r'); + sunhv_console_putchar(port, *s++); + } + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static struct console sunhv_console = { + .name = "ttyHV", + .write = sunhv_console_write_bychar, + .device = uart_console_device, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sunhv_reg, +}; + +static int hv_probe(struct platform_device *op) +{ + struct uart_port *port; + unsigned long minor; + int err; + + if (op->archdata.irqs[0] == 0xffffffff) + return -ENODEV; + + port = kzalloc(sizeof(struct uart_port), GFP_KERNEL); + if (unlikely(!port)) + return -ENOMEM; + + minor = 1; + if (sun4v_hvapi_register(HV_GRP_CORE, 1, &minor) == 0 && + minor >= 1) { + err = -ENOMEM; + con_write_page = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!con_write_page) + goto out_free_port; + + con_read_page = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!con_read_page) + goto out_free_con_write_page; + + sunhv_console.write = sunhv_console_write_paged; + sunhv_ops = &bywrite_ops; + } + + sunhv_port = port; + + port->has_sysrq = 1; + port->line = 0; + port->ops = &sunhv_pops; + port->type = PORT_SUNHV; + port->uartclk = ( 29491200 / 16 ); /* arbitrary */ + + port->membase = (unsigned char __iomem *) __pa(port); + + port->irq = op->archdata.irqs[0]; + + port->dev = &op->dev; + + err = sunserial_register_minors(&sunhv_reg, 1); + if (err) + goto out_free_con_read_page; + + sunserial_console_match(&sunhv_console, op->dev.of_node, + &sunhv_reg, port->line, false); + + err = uart_add_one_port(&sunhv_reg, port); + if (err) + goto out_unregister_driver; + + err = request_irq(port->irq, sunhv_interrupt, 0, "hvcons", port); + if (err) + goto out_remove_port; + + platform_set_drvdata(op, 
port); + + return 0; + +out_remove_port: + uart_remove_one_port(&sunhv_reg, port); + +out_unregister_driver: + sunserial_unregister_minors(&sunhv_reg, 1); + +out_free_con_read_page: + kfree(con_read_page); + +out_free_con_write_page: + kfree(con_write_page); + +out_free_port: + kfree(port); + sunhv_port = NULL; + return err; +} + +static int hv_remove(struct platform_device *dev) +{ + struct uart_port *port = platform_get_drvdata(dev); + + free_irq(port->irq, port); + + uart_remove_one_port(&sunhv_reg, port); + + sunserial_unregister_minors(&sunhv_reg, 1); + kfree(con_read_page); + kfree(con_write_page); + kfree(port); + sunhv_port = NULL; + + return 0; +} + +static const struct of_device_id hv_match[] = { + { + .name = "console", + .compatible = "qcn", + }, + { + .name = "console", + .compatible = "SUNW,sun4v-console", + }, + {}, +}; + +static struct platform_driver hv_driver = { + .driver = { + .name = "hv", + .of_match_table = hv_match, + }, + .probe = hv_probe, + .remove = hv_remove, +}; + +static int __init sunhv_init(void) +{ + if (tlb_type != hypervisor) + return -ENODEV; + + return platform_driver_register(&hv_driver); +} +device_initcall(sunhv_init); + +#if 0 /* ...def MODULE ; never supported as such */ +MODULE_AUTHOR("David S. Miller"); +MODULE_DESCRIPTION("SUN4V Hypervisor console driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); +#endif diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c new file mode 100644 index 000000000..7afe61a0e --- /dev/null +++ b/drivers/tty/serial/sunplus-uart.c @@ -0,0 +1,775 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sunplus SoC UART driver + * + * Author: Hammer Hsieh + * + * Note1: This driver is an 8250-like UART, but it is not register compatible. + * + * Note2: On some buses a write must normally be followed by a read back + * to prevent data incoherence, i.e. to make sure the write actually + * reached the hardware. In this driver the startup and shutdown functions + * only write registers and never read them back. Why is that safe? + * The Sunplus interconnect uses posted writes between the memory bus and + * the peripheral bus: after the last write command a specific command is + * sent to confirm completion, the memory bus recognizes that command and + * returns a done signal to the master device, and only after receiving + * the done signal does the master issue the next write. A read before + * the next write is therefore unnecessary.
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register offsets */ +#define SUP_UART_DATA 0x00 +#define SUP_UART_LSR 0x04 +#define SUP_UART_MSR 0x08 +#define SUP_UART_LCR 0x0C +#define SUP_UART_MCR 0x10 +#define SUP_UART_DIV_L 0x14 +#define SUP_UART_DIV_H 0x18 +#define SUP_UART_ISC 0x1C +#define SUP_UART_TX_RESIDUE 0x20 +#define SUP_UART_RX_RESIDUE 0x24 + +/* Line Status Register bits */ +#define SUP_UART_LSR_BC BIT(5) /* break condition status */ +#define SUP_UART_LSR_FE BIT(4) /* frame error status */ +#define SUP_UART_LSR_OE BIT(3) /* overrun error status */ +#define SUP_UART_LSR_PE BIT(2) /* parity error status */ +#define SUP_UART_LSR_RX BIT(1) /* 1: receive fifo not empty */ +#define SUP_UART_LSR_TX BIT(0) /* 1: transmit fifo is not full */ +#define SUP_UART_LSR_TX_NOT_FULL 1 +#define SUP_UART_LSR_BRK_ERROR_BITS GENMASK(5, 2) + +/* Line Control Register bits */ +#define SUP_UART_LCR_SBC BIT(5) /* select break condition */ + +/* Modem Control Register bits */ +#define SUP_UART_MCR_RI BIT(3) /* ring indicator */ +#define SUP_UART_MCR_DCD BIT(2) /* data carrier detect */ + +/* Interrupt Status/Control Register bits */ +#define SUP_UART_ISC_RXM BIT(5) /* RX interrupt enable */ +#define SUP_UART_ISC_TXM BIT(4) /* TX interrupt enable */ +#define SUP_UART_ISC_RX BIT(1) /* RX interrupt status */ +#define SUP_UART_ISC_TX BIT(0) /* TX interrupt status */ + +#define SUP_DUMMY_READ BIT(16) /* drop bytes received on a !CREAD port */ +#define SUP_UART_NR 5 + +struct sunplus_uart_port { + struct uart_port port; + struct clk *clk; + struct reset_control *rstc; +}; + +static void sp_uart_put_char(struct uart_port *port, unsigned int ch) +{ + writel(ch, port->membase + SUP_UART_DATA); +} + +static u32 sunplus_tx_buf_not_full(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + + return (lsr & SUP_UART_LSR_TX) ? SUP_UART_LSR_TX_NOT_FULL : 0; +} + +static unsigned int sunplus_tx_empty(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + + return (lsr & UART_LSR_TEMT) ? 
TIOCSER_TEMT : 0; +} + +static void sunplus_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int mcr = readl(port->membase + SUP_UART_MCR); + + if (mctrl & TIOCM_DTR) + mcr |= UART_MCR_DTR; + else + mcr &= ~UART_MCR_DTR; + + if (mctrl & TIOCM_RTS) + mcr |= UART_MCR_RTS; + else + mcr &= ~UART_MCR_RTS; + + if (mctrl & TIOCM_CAR) + mcr |= SUP_UART_MCR_DCD; + else + mcr &= ~SUP_UART_MCR_DCD; + + if (mctrl & TIOCM_RI) + mcr |= SUP_UART_MCR_RI; + else + mcr &= ~SUP_UART_MCR_RI; + + if (mctrl & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + else + mcr &= ~UART_MCR_LOOP; + + writel(mcr, port->membase + SUP_UART_MCR); +} + +static unsigned int sunplus_get_mctrl(struct uart_port *port) +{ + unsigned int mcr, ret = 0; + + mcr = readl(port->membase + SUP_UART_MCR); + + if (mcr & UART_MCR_DTR) + ret |= TIOCM_DTR; + + if (mcr & UART_MCR_RTS) + ret |= TIOCM_RTS; + + if (mcr & SUP_UART_MCR_DCD) + ret |= TIOCM_CAR; + + if (mcr & SUP_UART_MCR_RI) + ret |= TIOCM_RI; + + if (mcr & UART_MCR_LOOP) + ret |= TIOCM_LOOP; + + return ret; +} + +static void sunplus_stop_tx(struct uart_port *port) +{ + unsigned int isc; + + isc = readl(port->membase + SUP_UART_ISC); + isc &= ~SUP_UART_ISC_TXM; + writel(isc, port->membase + SUP_UART_ISC); +} + +static void sunplus_start_tx(struct uart_port *port) +{ + unsigned int isc; + + isc = readl(port->membase + SUP_UART_ISC); + isc |= SUP_UART_ISC_TXM; + writel(isc, port->membase + SUP_UART_ISC); +} + +static void sunplus_stop_rx(struct uart_port *port) +{ + unsigned int isc; + + isc = readl(port->membase + SUP_UART_ISC); + isc &= ~SUP_UART_ISC_RXM; + writel(isc, port->membase + SUP_UART_ISC); +} + +static void sunplus_break_ctl(struct uart_port *port, int ctl) +{ + unsigned long flags; + unsigned int lcr; + + spin_lock_irqsave(&port->lock, flags); + + lcr = readl(port->membase + SUP_UART_LCR); + + if (ctl) + lcr |= SUP_UART_LCR_SBC; /* start break */ + else + lcr &= ~SUP_UART_LCR_SBC; /* stop break */ + + writel(lcr, port->membase + SUP_UART_LCR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void transmit_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + sp_uart_put_char(port, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + sunplus_stop_tx(port); + return; + } + + do { + sp_uart_put_char(port, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; + port->icount.tx++; + + if (uart_circ_empty(xmit)) + break; + } while (sunplus_tx_buf_not_full(port)); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + sunplus_stop_tx(port); +} + +static void receive_chars(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + unsigned int ch, flag; + + do { + ch = readl(port->membase + SUP_UART_DATA); + flag = TTY_NORMAL; + port->icount.rx++; + + if (unlikely(lsr & SUP_UART_LSR_BRK_ERROR_BITS)) { + if (lsr & SUP_UART_LSR_BC) { + lsr &= ~(SUP_UART_LSR_FE | SUP_UART_LSR_PE); + port->icount.brk++; + flag = TTY_BREAK; + if (uart_handle_break(port)) + goto ignore_char; + } else if (lsr & SUP_UART_LSR_PE) { + port->icount.parity++; + flag = TTY_PARITY; + } else if (lsr & SUP_UART_LSR_FE) { + port->icount.frame++; + flag = TTY_FRAME; + } + + if (lsr & SUP_UART_LSR_OE) + port->icount.overrun++; + } + + if (port->ignore_status_mask & SUP_DUMMY_READ) + goto ignore_char; + + if (uart_handle_sysrq_char(port, ch)) + goto ignore_char; + + 
uart_insert_char(port, lsr, SUP_UART_LSR_OE, ch, flag); + +ignore_char: + lsr = readl(port->membase + SUP_UART_LSR); + } while (lsr & SUP_UART_LSR_RX); + + tty_flip_buffer_push(&port->state->port); +} + +static irqreturn_t sunplus_uart_irq(int irq, void *args) +{ + struct uart_port *port = args; + unsigned int isc; + + spin_lock(&port->lock); + + isc = readl(port->membase + SUP_UART_ISC); + + if (isc & SUP_UART_ISC_RX) + receive_chars(port); + + if (isc & SUP_UART_ISC_TX) + transmit_chars(port); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static int sunplus_startup(struct uart_port *port) +{ + unsigned long flags; + unsigned int isc = 0; + int ret; + + ret = request_irq(port->irq, sunplus_uart_irq, 0, "sunplus_uart", port); + if (ret) + return ret; + + spin_lock_irqsave(&port->lock, flags); + /* isc define Bit[7:4] int setting, Bit[3:0] int status + * isc register will clean Bit[3:0] int status after read + * only do a write to Bit[7:4] int setting + */ + isc |= SUP_UART_ISC_RXM; + writel(isc, port->membase + SUP_UART_ISC); + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void sunplus_shutdown(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + /* isc define Bit[7:4] int setting, Bit[3:0] int status + * isc register will clean Bit[3:0] int status after read + * only do a write to Bit[7:4] int setting + */ + writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */ + spin_unlock_irqrestore(&port->lock, flags); + + free_irq(port->irq, port); +} + +static void sunplus_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *oldtermios) +{ + u32 ext, div, div_l, div_h, baud, lcr; + u32 clk = port->uartclk; + unsigned long flags; + + baud = uart_get_baud_rate(port, termios, oldtermios, 0, port->uartclk / 16); + + /* baud rate = uartclk / ((16 * divisor + 1) + divisor_ext) */ + clk += baud >> 1; + div = clk / baud; + ext = div & 0x0F; + div = (div >> 4) - 1; + div_l = (div & 0xFF) | (ext << 12); + div_h = div >> 8; + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr = UART_LCR_WLEN5; + break; + case CS6: + lcr = UART_LCR_WLEN6; + break; + case CS7: + lcr = UART_LCR_WLEN7; + break; + default: + lcr = UART_LCR_WLEN8; + break; + } + + if (termios->c_cflag & CSTOPB) + lcr |= UART_LCR_STOP; + + if (termios->c_cflag & PARENB) { + lcr |= UART_LCR_PARITY; + + if (!(termios->c_cflag & PARODD)) + lcr |= UART_LCR_EPAR; + } + + spin_lock_irqsave(&port->lock, flags); + + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = 0; + if (termios->c_iflag & INPCK) + port->read_status_mask |= SUP_UART_LSR_PE | SUP_UART_LSR_FE; + + if (termios->c_iflag & (BRKINT | PARMRK)) + port->read_status_mask |= SUP_UART_LSR_BC; + + /* Characters to ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= SUP_UART_LSR_FE | SUP_UART_LSR_PE; + + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= SUP_UART_LSR_BC; + + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= SUP_UART_LSR_OE; + } + + /* Ignore all characters if CREAD is not set */ + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask |= SUP_DUMMY_READ; + /* flush rx data FIFO */ + writel(0, port->membase + SUP_UART_RX_RESIDUE); + } + + /* Settings for baud rate divisor and lcr */ + writel(div_h, port->membase + SUP_UART_DIV_H); + writel(div_l, port->membase + SUP_UART_DIV_L); + writel(lcr, port->membase + SUP_UART_LCR); + + 
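	/*
	 * Worked example with hypothetical numbers (not from the original
	 * driver): for a 200 MHz uartclk and 115200 baud, clk / baud rounds
	 * to 1736, so ext = 1736 & 0xF = 8 and div = (1736 >> 4) - 1 = 107,
	 * giving div_l = 0x806b and div_h = 0; the resulting rate is
	 * 200000000 / (16 * (107 + 1) + 8) ~= 115207 baud.
	 */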
spin_unlock_irqrestore(&port->lock, flags); +} + +static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios) +{ + int new = termios->c_line; + + if (new == N_PPS) + port->flags |= UPF_HARDPPS_CD; + else + port->flags &= ~UPF_HARDPPS_CD; +} + +static const char *sunplus_type(struct uart_port *port) +{ + return port->type == PORT_SUNPLUS ? "sunplus_uart" : NULL; +} + +static void sunplus_config_port(struct uart_port *port, int type) +{ + if (type & UART_CONFIG_TYPE) + port->type = PORT_SUNPLUS; +} + +static int sunplus_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (ser->type != PORT_UNKNOWN && ser->type != PORT_SUNPLUS) + return -EINVAL; + + return 0; +} + +#if defined(CONFIG_SERIAL_SUNPLUS_CONSOLE) || defined(CONFIG_CONSOLE_POLL) +static void wait_for_xmitr(struct uart_port *port) +{ + unsigned int val; + int ret; + + /* Wait while FIFO is full or timeout */ + ret = readl_poll_timeout_atomic(port->membase + SUP_UART_LSR, val, + (val & SUP_UART_LSR_TX), 1, 10000); + + if (ret == -ETIMEDOUT) { + dev_err(port->dev, "Timeout waiting while UART TX FULL\n"); + return; + } +} +#endif + +#ifdef CONFIG_CONSOLE_POLL +static void sunplus_poll_put_char(struct uart_port *port, unsigned char data) +{ + wait_for_xmitr(port); + sp_uart_put_char(port, data); +} + +static int sunplus_poll_get_char(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + + if (!(lsr & SUP_UART_LSR_RX)) + return NO_POLL_CHAR; + + return readl(port->membase + SUP_UART_DATA); +} +#endif + +static const struct uart_ops sunplus_uart_ops = { + .tx_empty = sunplus_tx_empty, + .set_mctrl = sunplus_set_mctrl, + .get_mctrl = sunplus_get_mctrl, + .stop_tx = sunplus_stop_tx, + .start_tx = sunplus_start_tx, + .stop_rx = sunplus_stop_rx, + .break_ctl = sunplus_break_ctl, + .startup = sunplus_startup, + .shutdown = sunplus_shutdown, + .set_termios = sunplus_set_termios, + .set_ldisc = sunplus_set_ldisc, + .type = sunplus_type, + .config_port = sunplus_config_port, + .verify_port = sunplus_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_put_char = sunplus_poll_put_char, + .poll_get_char = sunplus_poll_get_char, +#endif +}; + +#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE +static struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR]; + +static void sunplus_uart_console_putchar(struct uart_port *port, + unsigned char ch) +{ + wait_for_xmitr(port); + sp_uart_put_char(port, ch); +} + +static void sunplus_console_write(struct console *co, + const char *s, + unsigned int count) +{ + unsigned long flags; + int locked = 1; + + local_irq_save(flags); + + if (sunplus_console_ports[co->index]->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock); + else + spin_lock(&sunplus_console_ports[co->index]->port.lock); + + uart_console_write(&sunplus_console_ports[co->index]->port, s, count, + sunplus_uart_console_putchar); + + if (locked) + spin_unlock(&sunplus_console_ports[co->index]->port.lock); + + local_irq_restore(flags); +} + +static int __init sunplus_console_setup(struct console *co, char *options) +{ + struct sunplus_uart_port *sup; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= SUP_UART_NR) + return -EINVAL; + + sup = sunplus_console_ports[co->index]; + if (!sup) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&sup->port, co, baud, parity, bits, flow); +} + +static 
struct uart_driver sunplus_uart_driver; +static struct console sunplus_uart_console = { + .name = "ttySUP", + .write = sunplus_console_write, + .device = uart_console_device, + .setup = sunplus_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sunplus_uart_driver +}; + +#define SERIAL_SUNPLUS_CONSOLE (&sunplus_uart_console) +#else +#define SERIAL_SUNPLUS_CONSOLE NULL +#endif + +static struct uart_driver sunplus_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "sunplus_uart", + .dev_name = "ttySUP", + .major = TTY_MAJOR, + .minor = 64, + .nr = SUP_UART_NR, + .cons = SERIAL_SUNPLUS_CONSOLE, +}; + +static void sunplus_uart_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static void sunplus_uart_reset_control_assert(void *data) +{ + reset_control_assert(data); +} + +static int sunplus_uart_probe(struct platform_device *pdev) +{ + struct sunplus_uart_port *sup; + struct uart_port *port; + struct resource *res; + int ret, irq; + + pdev->id = of_alias_get_id(pdev->dev.of_node, "serial"); + + if (pdev->id < 0 || pdev->id >= SUP_UART_NR) + return -EINVAL; + + sup = devm_kzalloc(&pdev->dev, sizeof(*sup), GFP_KERNEL); + if (!sup) + return -ENOMEM; + + sup->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(sup->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(sup->clk), "clk not found\n"); + + ret = clk_prepare_enable(sup->clk); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, sunplus_uart_disable_unprepare, sup->clk); + if (ret) + return ret; + + sup->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(sup->rstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(sup->rstc), "rstc not found\n"); + + port = &sup->port; + + port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(port->membase)) + return dev_err_probe(&pdev->dev, PTR_ERR(port->membase), "membase not found\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + port->mapbase = res->start; + port->uartclk = clk_get_rate(sup->clk); + port->line = pdev->id; + port->irq = irq; + port->dev = &pdev->dev; + port->iotype = UPIO_MEM; + port->ops = &sunplus_uart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->fifosize = 128; + + ret = reset_control_deassert(sup->rstc); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, sunplus_uart_reset_control_assert, sup->rstc); + if (ret) + return ret; + +#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE + sunplus_console_ports[sup->port.line] = sup; +#endif + + platform_set_drvdata(pdev, &sup->port); + + ret = uart_add_one_port(&sunplus_uart_driver, &sup->port); +#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE + if (ret) + sunplus_console_ports[sup->port.line] = NULL; +#endif + + return ret; +} + +static int sunplus_uart_remove(struct platform_device *pdev) +{ + struct sunplus_uart_port *sup = platform_get_drvdata(pdev); + + uart_remove_one_port(&sunplus_uart_driver, &sup->port); + + return 0; +} + +static int __maybe_unused sunplus_uart_suspend(struct device *dev) +{ + struct sunplus_uart_port *sup = dev_get_drvdata(dev); + + if (!uart_console(&sup->port)) + uart_suspend_port(&sunplus_uart_driver, &sup->port); + + return 0; +} + +static int __maybe_unused sunplus_uart_resume(struct device *dev) +{ + struct sunplus_uart_port *sup = dev_get_drvdata(dev); + + if (!uart_console(&sup->port)) + uart_resume_port(&sunplus_uart_driver, &sup->port); + + return 0; +} + +static const struct dev_pm_ops sunplus_uart_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sunplus_uart_suspend, 
sunplus_uart_resume) +}; + +static const struct of_device_id sp_uart_of_match[] = { + { .compatible = "sunplus,sp7021-uart" }, + {} +}; +MODULE_DEVICE_TABLE(of, sp_uart_of_match); + +static struct platform_driver sunplus_uart_platform_driver = { + .probe = sunplus_uart_probe, + .remove = sunplus_uart_remove, + .driver = { + .name = "sunplus_uart", + .of_match_table = sp_uart_of_match, + .pm = &sunplus_uart_pm_ops, + } +}; + +static int __init sunplus_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&sunplus_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&sunplus_uart_platform_driver); + if (ret) + uart_unregister_driver(&sunplus_uart_driver); + + return ret; +} +module_init(sunplus_uart_init); + +static void __exit sunplus_uart_exit(void) +{ + platform_driver_unregister(&sunplus_uart_platform_driver); + uart_unregister_driver(&sunplus_uart_driver); +} +module_exit(sunplus_uart_exit); + +#ifdef CONFIG_SERIAL_EARLYCON +static void sunplus_uart_putc(struct uart_port *port, unsigned char c) +{ + unsigned int val; + int ret; + + ret = readl_poll_timeout_atomic(port->membase + SUP_UART_LSR, val, + (val & UART_LSR_TEMT), 1, 10000); + if (ret) + return; + + writel(c, port->membase + SUP_UART_DATA); +} + +static void sunplus_uart_early_write(struct console *con, const char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, sunplus_uart_putc); +} + +static int __init +sunplus_uart_early_setup(struct earlycon_device *dev, const char *opt) +{ + if (!(dev->port.membase || dev->port.iobase)) + return -ENODEV; + + dev->con->write = sunplus_uart_early_write; + + return 0; +} +OF_EARLYCON_DECLARE(sunplus_uart, "sunplus,sp7021-uart", sunplus_uart_early_setup); +#endif + +MODULE_DESCRIPTION("Sunplus UART driver"); +MODULE_AUTHOR("Hammer Hsieh "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c new file mode 100644 index 000000000..7ace3aa49 --- /dev/null +++ b/drivers/tty/serial/sunsab.c @@ -0,0 +1,1161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC. + * + * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net) + * + * Rewrote buffer handling to use CIRC(Circular Buffer) macros. + * Maxim Krasnyanskiy + * + * Fixed to use tty_get_baud_rate, and to allow for arbitrary baud + * rates to be programmed into the UART. Also eliminated a lot of + * duplicated code in the console setup. + * Theodore Ts'o , 2001-Oct-12 + * + * Ported to new 2.5.x UART layer. + * David S. Miller + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "sunsab.h" + +struct uart_sunsab_port { + struct uart_port port; /* Generic UART port */ + union sab82532_async_regs __iomem *regs; /* Chip registers */ + unsigned long irqflags; /* IRQ state flags */ + int dsr; /* Current DSR state */ + unsigned int cec_timeout; /* Chip poll timeout... 
*/ + unsigned int tec_timeout; /* likewise */ + unsigned char interrupt_mask0;/* ISR0 masking */ + unsigned char interrupt_mask1;/* ISR1 masking */ + unsigned char pvr_dtr_bit; /* Which PVR bit is DTR */ + unsigned char pvr_dsr_bit; /* Which PVR bit is DSR */ + unsigned int gis_shift; + int type; /* SAB82532 version */ + + /* Setting configuration bits while the transmitter is active + * can cause garbage characters to get emitted by the chip. + * Therefore, we cache such writes here and do the real register + * write the next time the transmitter becomes idle. + */ + unsigned int cached_ebrg; + unsigned char cached_mode; + unsigned char cached_pvr; + unsigned char cached_dafo; +}; + +/* + * This assumes you have a 29.4912 MHz clock for your UART. + */ +#define SAB_BASE_BAUD ( 29491200 / 16 ) + +static char *sab82532_version[16] = { + "V1.0", "V2.0", "V3.2", "V(0x03)", + "V(0x04)", "V(0x05)", "V(0x06)", "V(0x07)", + "V(0x08)", "V(0x09)", "V(0x0a)", "V(0x0b)", + "V(0x0c)", "V(0x0d)", "V(0x0e)", "V(0x0f)" +}; + +#define SAB82532_MAX_TEC_TIMEOUT 200000 /* 1 character time (at 50 baud) */ +#define SAB82532_MAX_CEC_TIMEOUT 50000 /* 2.5 TX CLKs (at 50 baud) */ + +#define SAB82532_RECV_FIFO_SIZE 32 /* Standard async fifo sizes */ +#define SAB82532_XMIT_FIFO_SIZE 32 + +static __inline__ void sunsab_tec_wait(struct uart_sunsab_port *up) +{ + int timeout = up->tec_timeout; + + while ((readb(&up->regs->r.star) & SAB82532_STAR_TEC) && --timeout) + udelay(1); +} + +static __inline__ void sunsab_cec_wait(struct uart_sunsab_port *up) +{ + int timeout = up->cec_timeout; + + while ((readb(&up->regs->r.star) & SAB82532_STAR_CEC) && --timeout) + udelay(1); +} + +static struct tty_port * +receive_chars(struct uart_sunsab_port *up, + union sab82532_irq_status *stat) +{ + struct tty_port *port = NULL; + unsigned char buf[32]; + int saw_console_brk = 0; + int free_fifo = 0; + int count = 0; + int i; + + if (up->port.state != NULL) /* Unopened serial console */ + port = &up->port.state->port; + + /* Read number of BYTES (Character + Status) available. */ + if (stat->sreg.isr0 & SAB82532_ISR0_RPF) { + count = SAB82532_RECV_FIFO_SIZE; + free_fifo++; + } + + if (stat->sreg.isr0 & SAB82532_ISR0_TCD) { + count = readb(&up->regs->r.rbcl) & (SAB82532_RECV_FIFO_SIZE - 1); + free_fifo++; + } + + /* Issue a FIFO read command in case we where idle. */ + if (stat->sreg.isr0 & SAB82532_ISR0_TIME) { + sunsab_cec_wait(up); + writeb(SAB82532_CMDR_RFRD, &up->regs->w.cmdr); + return port; + } + + if (stat->sreg.isr0 & SAB82532_ISR0_RFO) + free_fifo++; + + /* Read the FIFO. */ + for (i = 0; i < count; i++) + buf[i] = readb(&up->regs->r.rfifo[i]); + + /* Issue Receive Message Complete command. 
*/ + if (free_fifo) { + sunsab_cec_wait(up); + writeb(SAB82532_CMDR_RMC, &up->regs->w.cmdr); + } + + /* Count may be zero for BRK, so we check for it here */ + if ((stat->sreg.isr1 & SAB82532_ISR1_BRK) && + (up->port.line == up->port.cons->index)) + saw_console_brk = 1; + + if (count == 0) { + if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) { + stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR | + SAB82532_ISR0_FERR); + up->port.icount.brk++; + uart_handle_break(&up->port); + } + } + + for (i = 0; i < count; i++) { + unsigned char ch = buf[i], flag; + + flag = TTY_NORMAL; + up->port.icount.rx++; + + if (unlikely(stat->sreg.isr0 & (SAB82532_ISR0_PERR | + SAB82532_ISR0_FERR | + SAB82532_ISR0_RFO)) || + unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) { + /* + * For statistics only + */ + if (stat->sreg.isr1 & SAB82532_ISR1_BRK) { + stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR | + SAB82532_ISR0_FERR); + up->port.icount.brk++; + /* + * We do the SysRQ and SAK checking + * here because otherwise the break + * may get masked by ignore_status_mask + * or read_status_mask. + */ + if (uart_handle_break(&up->port)) + continue; + } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR) + up->port.icount.parity++; + else if (stat->sreg.isr0 & SAB82532_ISR0_FERR) + up->port.icount.frame++; + if (stat->sreg.isr0 & SAB82532_ISR0_RFO) + up->port.icount.overrun++; + + /* + * Mask off conditions which should be ingored. + */ + stat->sreg.isr0 &= (up->port.read_status_mask & 0xff); + stat->sreg.isr1 &= ((up->port.read_status_mask >> 8) & 0xff); + + if (stat->sreg.isr1 & SAB82532_ISR1_BRK) { + flag = TTY_BREAK; + } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR) + flag = TTY_PARITY; + else if (stat->sreg.isr0 & SAB82532_ISR0_FERR) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&up->port, ch) || !port) + continue; + + if ((stat->sreg.isr0 & (up->port.ignore_status_mask & 0xff)) == 0 && + (stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0) + tty_insert_flip_char(port, ch, flag); + if (stat->sreg.isr0 & SAB82532_ISR0_RFO) + tty_insert_flip_char(port, 0, TTY_OVERRUN); + } + + if (saw_console_brk) + sun_do_break(); + + return port; +} + +static void sunsab_stop_tx(struct uart_port *); +static void sunsab_tx_idle(struct uart_sunsab_port *); + +static void transmit_chars(struct uart_sunsab_port *up, + union sab82532_irq_status *stat) +{ + struct circ_buf *xmit = &up->port.state->xmit; + int i; + + if (stat->sreg.isr1 & SAB82532_ISR1_ALLS) { + up->interrupt_mask1 |= SAB82532_IMR1_ALLS; + writeb(up->interrupt_mask1, &up->regs->w.imr1); + set_bit(SAB82532_ALLS, &up->irqflags); + } + +#if 0 /* bde@nwlink.com says this check causes problems */ + if (!(stat->sreg.isr1 & SAB82532_ISR1_XPR)) + return; +#endif + + if (!(readb(&up->regs->r.star) & SAB82532_STAR_XFW)) + return; + + set_bit(SAB82532_XPR, &up->irqflags); + sunsab_tx_idle(up); + + if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { + up->interrupt_mask1 |= SAB82532_IMR1_XPR; + writeb(up->interrupt_mask1, &up->regs->w.imr1); + return; + } + + up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); + writeb(up->interrupt_mask1, &up->regs->w.imr1); + clear_bit(SAB82532_ALLS, &up->irqflags); + + /* Stuff 32 bytes into Transmit FIFO. */ + clear_bit(SAB82532_XPR, &up->irqflags); + for (i = 0; i < up->port.fifosize; i++) { + writeb(xmit->buf[xmit->tail], + &up->regs->w.xfifo[i]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + /* Issue a Transmit Frame command. 
*/ + sunsab_cec_wait(up); + writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + if (uart_circ_empty(xmit)) + sunsab_stop_tx(&up->port); +} + +static void check_status(struct uart_sunsab_port *up, + union sab82532_irq_status *stat) +{ + if (stat->sreg.isr0 & SAB82532_ISR0_CDSC) + uart_handle_dcd_change(&up->port, + !(readb(&up->regs->r.vstr) & SAB82532_VSTR_CD)); + + if (stat->sreg.isr1 & SAB82532_ISR1_CSC) + uart_handle_cts_change(&up->port, + (readb(&up->regs->r.star) & SAB82532_STAR_CTS)); + + if ((readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ^ up->dsr) { + up->dsr = (readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ? 0 : 1; + up->port.icount.dsr++; + } + + wake_up_interruptible(&up->port.state->port.delta_msr_wait); +} + +static irqreturn_t sunsab_interrupt(int irq, void *dev_id) +{ + struct uart_sunsab_port *up = dev_id; + struct tty_port *port = NULL; + union sab82532_irq_status status; + unsigned long flags; + unsigned char gis; + + spin_lock_irqsave(&up->port.lock, flags); + + status.stat = 0; + gis = readb(&up->regs->r.gis) >> up->gis_shift; + if (gis & 1) + status.sreg.isr0 = readb(&up->regs->r.isr0); + if (gis & 2) + status.sreg.isr1 = readb(&up->regs->r.isr1); + + if (status.stat) { + if ((status.sreg.isr0 & (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME | + SAB82532_ISR0_RFO | SAB82532_ISR0_RPF)) || + (status.sreg.isr1 & SAB82532_ISR1_BRK)) + port = receive_chars(up, &status); + if ((status.sreg.isr0 & SAB82532_ISR0_CDSC) || + (status.sreg.isr1 & SAB82532_ISR1_CSC)) + check_status(up, &status); + if (status.sreg.isr1 & (SAB82532_ISR1_ALLS | SAB82532_ISR1_XPR)) + transmit_chars(up, &status); + } + + spin_unlock_irqrestore(&up->port.lock, flags); + + if (port) + tty_flip_buffer_push(port); + + return IRQ_HANDLED; +} + +/* port->lock is not held. */ +static unsigned int sunsab_tx_empty(struct uart_port *port) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + int ret; + + /* Do not need a lock for a state test like this. */ + if (test_bit(SAB82532_ALLS, &up->irqflags)) + ret = TIOCSER_TEMT; + else + ret = 0; + + return ret; +} + +/* port->lock held by caller. */ +static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + + if (mctrl & TIOCM_RTS) { + up->cached_mode &= ~SAB82532_MODE_FRTS; + up->cached_mode |= SAB82532_MODE_RTS; + } else { + up->cached_mode |= (SAB82532_MODE_FRTS | + SAB82532_MODE_RTS); + } + if (mctrl & TIOCM_DTR) { + up->cached_pvr &= ~(up->pvr_dtr_bit); + } else { + up->cached_pvr |= up->pvr_dtr_bit; + } + + set_bit(SAB82532_REGS_PENDING, &up->irqflags); + if (test_bit(SAB82532_XPR, &up->irqflags)) + sunsab_tx_idle(up); +} + +/* port->lock is held by caller and interrupts are disabled. */ +static unsigned int sunsab_get_mctrl(struct uart_port *port) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + unsigned char val; + unsigned int result; + + result = 0; + + val = readb(&up->regs->r.pvr); + result |= (val & up->pvr_dsr_bit) ? 0 : TIOCM_DSR; + + val = readb(&up->regs->r.vstr); + result |= (val & SAB82532_VSTR_CD) ? 0 : TIOCM_CAR; + + val = readb(&up->regs->r.star); + result |= (val & SAB82532_STAR_CTS) ? TIOCM_CTS : 0; + + return result; +} + +/* port->lock held by caller. 
*/ +static void sunsab_stop_tx(struct uart_port *port) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + + up->interrupt_mask1 |= SAB82532_IMR1_XPR; + writeb(up->interrupt_mask1, &up->regs->w.imr1); +} + +/* port->lock held by caller. */ +static void sunsab_tx_idle(struct uart_sunsab_port *up) +{ + if (test_bit(SAB82532_REGS_PENDING, &up->irqflags)) { + u8 tmp; + + clear_bit(SAB82532_REGS_PENDING, &up->irqflags); + writeb(up->cached_mode, &up->regs->rw.mode); + writeb(up->cached_pvr, &up->regs->rw.pvr); + writeb(up->cached_dafo, &up->regs->w.dafo); + + writeb(up->cached_ebrg & 0xff, &up->regs->w.bgr); + tmp = readb(&up->regs->rw.ccr2); + tmp &= ~0xc0; + tmp |= (up->cached_ebrg >> 2) & 0xc0; + writeb(tmp, &up->regs->rw.ccr2); + } +} + +/* port->lock held by caller. */ +static void sunsab_start_tx(struct uart_port *port) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + struct circ_buf *xmit = &up->port.state->xmit; + int i; + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + return; + + up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); + writeb(up->interrupt_mask1, &up->regs->w.imr1); + + if (!test_bit(SAB82532_XPR, &up->irqflags)) + return; + + clear_bit(SAB82532_ALLS, &up->irqflags); + clear_bit(SAB82532_XPR, &up->irqflags); + + for (i = 0; i < up->port.fifosize; i++) { + writeb(xmit->buf[xmit->tail], + &up->regs->w.xfifo[i]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + /* Issue a Transmit Frame command. */ + sunsab_cec_wait(up); + writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr); +} + +/* port->lock is not held. */ +static void sunsab_send_xchar(struct uart_port *port, char ch) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + unsigned long flags; + + if (ch == __DISABLED_CHAR) + return; + + spin_lock_irqsave(&up->port.lock, flags); + + sunsab_tec_wait(up); + writeb(ch, &up->regs->w.tic); + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +/* port->lock held by caller. */ +static void sunsab_stop_rx(struct uart_port *port) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + + up->interrupt_mask0 |= SAB82532_IMR0_TCD; + writeb(up->interrupt_mask1, &up->regs->w.imr0); +} + +/* port->lock is not held. */ +static void sunsab_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + unsigned long flags; + unsigned char val; + + spin_lock_irqsave(&up->port.lock, flags); + + val = up->cached_dafo; + if (break_state) + val |= SAB82532_DAFO_XBRK; + else + val &= ~SAB82532_DAFO_XBRK; + up->cached_dafo = val; + + set_bit(SAB82532_REGS_PENDING, &up->irqflags); + if (test_bit(SAB82532_XPR, &up->irqflags)) + sunsab_tx_idle(up); + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +/* port->lock is not held. */ +static int sunsab_startup(struct uart_port *port) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + unsigned long flags; + unsigned char tmp; + int err = request_irq(up->port.irq, sunsab_interrupt, + IRQF_SHARED, "sab", up); + if (err) + return err; + + spin_lock_irqsave(&up->port.lock, flags); + + /* + * Wait for any commands or immediate characters + */ + sunsab_cec_wait(up); + sunsab_tec_wait(up); + + /* + * Clear the FIFO buffers. 
+ */ + writeb(SAB82532_CMDR_RRES, &up->regs->w.cmdr); + sunsab_cec_wait(up); + writeb(SAB82532_CMDR_XRES, &up->regs->w.cmdr); + + /* + * Clear the interrupt registers. + */ + (void) readb(&up->regs->r.isr0); + (void) readb(&up->regs->r.isr1); + + /* + * Now, initialize the UART + */ + writeb(0, &up->regs->w.ccr0); /* power-down */ + writeb(SAB82532_CCR0_MCE | SAB82532_CCR0_SC_NRZ | + SAB82532_CCR0_SM_ASYNC, &up->regs->w.ccr0); + writeb(SAB82532_CCR1_ODS | SAB82532_CCR1_BCR | 7, &up->regs->w.ccr1); + writeb(SAB82532_CCR2_BDF | SAB82532_CCR2_SSEL | + SAB82532_CCR2_TOE, &up->regs->w.ccr2); + writeb(0, &up->regs->w.ccr3); + writeb(SAB82532_CCR4_MCK4 | SAB82532_CCR4_EBRG, &up->regs->w.ccr4); + up->cached_mode = (SAB82532_MODE_RTS | SAB82532_MODE_FCTS | + SAB82532_MODE_RAC); + writeb(up->cached_mode, &up->regs->w.mode); + writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc); + + tmp = readb(&up->regs->rw.ccr0); + tmp |= SAB82532_CCR0_PU; /* power-up */ + writeb(tmp, &up->regs->rw.ccr0); + + /* + * Finally, enable interrupts + */ + up->interrupt_mask0 = (SAB82532_IMR0_PERR | SAB82532_IMR0_FERR | + SAB82532_IMR0_PLLA); + writeb(up->interrupt_mask0, &up->regs->w.imr0); + up->interrupt_mask1 = (SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS | + SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN | + SAB82532_IMR1_CSC | SAB82532_IMR1_XON | + SAB82532_IMR1_XPR); + writeb(up->interrupt_mask1, &up->regs->w.imr1); + set_bit(SAB82532_ALLS, &up->irqflags); + set_bit(SAB82532_XPR, &up->irqflags); + + spin_unlock_irqrestore(&up->port.lock, flags); + + return 0; +} + +/* port->lock is not held. */ +static void sunsab_shutdown(struct uart_port *port) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + + /* Disable Interrupts */ + up->interrupt_mask0 = 0xff; + writeb(up->interrupt_mask0, &up->regs->w.imr0); + up->interrupt_mask1 = 0xff; + writeb(up->interrupt_mask1, &up->regs->w.imr1); + + /* Disable break condition */ + up->cached_dafo = readb(&up->regs->rw.dafo); + up->cached_dafo &= ~SAB82532_DAFO_XBRK; + writeb(up->cached_dafo, &up->regs->rw.dafo); + + /* Disable Receiver */ + up->cached_mode &= ~SAB82532_MODE_RAC; + writeb(up->cached_mode, &up->regs->rw.mode); + + /* + * XXX FIXME + * + * If the chip is powered down here the system hangs/crashes during + * reboot or shutdown. This needs to be investigated further, + * similar behaviour occurs in 2.4 when the driver is configured + * as a module only. One hint may be that data is sometimes + * transmitted at 9600 baud during shutdown (regardless of the + * speed the chip was configured for when the port was open). + */ +#if 0 + /* Power Down */ + tmp = readb(&up->regs->rw.ccr0); + tmp &= ~SAB82532_CCR0_PU; + writeb(tmp, &up->regs->rw.ccr0); +#endif + + spin_unlock_irqrestore(&up->port.lock, flags); + free_irq(up->port.irq, up); +} + +/* + * This is used to figure out the divisor speeds. + * + * The formula is: Baud = SAB_BASE_BAUD / ((N + 1) * (1 << M)), + * + * with 0 <= N < 64 and 0 <= M < 16 + */ + +static void calc_ebrg(int baud, int *n_ret, int *m_ret) +{ + int n, m; + + if (baud == 0) { + *n_ret = 0; + *m_ret = 0; + return; + } + + /* + * We scale numbers by 10 so that we get better accuracy + * without having to use floating point. Here we increment m + * until n is within the valid range. 
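 *
 * For example: with SAB_BASE_BAUD = 29491200 / 16 = 1843200 and
 * baud = 9600, n starts at 18432000 / 9600 = 1920, is halved twice to
 * 480 (m = 2), and rounds to 48, so *n_ret = 47 and *m_ret = 2;
 * 1843200 / ((47 + 1) * (1 << 2)) = 9600 exactly.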
+ */ + n = (SAB_BASE_BAUD * 10) / baud; + m = 0; + while (n >= 640) { + n = n / 2; + m++; + } + n = (n+5) / 10; + /* + * We try very hard to avoid speeds with M == 0 since they may + * not work correctly for XTAL frequences above 10 MHz. + */ + if ((m == 0) && ((n & 1) == 0)) { + n = n / 2; + m++; + } + *n_ret = n - 1; + *m_ret = m; +} + +/* Internal routine, port->lock is held and local interrupts are disabled. */ +static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cflag, + unsigned int iflag, unsigned int baud, + unsigned int quot) +{ + unsigned char dafo; + int n, m; + + /* Byte size and parity */ + switch (cflag & CSIZE) { + case CS5: dafo = SAB82532_DAFO_CHL5; break; + case CS6: dafo = SAB82532_DAFO_CHL6; break; + case CS7: dafo = SAB82532_DAFO_CHL7; break; + case CS8: dafo = SAB82532_DAFO_CHL8; break; + /* Never happens, but GCC is too dumb to figure it out */ + default: dafo = SAB82532_DAFO_CHL5; break; + } + + if (cflag & CSTOPB) + dafo |= SAB82532_DAFO_STOP; + + if (cflag & PARENB) + dafo |= SAB82532_DAFO_PARE; + + if (cflag & PARODD) { + dafo |= SAB82532_DAFO_PAR_ODD; + } else { + dafo |= SAB82532_DAFO_PAR_EVEN; + } + up->cached_dafo = dafo; + + calc_ebrg(baud, &n, &m); + + up->cached_ebrg = n | (m << 6); + + up->tec_timeout = (10 * 1000000) / baud; + up->cec_timeout = up->tec_timeout >> 2; + + /* CTS flow control flags */ + /* We encode read_status_mask and ignore_status_mask like so: + * + * --------------------- + * | ... | ISR1 | ISR0 | + * --------------------- + * .. 15 8 7 0 + */ + + up->port.read_status_mask = (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME | + SAB82532_ISR0_RFO | SAB82532_ISR0_RPF | + SAB82532_ISR0_CDSC); + up->port.read_status_mask |= (SAB82532_ISR1_CSC | + SAB82532_ISR1_ALLS | + SAB82532_ISR1_XPR) << 8; + if (iflag & INPCK) + up->port.read_status_mask |= (SAB82532_ISR0_PERR | + SAB82532_ISR0_FERR); + if (iflag & (IGNBRK | BRKINT | PARMRK)) + up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8); + + /* + * Characteres to ignore + */ + up->port.ignore_status_mask = 0; + if (iflag & IGNPAR) + up->port.ignore_status_mask |= (SAB82532_ISR0_PERR | + SAB82532_ISR0_FERR); + if (iflag & IGNBRK) { + up->port.ignore_status_mask |= (SAB82532_ISR1_BRK << 8); + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (iflag & IGNPAR) + up->port.ignore_status_mask |= SAB82532_ISR0_RFO; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((cflag & CREAD) == 0) + up->port.ignore_status_mask |= (SAB82532_ISR0_RPF | + SAB82532_ISR0_TCD); + + uart_update_timeout(&up->port, cflag, + (up->port.uartclk / (16 * quot))); + + /* Now schedule a register update when the chip's + * transmitter is idle. + */ + up->cached_mode |= SAB82532_MODE_RAC; + set_bit(SAB82532_REGS_PENDING, &up->irqflags); + if (test_bit(SAB82532_XPR, &up->irqflags)) + sunsab_tx_idle(up); +} + +/* port->lock is not held. 
*/ +static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + unsigned long flags; + unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000); + unsigned int quot = uart_get_divisor(port, baud); + + spin_lock_irqsave(&up->port.lock, flags); + sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static const char *sunsab_type(struct uart_port *port) +{ + struct uart_sunsab_port *up = (void *)port; + static char buf[36]; + + sprintf(buf, "SAB82532 %s", sab82532_version[up->type]); + return buf; +} + +static void sunsab_release_port(struct uart_port *port) +{ +} + +static int sunsab_request_port(struct uart_port *port) +{ + return 0; +} + +static void sunsab_config_port(struct uart_port *port, int flags) +{ +} + +static int sunsab_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return -EINVAL; +} + +static const struct uart_ops sunsab_pops = { + .tx_empty = sunsab_tx_empty, + .set_mctrl = sunsab_set_mctrl, + .get_mctrl = sunsab_get_mctrl, + .stop_tx = sunsab_stop_tx, + .start_tx = sunsab_start_tx, + .send_xchar = sunsab_send_xchar, + .stop_rx = sunsab_stop_rx, + .break_ctl = sunsab_break_ctl, + .startup = sunsab_startup, + .shutdown = sunsab_shutdown, + .set_termios = sunsab_set_termios, + .type = sunsab_type, + .release_port = sunsab_release_port, + .request_port = sunsab_request_port, + .config_port = sunsab_config_port, + .verify_port = sunsab_verify_port, +}; + +static struct uart_driver sunsab_reg = { + .owner = THIS_MODULE, + .driver_name = "sunsab", + .dev_name = "ttyS", + .major = TTY_MAJOR, +}; + +static struct uart_sunsab_port *sunsab_ports; + +#ifdef CONFIG_SERIAL_SUNSAB_CONSOLE + +static void sunsab_console_putchar(struct uart_port *port, unsigned char c) +{ + struct uart_sunsab_port *up = + container_of(port, struct uart_sunsab_port, port); + + sunsab_tec_wait(up); + writeb(c, &up->regs->w.tic); +} + +static void sunsab_console_write(struct console *con, const char *s, unsigned n) +{ + struct uart_sunsab_port *up = &sunsab_ports[con->index]; + unsigned long flags; + int locked = 1; + + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); + + uart_console_write(&up->port, s, n, sunsab_console_putchar); + sunsab_tec_wait(up); + + if (locked) + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static int sunsab_console_setup(struct console *con, char *options) +{ + struct uart_sunsab_port *up = &sunsab_ports[con->index]; + unsigned long flags; + unsigned int baud, quot; + + /* + * The console framework calls us for each and every port + * registered. Defer the console setup until the requested + * port has been properly discovered. A bit of a hack, + * though... 
+ */ + if (up->port.type != PORT_SUNSAB) + return -EINVAL; + + printk("Console: ttyS%d (SAB82532)\n", + (sunsab_reg.minor - 64) + con->index); + + sunserial_console_termios(con, up->port.dev->of_node); + + switch (con->cflag & CBAUD) { + case B150: baud = 150; break; + case B300: baud = 300; break; + case B600: baud = 600; break; + case B1200: baud = 1200; break; + case B2400: baud = 2400; break; + case B4800: baud = 4800; break; + default: case B9600: baud = 9600; break; + case B19200: baud = 19200; break; + case B38400: baud = 38400; break; + case B57600: baud = 57600; break; + case B115200: baud = 115200; break; + case B230400: baud = 230400; break; + case B460800: baud = 460800; break; + } + + /* + * Temporary fix. + */ + spin_lock_init(&up->port.lock); + + /* + * Initialize the hardware + */ + sunsab_startup(&up->port); + + spin_lock_irqsave(&up->port.lock, flags); + + /* + * Finally, enable interrupts + */ + up->interrupt_mask0 = SAB82532_IMR0_PERR | SAB82532_IMR0_FERR | + SAB82532_IMR0_PLLA | SAB82532_IMR0_CDSC; + writeb(up->interrupt_mask0, &up->regs->w.imr0); + up->interrupt_mask1 = SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS | + SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN | + SAB82532_IMR1_CSC | SAB82532_IMR1_XON | + SAB82532_IMR1_XPR; + writeb(up->interrupt_mask1, &up->regs->w.imr1); + + quot = uart_get_divisor(&up->port, baud); + sunsab_convert_to_sab(up, con->cflag, 0, baud, quot); + sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS); + + spin_unlock_irqrestore(&up->port.lock, flags); + + return 0; +} + +static struct console sunsab_console = { + .name = "ttyS", + .write = sunsab_console_write, + .device = uart_console_device, + .setup = sunsab_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sunsab_reg, +}; + +static inline struct console *SUNSAB_CONSOLE(void) +{ + return &sunsab_console; +} +#else +#define SUNSAB_CONSOLE() (NULL) +#define sunsab_console_init() do { } while (0) +#endif + +static int sunsab_init_one(struct uart_sunsab_port *up, + struct platform_device *op, + unsigned long offset, + int line) +{ + up->port.line = line; + up->port.dev = &op->dev; + + up->port.mapbase = op->resource[0].start + offset; + up->port.membase = of_ioremap(&op->resource[0], offset, + sizeof(union sab82532_async_regs), + "sab"); + if (!up->port.membase) + return -ENOMEM; + up->regs = (union sab82532_async_regs __iomem *) up->port.membase; + + up->port.irq = op->archdata.irqs[0]; + + up->port.fifosize = SAB82532_XMIT_FIFO_SIZE; + up->port.iotype = UPIO_MEM; + up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSAB_CONSOLE); + + writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc); + + up->port.ops = &sunsab_pops; + up->port.type = PORT_SUNSAB; + up->port.uartclk = SAB_BASE_BAUD; + + up->type = readb(&up->regs->r.vstr) & 0x0f; + writeb(~((1 << 1) | (1 << 2) | (1 << 4)), &up->regs->w.pcr); + writeb(0xff, &up->regs->w.pim); + if ((up->port.line & 0x1) == 0) { + up->pvr_dsr_bit = (1 << 0); + up->pvr_dtr_bit = (1 << 1); + up->gis_shift = 2; + } else { + up->pvr_dsr_bit = (1 << 3); + up->pvr_dtr_bit = (1 << 2); + up->gis_shift = 0; + } + up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4); + writeb(up->cached_pvr, &up->regs->w.pvr); + up->cached_mode = readb(&up->regs->rw.mode); + up->cached_mode |= SAB82532_MODE_FRTS; + writeb(up->cached_mode, &up->regs->rw.mode); + up->cached_mode |= SAB82532_MODE_RTS; + writeb(up->cached_mode, &up->regs->rw.mode); + + up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT; + up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT; + + return 0; +} + +static int sab_probe(struct 
platform_device *op) +{ + static int inst; + struct uart_sunsab_port *up; + int err; + + up = &sunsab_ports[inst * 2]; + + err = sunsab_init_one(&up[0], op, + 0, + (inst * 2) + 0); + if (err) + goto out; + + err = sunsab_init_one(&up[1], op, + sizeof(union sab82532_async_regs), + (inst * 2) + 1); + if (err) + goto out1; + + sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, + &sunsab_reg, up[0].port.line, + false); + + sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, + &sunsab_reg, up[1].port.line, + false); + + err = uart_add_one_port(&sunsab_reg, &up[0].port); + if (err) + goto out2; + + err = uart_add_one_port(&sunsab_reg, &up[1].port); + if (err) + goto out3; + + platform_set_drvdata(op, &up[0]); + + inst++; + + return 0; + +out3: + uart_remove_one_port(&sunsab_reg, &up[0].port); +out2: + of_iounmap(&op->resource[0], + up[1].port.membase, + sizeof(union sab82532_async_regs)); +out1: + of_iounmap(&op->resource[0], + up[0].port.membase, + sizeof(union sab82532_async_regs)); +out: + return err; +} + +static int sab_remove(struct platform_device *op) +{ + struct uart_sunsab_port *up = platform_get_drvdata(op); + + uart_remove_one_port(&sunsab_reg, &up[1].port); + uart_remove_one_port(&sunsab_reg, &up[0].port); + of_iounmap(&op->resource[0], + up[1].port.membase, + sizeof(union sab82532_async_regs)); + of_iounmap(&op->resource[0], + up[0].port.membase, + sizeof(union sab82532_async_regs)); + + return 0; +} + +static const struct of_device_id sab_match[] = { + { + .name = "se", + }, + { + .name = "serial", + .compatible = "sab82532", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, sab_match); + +static struct platform_driver sab_driver = { + .driver = { + .name = "sab", + .of_match_table = sab_match, + }, + .probe = sab_probe, + .remove = sab_remove, +}; + +static int __init sunsab_init(void) +{ + struct device_node *dp; + int err; + int num_channels = 0; + + for_each_node_by_name(dp, "se") + num_channels += 2; + for_each_node_by_name(dp, "serial") { + if (of_device_is_compatible(dp, "sab82532")) + num_channels += 2; + } + + if (num_channels) { + sunsab_ports = kcalloc(num_channels, + sizeof(struct uart_sunsab_port), + GFP_KERNEL); + if (!sunsab_ports) + return -ENOMEM; + + err = sunserial_register_minors(&sunsab_reg, num_channels); + if (err) { + kfree(sunsab_ports); + sunsab_ports = NULL; + + return err; + } + } + + err = platform_driver_register(&sab_driver); + if (err) { + kfree(sunsab_ports); + sunsab_ports = NULL; + } + + return err; +} + +static void __exit sunsab_exit(void) +{ + platform_driver_unregister(&sab_driver); + if (sunsab_reg.nr) { + sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr); + } + + kfree(sunsab_ports); + sunsab_ports = NULL; +} + +module_init(sunsab_init); +module_exit(sunsab_exit); + +MODULE_AUTHOR("Eddie C. Dost and David S. Miller"); +MODULE_DESCRIPTION("Sun SAB82532 serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/sunsab.h b/drivers/tty/serial/sunsab.h new file mode 100644 index 000000000..1644031aa --- /dev/null +++ b/drivers/tty/serial/sunsab.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* sunsab.h: Register Definitions for the Siemens SAB82532 DUSCC + * + * Copyright (C) 1997 Eddie C. 
Dost (ecd@skynet.be) + */ + +#ifndef _SUNSAB_H +#define _SUNSAB_H + +struct sab82532_async_rd_regs { + u8 rfifo[0x20]; /* Receive FIFO */ + u8 star; /* Status Register */ + u8 __pad1; + u8 mode; /* Mode Register */ + u8 timr; /* Timer Register */ + u8 xon; /* XON Character */ + u8 xoff; /* XOFF Character */ + u8 tcr; /* Termination Character Register */ + u8 dafo; /* Data Format */ + u8 rfc; /* RFIFO Control Register */ + u8 __pad2; + u8 rbcl; /* Receive Byte Count Low */ + u8 rbch; /* Receive Byte Count High */ + u8 ccr0; /* Channel Configuration Register 0 */ + u8 ccr1; /* Channel Configuration Register 1 */ + u8 ccr2; /* Channel Configuration Register 2 */ + u8 ccr3; /* Channel Configuration Register 3 */ + u8 __pad3[4]; + u8 vstr; /* Version Status Register */ + u8 __pad4[3]; + u8 gis; /* Global Interrupt Status */ + u8 ipc; /* Interrupt Port Configuration */ + u8 isr0; /* Interrupt Status 0 */ + u8 isr1; /* Interrupt Status 1 */ + u8 pvr; /* Port Value Register */ + u8 pis; /* Port Interrupt Status */ + u8 pcr; /* Port Configuration Register */ + u8 ccr4; /* Channel Configuration Register 4 */ +}; + +struct sab82532_async_wr_regs { + u8 xfifo[0x20]; /* Transmit FIFO */ + u8 cmdr; /* Command Register */ + u8 __pad1; + u8 mode; + u8 timr; + u8 xon; + u8 xoff; + u8 tcr; + u8 dafo; + u8 rfc; + u8 __pad2; + u8 xbcl; /* Transmit Byte Count Low */ + u8 xbch; /* Transmit Byte Count High */ + u8 ccr0; + u8 ccr1; + u8 ccr2; + u8 ccr3; + u8 tsax; /* Time-Slot Assignment Reg. Transmit */ + u8 tsar; /* Time-Slot Assignment Reg. Receive */ + u8 xccr; /* Transmit Channel Capacity Register */ + u8 rccr; /* Receive Channel Capacity Register */ + u8 bgr; /* Baud Rate Generator Register */ + u8 tic; /* Transmit Immediate Character */ + u8 mxn; /* Mask XON Character */ + u8 mxf; /* Mask XOFF Character */ + u8 iva; /* Interrupt Vector Address */ + u8 ipc; + u8 imr0; /* Interrupt Mask Register 0 */ + u8 imr1; /* Interrupt Mask Register 1 */ + u8 pvr; + u8 pim; /* Port Interrupt Mask */ + u8 pcr; + u8 ccr4; +}; + +struct sab82532_async_rw_regs { /* Read/Write registers */ + u8 __pad1[0x20]; + u8 __pad2; + u8 __pad3; + u8 mode; + u8 timr; + u8 xon; + u8 xoff; + u8 tcr; + u8 dafo; + u8 rfc; + u8 __pad4; + u8 __pad5; + u8 __pad6; + u8 ccr0; + u8 ccr1; + u8 ccr2; + u8 ccr3; + u8 __pad7; + u8 __pad8; + u8 __pad9; + u8 __pad10; + u8 __pad11; + u8 __pad12; + u8 __pad13; + u8 __pad14; + u8 __pad15; + u8 ipc; + u8 __pad16; + u8 __pad17; + u8 pvr; + u8 __pad18; + u8 pcr; + u8 ccr4; +}; + +union sab82532_async_regs { + __volatile__ struct sab82532_async_rd_regs r; + __volatile__ struct sab82532_async_wr_regs w; + __volatile__ struct sab82532_async_rw_regs rw; +}; + +union sab82532_irq_status { + unsigned short stat; + struct { + unsigned char isr0; + unsigned char isr1; + } sreg; +}; + +/* irqflags bits */ +#define SAB82532_ALLS 0x00000001 +#define SAB82532_XPR 0x00000002 +#define SAB82532_REGS_PENDING 0x00000004 + +/* RFIFO Status Byte */ +#define SAB82532_RSTAT_PE 0x80 +#define SAB82532_RSTAT_FE 0x40 +#define SAB82532_RSTAT_PARITY 0x01 + +/* Status Register (STAR) */ +#define SAB82532_STAR_XDOV 0x80 +#define SAB82532_STAR_XFW 0x40 +#define SAB82532_STAR_RFNE 0x20 +#define SAB82532_STAR_FCS 0x10 +#define SAB82532_STAR_TEC 0x08 +#define SAB82532_STAR_CEC 0x04 +#define SAB82532_STAR_CTS 0x02 + +/* Command Register (CMDR) */ +#define SAB82532_CMDR_RMC 0x80 +#define SAB82532_CMDR_RRES 0x40 +#define SAB82532_CMDR_RFRD 0x20 +#define SAB82532_CMDR_STI 0x10 +#define SAB82532_CMDR_XF 0x08 +#define SAB82532_CMDR_XRES 0x01 + 
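/*
 * Illustrative helper (editorial sketch, not part of the original patch;
 * the name is hypothetical): sunsab.c keeps both interrupt status bytes in
 * a single mask word, ISR0 in bits 7:0 and ISR1 in bits 15:8, as described
 * by the read_status_mask encoding comment in sunsab_convert_to_sab().
 */
static inline unsigned int sab82532_pack_isr(unsigned char isr0,
					     unsigned char isr1)
{
	return isr0 | ((unsigned int)isr1 << 8);
}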
+/* Mode Register (MODE) */ +#define SAB82532_MODE_FRTS 0x40 +#define SAB82532_MODE_FCTS 0x20 +#define SAB82532_MODE_FLON 0x10 +#define SAB82532_MODE_RAC 0x08 +#define SAB82532_MODE_RTS 0x04 +#define SAB82532_MODE_TRS 0x02 +#define SAB82532_MODE_TLP 0x01 + +/* Timer Register (TIMR) */ +#define SAB82532_TIMR_CNT_MASK 0xe0 +#define SAB82532_TIMR_VALUE_MASK 0x1f + +/* Data Format (DAFO) */ +#define SAB82532_DAFO_XBRK 0x40 +#define SAB82532_DAFO_STOP 0x20 +#define SAB82532_DAFO_PAR_SPACE 0x00 +#define SAB82532_DAFO_PAR_ODD 0x08 +#define SAB82532_DAFO_PAR_EVEN 0x10 +#define SAB82532_DAFO_PAR_MARK 0x18 +#define SAB82532_DAFO_PARE 0x04 +#define SAB82532_DAFO_CHL8 0x00 +#define SAB82532_DAFO_CHL7 0x01 +#define SAB82532_DAFO_CHL6 0x02 +#define SAB82532_DAFO_CHL5 0x03 + +/* RFIFO Control Register (RFC) */ +#define SAB82532_RFC_DPS 0x40 +#define SAB82532_RFC_DXS 0x20 +#define SAB82532_RFC_RFDF 0x10 +#define SAB82532_RFC_RFTH_1 0x00 +#define SAB82532_RFC_RFTH_4 0x04 +#define SAB82532_RFC_RFTH_16 0x08 +#define SAB82532_RFC_RFTH_32 0x0c +#define SAB82532_RFC_TCDE 0x01 + +/* Received Byte Count High (RBCH) */ +#define SAB82532_RBCH_DMA 0x80 +#define SAB82532_RBCH_CAS 0x20 + +/* Transmit Byte Count High (XBCH) */ +#define SAB82532_XBCH_DMA 0x80 +#define SAB82532_XBCH_CAS 0x20 +#define SAB82532_XBCH_XC 0x10 + +/* Channel Configuration Register 0 (CCR0) */ +#define SAB82532_CCR0_PU 0x80 +#define SAB82532_CCR0_MCE 0x40 +#define SAB82532_CCR0_SC_NRZ 0x00 +#define SAB82532_CCR0_SC_NRZI 0x08 +#define SAB82532_CCR0_SC_FM0 0x10 +#define SAB82532_CCR0_SC_FM1 0x14 +#define SAB82532_CCR0_SC_MANCH 0x18 +#define SAB82532_CCR0_SM_HDLC 0x00 +#define SAB82532_CCR0_SM_SDLC_LOOP 0x01 +#define SAB82532_CCR0_SM_BISYNC 0x02 +#define SAB82532_CCR0_SM_ASYNC 0x03 + +/* Channel Configuration Register 1 (CCR1) */ +#define SAB82532_CCR1_ODS 0x10 +#define SAB82532_CCR1_BCR 0x08 +#define SAB82532_CCR1_CM_MASK 0x07 + +/* Channel Configuration Register 2 (CCR2) */ +#define SAB82532_CCR2_SOC1 0x80 +#define SAB82532_CCR2_SOC0 0x40 +#define SAB82532_CCR2_BR9 0x80 +#define SAB82532_CCR2_BR8 0x40 +#define SAB82532_CCR2_BDF 0x20 +#define SAB82532_CCR2_SSEL 0x10 +#define SAB82532_CCR2_XCS0 0x20 +#define SAB82532_CCR2_RCS0 0x10 +#define SAB82532_CCR2_TOE 0x08 +#define SAB82532_CCR2_RWX 0x04 +#define SAB82532_CCR2_DIV 0x01 + +/* Channel Configuration Register 3 (CCR3) */ +#define SAB82532_CCR3_PSD 0x01 + +/* Time Slot Assignment Register Transmit (TSAX) */ +#define SAB82532_TSAX_TSNX_MASK 0xfc +#define SAB82532_TSAX_XCS2 0x02 /* see also CCR2 */ +#define SAB82532_TSAX_XCS1 0x01 + +/* Time Slot Assignment Register Receive (TSAR) */ +#define SAB82532_TSAR_TSNR_MASK 0xfc +#define SAB82532_TSAR_RCS2 0x02 /* see also CCR2 */ +#define SAB82532_TSAR_RCS1 0x01 + +/* Version Status Register (VSTR) */ +#define SAB82532_VSTR_CD 0x80 +#define SAB82532_VSTR_DPLA 0x40 +#define SAB82532_VSTR_VN_MASK 0x0f +#define SAB82532_VSTR_VN_1 0x00 +#define SAB82532_VSTR_VN_2 0x01 +#define SAB82532_VSTR_VN_3_2 0x02 + +/* Global Interrupt Status Register (GIS) */ +#define SAB82532_GIS_PI 0x80 +#define SAB82532_GIS_ISA1 0x08 +#define SAB82532_GIS_ISA0 0x04 +#define SAB82532_GIS_ISB1 0x02 +#define SAB82532_GIS_ISB0 0x01 + +/* Interrupt Vector Address (IVA) */ +#define SAB82532_IVA_MASK 0xf1 + +/* Interrupt Port Configuration (IPC) */ +#define SAB82532_IPC_VIS 0x80 +#define SAB82532_IPC_SLA1 0x10 +#define SAB82532_IPC_SLA0 0x08 +#define SAB82532_IPC_CASM 0x04 +#define SAB82532_IPC_IC_OPEN_DRAIN 0x00 +#define SAB82532_IPC_IC_ACT_LOW 0x01 +#define SAB82532_IPC_IC_ACT_HIGH 0x03 
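The DAFO bits above OR together to describe the asynchronous character format. A small illustrative helper (the function name is a placeholder, not something defined by this header), encoding 8 data bits, even parity and one stop bit:

	#include <linux/types.h>
	#include "sunsab.h"

	/* "8E1": CHL8 is the all-zero encoding and is kept only to make the
	 * character length explicit; OR-ing in SAB82532_DAFO_STOP would
	 * select two stop bits instead of one.
	 */
	static u8 example_dafo_8e1(void)
	{
		return SAB82532_DAFO_CHL8 |
		       SAB82532_DAFO_PARE |
		       SAB82532_DAFO_PAR_EVEN;
	}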
+ +/* Interrupt Status Register 0 (ISR0) */ +#define SAB82532_ISR0_TCD 0x80 +#define SAB82532_ISR0_TIME 0x40 +#define SAB82532_ISR0_PERR 0x20 +#define SAB82532_ISR0_FERR 0x10 +#define SAB82532_ISR0_PLLA 0x08 +#define SAB82532_ISR0_CDSC 0x04 +#define SAB82532_ISR0_RFO 0x02 +#define SAB82532_ISR0_RPF 0x01 + +/* Interrupt Status Register 1 (ISR1) */ +#define SAB82532_ISR1_BRK 0x80 +#define SAB82532_ISR1_BRKT 0x40 +#define SAB82532_ISR1_ALLS 0x20 +#define SAB82532_ISR1_XOFF 0x10 +#define SAB82532_ISR1_TIN 0x08 +#define SAB82532_ISR1_CSC 0x04 +#define SAB82532_ISR1_XON 0x02 +#define SAB82532_ISR1_XPR 0x01 + +/* Interrupt Mask Register 0 (IMR0) */ +#define SAB82532_IMR0_TCD 0x80 +#define SAB82532_IMR0_TIME 0x40 +#define SAB82532_IMR0_PERR 0x20 +#define SAB82532_IMR0_FERR 0x10 +#define SAB82532_IMR0_PLLA 0x08 +#define SAB82532_IMR0_CDSC 0x04 +#define SAB82532_IMR0_RFO 0x02 +#define SAB82532_IMR0_RPF 0x01 + +/* Interrupt Mask Register 1 (IMR1) */ +#define SAB82532_IMR1_BRK 0x80 +#define SAB82532_IMR1_BRKT 0x40 +#define SAB82532_IMR1_ALLS 0x20 +#define SAB82532_IMR1_XOFF 0x10 +#define SAB82532_IMR1_TIN 0x08 +#define SAB82532_IMR1_CSC 0x04 +#define SAB82532_IMR1_XON 0x02 +#define SAB82532_IMR1_XPR 0x01 + +/* Port Interrupt Status Register (PIS) */ +#define SAB82532_PIS_SYNC_B 0x08 +#define SAB82532_PIS_DTR_B 0x04 +#define SAB82532_PIS_DTR_A 0x02 +#define SAB82532_PIS_SYNC_A 0x01 + +/* Channel Configuration Register 4 (CCR4) */ +#define SAB82532_CCR4_MCK4 0x80 +#define SAB82532_CCR4_EBRG 0x40 +#define SAB82532_CCR4_TST1 0x20 +#define SAB82532_CCR4_ICD 0x10 + + +#endif /* !(_SUNSAB_H) */ diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c new file mode 100644 index 000000000..9ea7e5675 --- /dev/null +++ b/drivers/tty/serial/sunsu.c @@ -0,0 +1,1625 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI + * + * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1998-1999 Pete Zaitcev (zaitcev@yahoo.com) + * + * This is mainly a variation of 8250.c, credits go to authors mentioned + * therein. In fact this driver should be merged into the generic 8250.c + * infrastructure perhaps using a 8250_sparc.c module. + * + * Fixed to use tty_get_baud_rate(). + * Theodore Ts'o , 2001-Oct-12 + * + * Converted to new 2.5.x UART layer. + * David S. Miller (davem@davemloft.net), 2002-Jul-29 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SERIO +#include +#endif +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +/* We are on a NS PC87303 clocked with 24.0 MHz, which results + * in a UART clock of 1.8462 MHz. + */ +#define SU_BASE_BAUD (1846200 / 16) + +enum su_type { SU_PORT_NONE, SU_PORT_MS, SU_PORT_KBD, SU_PORT_PORT }; +static char *su_typev[] = { "su(???)", "su(mouse)", "su(kbd)", "su(serial)" }; + +struct serial_uart_config { + char *name; + int dfl_xmit_fifo_size; + int flags; +}; + +/* + * Here we define the default xmit fifo size used for each type of UART. 
+ */ +static const struct serial_uart_config uart_config[] = { + { "unknown", 1, 0 }, + { "8250", 1, 0 }, + { "16450", 1, 0 }, + { "16550", 1, 0 }, + { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO }, + { "Cirrus", 1, 0 }, + { "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH }, + { "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH }, + { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO }, + { "Startech", 1, 0 }, + { "16C950/954", 128, UART_CLEAR_FIFO | UART_USE_FIFO }, + { "ST16654", 64, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH }, + { "XR16850", 128, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH }, + { "RSA", 2048, UART_CLEAR_FIFO | UART_USE_FIFO } +}; + +struct uart_sunsu_port { + struct uart_port port; + unsigned char acr; + unsigned char ier; + unsigned short rev; + unsigned char lcr; + unsigned int lsr_break_flag; + unsigned int cflag; + + /* Probing information. */ + enum su_type su_type; + unsigned int type_probed; /* XXX Stupid */ + unsigned long reg_size; + +#ifdef CONFIG_SERIO + struct serio serio; + int serio_open; +#endif +}; + +static unsigned int serial_in(struct uart_sunsu_port *up, int offset) +{ + offset <<= up->port.regshift; + + switch (up->port.iotype) { + case UPIO_HUB6: + outb(up->port.hub6 - 1 + offset, up->port.iobase); + return inb(up->port.iobase + 1); + + case UPIO_MEM: + return readb(up->port.membase + offset); + + default: + return inb(up->port.iobase + offset); + } +} + +static void serial_out(struct uart_sunsu_port *up, int offset, int value) +{ +#ifndef CONFIG_SPARC64 + /* + * MrCoffee has weird schematics: IRQ4 & P10(?) pins of SuperIO are + * connected with a gate then go to SlavIO. When IRQ4 goes tristated + * gate outputs a logical one. Since we use level triggered interrupts + * we have lockup and watchdog reset. We cannot mask IRQ because + * keyboard shares IRQ with us (Word has it as Bob Smelik's design). + * This problem is similar to what Alpha people suffer, see + * 8250_alpha.c. + */ + if (offset == UART_MCR) + value |= UART_MCR_OUT2; +#endif + offset <<= up->port.regshift; + + switch (up->port.iotype) { + case UPIO_HUB6: + outb(up->port.hub6 - 1 + offset, up->port.iobase); + outb(value, up->port.iobase + 1); + break; + + case UPIO_MEM: + writeb(value, up->port.membase + offset); + break; + + default: + outb(value, up->port.iobase + offset); + } +} + +/* + * We used to support using pause I/O for certain machines. We + * haven't supported this for a while, but just in case it's badly + * needed for certain old 386 machines, I've left these #define's + * in.... + */ +#define serial_inp(up, offset) serial_in(up, offset) +#define serial_outp(up, offset, value) serial_out(up, offset, value) + + +/* + * For the 16C950 + */ +static void serial_icr_write(struct uart_sunsu_port *up, int offset, int value) +{ + serial_out(up, UART_SCR, offset); + serial_out(up, UART_ICR, value); +} + +#if 0 /* Unused currently */ +static unsigned int serial_icr_read(struct uart_sunsu_port *up, int offset) +{ + unsigned int value; + + serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD); + serial_out(up, UART_SCR, offset); + value = serial_in(up, UART_ICR); + serial_icr_write(up, UART_ACR, up->acr); + + return value; +} +#endif + +#ifdef CONFIG_SERIAL_8250_RSA +/* + * Attempts to turn on the RSA FIFO. Returns zero on failure. + * We set the port uart clock rate if we succeed. 
+ */ +static int __enable_rsa(struct uart_sunsu_port *up) +{ + unsigned char mode; + int result; + + mode = serial_inp(up, UART_RSA_MSR); + result = mode & UART_RSA_MSR_FIFO; + + if (!result) { + serial_outp(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO); + mode = serial_inp(up, UART_RSA_MSR); + result = mode & UART_RSA_MSR_FIFO; + } + + if (result) + up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16; + + return result; +} + +static void enable_rsa(struct uart_sunsu_port *up) +{ + if (up->port.type == PORT_RSA) { + if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) { + spin_lock_irq(&up->port.lock); + __enable_rsa(up); + spin_unlock_irq(&up->port.lock); + } + if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) + serial_outp(up, UART_RSA_FRR, 0); + } +} + +/* + * Attempts to turn off the RSA FIFO. Returns zero on failure. + * It is unknown why interrupts were disabled in here. However, + * the caller is expected to preserve this behaviour by grabbing + * the spinlock before calling this function. + */ +static void disable_rsa(struct uart_sunsu_port *up) +{ + unsigned char mode; + int result; + + if (up->port.type == PORT_RSA && + up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) { + spin_lock_irq(&up->port.lock); + + mode = serial_inp(up, UART_RSA_MSR); + result = !(mode & UART_RSA_MSR_FIFO); + + if (!result) { + serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO); + mode = serial_inp(up, UART_RSA_MSR); + result = !(mode & UART_RSA_MSR_FIFO); + } + + if (result) + up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16; + spin_unlock_irq(&up->port.lock); + } +} +#endif /* CONFIG_SERIAL_8250_RSA */ + +static inline void __stop_tx(struct uart_sunsu_port *p) +{ + if (p->ier & UART_IER_THRI) { + p->ier &= ~UART_IER_THRI; + serial_out(p, UART_IER, p->ier); + } +} + +static void sunsu_stop_tx(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + + __stop_tx(up); + + /* + * We really want to stop the transmitter from sending. + */ + if (up->port.type == PORT_16C950) { + up->acr |= UART_ACR_TXDIS; + serial_icr_write(up, UART_ACR, up->acr); + } +} + +static void sunsu_start_tx(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + + if (!(up->ier & UART_IER_THRI)) { + up->ier |= UART_IER_THRI; + serial_out(up, UART_IER, up->ier); + } + + /* + * Re-enable the transmitter if we disabled it. 
+ */ + if (up->port.type == PORT_16C950 && up->acr & UART_ACR_TXDIS) { + up->acr &= ~UART_ACR_TXDIS; + serial_icr_write(up, UART_ACR, up->acr); + } +} + +static void sunsu_stop_rx(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + + up->ier &= ~UART_IER_RLSI; + up->port.read_status_mask &= ~UART_LSR_DR; + serial_out(up, UART_IER, up->ier); +} + +static void sunsu_enable_ms(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + up->ier |= UART_IER_MSI; + serial_out(up, UART_IER, up->ier); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static void +receive_chars(struct uart_sunsu_port *up, unsigned char *status) +{ + struct tty_port *port = &up->port.state->port; + unsigned char ch, flag; + int max_count = 256; + int saw_console_brk = 0; + + do { + ch = serial_inp(up, UART_RX); + flag = TTY_NORMAL; + up->port.icount.rx++; + + if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | + UART_LSR_FE | UART_LSR_OE))) { + /* + * For statistics only + */ + if (*status & UART_LSR_BI) { + *status &= ~(UART_LSR_FE | UART_LSR_PE); + up->port.icount.brk++; + if (up->port.cons != NULL && + up->port.line == up->port.cons->index) + saw_console_brk = 1; + /* + * We do the SysRQ and SAK checking + * here because otherwise the break + * may get masked by ignore_status_mask + * or read_status_mask. + */ + if (uart_handle_break(&up->port)) + goto ignore_char; + } else if (*status & UART_LSR_PE) + up->port.icount.parity++; + else if (*status & UART_LSR_FE) + up->port.icount.frame++; + if (*status & UART_LSR_OE) + up->port.icount.overrun++; + + /* + * Mask off conditions which should be ingored. + */ + *status &= up->port.read_status_mask; + + if (up->port.cons != NULL && + up->port.line == up->port.cons->index) { + /* Recover the break flag from console xmit */ + *status |= up->lsr_break_flag; + up->lsr_break_flag = 0; + } + + if (*status & UART_LSR_BI) { + flag = TTY_BREAK; + } else if (*status & UART_LSR_PE) + flag = TTY_PARITY; + else if (*status & UART_LSR_FE) + flag = TTY_FRAME; + } + if (uart_handle_sysrq_char(&up->port, ch)) + goto ignore_char; + if ((*status & up->port.ignore_status_mask) == 0) + tty_insert_flip_char(port, ch, flag); + if (*status & UART_LSR_OE) + /* + * Overrun is special, since it's reported + * immediately, and doesn't affect the current + * character. 
+ */ + tty_insert_flip_char(port, 0, TTY_OVERRUN); + ignore_char: + *status = serial_inp(up, UART_LSR); + } while ((*status & UART_LSR_DR) && (max_count-- > 0)); + + if (saw_console_brk) + sun_do_break(); +} + +static void transmit_chars(struct uart_sunsu_port *up) +{ + struct circ_buf *xmit = &up->port.state->xmit; + int count; + + if (up->port.x_char) { + serial_outp(up, UART_TX, up->port.x_char); + up->port.icount.tx++; + up->port.x_char = 0; + return; + } + if (uart_tx_stopped(&up->port)) { + sunsu_stop_tx(&up->port); + return; + } + if (uart_circ_empty(xmit)) { + __stop_tx(up); + return; + } + + count = up->port.fifosize; + do { + serial_out(up, UART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + if (uart_circ_empty(xmit)) + __stop_tx(up); +} + +static void check_modem_status(struct uart_sunsu_port *up) +{ + int status; + + status = serial_in(up, UART_MSR); + + if ((status & UART_MSR_ANY_DELTA) == 0) + return; + + if (status & UART_MSR_TERI) + up->port.icount.rng++; + if (status & UART_MSR_DDSR) + up->port.icount.dsr++; + if (status & UART_MSR_DDCD) + uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); + if (status & UART_MSR_DCTS) + uart_handle_cts_change(&up->port, status & UART_MSR_CTS); + + wake_up_interruptible(&up->port.state->port.delta_msr_wait); +} + +static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id) +{ + struct uart_sunsu_port *up = dev_id; + unsigned long flags; + unsigned char status; + + spin_lock_irqsave(&up->port.lock, flags); + + do { + status = serial_inp(up, UART_LSR); + if (status & UART_LSR_DR) + receive_chars(up, &status); + check_modem_status(up); + if (status & UART_LSR_THRE) + transmit_chars(up); + + tty_flip_buffer_push(&up->port.state->port); + + } while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT)); + + spin_unlock_irqrestore(&up->port.lock, flags); + + return IRQ_HANDLED; +} + +/* Separate interrupt handling path for keyboard/mouse ports. */ + +static void +sunsu_change_speed(struct uart_port *port, unsigned int cflag, + unsigned int iflag, unsigned int quot); + +static void sunsu_change_mouse_baud(struct uart_sunsu_port *up) +{ + unsigned int cur_cflag = up->cflag; + int quot, new_baud; + + up->cflag &= ~CBAUD; + up->cflag |= suncore_mouse_baud_cflag_next(cur_cflag, &new_baud); + + quot = up->port.uartclk / (16 * new_baud); + + sunsu_change_speed(&up->port, up->cflag, 0, quot); +} + +static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break) +{ + do { + unsigned char ch = serial_inp(up, UART_RX); + + /* Stop-A is handled by drivers/char/keyboard.c now. 
*/ + if (up->su_type == SU_PORT_KBD) { +#ifdef CONFIG_SERIO + serio_interrupt(&up->serio, ch, 0); +#endif + } else if (up->su_type == SU_PORT_MS) { + int ret = suncore_mouse_baud_detection(ch, is_break); + + switch (ret) { + case 2: + sunsu_change_mouse_baud(up); + fallthrough; + case 1: + break; + + case 0: +#ifdef CONFIG_SERIO + serio_interrupt(&up->serio, ch, 0); +#endif + break; + } + } + } while (serial_in(up, UART_LSR) & UART_LSR_DR); +} + +static irqreturn_t sunsu_kbd_ms_interrupt(int irq, void *dev_id) +{ + struct uart_sunsu_port *up = dev_id; + + if (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT)) { + unsigned char status = serial_inp(up, UART_LSR); + + if ((status & UART_LSR_DR) || (status & UART_LSR_BI)) + receive_kbd_ms_chars(up, (status & UART_LSR_BI) != 0); + } + + return IRQ_HANDLED; +} + +static unsigned int sunsu_tx_empty(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(&up->port.lock, flags); + ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0; + spin_unlock_irqrestore(&up->port.lock, flags); + + return ret; +} + +static unsigned int sunsu_get_mctrl(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned char status; + unsigned int ret; + + status = serial_in(up, UART_MSR); + + ret = 0; + if (status & UART_MSR_DCD) + ret |= TIOCM_CAR; + if (status & UART_MSR_RI) + ret |= TIOCM_RNG; + if (status & UART_MSR_DSR) + ret |= TIOCM_DSR; + if (status & UART_MSR_CTS) + ret |= TIOCM_CTS; + return ret; +} + +static void sunsu_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned char mcr = 0; + + if (mctrl & TIOCM_RTS) + mcr |= UART_MCR_RTS; + if (mctrl & TIOCM_DTR) + mcr |= UART_MCR_DTR; + if (mctrl & TIOCM_OUT1) + mcr |= UART_MCR_OUT1; + if (mctrl & TIOCM_OUT2) + mcr |= UART_MCR_OUT2; + if (mctrl & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + + serial_out(up, UART_MCR, mcr); +} + +static void sunsu_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + if (break_state == -1) + up->lcr |= UART_LCR_SBC; + else + up->lcr &= ~UART_LCR_SBC; + serial_out(up, UART_LCR, up->lcr); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static int sunsu_startup(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned long flags; + int retval; + + if (up->port.type == PORT_16C950) { + /* Wake up and initialize UART */ + up->acr = 0; + serial_outp(up, UART_LCR, 0xBF); + serial_outp(up, UART_EFR, UART_EFR_ECB); + serial_outp(up, UART_IER, 0); + serial_outp(up, UART_LCR, 0); + serial_icr_write(up, UART_CSR, 0); /* Reset the UART */ + serial_outp(up, UART_LCR, 0xBF); + serial_outp(up, UART_EFR, UART_EFR_ECB); + serial_outp(up, UART_LCR, 0); + } + +#ifdef CONFIG_SERIAL_8250_RSA + /* + * If this is an RSA port, see if we can kick it up to the + * higher speed clock. + */ + enable_rsa(up); +#endif + + /* + * Clear the FIFO buffers and disable them. 
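+ * On 16550-type parts the other FCR bits are only honoured while
+ * UART_FCR_ENABLE_FIFO is set, so the FIFOs are enabled first, then
+ * cleared, and the final write of 0 leaves them disabled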
+ * (they will be reenabled in set_termios()) + */ + if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) { + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + serial_outp(up, UART_FCR, 0); + } + + /* + * Clear the interrupt registers. + */ + (void) serial_inp(up, UART_LSR); + (void) serial_inp(up, UART_RX); + (void) serial_inp(up, UART_IIR); + (void) serial_inp(up, UART_MSR); + + /* + * At this point, there's no way the LSR could still be 0xff; + * if it is, then bail out, because there's likely no UART + * here. + */ + if (!(up->port.flags & UPF_BUGGY_UART) && + (serial_inp(up, UART_LSR) == 0xff)) { + printk("ttyS%d: LSR safety check engaged!\n", up->port.line); + return -ENODEV; + } + + if (up->su_type != SU_PORT_PORT) { + retval = request_irq(up->port.irq, sunsu_kbd_ms_interrupt, + IRQF_SHARED, su_typev[up->su_type], up); + } else { + retval = request_irq(up->port.irq, sunsu_serial_interrupt, + IRQF_SHARED, su_typev[up->su_type], up); + } + if (retval) { + printk("su: Cannot register IRQ %d\n", up->port.irq); + return retval; + } + + /* + * Now, initialize the UART + */ + serial_outp(up, UART_LCR, UART_LCR_WLEN8); + + spin_lock_irqsave(&up->port.lock, flags); + + up->port.mctrl |= TIOCM_OUT2; + + sunsu_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + + /* + * Finally, enable interrupts. Note: Modem status interrupts + * are set via set_termios(), which will be occurring imminently + * anyway, so we don't enable them here. + */ + up->ier = UART_IER_RLSI | UART_IER_RDI; + serial_outp(up, UART_IER, up->ier); + + if (up->port.flags & UPF_FOURPORT) { + unsigned int icp; + /* + * Enable interrupts on the AST Fourport board + */ + icp = (up->port.iobase & 0xfe0) | 0x01f; + outb_p(0x80, icp); + (void) inb_p(icp); + } + + /* + * And clear the interrupt registers again for luck. + */ + (void) serial_inp(up, UART_LSR); + (void) serial_inp(up, UART_RX); + (void) serial_inp(up, UART_IIR); + (void) serial_inp(up, UART_MSR); + + return 0; +} + +static void sunsu_shutdown(struct uart_port *port) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned long flags; + + /* + * Disable interrupts from this port + */ + up->ier = 0; + serial_outp(up, UART_IER, 0); + + spin_lock_irqsave(&up->port.lock, flags); + if (up->port.flags & UPF_FOURPORT) { + /* reset interrupts on the AST Fourport board */ + inb((up->port.iobase & 0xfe0) | 0x1f); + up->port.mctrl |= TIOCM_OUT1; + } else + up->port.mctrl &= ~TIOCM_OUT2; + + sunsu_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + + /* + * Disable break condition and FIFOs + */ + serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC); + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | + UART_FCR_CLEAR_XMIT); + serial_outp(up, UART_FCR, 0); + +#ifdef CONFIG_SERIAL_8250_RSA + /* + * Reset the RSA board back to 115kbps compat mode. + */ + disable_rsa(up); +#endif + + /* + * Read data port to reset things. 
+ */ + (void) serial_in(up, UART_RX); + + free_irq(up->port.irq, up); +} + +static void +sunsu_change_speed(struct uart_port *port, unsigned int cflag, + unsigned int iflag, unsigned int quot) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + unsigned char cval, fcr = 0; + unsigned long flags; + + switch (cflag & CSIZE) { + case CS5: + cval = 0x00; + break; + case CS6: + cval = 0x01; + break; + case CS7: + cval = 0x02; + break; + default: + case CS8: + cval = 0x03; + break; + } + + if (cflag & CSTOPB) + cval |= 0x04; + if (cflag & PARENB) + cval |= UART_LCR_PARITY; + if (!(cflag & PARODD)) + cval |= UART_LCR_EPAR; + if (cflag & CMSPAR) + cval |= UART_LCR_SPAR; + + /* + * Work around a bug in the Oxford Semiconductor 952 rev B + * chip which causes it to seriously miscalculate baud rates + * when DLL is 0. + */ + if ((quot & 0xff) == 0 && up->port.type == PORT_16C950 && + up->rev == 0x5201) + quot ++; + + if (uart_config[up->port.type].flags & UART_USE_FIFO) { + if ((up->port.uartclk / quot) < (2400 * 16)) + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1; +#ifdef CONFIG_SERIAL_8250_RSA + else if (up->port.type == PORT_RSA) + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_14; +#endif + else + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_8; + } + if (up->port.type == PORT_16750) + fcr |= UART_FCR7_64BYTE; + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ + spin_lock_irqsave(&up->port.lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, cflag, (port->uartclk / (16 * quot))); + + up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + if (iflag & INPCK) + up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (iflag & (IGNBRK | BRKINT | PARMRK)) + up->port.read_status_mask |= UART_LSR_BI; + + /* + * Characteres to ignore + */ + up->port.ignore_status_mask = 0; + if (iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; + if (iflag & IGNBRK) { + up->port.ignore_status_mask |= UART_LSR_BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_OE; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((cflag & CREAD) == 0) + up->port.ignore_status_mask |= UART_LSR_DR; + + /* + * CTS flow control flag and modem status interrupts + */ + up->ier &= ~UART_IER_MSI; + if (UART_ENABLE_MS(&up->port, cflag)) + up->ier |= UART_IER_MSI; + + serial_out(up, UART_IER, up->ier); + + if (uart_config[up->port.type].flags & UART_STARTECH) { + serial_outp(up, UART_LCR, 0xBF); + serial_outp(up, UART_EFR, cflag & CRTSCTS ? 
UART_EFR_CTS :0); + } + serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */ + serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */ + serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */ + if (up->port.type == PORT_16750) + serial_outp(up, UART_FCR, fcr); /* set fcr */ + serial_outp(up, UART_LCR, cval); /* reset DLAB */ + up->lcr = cval; /* Save LCR */ + if (up->port.type != PORT_16750) { + if (fcr & UART_FCR_ENABLE_FIFO) { + /* emulated UARTs (Lucent Venus 167x) need two steps */ + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); + } + serial_outp(up, UART_FCR, fcr); /* set fcr */ + } + + up->cflag = cflag; + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static void +sunsu_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud, quot; + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = uart_get_divisor(port, baud); + + sunsu_change_speed(port, termios->c_cflag, termios->c_iflag, quot); +} + +static void sunsu_release_port(struct uart_port *port) +{ +} + +static int sunsu_request_port(struct uart_port *port) +{ + return 0; +} + +static void sunsu_config_port(struct uart_port *port, int flags) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + + if (flags & UART_CONFIG_TYPE) { + /* + * We are supposed to call autoconfig here, but this requires + * splitting all the OBP probing crap from the UART probing. + * We'll do it when we kill sunsu.c altogether. + */ + port->type = up->type_probed; /* XXX */ + } +} + +static int +sunsu_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return -EINVAL; +} + +static const char * +sunsu_type(struct uart_port *port) +{ + int type = port->type; + + if (type >= ARRAY_SIZE(uart_config)) + type = 0; + return uart_config[type].name; +} + +static const struct uart_ops sunsu_pops = { + .tx_empty = sunsu_tx_empty, + .set_mctrl = sunsu_set_mctrl, + .get_mctrl = sunsu_get_mctrl, + .stop_tx = sunsu_stop_tx, + .start_tx = sunsu_start_tx, + .stop_rx = sunsu_stop_rx, + .enable_ms = sunsu_enable_ms, + .break_ctl = sunsu_break_ctl, + .startup = sunsu_startup, + .shutdown = sunsu_shutdown, + .set_termios = sunsu_set_termios, + .type = sunsu_type, + .release_port = sunsu_release_port, + .request_port = sunsu_request_port, + .config_port = sunsu_config_port, + .verify_port = sunsu_verify_port, +}; + +#define UART_NR 4 + +static struct uart_sunsu_port sunsu_ports[UART_NR]; +static int nr_inst; /* Number of already registered ports */ + +#ifdef CONFIG_SERIO + +static DEFINE_SPINLOCK(sunsu_serio_lock); + +static int sunsu_serio_write(struct serio *serio, unsigned char ch) +{ + struct uart_sunsu_port *up = serio->port_data; + unsigned long flags; + int lsr; + + spin_lock_irqsave(&sunsu_serio_lock, flags); + + do { + lsr = serial_in(up, UART_LSR); + } while (!(lsr & UART_LSR_THRE)); + + /* Send the character out. 
*/ + serial_out(up, UART_TX, ch); + + spin_unlock_irqrestore(&sunsu_serio_lock, flags); + + return 0; +} + +static int sunsu_serio_open(struct serio *serio) +{ + struct uart_sunsu_port *up = serio->port_data; + unsigned long flags; + int ret; + + spin_lock_irqsave(&sunsu_serio_lock, flags); + if (!up->serio_open) { + up->serio_open = 1; + ret = 0; + } else + ret = -EBUSY; + spin_unlock_irqrestore(&sunsu_serio_lock, flags); + + return ret; +} + +static void sunsu_serio_close(struct serio *serio) +{ + struct uart_sunsu_port *up = serio->port_data; + unsigned long flags; + + spin_lock_irqsave(&sunsu_serio_lock, flags); + up->serio_open = 0; + spin_unlock_irqrestore(&sunsu_serio_lock, flags); +} + +#endif /* CONFIG_SERIO */ + +static void sunsu_autoconfig(struct uart_sunsu_port *up) +{ + unsigned char status1, status2, scratch, scratch2, scratch3; + unsigned char save_lcr, save_mcr; + unsigned long flags; + + if (up->su_type == SU_PORT_NONE) + return; + + up->type_probed = PORT_UNKNOWN; + up->port.iotype = UPIO_MEM; + + spin_lock_irqsave(&up->port.lock, flags); + + if (!(up->port.flags & UPF_BUGGY_UART)) { + /* + * Do a simple existence test first; if we fail this, there's + * no point trying anything else. + * + * 0x80 is used as a nonsense port to prevent against false + * positives due to ISA bus float. The assumption is that + * 0x80 is a non-existent port; which should be safe since + * include/asm/io.h also makes this assumption. + */ + scratch = serial_inp(up, UART_IER); + serial_outp(up, UART_IER, 0); +#ifdef __i386__ + outb(0xff, 0x080); +#endif + scratch2 = serial_inp(up, UART_IER); + serial_outp(up, UART_IER, 0x0f); +#ifdef __i386__ + outb(0, 0x080); +#endif + scratch3 = serial_inp(up, UART_IER); + serial_outp(up, UART_IER, scratch); + if (scratch2 != 0 || scratch3 != 0x0F) + goto out; /* We failed; there's nothing here */ + } + + save_mcr = serial_in(up, UART_MCR); + save_lcr = serial_in(up, UART_LCR); + + /* + * Check to see if a UART is really there. Certain broken + * internal modems based on the Rockwell chipset fail this + * test, because they apparently don't implement the loopback + * test mode. So this test is skipped on the COM 1 through + * COM 4 ports. This *should* be safe, since no board + * manufacturer would be stupid enough to design a board + * that conflicts with COM 1-4 --- we hope! 
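+ *
+ * In loopback mode the modem-control outputs are routed back to the
+ * modem-status inputs (RTS->CTS, DTR->DSR, OUT1->RI, OUT2->DCD), so
+ * writing UART_MCR_LOOP | 0x0A (RTS | OUT2) should read back 0x90
+ * (CTS | DCD) in the upper nibble of MSR, which is what the check
+ * below expects.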
+ */ + if (!(up->port.flags & UPF_SKIP_TEST)) { + serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A); + status1 = serial_inp(up, UART_MSR) & 0xF0; + serial_outp(up, UART_MCR, save_mcr); + if (status1 != 0x90) + goto out; /* We failed loopback test */ + } + serial_outp(up, UART_LCR, 0xBF); /* set up for StarTech test */ + serial_outp(up, UART_EFR, 0); /* EFR is the same as FCR */ + serial_outp(up, UART_LCR, 0); + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); + scratch = serial_in(up, UART_IIR) >> 6; + switch (scratch) { + case 0: + up->port.type = PORT_16450; + break; + case 1: + up->port.type = PORT_UNKNOWN; + break; + case 2: + up->port.type = PORT_16550; + break; + case 3: + up->port.type = PORT_16550A; + break; + } + if (up->port.type == PORT_16550A) { + /* Check for Startech UART's */ + serial_outp(up, UART_LCR, UART_LCR_DLAB); + if (serial_in(up, UART_EFR) == 0) { + up->port.type = PORT_16650; + } else { + serial_outp(up, UART_LCR, 0xBF); + if (serial_in(up, UART_EFR) == 0) + up->port.type = PORT_16650V2; + } + } + if (up->port.type == PORT_16550A) { + /* Check for TI 16750 */ + serial_outp(up, UART_LCR, save_lcr | UART_LCR_DLAB); + serial_outp(up, UART_FCR, + UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE); + scratch = serial_in(up, UART_IIR) >> 5; + if (scratch == 7) { + /* + * If this is a 16750, and not a cheap UART + * clone, then it should only go into 64 byte + * mode if the UART_FCR7_64BYTE bit was set + * while UART_LCR_DLAB was latched. + */ + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_outp(up, UART_LCR, 0); + serial_outp(up, UART_FCR, + UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE); + scratch = serial_in(up, UART_IIR) >> 5; + if (scratch == 6) + up->port.type = PORT_16750; + } + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); + } + serial_outp(up, UART_LCR, save_lcr); + if (up->port.type == PORT_16450) { + scratch = serial_in(up, UART_SCR); + serial_outp(up, UART_SCR, 0xa5); + status1 = serial_in(up, UART_SCR); + serial_outp(up, UART_SCR, 0x5a); + status2 = serial_in(up, UART_SCR); + serial_outp(up, UART_SCR, scratch); + + if ((status1 != 0xa5) || (status2 != 0x5a)) + up->port.type = PORT_8250; + } + + up->port.fifosize = uart_config[up->port.type].dfl_xmit_fifo_size; + + if (up->port.type == PORT_UNKNOWN) + goto out; + up->type_probed = up->port.type; /* XXX */ + + /* + * Reset the UART. + */ +#ifdef CONFIG_SERIAL_8250_RSA + if (up->port.type == PORT_RSA) + serial_outp(up, UART_RSA_FRR, 0); +#endif + serial_outp(up, UART_MCR, save_mcr); + serial_outp(up, UART_FCR, (UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | + UART_FCR_CLEAR_XMIT)); + serial_outp(up, UART_FCR, 0); + (void)serial_in(up, UART_RX); + serial_outp(up, UART_IER, 0); + +out: + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static struct uart_driver sunsu_reg = { + .owner = THIS_MODULE, + .driver_name = "sunsu", + .dev_name = "ttyS", + .major = TTY_MAJOR, +}; + +static int sunsu_kbd_ms_init(struct uart_sunsu_port *up) +{ + int quot, baud; +#ifdef CONFIG_SERIO + struct serio *serio; +#endif + + if (up->su_type == SU_PORT_KBD) { + up->cflag = B1200 | CS8 | CLOCAL | CREAD; + baud = 1200; + } else { + up->cflag = B4800 | CS8 | CLOCAL | CREAD; + baud = 4800; + } + quot = up->port.uartclk / (16 * baud); + + sunsu_autoconfig(up); + if (up->port.type == PORT_UNKNOWN) + return -ENODEV; + + printk("%pOF: %s port at %llx, irq %u\n", + up->port.dev->of_node, + (up->su_type == SU_PORT_KBD) ? 
"Keyboard" : "Mouse", + (unsigned long long) up->port.mapbase, + up->port.irq); + +#ifdef CONFIG_SERIO + serio = &up->serio; + serio->port_data = up; + + serio->id.type = SERIO_RS232; + if (up->su_type == SU_PORT_KBD) { + serio->id.proto = SERIO_SUNKBD; + strscpy(serio->name, "sukbd", sizeof(serio->name)); + } else { + serio->id.proto = SERIO_SUN; + serio->id.extra = 1; + strscpy(serio->name, "sums", sizeof(serio->name)); + } + strscpy(serio->phys, + (!(up->port.line & 1) ? "su/serio0" : "su/serio1"), + sizeof(serio->phys)); + + serio->write = sunsu_serio_write; + serio->open = sunsu_serio_open; + serio->close = sunsu_serio_close; + serio->dev.parent = up->port.dev; + + serio_register_port(serio); +#endif + + sunsu_change_speed(&up->port, up->cflag, 0, quot); + + sunsu_startup(&up->port); + return 0; +} + +/* + * ------------------------------------------------------------ + * Serial console driver + * ------------------------------------------------------------ + */ + +#ifdef CONFIG_SERIAL_SUNSU_CONSOLE + +/* + * Wait for transmitter & holding register to empty + */ +static void wait_for_xmitr(struct uart_sunsu_port *up) +{ + unsigned int status, tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. */ + do { + status = serial_in(up, UART_LSR); + + if (status & UART_LSR_BI) + up->lsr_break_flag = UART_LSR_BI; + + if (--tmout == 0) + break; + udelay(1); + } while (!uart_lsr_tx_empty(status)); + + /* Wait up to 1s for flow control if necessary */ + if (up->port.flags & UPF_CONS_FLOW) { + tmout = 1000000; + while (--tmout && + ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) + udelay(1); + } +} + +static void sunsu_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct uart_sunsu_port *up = + container_of(port, struct uart_sunsu_port, port); + + wait_for_xmitr(up); + serial_out(up, UART_TX, ch); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + */ +static void sunsu_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_sunsu_port *up = &sunsu_ports[co->index]; + unsigned long flags; + unsigned int ier; + int locked = 1; + + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); + + /* + * First save the UER then disable the interrupts + */ + ier = serial_in(up, UART_IER); + serial_out(up, UART_IER, 0); + + uart_console_write(&up->port, s, count, sunsu_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + wait_for_xmitr(up); + serial_out(up, UART_IER, ier); + + if (locked) + spin_unlock_irqrestore(&up->port.lock, flags); +} + +/* + * Setup initial baud/bits/parity. We do two things here: + * - construct a cflag setting for the first su_open() + * - initialize the serial port + * Return non-zero if we didn't find a serial port. + */ +static int __init sunsu_console_setup(struct console *co, char *options) +{ + static struct ktermios dummy; + struct ktermios termios; + struct uart_port *port; + + printk("Console: ttyS%d (SU)\n", + (sunsu_reg.minor - 64) + co->index); + + if (co->index > nr_inst) + return -ENODEV; + port = &sunsu_ports[co->index].port; + + /* + * Temporary fix. + */ + spin_lock_init(&port->lock); + + /* Get firmware console settings. 
*/ + sunserial_console_termios(co, port->dev->of_node); + + memset(&termios, 0, sizeof(struct ktermios)); + termios.c_cflag = co->cflag; + port->mctrl |= TIOCM_DTR; + port->ops->set_termios(port, &termios, &dummy); + + return 0; +} + +static struct console sunsu_console = { + .name = "ttyS", + .write = sunsu_console_write, + .device = uart_console_device, + .setup = sunsu_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sunsu_reg, +}; + +/* + * Register console. + */ + +static inline struct console *SUNSU_CONSOLE(void) +{ + return &sunsu_console; +} +#else +#define SUNSU_CONSOLE() (NULL) +#define sunsu_serial_console_init() do { } while (0) +#endif + +static enum su_type su_get_type(struct device_node *dp) +{ + struct device_node *ap = of_find_node_by_path("/aliases"); + enum su_type rc = SU_PORT_PORT; + + if (ap) { + const char *keyb = of_get_property(ap, "keyboard", NULL); + const char *ms = of_get_property(ap, "mouse", NULL); + struct device_node *match; + + if (keyb) { + match = of_find_node_by_path(keyb); + + /* + * The pointer is used as an identifier not + * as a pointer, we can drop the refcount on + * the of__node immediately after getting it. + */ + of_node_put(match); + + if (dp == match) { + rc = SU_PORT_KBD; + goto out; + } + } + if (ms) { + match = of_find_node_by_path(ms); + + of_node_put(match); + + if (dp == match) { + rc = SU_PORT_MS; + goto out; + } + } + } + +out: + of_node_put(ap); + return rc; +} + +static int su_probe(struct platform_device *op) +{ + struct device_node *dp = op->dev.of_node; + struct uart_sunsu_port *up; + struct resource *rp; + enum su_type type; + bool ignore_line; + int err; + + type = su_get_type(dp); + if (type == SU_PORT_PORT) { + if (nr_inst >= UART_NR) + return -EINVAL; + up = &sunsu_ports[nr_inst]; + } else { + up = kzalloc(sizeof(*up), GFP_KERNEL); + if (!up) + return -ENOMEM; + } + + up->port.line = nr_inst; + + spin_lock_init(&up->port.lock); + + up->su_type = type; + + rp = &op->resource[0]; + up->port.mapbase = rp->start; + up->reg_size = resource_size(rp); + up->port.membase = of_ioremap(rp, 0, up->reg_size, "su"); + if (!up->port.membase) { + if (type != SU_PORT_PORT) + kfree(up); + return -ENOMEM; + } + + up->port.irq = op->archdata.irqs[0]; + + up->port.dev = &op->dev; + + up->port.type = PORT_UNKNOWN; + up->port.uartclk = (SU_BASE_BAUD * 16); + up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSU_CONSOLE); + + err = 0; + if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) { + err = sunsu_kbd_ms_init(up); + if (err) { + of_iounmap(&op->resource[0], + up->port.membase, up->reg_size); + kfree(up); + return err; + } + platform_set_drvdata(op, up); + + nr_inst++; + + return 0; + } + + up->port.flags |= UPF_BOOT_AUTOCONF; + + sunsu_autoconfig(up); + + err = -ENODEV; + if (up->port.type == PORT_UNKNOWN) + goto out_unmap; + + up->port.ops = &sunsu_pops; + + ignore_line = false; + if (of_node_name_eq(dp, "rsc-console") || + of_node_name_eq(dp, "lom-console")) + ignore_line = true; + + sunserial_console_match(SUNSU_CONSOLE(), dp, + &sunsu_reg, up->port.line, + ignore_line); + err = uart_add_one_port(&sunsu_reg, &up->port); + if (err) + goto out_unmap; + + platform_set_drvdata(op, up); + + nr_inst++; + + return 0; + +out_unmap: + of_iounmap(&op->resource[0], up->port.membase, up->reg_size); + kfree(up); + return err; +} + +static int su_remove(struct platform_device *op) +{ + struct uart_sunsu_port *up = platform_get_drvdata(op); + bool kbdms = false; + + if (up->su_type == SU_PORT_MS || + up->su_type == 
SU_PORT_KBD) + kbdms = true; + + if (kbdms) { +#ifdef CONFIG_SERIO + serio_unregister_port(&up->serio); +#endif + } else if (up->port.type != PORT_UNKNOWN) + uart_remove_one_port(&sunsu_reg, &up->port); + + if (up->port.membase) + of_iounmap(&op->resource[0], up->port.membase, up->reg_size); + + if (kbdms) + kfree(up); + + return 0; +} + +static const struct of_device_id su_match[] = { + { + .name = "su", + }, + { + .name = "su_pnp", + }, + { + .name = "serial", + .compatible = "su", + }, + { + .type = "serial", + .compatible = "su", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, su_match); + +static struct platform_driver su_driver = { + .driver = { + .name = "su", + .of_match_table = su_match, + }, + .probe = su_probe, + .remove = su_remove, +}; + +static int __init sunsu_init(void) +{ + struct device_node *dp; + int err; + int num_uart = 0; + + for_each_node_by_name(dp, "su") { + if (su_get_type(dp) == SU_PORT_PORT) + num_uart++; + } + for_each_node_by_name(dp, "su_pnp") { + if (su_get_type(dp) == SU_PORT_PORT) + num_uart++; + } + for_each_node_by_name(dp, "serial") { + if (of_device_is_compatible(dp, "su")) { + if (su_get_type(dp) == SU_PORT_PORT) + num_uart++; + } + } + for_each_node_by_type(dp, "serial") { + if (of_device_is_compatible(dp, "su")) { + if (su_get_type(dp) == SU_PORT_PORT) + num_uart++; + } + } + + if (num_uart) { + err = sunserial_register_minors(&sunsu_reg, num_uart); + if (err) + return err; + } + + err = platform_driver_register(&su_driver); + if (err && num_uart) + sunserial_unregister_minors(&sunsu_reg, num_uart); + + return err; +} + +static void __exit sunsu_exit(void) +{ + platform_driver_unregister(&su_driver); + if (sunsu_reg.nr) + sunserial_unregister_minors(&sunsu_reg, sunsu_reg.nr); +} + +module_init(sunsu_init); +module_exit(sunsu_exit); + +MODULE_AUTHOR("Eddie C. Dost, Peter Zaitcev, and David S. Miller"); +MODULE_DESCRIPTION("Sun SU serial port driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c new file mode 100644 index 000000000..874252906 --- /dev/null +++ b/drivers/tty/serial/sunzilog.c @@ -0,0 +1,1651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* sunzilog.c: Zilog serial driver for Sparc systems. + * + * Driver for Zilog serial chips found on Sun workstations and + * servers. This driver could actually be made more generic. + * + * This is based on the old drivers/sbus/char/zs.c code. A lot + * of code has been simply moved over directly from there but + * much has been rewritten. Credits therefore go out to Eddie + * C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their + * work there. + * + * Copyright (C) 2002, 2006, 2007 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SERIO +#include +#endif +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "sunzilog.h" + +/* On 32-bit sparcs we need to delay after register accesses + * to accommodate sun4 systems, but we do not need to flush writes. + * On 64-bit sparc we only need to flush single writes to ensure + * completion. 
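+ * That flush is what the ZS_WSYNC() macro below does, by reading the
+ * channel control register back after the write.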
+ */ +#ifndef CONFIG_SPARC64 +#define ZSDELAY() udelay(5) +#define ZSDELAY_LONG() udelay(20) +#define ZS_WSYNC(channel) do { } while (0) +#else +#define ZSDELAY() +#define ZSDELAY_LONG() +#define ZS_WSYNC(__channel) \ + readb(&((__channel)->control)) +#endif + +#define ZS_CLOCK 4915200 /* Zilog input clock rate. */ +#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */ + +/* + * We wrap our port structure around the generic uart_port. + */ +struct uart_sunzilog_port { + struct uart_port port; + + /* IRQ servicing chain. */ + struct uart_sunzilog_port *next; + + /* Current values of Zilog write registers. */ + unsigned char curregs[NUM_ZSREGS]; + + unsigned int flags; +#define SUNZILOG_FLAG_CONS_KEYB 0x00000001 +#define SUNZILOG_FLAG_CONS_MOUSE 0x00000002 +#define SUNZILOG_FLAG_IS_CONS 0x00000004 +#define SUNZILOG_FLAG_IS_KGDB 0x00000008 +#define SUNZILOG_FLAG_MODEM_STATUS 0x00000010 +#define SUNZILOG_FLAG_IS_CHANNEL_A 0x00000020 +#define SUNZILOG_FLAG_REGS_HELD 0x00000040 +#define SUNZILOG_FLAG_TX_STOPPED 0x00000080 +#define SUNZILOG_FLAG_TX_ACTIVE 0x00000100 +#define SUNZILOG_FLAG_ESCC 0x00000200 +#define SUNZILOG_FLAG_ISR_HANDLER 0x00000400 + + unsigned int cflag; + + unsigned char parity_mask; + unsigned char prev_status; + +#ifdef CONFIG_SERIO + struct serio serio; + int serio_open; +#endif +}; + +static void sunzilog_putchar(struct uart_port *port, unsigned char ch); + +#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel __iomem *)((PORT)->membase)) +#define UART_ZILOG(PORT) ((struct uart_sunzilog_port *)(PORT)) + +#define ZS_IS_KEYB(UP) ((UP)->flags & SUNZILOG_FLAG_CONS_KEYB) +#define ZS_IS_MOUSE(UP) ((UP)->flags & SUNZILOG_FLAG_CONS_MOUSE) +#define ZS_IS_CONS(UP) ((UP)->flags & SUNZILOG_FLAG_IS_CONS) +#define ZS_IS_KGDB(UP) ((UP)->flags & SUNZILOG_FLAG_IS_KGDB) +#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & SUNZILOG_FLAG_MODEM_STATUS) +#define ZS_IS_CHANNEL_A(UP) ((UP)->flags & SUNZILOG_FLAG_IS_CHANNEL_A) +#define ZS_REGS_HELD(UP) ((UP)->flags & SUNZILOG_FLAG_REGS_HELD) +#define ZS_TX_STOPPED(UP) ((UP)->flags & SUNZILOG_FLAG_TX_STOPPED) +#define ZS_TX_ACTIVE(UP) ((UP)->flags & SUNZILOG_FLAG_TX_ACTIVE) + +/* Reading and writing Zilog8530 registers. The delays are to make this + * driver work on the Sun4 which needs a settling delay after each chip + * register access, other machines handle this in hardware via auxiliary + * flip-flops which implement the settle time we do in software. + * + * The port lock must be held and local IRQs must be disabled + * when {read,write}_zsreg is invoked. + */ +static unsigned char read_zsreg(struct zilog_channel __iomem *channel, + unsigned char reg) +{ + unsigned char retval; + + writeb(reg, &channel->control); + ZSDELAY(); + retval = readb(&channel->control); + ZSDELAY(); + + return retval; +} + +static void write_zsreg(struct zilog_channel __iomem *channel, + unsigned char reg, unsigned char value) +{ + writeb(reg, &channel->control); + ZSDELAY(); + writeb(value, &channel->control); + ZSDELAY(); +} + +static void sunzilog_clear_fifo(struct zilog_channel __iomem *channel) +{ + int i; + + for (i = 0; i < 32; i++) { + unsigned char regval; + + regval = readb(&channel->control); + ZSDELAY(); + if (regval & Rx_CH_AV) + break; + + regval = read_zsreg(channel, R1); + readb(&channel->data); + ZSDELAY(); + + if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) { + writeb(ERR_RES, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + } + } +} + +/* This function must only be called when the TX is not busy. 
The UART + * port lock must be held and local interrupts disabled. + */ +static int __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *regs) +{ + int i; + int escc; + unsigned char r15; + + /* Let pending transmits finish. */ + for (i = 0; i < 1000; i++) { + unsigned char stat = read_zsreg(channel, R1); + if (stat & ALL_SNT) + break; + udelay(100); + } + + writeb(ERR_RES, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + sunzilog_clear_fifo(channel); + + /* Disable all interrupts. */ + write_zsreg(channel, R1, + regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB)); + + /* Set parity, sync config, stop bits, and clock divisor. */ + write_zsreg(channel, R4, regs[R4]); + + /* Set misc. TX/RX control bits. */ + write_zsreg(channel, R10, regs[R10]); + + /* Set TX/RX controls sans the enable bits. */ + write_zsreg(channel, R3, regs[R3] & ~RxENAB); + write_zsreg(channel, R5, regs[R5] & ~TxENAB); + + /* Synchronous mode config. */ + write_zsreg(channel, R6, regs[R6]); + write_zsreg(channel, R7, regs[R7]); + + /* Don't mess with the interrupt vector (R2, unused by us) and + * master interrupt control (R9). We make sure this is setup + * properly at probe time then never touch it again. + */ + + /* Disable baud generator. */ + write_zsreg(channel, R14, regs[R14] & ~BRENAB); + + /* Clock mode control. */ + write_zsreg(channel, R11, regs[R11]); + + /* Lower and upper byte of baud rate generator divisor. */ + write_zsreg(channel, R12, regs[R12]); + write_zsreg(channel, R13, regs[R13]); + + /* Now rewrite R14, with BRENAB (if set). */ + write_zsreg(channel, R14, regs[R14]); + + /* External status interrupt control. */ + write_zsreg(channel, R15, (regs[R15] | WR7pEN) & ~FIFOEN); + + /* ESCC Extension Register */ + r15 = read_zsreg(channel, R15); + if (r15 & 0x01) { + write_zsreg(channel, R7, regs[R7p]); + + /* External status interrupt and FIFO control. */ + write_zsreg(channel, R15, regs[R15] & ~WR7pEN); + escc = 1; + } else { + /* Clear FIFO bit case it is an issue */ + regs[R15] &= ~FIFOEN; + escc = 0; + } + + /* Reset external status interrupts. */ + write_zsreg(channel, R0, RES_EXT_INT); /* First Latch */ + write_zsreg(channel, R0, RES_EXT_INT); /* Second Latch */ + + /* Rewrite R3/R5, this time without enables masked. */ + write_zsreg(channel, R3, regs[R3]); + write_zsreg(channel, R5, regs[R5]); + + /* Rewrite R1, this time without IRQ enabled masked. */ + write_zsreg(channel, R1, regs[R1]); + + return escc; +} + +/* Reprogram the Zilog channel HW registers with the copies found in the + * software state struct. If the transmitter is busy, we defer this update + * until the next TX complete interrupt. Else, we do it right now. + * + * The UART port lock must be held and local interrupts disabled. 
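+ *
+ * (The deferred update is performed by sunzilog_transmit_chars() below,
+ * which calls __load_zsregs() and clears SUNZILOG_FLAG_REGS_HELD when
+ * that TX complete interrupt arrives.)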
+ */ +static void sunzilog_maybe_update_regs(struct uart_sunzilog_port *up, + struct zilog_channel __iomem *channel) +{ + if (!ZS_REGS_HELD(up)) { + if (ZS_TX_ACTIVE(up)) { + up->flags |= SUNZILOG_FLAG_REGS_HELD; + } else { + __load_zsregs(channel, up->curregs); + } + } +} + +static void sunzilog_change_mouse_baud(struct uart_sunzilog_port *up) +{ + unsigned int cur_cflag = up->cflag; + int brg, new_baud; + + up->cflag &= ~CBAUD; + up->cflag |= suncore_mouse_baud_cflag_next(cur_cflag, &new_baud); + + brg = BPS_TO_BRG(new_baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); + up->curregs[R12] = (brg & 0xff); + up->curregs[R13] = (brg >> 8) & 0xff; + sunzilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(&up->port)); +} + +static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up, + unsigned char ch, int is_break) +{ + if (ZS_IS_KEYB(up)) { + /* Stop-A is handled by drivers/char/keyboard.c now. */ +#ifdef CONFIG_SERIO + if (up->serio_open) + serio_interrupt(&up->serio, ch, 0); +#endif + } else if (ZS_IS_MOUSE(up)) { + int ret = suncore_mouse_baud_detection(ch, is_break); + + switch (ret) { + case 2: + sunzilog_change_mouse_baud(up); + fallthrough; + case 1: + break; + + case 0: +#ifdef CONFIG_SERIO + if (up->serio_open) + serio_interrupt(&up->serio, ch, 0); +#endif + break; + } + } +} + +static struct tty_port * +sunzilog_receive_chars(struct uart_sunzilog_port *up, + struct zilog_channel __iomem *channel) +{ + struct tty_port *port = NULL; + unsigned char ch, r1, flag; + + if (up->port.state != NULL) /* Unopened serial console */ + port = &up->port.state->port; + + for (;;) { + + r1 = read_zsreg(channel, R1); + if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) { + writeb(ERR_RES, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + } + + ch = readb(&channel->control); + ZSDELAY(); + + /* This funny hack depends upon BRK_ABRT not interfering + * with the other bits we care about in R1. + */ + if (ch & BRK_ABRT) + r1 |= BRK_ABRT; + + if (!(ch & Rx_CH_AV)) + break; + + ch = readb(&channel->data); + ZSDELAY(); + + ch &= up->parity_mask; + + if (unlikely(ZS_IS_KEYB(up)) || unlikely(ZS_IS_MOUSE(up))) { + sunzilog_kbdms_receive_chars(up, ch, 0); + continue; + } + + /* A real serial line, record the character and status. */ + flag = TTY_NORMAL; + up->port.icount.rx++; + if (r1 & (BRK_ABRT | PAR_ERR | Rx_OVR | CRC_ERR)) { + if (r1 & BRK_ABRT) { + r1 &= ~(PAR_ERR | CRC_ERR); + up->port.icount.brk++; + if (uart_handle_break(&up->port)) + continue; + } + else if (r1 & PAR_ERR) + up->port.icount.parity++; + else if (r1 & CRC_ERR) + up->port.icount.frame++; + if (r1 & Rx_OVR) + up->port.icount.overrun++; + r1 &= up->port.read_status_mask; + if (r1 & BRK_ABRT) + flag = TTY_BREAK; + else if (r1 & PAR_ERR) + flag = TTY_PARITY; + else if (r1 & CRC_ERR) + flag = TTY_FRAME; + } + if (uart_handle_sysrq_char(&up->port, ch) || !port) + continue; + + if (up->port.ignore_status_mask == 0xff || + (r1 & up->port.ignore_status_mask) == 0) { + tty_insert_flip_char(port, ch, flag); + } + if (r1 & Rx_OVR) + tty_insert_flip_char(port, 0, TTY_OVERRUN); + } + + return port; +} + +static void sunzilog_status_handle(struct uart_sunzilog_port *up, + struct zilog_channel __iomem *channel) +{ + unsigned char status; + + status = readb(&channel->control); + ZSDELAY(); + + writeb(RES_EXT_INT, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + if (status & BRK_ABRT) { + if (ZS_IS_MOUSE(up)) + sunzilog_kbdms_receive_chars(up, 0, 1); + if (ZS_IS_CONS(up)) { + /* Wait for BREAK to deassert to avoid potentially + * confusing the PROM. 
+ */ + while (1) { + status = readb(&channel->control); + ZSDELAY(); + if (!(status & BRK_ABRT)) + break; + } + sun_do_break(); + return; + } + } + + if (ZS_WANTS_MODEM_STATUS(up)) { + if (status & SYNC) + up->port.icount.dsr++; + + /* The Zilog just gives us an interrupt when DCD/CTS/etc. change. + * But it does not tell us which bit has changed, we have to keep + * track of this ourselves. + */ + if ((status ^ up->prev_status) ^ DCD) + uart_handle_dcd_change(&up->port, + (status & DCD)); + if ((status ^ up->prev_status) ^ CTS) + uart_handle_cts_change(&up->port, + (status & CTS)); + + wake_up_interruptible(&up->port.state->port.delta_msr_wait); + } + + up->prev_status = status; +} + +static void sunzilog_transmit_chars(struct uart_sunzilog_port *up, + struct zilog_channel __iomem *channel) +{ + struct circ_buf *xmit; + + if (ZS_IS_CONS(up)) { + unsigned char status = readb(&channel->control); + ZSDELAY(); + + /* TX still busy? Just wait for the next TX done interrupt. + * + * It can occur because of how we do serial console writes. It would + * be nice to transmit console writes just like we normally would for + * a TTY line. (ie. buffered and TX interrupt driven). That is not + * easy because console writes cannot sleep. One solution might be + * to poll on enough port->xmit space becoming free. -DaveM + */ + if (!(status & Tx_BUF_EMP)) + return; + } + + up->flags &= ~SUNZILOG_FLAG_TX_ACTIVE; + + if (ZS_REGS_HELD(up)) { + __load_zsregs(channel, up->curregs); + up->flags &= ~SUNZILOG_FLAG_REGS_HELD; + } + + if (ZS_TX_STOPPED(up)) { + up->flags &= ~SUNZILOG_FLAG_TX_STOPPED; + goto ack_tx_int; + } + + if (up->port.x_char) { + up->flags |= SUNZILOG_FLAG_TX_ACTIVE; + writeb(up->port.x_char, &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + up->port.icount.tx++; + up->port.x_char = 0; + return; + } + + if (up->port.state == NULL) + goto ack_tx_int; + xmit = &up->port.state->xmit; + if (uart_circ_empty(xmit)) + goto ack_tx_int; + + if (uart_tx_stopped(&up->port)) + goto ack_tx_int; + + up->flags |= SUNZILOG_FLAG_TX_ACTIVE; + writeb(xmit->buf[xmit->tail], &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + return; + +ack_tx_int: + writeb(RES_Tx_P, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); +} + +static irqreturn_t sunzilog_interrupt(int irq, void *dev_id) +{ + struct uart_sunzilog_port *up = dev_id; + + while (up) { + struct zilog_channel __iomem *channel + = ZILOG_CHANNEL_FROM_PORT(&up->port); + struct tty_port *port; + unsigned char r3; + + spin_lock(&up->port.lock); + r3 = read_zsreg(channel, R3); + + /* Channel A */ + port = NULL; + if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { + writeb(RES_H_IUS, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + if (r3 & CHARxIP) + port = sunzilog_receive_chars(up, channel); + if (r3 & CHAEXT) + sunzilog_status_handle(up, channel); + if (r3 & CHATxIP) + sunzilog_transmit_chars(up, channel); + } + spin_unlock(&up->port.lock); + + if (port) + tty_flip_buffer_push(port); + + /* Channel B */ + up = up->next; + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + + spin_lock(&up->port.lock); + port = NULL; + if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { + writeb(RES_H_IUS, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + + if (r3 & CHBRxIP) + port = sunzilog_receive_chars(up, channel); + if (r3 & CHBEXT) + sunzilog_status_handle(up, channel); + if (r3 & CHBTxIP) + 
sunzilog_transmit_chars(up, channel); + } + spin_unlock(&up->port.lock); + + if (port) + tty_flip_buffer_push(port); + + up = up->next; + } + + return IRQ_HANDLED; +} + +/* A convenient way to quickly get R0 status. The caller must _not_ hold the + * port lock, it is acquired here. + */ +static __inline__ unsigned char sunzilog_read_channel_status(struct uart_port *port) +{ + struct zilog_channel __iomem *channel; + unsigned char status; + + channel = ZILOG_CHANNEL_FROM_PORT(port); + status = readb(&channel->control); + ZSDELAY(); + + return status; +} + +/* The port lock is not held. */ +static unsigned int sunzilog_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned char status; + unsigned int ret; + + spin_lock_irqsave(&port->lock, flags); + + status = sunzilog_read_channel_status(port); + + spin_unlock_irqrestore(&port->lock, flags); + + if (status & Tx_BUF_EMP) + ret = TIOCSER_TEMT; + else + ret = 0; + + return ret; +} + +/* The port lock is held and interrupts are disabled. */ +static unsigned int sunzilog_get_mctrl(struct uart_port *port) +{ + unsigned char status; + unsigned int ret; + + status = sunzilog_read_channel_status(port); + + ret = 0; + if (status & DCD) + ret |= TIOCM_CAR; + if (status & SYNC) + ret |= TIOCM_DSR; + if (status & CTS) + ret |= TIOCM_CTS; + + return ret; +} + +/* The port lock is held and interrupts are disabled. */ +static void sunzilog_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char set_bits, clear_bits; + + set_bits = clear_bits = 0; + + if (mctrl & TIOCM_RTS) + set_bits |= RTS; + else + clear_bits |= RTS; + if (mctrl & TIOCM_DTR) + set_bits |= DTR; + else + clear_bits |= DTR; + + /* NOTE: Not subject to 'transmitter active' rule. */ + up->curregs[R5] |= set_bits; + up->curregs[R5] &= ~clear_bits; + write_zsreg(channel, R5, up->curregs[R5]); +} + +/* The port lock is held and interrupts are disabled. */ +static void sunzilog_stop_tx(struct uart_port *port) +{ + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + + up->flags |= SUNZILOG_FLAG_TX_STOPPED; +} + +/* The port lock is held and interrupts are disabled. */ +static void sunzilog_start_tx(struct uart_port *port) +{ + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char status; + + up->flags |= SUNZILOG_FLAG_TX_ACTIVE; + up->flags &= ~SUNZILOG_FLAG_TX_STOPPED; + + status = readb(&channel->control); + ZSDELAY(); + + /* TX busy? Just wait for the TX done interrupt. */ + if (!(status & Tx_BUF_EMP)) + return; + + /* Send the first character to jump-start the TX done + * IRQ sending engine. + */ + if (port->x_char) { + writeb(port->x_char, &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + port->icount.tx++; + port->x_char = 0; + } else { + struct circ_buf *xmit = &port->state->xmit; + + if (uart_circ_empty(xmit)) + return; + writeb(xmit->buf[xmit->tail], &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + } +} + +/* The port lock is held. 
*/ +static void sunzilog_stop_rx(struct uart_port *port) +{ + struct uart_sunzilog_port *up = UART_ZILOG(port); + struct zilog_channel __iomem *channel; + + if (ZS_IS_CONS(up)) + return; + + channel = ZILOG_CHANNEL_FROM_PORT(port); + + /* Disable all RX interrupts. */ + up->curregs[R1] &= ~RxINT_MASK; + sunzilog_maybe_update_regs(up, channel); +} + +/* The port lock is held. */ +static void sunzilog_enable_ms(struct uart_port *port) +{ + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char new_reg; + + new_reg = up->curregs[R15] | (DCDIE | SYNCIE | CTSIE); + if (new_reg != up->curregs[R15]) { + up->curregs[R15] = new_reg; + + /* NOTE: Not subject to 'transmitter active' rule. */ + write_zsreg(channel, R15, up->curregs[R15] & ~WR7pEN); + } +} + +/* The port lock is not held. */ +static void sunzilog_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port); + unsigned char set_bits, clear_bits, new_reg; + unsigned long flags; + + set_bits = clear_bits = 0; + + if (break_state) + set_bits |= SND_BRK; + else + clear_bits |= SND_BRK; + + spin_lock_irqsave(&port->lock, flags); + + new_reg = (up->curregs[R5] | set_bits) & ~clear_bits; + if (new_reg != up->curregs[R5]) { + up->curregs[R5] = new_reg; + + /* NOTE: Not subject to 'transmitter active' rule. */ + write_zsreg(channel, R5, up->curregs[R5]); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void __sunzilog_startup(struct uart_sunzilog_port *up) +{ + struct zilog_channel __iomem *channel; + + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + up->prev_status = readb(&channel->control); + + /* Enable receiver and transmitter. */ + up->curregs[R3] |= RxENAB; + up->curregs[R5] |= TxENAB; + + up->curregs[R1] |= EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB; + sunzilog_maybe_update_regs(up, channel); +} + +static int sunzilog_startup(struct uart_port *port) +{ + struct uart_sunzilog_port *up = UART_ZILOG(port); + unsigned long flags; + + if (ZS_IS_CONS(up)) + return 0; + + spin_lock_irqsave(&port->lock, flags); + __sunzilog_startup(up); + spin_unlock_irqrestore(&port->lock, flags); + return 0; +} + +/* + * The test for ZS_IS_CONS is explained by the following e-mail: + ***** + * From: Russell King + * Date: Sun, 8 Dec 2002 10:18:38 +0000 + * + * On Sun, Dec 08, 2002 at 02:43:36AM -0500, Pete Zaitcev wrote: + * > I boot my 2.5 boxes using "console=ttyS0,9600" argument, + * > and I noticed that something is not right with reference + * > counting in this case. It seems that when the console + * > is open by kernel initially, this is not accounted + * > as an open, and uart_startup is not called. + * + * That is correct. We are unable to call uart_startup when the serial + * console is initialised because it may need to allocate memory (as + * request_irq does) and the memory allocators may not have been + * initialised. + * + * 1. initialise the port into a state where it can send characters in the + * console write method. + * + * 2. 
don't do the actual hardware shutdown in your shutdown() method (but + * do the normal software shutdown - ie, free irqs etc) + ***** + */ +static void sunzilog_shutdown(struct uart_port *port) +{ + struct uart_sunzilog_port *up = UART_ZILOG(port); + struct zilog_channel __iomem *channel; + unsigned long flags; + + if (ZS_IS_CONS(up)) + return; + + spin_lock_irqsave(&port->lock, flags); + + channel = ZILOG_CHANNEL_FROM_PORT(port); + + /* Disable receiver and transmitter. */ + up->curregs[R3] &= ~RxENAB; + up->curregs[R5] &= ~TxENAB; + + /* Disable all interrupts and BRK assertion. */ + up->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); + up->curregs[R5] &= ~SND_BRK; + sunzilog_maybe_update_regs(up, channel); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* Shared by TTY driver and serial console setup. The port lock is held + * and local interrupts are disabled. + */ +static void +sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag, + unsigned int iflag, int brg) +{ + + up->curregs[R10] = NRZ; + up->curregs[R11] = TCBR | RCBR; + + /* Program BAUD and clock source. */ + up->curregs[R4] &= ~XCLK_MASK; + up->curregs[R4] |= X16CLK; + up->curregs[R12] = brg & 0xff; + up->curregs[R13] = (brg >> 8) & 0xff; + up->curregs[R14] = BRSRC | BRENAB; + + /* Character size, stop bits, and parity. */ + up->curregs[R3] &= ~RxN_MASK; + up->curregs[R5] &= ~TxN_MASK; + switch (cflag & CSIZE) { + case CS5: + up->curregs[R3] |= Rx5; + up->curregs[R5] |= Tx5; + up->parity_mask = 0x1f; + break; + case CS6: + up->curregs[R3] |= Rx6; + up->curregs[R5] |= Tx6; + up->parity_mask = 0x3f; + break; + case CS7: + up->curregs[R3] |= Rx7; + up->curregs[R5] |= Tx7; + up->parity_mask = 0x7f; + break; + case CS8: + default: + up->curregs[R3] |= Rx8; + up->curregs[R5] |= Tx8; + up->parity_mask = 0xff; + break; + } + up->curregs[R4] &= ~0x0c; + if (cflag & CSTOPB) + up->curregs[R4] |= SB2; + else + up->curregs[R4] |= SB1; + if (cflag & PARENB) + up->curregs[R4] |= PAR_ENAB; + else + up->curregs[R4] &= ~PAR_ENAB; + if (!(cflag & PARODD)) + up->curregs[R4] |= PAR_EVEN; + else + up->curregs[R4] &= ~PAR_EVEN; + + up->port.read_status_mask = Rx_OVR; + if (iflag & INPCK) + up->port.read_status_mask |= CRC_ERR | PAR_ERR; + if (iflag & (IGNBRK | BRKINT | PARMRK)) + up->port.read_status_mask |= BRK_ABRT; + + up->port.ignore_status_mask = 0; + if (iflag & IGNPAR) + up->port.ignore_status_mask |= CRC_ERR | PAR_ERR; + if (iflag & IGNBRK) { + up->port.ignore_status_mask |= BRK_ABRT; + if (iflag & IGNPAR) + up->port.ignore_status_mask |= Rx_OVR; + } + + if ((cflag & CREAD) == 0) + up->port.ignore_status_mask = 0xff; +} + +/* The port lock is not held. 
*/ +static void +sunzilog_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + unsigned long flags; + int baud, brg; + + baud = uart_get_baud_rate(port, termios, old, 1200, 76800); + + spin_lock_irqsave(&up->port.lock, flags); + + brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); + + sunzilog_convert_to_zs(up, termios->c_cflag, termios->c_iflag, brg); + + if (UART_ENABLE_MS(&up->port, termios->c_cflag)) + up->flags |= SUNZILOG_FLAG_MODEM_STATUS; + else + up->flags &= ~SUNZILOG_FLAG_MODEM_STATUS; + + up->cflag = termios->c_cflag; + + sunzilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port)); + + uart_update_timeout(port, termios->c_cflag, baud); + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static const char *sunzilog_type(struct uart_port *port) +{ + struct uart_sunzilog_port *up = UART_ZILOG(port); + + return (up->flags & SUNZILOG_FLAG_ESCC) ? "zs (ESCC)" : "zs"; +} + +/* We do not request/release mappings of the registers here, this + * happens at early serial probe time. + */ +static void sunzilog_release_port(struct uart_port *port) +{ +} + +static int sunzilog_request_port(struct uart_port *port) +{ + return 0; +} + +/* These do not need to do anything interesting either. */ +static void sunzilog_config_port(struct uart_port *port, int flags) +{ +} + +/* We do not support letting the user mess with the divisor, IRQ, etc. */ +static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return -EINVAL; +} + +#ifdef CONFIG_CONSOLE_POLL +static int sunzilog_get_poll_char(struct uart_port *port) +{ + unsigned char ch, r1; + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + struct zilog_channel __iomem *channel + = ZILOG_CHANNEL_FROM_PORT(&up->port); + + + r1 = read_zsreg(channel, R1); + if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) { + writeb(ERR_RES, &channel->control); + ZSDELAY(); + ZS_WSYNC(channel); + } + + ch = readb(&channel->control); + ZSDELAY(); + + /* This funny hack depends upon BRK_ABRT not interfering + * with the other bits we care about in R1. 
+ */ + if (ch & BRK_ABRT) + r1 |= BRK_ABRT; + + if (!(ch & Rx_CH_AV)) + return NO_POLL_CHAR; + + ch = readb(&channel->data); + ZSDELAY(); + + ch &= up->parity_mask; + return ch; +} + +static void sunzilog_put_poll_char(struct uart_port *port, + unsigned char ch) +{ + struct uart_sunzilog_port *up = + container_of(port, struct uart_sunzilog_port, port); + + sunzilog_putchar(&up->port, ch); +} +#endif /* CONFIG_CONSOLE_POLL */ + +static const struct uart_ops sunzilog_pops = { + .tx_empty = sunzilog_tx_empty, + .set_mctrl = sunzilog_set_mctrl, + .get_mctrl = sunzilog_get_mctrl, + .stop_tx = sunzilog_stop_tx, + .start_tx = sunzilog_start_tx, + .stop_rx = sunzilog_stop_rx, + .enable_ms = sunzilog_enable_ms, + .break_ctl = sunzilog_break_ctl, + .startup = sunzilog_startup, + .shutdown = sunzilog_shutdown, + .set_termios = sunzilog_set_termios, + .type = sunzilog_type, + .release_port = sunzilog_release_port, + .request_port = sunzilog_request_port, + .config_port = sunzilog_config_port, + .verify_port = sunzilog_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = sunzilog_get_poll_char, + .poll_put_char = sunzilog_put_poll_char, +#endif +}; + +static int uart_chip_count; +static struct uart_sunzilog_port *sunzilog_port_table; +static struct zilog_layout __iomem **sunzilog_chip_regs; + +static struct uart_sunzilog_port *sunzilog_irq_chain; + +static struct uart_driver sunzilog_reg = { + .owner = THIS_MODULE, + .driver_name = "sunzilog", + .dev_name = "ttyS", + .major = TTY_MAJOR, +}; + +static int __init sunzilog_alloc_tables(int num_sunzilog) +{ + struct uart_sunzilog_port *up; + unsigned long size; + int num_channels = num_sunzilog * 2; + int i; + + size = num_channels * sizeof(struct uart_sunzilog_port); + sunzilog_port_table = kzalloc(size, GFP_KERNEL); + if (!sunzilog_port_table) + return -ENOMEM; + + for (i = 0; i < num_channels; i++) { + up = &sunzilog_port_table[i]; + + spin_lock_init(&up->port.lock); + + if (i == 0) + sunzilog_irq_chain = up; + + if (i < num_channels - 1) + up->next = up + 1; + else + up->next = NULL; + } + + size = num_sunzilog * sizeof(struct zilog_layout __iomem *); + sunzilog_chip_regs = kzalloc(size, GFP_KERNEL); + if (!sunzilog_chip_regs) { + kfree(sunzilog_port_table); + sunzilog_irq_chain = NULL; + return -ENOMEM; + } + + return 0; +} + +static void sunzilog_free_tables(void) +{ + kfree(sunzilog_port_table); + sunzilog_irq_chain = NULL; + kfree(sunzilog_chip_regs); +} + +#define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */ + +static void __maybe_unused sunzilog_putchar(struct uart_port *port, unsigned char ch) +{ + struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port); + int loops = ZS_PUT_CHAR_MAX_DELAY; + + /* This is a timed polling loop so do not switch the explicit + * udelay with ZSDELAY as that is a NOP on some platforms. 
-DaveM + */ + do { + unsigned char val = readb(&channel->control); + if (val & Tx_BUF_EMP) { + ZSDELAY(); + break; + } + udelay(5); + } while (--loops); + + writeb(ch, &channel->data); + ZSDELAY(); + ZS_WSYNC(channel); +} + +#ifdef CONFIG_SERIO + +static DEFINE_SPINLOCK(sunzilog_serio_lock); + +static int sunzilog_serio_write(struct serio *serio, unsigned char ch) +{ + struct uart_sunzilog_port *up = serio->port_data; + unsigned long flags; + + spin_lock_irqsave(&sunzilog_serio_lock, flags); + + sunzilog_putchar(&up->port, ch); + + spin_unlock_irqrestore(&sunzilog_serio_lock, flags); + + return 0; +} + +static int sunzilog_serio_open(struct serio *serio) +{ + struct uart_sunzilog_port *up = serio->port_data; + unsigned long flags; + int ret; + + spin_lock_irqsave(&sunzilog_serio_lock, flags); + if (!up->serio_open) { + up->serio_open = 1; + ret = 0; + } else + ret = -EBUSY; + spin_unlock_irqrestore(&sunzilog_serio_lock, flags); + + return ret; +} + +static void sunzilog_serio_close(struct serio *serio) +{ + struct uart_sunzilog_port *up = serio->port_data; + unsigned long flags; + + spin_lock_irqsave(&sunzilog_serio_lock, flags); + up->serio_open = 0; + spin_unlock_irqrestore(&sunzilog_serio_lock, flags); +} + +#endif /* CONFIG_SERIO */ + +#ifdef CONFIG_SERIAL_SUNZILOG_CONSOLE +static void +sunzilog_console_write(struct console *con, const char *s, unsigned int count) +{ + struct uart_sunzilog_port *up = &sunzilog_port_table[con->index]; + unsigned long flags; + int locked = 1; + + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); + + uart_console_write(&up->port, s, count, sunzilog_putchar); + udelay(2); + + if (locked) + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static int __init sunzilog_console_setup(struct console *con, char *options) +{ + struct uart_sunzilog_port *up = &sunzilog_port_table[con->index]; + unsigned long flags; + int baud, brg; + + if (up->port.type != PORT_SUNZILOG) + return -EINVAL; + + printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n", + (sunzilog_reg.minor - 64) + con->index, con->index); + + /* Get firmware console settings. */ + sunserial_console_termios(con, up->port.dev->of_node); + + /* Firmware console speed is limited to 150-->38400 baud so + * this hackish cflag thing is OK. 
+ */ + switch (con->cflag & CBAUD) { + case B150: baud = 150; break; + case B300: baud = 300; break; + case B600: baud = 600; break; + case B1200: baud = 1200; break; + case B2400: baud = 2400; break; + case B4800: baud = 4800; break; + default: case B9600: baud = 9600; break; + case B19200: baud = 19200; break; + case B38400: baud = 38400; break; + } + + brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); + + spin_lock_irqsave(&up->port.lock, flags); + + up->curregs[R15] |= BRKIE; + sunzilog_convert_to_zs(up, con->cflag, 0, brg); + + sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS); + __sunzilog_startup(up); + + spin_unlock_irqrestore(&up->port.lock, flags); + + return 0; +} + +static struct console sunzilog_console_ops = { + .name = "ttyS", + .write = sunzilog_console_write, + .device = uart_console_device, + .setup = sunzilog_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sunzilog_reg, +}; + +static inline struct console *SUNZILOG_CONSOLE(void) +{ + return &sunzilog_console_ops; +} + +#else +#define SUNZILOG_CONSOLE() (NULL) +#endif + +static void sunzilog_init_kbdms(struct uart_sunzilog_port *up) +{ + int baud, brg; + + if (up->flags & SUNZILOG_FLAG_CONS_KEYB) { + up->cflag = B1200 | CS8 | CLOCAL | CREAD; + baud = 1200; + } else { + up->cflag = B4800 | CS8 | CLOCAL | CREAD; + baud = 4800; + } + + up->curregs[R15] |= BRKIE; + brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); + sunzilog_convert_to_zs(up, up->cflag, 0, brg); + sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS); + __sunzilog_startup(up); +} + +#ifdef CONFIG_SERIO +static void sunzilog_register_serio(struct uart_sunzilog_port *up) +{ + struct serio *serio = &up->serio; + + serio->port_data = up; + + serio->id.type = SERIO_RS232; + if (up->flags & SUNZILOG_FLAG_CONS_KEYB) { + serio->id.proto = SERIO_SUNKBD; + strscpy(serio->name, "zskbd", sizeof(serio->name)); + } else { + serio->id.proto = SERIO_SUN; + serio->id.extra = 1; + strscpy(serio->name, "zsms", sizeof(serio->name)); + } + strscpy(serio->phys, + ((up->flags & SUNZILOG_FLAG_CONS_KEYB) ? + "zs/serio0" : "zs/serio1"), + sizeof(serio->phys)); + + serio->write = sunzilog_serio_write; + serio->open = sunzilog_serio_open; + serio->close = sunzilog_serio_close; + serio->dev.parent = up->port.dev; + + serio_register_port(serio); +} +#endif + +static void sunzilog_init_hw(struct uart_sunzilog_port *up) +{ + struct zilog_channel __iomem *channel; + unsigned long flags; + int baud, brg; + + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + + spin_lock_irqsave(&up->port.lock, flags); + if (ZS_IS_CHANNEL_A(up)) { + write_zsreg(channel, R9, FHWRES); + ZSDELAY_LONG(); + (void) read_zsreg(channel, R0); + } + + if (up->flags & (SUNZILOG_FLAG_CONS_KEYB | + SUNZILOG_FLAG_CONS_MOUSE)) { + up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB; + up->curregs[R4] = PAR_EVEN | X16CLK | SB1; + up->curregs[R3] = RxENAB | Rx8; + up->curregs[R5] = TxENAB | Tx8; + up->curregs[R6] = 0x00; /* SDLC Address */ + up->curregs[R7] = 0x7E; /* SDLC Flag */ + up->curregs[R9] = NV; + up->curregs[R7p] = 0x00; + sunzilog_init_kbdms(up); + /* Only enable interrupts if an ISR handler available */ + if (up->flags & SUNZILOG_FLAG_ISR_HANDLER) + up->curregs[R9] |= MIE; + write_zsreg(channel, R9, up->curregs[R9]); + } else { + /* Normal serial TTY. 
*/ + up->parity_mask = 0xff; + up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB; + up->curregs[R4] = PAR_EVEN | X16CLK | SB1; + up->curregs[R3] = RxENAB | Rx8; + up->curregs[R5] = TxENAB | Tx8; + up->curregs[R6] = 0x00; /* SDLC Address */ + up->curregs[R7] = 0x7E; /* SDLC Flag */ + up->curregs[R9] = NV; + up->curregs[R10] = NRZ; + up->curregs[R11] = TCBR | RCBR; + baud = 9600; + brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); + up->curregs[R12] = (brg & 0xff); + up->curregs[R13] = (brg >> 8) & 0xff; + up->curregs[R14] = BRSRC | BRENAB; + up->curregs[R15] = FIFOEN; /* Use FIFO if on ESCC */ + up->curregs[R7p] = TxFIFO_LVL | RxFIFO_LVL; + if (__load_zsregs(channel, up->curregs)) { + up->flags |= SUNZILOG_FLAG_ESCC; + } + /* Only enable interrupts if an ISR handler available */ + if (up->flags & SUNZILOG_FLAG_ISR_HANDLER) + up->curregs[R9] |= MIE; + write_zsreg(channel, R9, up->curregs[R9]); + } + + spin_unlock_irqrestore(&up->port.lock, flags); + +#ifdef CONFIG_SERIO + if (up->flags & (SUNZILOG_FLAG_CONS_KEYB | + SUNZILOG_FLAG_CONS_MOUSE)) + sunzilog_register_serio(up); +#endif +} + +static int zilog_irq; + +static int zs_probe(struct platform_device *op) +{ + static int kbm_inst, uart_inst; + int inst; + struct uart_sunzilog_port *up; + struct zilog_layout __iomem *rp; + int keyboard_mouse = 0; + int err; + + if (of_find_property(op->dev.of_node, "keyboard", NULL)) + keyboard_mouse = 1; + + /* uarts must come before keyboards/mice */ + if (keyboard_mouse) + inst = uart_chip_count + kbm_inst; + else + inst = uart_inst; + + sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0, + sizeof(struct zilog_layout), + "zs"); + if (!sunzilog_chip_regs[inst]) + return -ENOMEM; + + rp = sunzilog_chip_regs[inst]; + + if (!zilog_irq) + zilog_irq = op->archdata.irqs[0]; + + up = &sunzilog_port_table[inst * 2]; + + /* Channel A */ + up[0].port.mapbase = op->resource[0].start + 0x00; + up[0].port.membase = (void __iomem *) &rp->channelA; + up[0].port.iotype = UPIO_MEM; + up[0].port.irq = op->archdata.irqs[0]; + up[0].port.uartclk = ZS_CLOCK; + up[0].port.fifosize = 1; + up[0].port.ops = &sunzilog_pops; + up[0].port.type = PORT_SUNZILOG; + up[0].port.flags = 0; + up[0].port.line = (inst * 2) + 0; + up[0].port.dev = &op->dev; + up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A; + up[0].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE); + if (keyboard_mouse) + up[0].flags |= SUNZILOG_FLAG_CONS_KEYB; + sunzilog_init_hw(&up[0]); + + /* Channel B */ + up[1].port.mapbase = op->resource[0].start + 0x04; + up[1].port.membase = (void __iomem *) &rp->channelB; + up[1].port.iotype = UPIO_MEM; + up[1].port.irq = op->archdata.irqs[0]; + up[1].port.uartclk = ZS_CLOCK; + up[1].port.fifosize = 1; + up[1].port.ops = &sunzilog_pops; + up[1].port.type = PORT_SUNZILOG; + up[1].port.flags = 0; + up[1].port.line = (inst * 2) + 1; + up[1].port.dev = &op->dev; + up[1].flags |= 0; + up[1].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE); + if (keyboard_mouse) + up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE; + sunzilog_init_hw(&up[1]); + + if (!keyboard_mouse) { + if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node, + &sunzilog_reg, up[0].port.line, + false)) + up->flags |= SUNZILOG_FLAG_IS_CONS; + err = uart_add_one_port(&sunzilog_reg, &up[0].port); + if (err) { + of_iounmap(&op->resource[0], + rp, sizeof(struct zilog_layout)); + return err; + } + if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node, + &sunzilog_reg, up[1].port.line, + false)) + up->flags |= SUNZILOG_FLAG_IS_CONS; + err 
= uart_add_one_port(&sunzilog_reg, &up[1].port); + if (err) { + uart_remove_one_port(&sunzilog_reg, &up[0].port); + of_iounmap(&op->resource[0], + rp, sizeof(struct zilog_layout)); + return err; + } + uart_inst++; + } else { + printk(KERN_INFO "%s: Keyboard at MMIO 0x%llx (irq = %d) " + "is a %s\n", + dev_name(&op->dev), + (unsigned long long) up[0].port.mapbase, + op->archdata.irqs[0], sunzilog_type(&up[0].port)); + printk(KERN_INFO "%s: Mouse at MMIO 0x%llx (irq = %d) " + "is a %s\n", + dev_name(&op->dev), + (unsigned long long) up[1].port.mapbase, + op->archdata.irqs[0], sunzilog_type(&up[1].port)); + kbm_inst++; + } + + platform_set_drvdata(op, &up[0]); + + return 0; +} + +static void zs_remove_one(struct uart_sunzilog_port *up) +{ + if (ZS_IS_KEYB(up) || ZS_IS_MOUSE(up)) { +#ifdef CONFIG_SERIO + serio_unregister_port(&up->serio); +#endif + } else + uart_remove_one_port(&sunzilog_reg, &up->port); +} + +static int zs_remove(struct platform_device *op) +{ + struct uart_sunzilog_port *up = platform_get_drvdata(op); + struct zilog_layout __iomem *regs; + + zs_remove_one(&up[0]); + zs_remove_one(&up[1]); + + regs = sunzilog_chip_regs[up[0].port.line / 2]; + of_iounmap(&op->resource[0], regs, sizeof(struct zilog_layout)); + + return 0; +} + +static const struct of_device_id zs_match[] = { + { + .name = "zs", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, zs_match); + +static struct platform_driver zs_driver = { + .driver = { + .name = "zs", + .of_match_table = zs_match, + }, + .probe = zs_probe, + .remove = zs_remove, +}; + +static int __init sunzilog_init(void) +{ + struct device_node *dp; + int err; + int num_keybms = 0; + int num_sunzilog = 0; + + for_each_node_by_name(dp, "zs") { + num_sunzilog++; + if (of_find_property(dp, "keyboard", NULL)) + num_keybms++; + } + + if (num_sunzilog) { + err = sunzilog_alloc_tables(num_sunzilog); + if (err) + goto out; + + uart_chip_count = num_sunzilog - num_keybms; + + err = sunserial_register_minors(&sunzilog_reg, + uart_chip_count * 2); + if (err) + goto out_free_tables; + } + + err = platform_driver_register(&zs_driver); + if (err) + goto out_unregister_uart; + + if (zilog_irq) { + struct uart_sunzilog_port *up = sunzilog_irq_chain; + err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED, + "zs", sunzilog_irq_chain); + if (err) + goto out_unregister_driver; + + /* Enable Interrupts */ + while (up) { + struct zilog_channel __iomem *channel; + + /* printk (KERN_INFO "Enable IRQ for ZILOG Hardware %p\n", up); */ + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + up->flags |= SUNZILOG_FLAG_ISR_HANDLER; + up->curregs[R9] |= MIE; + write_zsreg(channel, R9, up->curregs[R9]); + up = up->next; + } + } + +out: + return err; + +out_unregister_driver: + platform_driver_unregister(&zs_driver); + +out_unregister_uart: + if (num_sunzilog) { + sunserial_unregister_minors(&sunzilog_reg, num_sunzilog); + sunzilog_reg.cons = NULL; + } + +out_free_tables: + sunzilog_free_tables(); + goto out; +} + +static void __exit sunzilog_exit(void) +{ + platform_driver_unregister(&zs_driver); + + if (zilog_irq) { + struct uart_sunzilog_port *up = sunzilog_irq_chain; + + /* Disable Interrupts */ + while (up) { + struct zilog_channel __iomem *channel; + + /* printk (KERN_INFO "Disable IRQ for ZILOG Hardware %p\n", up); */ + channel = ZILOG_CHANNEL_FROM_PORT(&up->port); + up->flags &= ~SUNZILOG_FLAG_ISR_HANDLER; + up->curregs[R9] &= ~MIE; + write_zsreg(channel, R9, up->curregs[R9]); + up = up->next; + } + + free_irq(zilog_irq, sunzilog_irq_chain); + zilog_irq = 0; + } + + if 
(sunzilog_reg.nr) { + sunserial_unregister_minors(&sunzilog_reg, sunzilog_reg.nr); + sunzilog_free_tables(); + } +} + +module_init(sunzilog_init); +module_exit(sunzilog_exit); + +MODULE_AUTHOR("David S. Miller"); +MODULE_DESCRIPTION("Sun Zilog serial port driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h new file mode 100644 index 000000000..6d6764f0a --- /dev/null +++ b/drivers/tty/serial/sunzilog.h @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SUNZILOG_H +#define _SUNZILOG_H + +struct zilog_channel { + volatile unsigned char control; + volatile unsigned char __pad1; + volatile unsigned char data; + volatile unsigned char __pad2; +}; + +struct zilog_layout { + struct zilog_channel channelB; + struct zilog_channel channelA; +}; + +#define NUM_ZSREGS 17 +#define R7p 16 /* Written as R7 with P15 bit 0 set */ + +/* Conversion routines to/from brg time constants from/to bits + * per second. + */ +#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) +#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) + +/* The Zilog register set */ + +#define FLAG 0x7e + +/* Write Register 0 */ +#define R0 0 /* Register selects */ +#define R1 1 +#define R2 2 +#define R3 3 +#define R4 4 +#define R5 5 +#define R6 6 +#define R7 7 +#define R8 8 +#define R9 9 +#define R10 10 +#define R11 11 +#define R12 12 +#define R13 13 +#define R14 14 +#define R15 15 + +#define NULLCODE 0 /* Null Code */ +#define POINT_HIGH 0x8 /* Select upper half of registers */ +#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */ +#define SEND_ABORT 0x18 /* HDLC Abort */ +#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ +#define RES_Tx_P 0x28 /* Reset TxINT Pending */ +#define ERR_RES 0x30 /* Error Reset */ +#define RES_H_IUS 0x38 /* Reset highest IUS */ + +#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ +#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ +#define RES_EOM_L 0xC0 /* Reset EOM latch */ + +/* Write Register 1 */ + +#define EXT_INT_ENAB 0x1 /* Ext Int Enable */ +#define TxINT_ENAB 0x2 /* Tx Int Enable */ +#define PAR_SPEC 0x4 /* Parity is special condition */ + +#define RxINT_DISAB 0 /* Rx Int Disable */ +#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ +#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */ +#define INT_ERR_Rx 0x18 /* Int on error only */ +#define RxINT_MASK 0x18 + +#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */ +#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */ +#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */ + +/* Write Register #2 (Interrupt Vector) */ + +/* Write Register 3 */ + +#define RxENAB 0x1 /* Rx Enable */ +#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ +#define ADD_SM 0x4 /* Address Search Mode (SDLC) */ +#define RxCRC_ENAB 0x8 /* Rx CRC Enable */ +#define ENT_HM 0x10 /* Enter Hunt Mode */ +#define AUTO_ENAB 0x20 /* Auto Enables */ +#define Rx5 0x0 /* Rx 5 Bits/Character */ +#define Rx7 0x40 /* Rx 7 Bits/Character */ +#define Rx6 0x80 /* Rx 6 Bits/Character */ +#define Rx8 0xc0 /* Rx 8 Bits/Character */ +#define RxN_MASK 0xc0 + +/* Write Register 4 */ + +#define PAR_ENAB 0x1 /* Parity Enable */ +#define PAR_EVEN 0x2 /* Parity Even/Odd* */ + +#define SYNC_ENAB 0 /* Sync Modes Enable */ +#define SB1 0x4 /* 1 stop bit/char */ +#define SB15 0x8 /* 1.5 stop bits/char */ +#define SB2 0xc /* 2 stop bits/char */ + +#define MONSYNC 0 /* 8 Bit Sync character */ +#define BISYNC 0x10 /* 16 bit sync character */ +#define SDLC 
0x20 /* SDLC Mode (01111110 Sync Flag) */ +#define EXTSYNC 0x30 /* External Sync Mode */ + +#define X1CLK 0x0 /* x1 clock mode */ +#define X16CLK 0x40 /* x16 clock mode */ +#define X32CLK 0x80 /* x32 clock mode */ +#define X64CLK 0xC0 /* x64 clock mode */ +#define XCLK_MASK 0xC0 + +/* Write Register 5 */ + +#define TxCRC_ENAB 0x1 /* Tx CRC Enable */ +#define RTS 0x2 /* RTS */ +#define SDLC_CRC 0x4 /* SDLC/CRC-16 */ +#define TxENAB 0x8 /* Tx Enable */ +#define SND_BRK 0x10 /* Send Break */ +#define Tx5 0x0 /* Tx 5 bits (or less)/character */ +#define Tx7 0x20 /* Tx 7 bits/character */ +#define Tx6 0x40 /* Tx 6 bits/character */ +#define Tx8 0x60 /* Tx 8 bits/character */ +#define TxN_MASK 0x60 +#define DTR 0x80 /* DTR */ + +/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ + +/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ + +/* Write Register 7' (ESCC Only) */ +#define AUTO_TxFLAG 1 /* Automatic Tx SDLC Flag */ +#define AUTO_EOM_RST 2 /* Automatic EOM Reset */ +#define AUTOnRTS 4 /* Automatic /RTS pin deactivation */ +#define RxFIFO_LVL 8 /* Receive FIFO interrupt level */ +#define nDTRnREQ 0x10 /* /DTR/REQ timing */ +#define TxFIFO_LVL 0x20 /* Transmit FIFO interrupt level */ +#define EXT_RD_EN 0x40 /* Extended read register enable */ + +/* Write Register 8 (transmit buffer) */ + +/* Write Register 9 (Master interrupt control) */ +#define VIS 1 /* Vector Includes Status */ +#define NV 2 /* No Vector */ +#define DLC 4 /* Disable Lower Chain */ +#define MIE 8 /* Master Interrupt Enable */ +#define STATHI 0x10 /* Status high */ +#define SWIACK 0x20 /* Software Interrupt Ack (not on NMOS) */ +#define NORESET 0 /* No reset on write to R9 */ +#define CHRB 0x40 /* Reset channel B */ +#define CHRA 0x80 /* Reset channel A */ +#define FHWRES 0xc0 /* Force hardware reset */ + +/* Write Register 10 (misc control bits) */ +#define BIT6 1 /* 6 bit/8bit sync */ +#define LOOPMODE 2 /* SDLC Loop mode */ +#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ +#define MARKIDLE 8 /* Mark/flag on idle */ +#define GAOP 0x10 /* Go active on poll */ +#define NRZ 0 /* NRZ mode */ +#define NRZI 0x20 /* NRZI mode */ +#define FM1 0x40 /* FM1 (transition = 1) */ +#define FM0 0x60 /* FM0 (transition = 0) */ +#define CRCPS 0x80 /* CRC Preset I/O */ + +/* Write Register 11 (Clock Mode control) */ +#define TRxCXT 0 /* TRxC = Xtal output */ +#define TRxCTC 1 /* TRxC = Transmit clock */ +#define TRxCBR 2 /* TRxC = BR Generator Output */ +#define TRxCDP 3 /* TRxC = DPLL output */ +#define TRxCOI 4 /* TRxC O/I */ +#define TCRTxCP 0 /* Transmit clock = RTxC pin */ +#define TCTRxCP 8 /* Transmit clock = TRxC pin */ +#define TCBR 0x10 /* Transmit clock = BR Generator output */ +#define TCDPLL 0x18 /* Transmit clock = DPLL output */ +#define RCRTxCP 0 /* Receive clock = RTxC pin */ +#define RCTRxCP 0x20 /* Receive clock = TRxC pin */ +#define RCBR 0x40 /* Receive clock = BR Generator output */ +#define RCDPLL 0x60 /* Receive clock = DPLL output */ +#define RTxCX 0x80 /* RTxC Xtal/No Xtal */ + +/* Write Register 12 (lower byte of baud rate generator time constant) */ + +/* Write Register 13 (upper byte of baud rate generator time constant) */ + +/* Write Register 14 (Misc control bits) */ +#define BRENAB 1 /* Baud rate generator enable */ +#define BRSRC 2 /* Baud rate generator source */ +#define DTRREQ 4 /* DTR/Request function */ +#define AUTOECHO 8 /* Auto Echo */ +#define LOOPBAK 0x10 /* Local loopback */ +#define SEARCH 0x20 /* Enter search mode */ +#define RMC 0x40 /* Reset missing clock */ +#define DISDPLL 
0x60 /* Disable DPLL */ +#define SSBR 0x80 /* Set DPLL source = BR generator */ +#define SSRTxC 0xa0 /* Set DPLL source = RTxC */ +#define SFMM 0xc0 /* Set FM mode */ +#define SNRZI 0xe0 /* Set NRZI mode */ + +/* Write Register 15 (external/status interrupt control) */ +#define WR7pEN 1 /* WR7' Enable (ESCC only) */ +#define ZCIE 2 /* Zero count IE */ +#define FIFOEN 4 /* FIFO Enable (ESCC only) */ +#define DCDIE 8 /* DCD IE */ +#define SYNCIE 0x10 /* Sync/hunt IE */ +#define CTSIE 0x20 /* CTS IE */ +#define TxUIE 0x40 /* Tx Underrun/EOM IE */ +#define BRKIE 0x80 /* Break/Abort IE */ + + +/* Read Register 0 */ +#define Rx_CH_AV 0x1 /* Rx Character Available */ +#define ZCOUNT 0x2 /* Zero count */ +#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ +#define DCD 0x8 /* DCD */ +#define SYNC 0x10 /* Sync/hunt */ +#define CTS 0x20 /* CTS */ +#define TxEOM 0x40 /* Tx underrun */ +#define BRK_ABRT 0x80 /* Break/Abort */ + +/* Read Register 1 */ +#define ALL_SNT 0x1 /* All sent */ +/* Residue Data for 8 Rx bits/char programmed */ +#define RES3 0x8 /* 0/3 */ +#define RES4 0x4 /* 0/4 */ +#define RES5 0xc /* 0/5 */ +#define RES6 0x2 /* 0/6 */ +#define RES7 0xa /* 0/7 */ +#define RES8 0x6 /* 0/8 */ +#define RES18 0xe /* 1/8 */ +#define RES28 0x0 /* 2/8 */ +/* Special Rx Condition Interrupts */ +#define PAR_ERR 0x10 /* Parity error */ +#define Rx_OVR 0x20 /* Rx Overrun Error */ +#define CRC_ERR 0x40 /* CRC/Framing Error */ +#define END_FR 0x80 /* End of Frame (SDLC) */ + +/* Read Register 2 (channel b only) - Interrupt vector */ +#define CHB_Tx_EMPTY 0x00 +#define CHB_EXT_STAT 0x02 +#define CHB_Rx_AVAIL 0x04 +#define CHB_SPECIAL 0x06 +#define CHA_Tx_EMPTY 0x08 +#define CHA_EXT_STAT 0x0a +#define CHA_Rx_AVAIL 0x0c +#define CHA_SPECIAL 0x0e +#define STATUS_MASK 0x0e + +/* Read Register 3 (interrupt pending register) ch a only */ +#define CHBEXT 0x1 /* Channel B Ext/Stat IP */ +#define CHBTxIP 0x2 /* Channel B Tx IP */ +#define CHBRxIP 0x4 /* Channel B Rx IP */ +#define CHAEXT 0x8 /* Channel A Ext/Stat IP */ +#define CHATxIP 0x10 /* Channel A Tx IP */ +#define CHARxIP 0x20 /* Channel A Rx IP */ + +/* Read Register 6 (LSB frame byte count [Not on NMOS]) */ + +/* Read Register 7 (MSB frame byte count and FIFO status [Not on NMOS]) */ + +/* Read Register 8 (receive data register) */ + +/* Read Register 10 (misc status bits) */ +#define ONLOOP 2 /* On loop */ +#define LOOPSEND 0x10 /* Loop sending */ +#define CLK2MIS 0x40 /* Two clocks missing */ +#define CLK1MIS 0x80 /* One clock missing */ + +/* Read Register 12 (lower byte of baud rate generator constant) */ + +/* Read Register 13 (upper byte of baud rate generator constant) */ + +/* Read Register 15 (value of WR 15) */ + +/* Misc macros */ +#define ZS_CLEARERR(channel) do { sbus_writeb(ERR_RES, &channel->control); \ + udelay(5); } while(0) + +#define ZS_CLEARSTAT(channel) do { sbus_writeb(RES_EXT_INT, &channel->control); \ + udelay(5); } while(0) + +#define ZS_CLEARFIFO(channel) do { sbus_readb(&channel->data); \ + udelay(2); \ + sbus_readb(&channel->data); \ + udelay(2); \ + sbus_readb(&channel->data); \ + udelay(2); } while(0) + +#endif /* _SUNZILOG_H */ diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c new file mode 100644 index 000000000..23500b342 --- /dev/null +++ b/drivers/tty/serial/tegra-tcu.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TCU_MBOX_BYTE(i, x) ((x) << (i * 8)) +#define TCU_MBOX_BYTE_V(x, i) (((x) >> (i * 8)) & 0xff) +#define TCU_MBOX_NUM_BYTES(x) ((x) << 24) +#define TCU_MBOX_NUM_BYTES_V(x) (((x) >> 24) & 0x3) + +struct tegra_tcu { + struct uart_driver driver; +#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE) + struct console console; +#endif + struct uart_port port; + + struct mbox_client tx_client, rx_client; + struct mbox_chan *tx, *rx; +}; + +static unsigned int tegra_tcu_uart_tx_empty(struct uart_port *port) +{ + return TIOCSER_TEMT; +} + +static void tegra_tcu_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int tegra_tcu_uart_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void tegra_tcu_uart_stop_tx(struct uart_port *port) +{ +} + +static void tegra_tcu_write_one(struct tegra_tcu *tcu, u32 value, + unsigned int count) +{ + void *msg; + + value |= TCU_MBOX_NUM_BYTES(count); + msg = (void *)(unsigned long)value; + mbox_send_message(tcu->tx, msg); + mbox_flush(tcu->tx, 1000); +} + +static void tegra_tcu_write(struct tegra_tcu *tcu, const char *s, + unsigned int count) +{ + unsigned int written = 0, i = 0; + bool insert_nl = false; + u32 value = 0; + + while (i < count) { + if (insert_nl) { + value |= TCU_MBOX_BYTE(written++, '\n'); + insert_nl = false; + i++; + } else if (s[i] == '\n') { + value |= TCU_MBOX_BYTE(written++, '\r'); + insert_nl = true; + } else { + value |= TCU_MBOX_BYTE(written++, s[i++]); + } + + if (written == 3) { + tegra_tcu_write_one(tcu, value, 3); + value = written = 0; + } + } + + if (written) + tegra_tcu_write_one(tcu, value, written); +} + +static void tegra_tcu_uart_start_tx(struct uart_port *port) +{ + struct tegra_tcu *tcu = port->private_data; + struct circ_buf *xmit = &port->state->xmit; + unsigned long count; + + for (;;) { + count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + if (!count) + break; + + tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count); + uart_xmit_advance(port, count); + } + + uart_write_wakeup(port); +} + +static void tegra_tcu_uart_stop_rx(struct uart_port *port) +{ +} + +static void tegra_tcu_uart_break_ctl(struct uart_port *port, int ctl) +{ +} + +static int tegra_tcu_uart_startup(struct uart_port *port) +{ + return 0; +} + +static void tegra_tcu_uart_shutdown(struct uart_port *port) +{ +} + +static void tegra_tcu_uart_set_termios(struct uart_port *port, + struct ktermios *new, + const struct ktermios *old) +{ +} + +static const struct uart_ops tegra_tcu_uart_ops = { + .tx_empty = tegra_tcu_uart_tx_empty, + .set_mctrl = tegra_tcu_uart_set_mctrl, + .get_mctrl = tegra_tcu_uart_get_mctrl, + .stop_tx = tegra_tcu_uart_stop_tx, + .start_tx = tegra_tcu_uart_start_tx, + .stop_rx = tegra_tcu_uart_stop_rx, + .break_ctl = tegra_tcu_uart_break_ctl, + .startup = tegra_tcu_uart_startup, + .shutdown = tegra_tcu_uart_shutdown, + .set_termios = tegra_tcu_uart_set_termios, +}; + +#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE) +static void tegra_tcu_console_write(struct console *cons, const char *s, + unsigned int count) +{ + struct tegra_tcu *tcu = container_of(cons, struct tegra_tcu, console); + + tegra_tcu_write(tcu, s, count); +} + +static int tegra_tcu_console_setup(struct console *cons, char *options) +{ + return 0; +} +#endif + +static void tegra_tcu_receive(struct mbox_client *cl, void *msg) +{ + struct tegra_tcu *tcu = container_of(cl, struct tegra_tcu, rx_client); + struct 
tty_port *port = &tcu->port.state->port; + u32 value = (u32)(unsigned long)msg; + unsigned int num_bytes, i; + + num_bytes = TCU_MBOX_NUM_BYTES_V(value); + + for (i = 0; i < num_bytes; i++) + tty_insert_flip_char(port, TCU_MBOX_BYTE_V(value, i), + TTY_NORMAL); + + tty_flip_buffer_push(port); +} + +static int tegra_tcu_probe(struct platform_device *pdev) +{ + struct uart_port *port; + struct tegra_tcu *tcu; + int err; + + tcu = devm_kzalloc(&pdev->dev, sizeof(*tcu), GFP_KERNEL); + if (!tcu) + return -ENOMEM; + + tcu->tx_client.dev = &pdev->dev; + tcu->rx_client.dev = &pdev->dev; + tcu->rx_client.rx_callback = tegra_tcu_receive; + + tcu->tx = mbox_request_channel_byname(&tcu->tx_client, "tx"); + if (IS_ERR(tcu->tx)) { + err = PTR_ERR(tcu->tx); + dev_err(&pdev->dev, "failed to get tx mailbox: %d\n", err); + return err; + } + +#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE) + /* setup the console */ + strcpy(tcu->console.name, "ttyTCU"); + tcu->console.device = uart_console_device; + tcu->console.flags = CON_PRINTBUFFER | CON_ANYTIME; + tcu->console.index = -1; + tcu->console.write = tegra_tcu_console_write; + tcu->console.setup = tegra_tcu_console_setup; + tcu->console.data = &tcu->driver; +#endif + + /* setup the driver */ + tcu->driver.owner = THIS_MODULE; + tcu->driver.driver_name = "tegra-tcu"; + tcu->driver.dev_name = "ttyTCU"; +#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE) + tcu->driver.cons = &tcu->console; +#endif + tcu->driver.nr = 1; + + err = uart_register_driver(&tcu->driver); + if (err) { + dev_err(&pdev->dev, "failed to register UART driver: %d\n", + err); + goto free_tx; + } + + /* setup the port */ + port = &tcu->port; + spin_lock_init(&port->lock); + port->dev = &pdev->dev; + port->type = PORT_TEGRA_TCU; + port->ops = &tegra_tcu_uart_ops; + port->fifosize = 1; + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF; + port->private_data = tcu; + + err = uart_add_one_port(&tcu->driver, port); + if (err) { + dev_err(&pdev->dev, "failed to add UART port: %d\n", err); + goto unregister_uart; + } + + /* + * Request RX channel after creating port to ensure tcu->port + * is ready for any immediate incoming bytes. 
+ */ + tcu->rx = mbox_request_channel_byname(&tcu->rx_client, "rx"); + if (IS_ERR(tcu->rx)) { + err = PTR_ERR(tcu->rx); + dev_err(&pdev->dev, "failed to get rx mailbox: %d\n", err); + goto remove_uart_port; + } + + platform_set_drvdata(pdev, tcu); +#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE) + register_console(&tcu->console); +#endif + + return 0; + +remove_uart_port: + uart_remove_one_port(&tcu->driver, &tcu->port); +unregister_uart: + uart_unregister_driver(&tcu->driver); +free_tx: + mbox_free_channel(tcu->tx); + + return err; +} + +static int tegra_tcu_remove(struct platform_device *pdev) +{ + struct tegra_tcu *tcu = platform_get_drvdata(pdev); + +#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE) + unregister_console(&tcu->console); +#endif + mbox_free_channel(tcu->rx); + uart_remove_one_port(&tcu->driver, &tcu->port); + uart_unregister_driver(&tcu->driver); + mbox_free_channel(tcu->tx); + + return 0; +} + +static const struct of_device_id tegra_tcu_match[] = { + { .compatible = "nvidia,tegra194-tcu" }, + { } +}; +MODULE_DEVICE_TABLE(of, tegra_tcu_match); + +static struct platform_driver tegra_tcu_driver = { + .driver = { + .name = "tegra-tcu", + .of_match_table = tegra_tcu_match, + }, + .probe = tegra_tcu_probe, + .remove = tegra_tcu_remove, +}; +module_platform_driver(tegra_tcu_driver); + +MODULE_AUTHOR("Mikko Perttunen "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("NVIDIA Tegra Combined UART driver"); diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c new file mode 100644 index 000000000..bb19ed012 --- /dev/null +++ b/drivers/tty/serial/timbuart.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * timbuart.c timberdale FPGA UART driver + * Copyright (c) 2009 Intel Corporation + */ + +/* Supports: + * Timberdale FPGA UART + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "timbuart.h" + +struct timbuart_port { + struct uart_port port; + struct tasklet_struct tasklet; + int usedma; + u32 last_ier; + struct platform_device *dev; +}; + +static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800, + 921600, 1843200, 3250000}; + +static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier); + +static irqreturn_t timbuart_handleinterrupt(int irq, void *devid); + +static void timbuart_stop_rx(struct uart_port *port) +{ + /* spin lock held by upper layer, disable all RX interrupts */ + u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS; + iowrite32(ier, port->membase + TIMBUART_IER); +} + +static void timbuart_stop_tx(struct uart_port *port) +{ + /* spinlock held by upper layer, disable TX interrupt */ + u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE; + iowrite32(ier, port->membase + TIMBUART_IER); +} + +static void timbuart_start_tx(struct uart_port *port) +{ + struct timbuart_port *uart = + container_of(port, struct timbuart_port, port); + + /* do not transfer anything here -> fire off the tasklet */ + tasklet_schedule(&uart->tasklet); +} + +static unsigned int timbuart_tx_empty(struct uart_port *port) +{ + u32 isr = ioread32(port->membase + TIMBUART_ISR); + + return (isr & TXBE) ? 
TIOCSER_TEMT : 0;
+}
+
+static void timbuart_flush_buffer(struct uart_port *port)
+{
+	if (!timbuart_tx_empty(port)) {
+		u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+			TIMBUART_CTRL_FLSHTX;
+
+		iowrite8(ctl, port->membase + TIMBUART_CTRL);
+		iowrite32(TXBF, port->membase + TIMBUART_ISR);
+	}
+}
+
+static void timbuart_rx_chars(struct uart_port *port)
+{
+	struct tty_port *tport = &port->state->port;
+
+	while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
+		u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
+		port->icount.rx++;
+		tty_insert_flip_char(tport, ch, TTY_NORMAL);
+	}
+
+	tty_flip_buffer_push(tport);
+
+	dev_dbg(port->dev, "%s - total read %d bytes\n",
+		__func__, port->icount.rx);
+}
+
+static void timbuart_tx_chars(struct uart_port *port)
+{
+	struct circ_buf *xmit = &port->state->xmit;
+
+	while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
+		!uart_circ_empty(xmit)) {
+		iowrite8(xmit->buf[xmit->tail],
+			port->membase + TIMBUART_TXFIFO);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+	}
+
+	dev_dbg(port->dev,
+		"%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
+		__func__,
+		port->icount.tx,
+		ioread8(port->membase + TIMBUART_CTRL),
+		port->mctrl & TIOCM_RTS,
+		ioread8(port->membase + TIMBUART_BAUDRATE));
+}
+
+static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
+{
+	struct timbuart_port *uart =
+		container_of(port, struct timbuart_port, port);
+	struct circ_buf *xmit = &port->state->xmit;
+
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+		return;
+
+	if (port->x_char)
+		return;
+
+	if (isr & TXFLAGS) {
+		timbuart_tx_chars(port);
+		/* clear all TX interrupts */
+		iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
+
+		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+			uart_write_wakeup(port);
+	} else
+		/* Re-enable any tx interrupt */
+		*ier |= uart->last_ier & TXFLAGS;
+
+	/* Enable the almost-empty TX interrupt if there are still characters
+	 * in the transmit buffer, or if we delivered some bytes and want that
+	 * interrupt: the upper layer is then woken from the interrupt,
+	 * giving the data some time to go out...
+ */ + if (!uart_circ_empty(xmit)) + *ier |= TXBAE; + + dev_dbg(port->dev, "%s - leaving\n", __func__); +} + +static void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier) +{ + if (isr & RXFLAGS) { + /* Some RX status is set */ + if (isr & RXBF) { + u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | + TIMBUART_CTRL_FLSHRX; + iowrite8(ctl, port->membase + TIMBUART_CTRL); + port->icount.overrun++; + } else if (isr & (RXDP)) + timbuart_rx_chars(port); + + /* ack all RX interrupts */ + iowrite32(RXFLAGS, port->membase + TIMBUART_ISR); + } + + /* always have the RX interrupts enabled */ + *ier |= RXBAF | RXBF | RXTT; + + dev_dbg(port->dev, "%s - leaving\n", __func__); +} + +static void timbuart_tasklet(struct tasklet_struct *t) +{ + struct timbuart_port *uart = from_tasklet(uart, t, tasklet); + u32 isr, ier = 0; + + spin_lock(&uart->port.lock); + + isr = ioread32(uart->port.membase + TIMBUART_ISR); + dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr); + + if (!uart->usedma) + timbuart_handle_tx_port(&uart->port, isr, &ier); + + timbuart_mctrl_check(&uart->port, isr, &ier); + + if (!uart->usedma) + timbuart_handle_rx_port(&uart->port, isr, &ier); + + iowrite32(ier, uart->port.membase + TIMBUART_IER); + + spin_unlock(&uart->port.lock); + dev_dbg(uart->port.dev, "%s leaving\n", __func__); +} + +static unsigned int timbuart_get_mctrl(struct uart_port *port) +{ + u8 cts = ioread8(port->membase + TIMBUART_CTRL); + dev_dbg(port->dev, "%s - cts %x\n", __func__, cts); + + if (cts & TIMBUART_CTRL_CTS) + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; + else + return TIOCM_DSR | TIOCM_CAR; +} + +static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + dev_dbg(port->dev, "%s - %x\n", __func__, mctrl); + + if (mctrl & TIOCM_RTS) + iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL); + else + iowrite8(0, port->membase + TIMBUART_CTRL); +} + +static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier) +{ + unsigned int cts; + + if (isr & CTS_DELTA) { + /* ack */ + iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR); + cts = timbuart_get_mctrl(port); + uart_handle_cts_change(port, cts & TIOCM_CTS); + wake_up_interruptible(&port->state->port.delta_msr_wait); + } + + *ier |= CTS_DELTA; +} + +static void timbuart_break_ctl(struct uart_port *port, int ctl) +{ + /* N/A */ +} + +static int timbuart_startup(struct uart_port *port) +{ + struct timbuart_port *uart = + container_of(port, struct timbuart_port, port); + + dev_dbg(port->dev, "%s\n", __func__); + + iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL); + iowrite32(0x1ff, port->membase + TIMBUART_ISR); + /* Enable all but TX interrupts */ + iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA, + port->membase + TIMBUART_IER); + + return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED, + "timb-uart", uart); +} + +static void timbuart_shutdown(struct uart_port *port) +{ + struct timbuart_port *uart = + container_of(port, struct timbuart_port, port); + dev_dbg(port->dev, "%s\n", __func__); + free_irq(port->irq, uart); + iowrite32(0, port->membase + TIMBUART_IER); + + timbuart_flush_buffer(port); +} + +static int get_bindex(int baud) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(baudrates); i++) + if (baud <= baudrates[i]) + return i; + + return -1; +} + +static void timbuart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud; + short bindex; + unsigned long flags; + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 
16);
+	bindex = get_bindex(baud);
+	dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
+
+	if (bindex < 0)
+		bindex = 0;
+	baud = baudrates[bindex];
+
+	/* The serial layer calls into this once with old = NULL when setting
+	   up initially */
+	if (old)
+		tty_termios_copy_hw(termios, old);
+	tty_termios_encode_baud_rate(termios, baud, baud);
+
+	spin_lock_irqsave(&port->lock, flags);
+	iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
+	uart_update_timeout(port, termios->c_cflag, baud);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *timbuart_type(struct uart_port *port)
+{
+	return port->type == PORT_UNKNOWN ? "timbuart" : NULL;
+}
+
+/* We do not request/release mappings of the registers here,
+ * currently it's done in the probe function.
+ */
+static void timbuart_release_port(struct uart_port *port)
+{
+	struct platform_device *pdev = to_platform_device(port->dev);
+	int size =
+		resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+	if (port->flags & UPF_IOREMAP) {
+		iounmap(port->membase);
+		port->membase = NULL;
+	}
+
+	release_mem_region(port->mapbase, size);
+}
+
+static int timbuart_request_port(struct uart_port *port)
+{
+	struct platform_device *pdev = to_platform_device(port->dev);
+	int size =
+		resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+	if (!request_mem_region(port->mapbase, size, "timb-uart"))
+		return -EBUSY;
+
+	if (port->flags & UPF_IOREMAP) {
+		port->membase = ioremap(port->mapbase, size);
+		if (port->membase == NULL) {
+			release_mem_region(port->mapbase, size);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
+{
+	struct timbuart_port *uart = (struct timbuart_port *)devid;
+
+	if (ioread8(uart->port.membase + TIMBUART_IPR)) {
+		uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
+
+		/* disable interrupts, the tasklet enables them again */
+		iowrite32(0, uart->port.membase + TIMBUART_IER);
+
+		/* fire off bottom half */
+		tasklet_schedule(&uart->tasklet);
+
+		return IRQ_HANDLED;
+	} else
+		return IRQ_NONE;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */ +static void timbuart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_TIMBUART; + timbuart_request_port(port); + } +} + +static int timbuart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + /* we don't want the core code to modify any port params */ + return -EINVAL; +} + +static const struct uart_ops timbuart_ops = { + .tx_empty = timbuart_tx_empty, + .set_mctrl = timbuart_set_mctrl, + .get_mctrl = timbuart_get_mctrl, + .stop_tx = timbuart_stop_tx, + .start_tx = timbuart_start_tx, + .flush_buffer = timbuart_flush_buffer, + .stop_rx = timbuart_stop_rx, + .break_ctl = timbuart_break_ctl, + .startup = timbuart_startup, + .shutdown = timbuart_shutdown, + .set_termios = timbuart_set_termios, + .type = timbuart_type, + .release_port = timbuart_release_port, + .request_port = timbuart_request_port, + .config_port = timbuart_config_port, + .verify_port = timbuart_verify_port +}; + +static struct uart_driver timbuart_driver = { + .owner = THIS_MODULE, + .driver_name = "timberdale_uart", + .dev_name = "ttyTU", + .major = TIMBUART_MAJOR, + .minor = TIMBUART_MINOR, + .nr = 1 +}; + +static int timbuart_probe(struct platform_device *dev) +{ + int err, irq; + struct timbuart_port *uart; + struct resource *iomem; + + dev_dbg(&dev->dev, "%s\n", __func__); + + uart = kzalloc(sizeof(*uart), GFP_KERNEL); + if (!uart) { + err = -EINVAL; + goto err_mem; + } + + uart->usedma = 0; + + uart->port.uartclk = 3250000 * 16; + uart->port.fifosize = TIMBUART_FIFO_SIZE; + uart->port.regshift = 2; + uart->port.iotype = UPIO_MEM; + uart->port.ops = &timbuart_ops; + uart->port.irq = 0; + uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; + uart->port.line = 0; + uart->port.dev = &dev->dev; + + iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!iomem) { + err = -ENOMEM; + goto err_register; + } + uart->port.mapbase = iomem->start; + uart->port.membase = NULL; + + irq = platform_get_irq(dev, 0); + if (irq < 0) { + err = -EINVAL; + goto err_register; + } + uart->port.irq = irq; + + tasklet_setup(&uart->tasklet, timbuart_tasklet); + + err = uart_register_driver(&timbuart_driver); + if (err) + goto err_register; + + err = uart_add_one_port(&timbuart_driver, &uart->port); + if (err) + goto err_add_port; + + platform_set_drvdata(dev, uart); + + return 0; + +err_add_port: + uart_unregister_driver(&timbuart_driver); +err_register: + kfree(uart); +err_mem: + printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n", + err); + + return err; +} + +static int timbuart_remove(struct platform_device *dev) +{ + struct timbuart_port *uart = platform_get_drvdata(dev); + + tasklet_kill(&uart->tasklet); + uart_remove_one_port(&timbuart_driver, &uart->port); + uart_unregister_driver(&timbuart_driver); + kfree(uart); + + return 0; +} + +static struct platform_driver timbuart_platform_driver = { + .driver = { + .name = "timb-uart", + }, + .probe = timbuart_probe, + .remove = timbuart_remove, +}; + +module_platform_driver(timbuart_platform_driver); + +MODULE_DESCRIPTION("Timberdale UART driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:timb-uart"); + diff --git a/drivers/tty/serial/timbuart.h b/drivers/tty/serial/timbuart.h new file mode 100644 index 000000000..007e59af6 --- /dev/null +++ b/drivers/tty/serial/timbuart.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * timbuart.c timberdale FPGA GPIO driver + * Copyright (c) 2009 Intel Corporation + */ + +/* Supports: + * Timberdale FPGA UART + */ + +#ifndef 
_TIMBUART_H +#define _TIMBUART_H + +#define TIMBUART_FIFO_SIZE 2048 + +#define TIMBUART_RXFIFO 0x08 +#define TIMBUART_TXFIFO 0x0c +#define TIMBUART_IER 0x10 +#define TIMBUART_IPR 0x14 +#define TIMBUART_ISR 0x18 +#define TIMBUART_CTRL 0x1c +#define TIMBUART_BAUDRATE 0x20 + +#define TIMBUART_CTRL_RTS 0x01 +#define TIMBUART_CTRL_CTS 0x02 +#define TIMBUART_CTRL_FLSHTX 0x40 +#define TIMBUART_CTRL_FLSHRX 0x80 + +#define TXBF 0x01 +#define TXBAE 0x02 +#define CTS_DELTA 0x04 +#define RXDP 0x08 +#define RXBAF 0x10 +#define RXBF 0x20 +#define RXTT 0x40 +#define RXBNAE 0x80 +#define TXBE 0x100 + +#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE) +#define TXFLAGS (TXBF | TXBAE) + +#define TIMBUART_MAJOR 204 +#define TIMBUART_MINOR 192 + +#endif /* _TIMBUART_H */ + diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c new file mode 100644 index 000000000..eca41ac54 --- /dev/null +++ b/drivers/tty/serial/uartlite.c @@ -0,0 +1,950 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * uartlite.c: Serial driver for Xilinx uartlite serial controller + * + * Copyright (C) 2006 Peter Korsgaard + * Copyright (C) 2007 Secret Lab Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ULITE_NAME "ttyUL" +#define ULITE_MAJOR 204 +#define ULITE_MINOR 187 +#define ULITE_NR_UARTS CONFIG_SERIAL_UARTLITE_NR_UARTS + +/* --------------------------------------------------------------------- + * Register definitions + * + * For register details see datasheet: + * https://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf + */ + +#define ULITE_RX 0x00 +#define ULITE_TX 0x04 +#define ULITE_STATUS 0x08 +#define ULITE_CONTROL 0x0c + +#define ULITE_REGION 16 + +#define ULITE_STATUS_RXVALID 0x01 +#define ULITE_STATUS_RXFULL 0x02 +#define ULITE_STATUS_TXEMPTY 0x04 +#define ULITE_STATUS_TXFULL 0x08 +#define ULITE_STATUS_IE 0x10 +#define ULITE_STATUS_OVERRUN 0x20 +#define ULITE_STATUS_FRAME 0x40 +#define ULITE_STATUS_PARITY 0x80 + +#define ULITE_CONTROL_RST_TX 0x01 +#define ULITE_CONTROL_RST_RX 0x02 +#define ULITE_CONTROL_IE 0x10 +#define UART_AUTOSUSPEND_TIMEOUT 3000 /* ms */ + +/* Static pointer to console port */ +#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE +static struct uart_port *console_port; +#endif + +/** + * struct uartlite_data: Driver private data + * reg_ops: Functions to read/write registers + * clk: Our parent clock, if present + * baud: The baud rate configured when this device was synthesized + * cflags: The cflags for parity and data bits + */ +struct uartlite_data { + const struct uartlite_reg_ops *reg_ops; + struct clk *clk; + unsigned int baud; + tcflag_t cflags; +}; + +struct uartlite_reg_ops { + u32 (*in)(void __iomem *addr); + void (*out)(u32 val, void __iomem *addr); +}; + +static u32 uartlite_inbe32(void __iomem *addr) +{ + return ioread32be(addr); +} + +static void uartlite_outbe32(u32 val, void __iomem *addr) +{ + iowrite32be(val, addr); +} + +static const struct uartlite_reg_ops uartlite_be = { + .in = uartlite_inbe32, + .out = uartlite_outbe32, +}; + +static u32 uartlite_inle32(void __iomem *addr) +{ + return ioread32(addr); +} + +static void uartlite_outle32(u32 val, void __iomem *addr) +{ + iowrite32(val, addr); +} + +static const struct uartlite_reg_ops uartlite_le = { + .in = uartlite_inle32, + .out = uartlite_outle32, +}; + +static inline u32 uart_in32(u32 offset, struct uart_port *port) +{ + 
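Illustrative aside, not part of the patch: the uartlite_be/uartlite_le accessor tables above, together with the endianness probe performed later in ulite_request_port(), amount to choosing a register-access table at runtime by checking whether an expected status bit is visible. A standalone sketch of that idea, with demo_* names invented here and a fake in-memory register standing in for the device:

/* Sketch only: probe byte order of a memory-mapped register image. */
#include <stdio.h>
#include <stdint.h>

struct demo_reg_ops {
	uint32_t (*in)(const void *addr);
};

static uint32_t demo_in_be(const void *addr)
{
	const uint8_t *p = addr;
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static uint32_t demo_in_le(const void *addr)
{
	const uint8_t *p = addr;
	return ((uint32_t)p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0];
}

static const struct demo_reg_ops demo_be = { demo_in_be };
static const struct demo_reg_ops demo_le = { demo_in_le };

#define DEMO_STATUS_TXEMPTY 0x04

int main(void)
{
	/* Pretend the device latched STATUS = TXEMPTY in little-endian order. */
	uint8_t status_reg[4] = { DEMO_STATUS_TXEMPTY, 0, 0, 0 };
	const struct demo_reg_ops *ops = &demo_be;	/* assume big-endian first */

	if (!(ops->in(status_reg) & DEMO_STATUS_TXEMPTY))
		ops = &demo_le;				/* fall back to little-endian */

	printf("detected %s registers\n",
	       ops == &demo_be ? "big-endian" : "little-endian");
	return 0;
}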
struct uartlite_data *pdata = port->private_data; + + return pdata->reg_ops->in(port->membase + offset); +} + +static inline void uart_out32(u32 val, u32 offset, struct uart_port *port) +{ + struct uartlite_data *pdata = port->private_data; + + pdata->reg_ops->out(val, port->membase + offset); +} + +static struct uart_port ulite_ports[ULITE_NR_UARTS]; + +static struct uart_driver ulite_uart_driver; + +/* --------------------------------------------------------------------- + * Core UART driver operations + */ + +static int ulite_receive(struct uart_port *port, int stat) +{ + struct tty_port *tport = &port->state->port; + unsigned char ch = 0; + char flag = TTY_NORMAL; + + if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN + | ULITE_STATUS_FRAME)) == 0) + return 0; + + /* stats */ + if (stat & ULITE_STATUS_RXVALID) { + port->icount.rx++; + ch = uart_in32(ULITE_RX, port); + + if (stat & ULITE_STATUS_PARITY) + port->icount.parity++; + } + + if (stat & ULITE_STATUS_OVERRUN) + port->icount.overrun++; + + if (stat & ULITE_STATUS_FRAME) + port->icount.frame++; + + + /* drop byte with parity error if IGNPAR specificed */ + if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY) + stat &= ~ULITE_STATUS_RXVALID; + + stat &= port->read_status_mask; + + if (stat & ULITE_STATUS_PARITY) + flag = TTY_PARITY; + + + stat &= ~port->ignore_status_mask; + + if (stat & ULITE_STATUS_RXVALID) + tty_insert_flip_char(tport, ch, flag); + + if (stat & ULITE_STATUS_FRAME) + tty_insert_flip_char(tport, 0, TTY_FRAME); + + if (stat & ULITE_STATUS_OVERRUN) + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + + return 1; +} + +static int ulite_transmit(struct uart_port *port, int stat) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (stat & ULITE_STATUS_TXFULL) + return 0; + + if (port->x_char) { + uart_out32(port->x_char, ULITE_TX, port); + port->x_char = 0; + port->icount.tx++; + return 1; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + return 0; + + uart_out32(xmit->buf[xmit->tail], ULITE_TX, port); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); + port->icount.tx++; + + /* wake up */ + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + return 1; +} + +static irqreturn_t ulite_isr(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + int stat, busy, n = 0; + unsigned long flags; + + do { + spin_lock_irqsave(&port->lock, flags); + stat = uart_in32(ULITE_STATUS, port); + busy = ulite_receive(port, stat); + busy |= ulite_transmit(port, stat); + spin_unlock_irqrestore(&port->lock, flags); + n++; + } while (busy); + + /* work done? */ + if (n > 1) { + tty_flip_buffer_push(&port->state->port); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static unsigned int ulite_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(&port->lock, flags); + ret = uart_in32(ULITE_STATUS, port); + spin_unlock_irqrestore(&port->lock, flags); + + return ret & ULITE_STATUS_TXEMPTY ? 
TIOCSER_TEMT : 0; +} + +static unsigned int ulite_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +} + +static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* N/A */ +} + +static void ulite_stop_tx(struct uart_port *port) +{ + /* N/A */ +} + +static void ulite_start_tx(struct uart_port *port) +{ + ulite_transmit(port, uart_in32(ULITE_STATUS, port)); +} + +static void ulite_stop_rx(struct uart_port *port) +{ + /* don't forward any more data (like !CREAD) */ + port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY + | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN; +} + +static void ulite_break_ctl(struct uart_port *port, int ctl) +{ + /* N/A */ +} + +static int ulite_startup(struct uart_port *port) +{ + struct uartlite_data *pdata = port->private_data; + int ret; + + ret = clk_enable(pdata->clk); + if (ret) { + dev_err(port->dev, "Failed to enable clock\n"); + return ret; + } + + ret = request_irq(port->irq, ulite_isr, IRQF_SHARED | IRQF_TRIGGER_RISING, + "uartlite", port); + if (ret) + return ret; + + uart_out32(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX, + ULITE_CONTROL, port); + uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port); + + return 0; +} + +static void ulite_shutdown(struct uart_port *port) +{ + struct uartlite_data *pdata = port->private_data; + + uart_out32(0, ULITE_CONTROL, port); + uart_in32(ULITE_CONTROL, port); /* dummy */ + free_irq(port->irq, port); + clk_disable(pdata->clk); +} + +static void ulite_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + unsigned long flags; + struct uartlite_data *pdata = port->private_data; + + /* Set termios to what the hardware supports */ + termios->c_iflag &= ~BRKINT; + termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CSIZE); + termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE); + tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud); + + spin_lock_irqsave(&port->lock, flags); + + port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN + | ULITE_STATUS_TXFULL; + + if (termios->c_iflag & INPCK) + port->read_status_mask |= + ULITE_STATUS_PARITY | ULITE_STATUS_FRAME; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= ULITE_STATUS_PARITY + | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN; + + /* ignore all characters if CREAD is not set */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= + ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY + | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN; + + /* update timeout */ + uart_update_timeout(port, termios->c_cflag, pdata->baud); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *ulite_type(struct uart_port *port) +{ + return port->type == PORT_UARTLITE ? 
"uartlite" : NULL; +} + +static void ulite_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, ULITE_REGION); + iounmap(port->membase); + port->membase = NULL; +} + +static int ulite_request_port(struct uart_port *port) +{ + struct uartlite_data *pdata = port->private_data; + int ret; + + pr_debug("ulite console: port=%p; port->mapbase=%llx\n", + port, (unsigned long long) port->mapbase); + + if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) { + dev_err(port->dev, "Memory region busy\n"); + return -EBUSY; + } + + port->membase = ioremap(port->mapbase, ULITE_REGION); + if (!port->membase) { + dev_err(port->dev, "Unable to map registers\n"); + release_mem_region(port->mapbase, ULITE_REGION); + return -EBUSY; + } + + pdata->reg_ops = &uartlite_be; + ret = uart_in32(ULITE_CONTROL, port); + uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port); + ret = uart_in32(ULITE_STATUS, port); + /* Endianess detection */ + if ((ret & ULITE_STATUS_TXEMPTY) != ULITE_STATUS_TXEMPTY) + pdata->reg_ops = &uartlite_le; + + return 0; +} + +static void ulite_config_port(struct uart_port *port, int flags) +{ + if (!ulite_request_port(port)) + port->type = PORT_UARTLITE; +} + +static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + /* we don't want the core code to modify any port params */ + return -EINVAL; +} + +static void ulite_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + int ret; + + if (!state) { + ret = pm_runtime_get_sync(port->dev); + if (ret < 0) + dev_err(port->dev, "Failed to enable clocks\n"); + } else { + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + } +} + +#ifdef CONFIG_CONSOLE_POLL +static int ulite_get_poll_char(struct uart_port *port) +{ + if (!(uart_in32(ULITE_STATUS, port) & ULITE_STATUS_RXVALID)) + return NO_POLL_CHAR; + + return uart_in32(ULITE_RX, port); +} + +static void ulite_put_poll_char(struct uart_port *port, unsigned char ch) +{ + while (uart_in32(ULITE_STATUS, port) & ULITE_STATUS_TXFULL) + cpu_relax(); + + /* write char to device */ + uart_out32(ch, ULITE_TX, port); +} +#endif + +static const struct uart_ops ulite_ops = { + .tx_empty = ulite_tx_empty, + .set_mctrl = ulite_set_mctrl, + .get_mctrl = ulite_get_mctrl, + .stop_tx = ulite_stop_tx, + .start_tx = ulite_start_tx, + .stop_rx = ulite_stop_rx, + .break_ctl = ulite_break_ctl, + .startup = ulite_startup, + .shutdown = ulite_shutdown, + .set_termios = ulite_set_termios, + .type = ulite_type, + .release_port = ulite_release_port, + .request_port = ulite_request_port, + .config_port = ulite_config_port, + .verify_port = ulite_verify_port, + .pm = ulite_pm, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = ulite_get_poll_char, + .poll_put_char = ulite_put_poll_char, +#endif +}; + +/* --------------------------------------------------------------------- + * Console driver operations + */ + +#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE +static void ulite_console_wait_tx(struct uart_port *port) +{ + u8 val; + + /* + * Spin waiting for TX fifo to have space available. 
+ * When using the Microblaze Debug Module this can take up to 1s + */ + if (read_poll_timeout_atomic(uart_in32, val, !(val & ULITE_STATUS_TXFULL), + 0, 1000000, false, ULITE_STATUS, port)) + dev_warn(port->dev, + "timeout waiting for TX buffer empty\n"); +} + +static void ulite_console_putchar(struct uart_port *port, unsigned char ch) +{ + ulite_console_wait_tx(port); + uart_out32(ch, ULITE_TX, port); +} + +static void ulite_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = console_port; + unsigned long flags; + unsigned int ier; + int locked = 1; + + if (oops_in_progress) { + locked = spin_trylock_irqsave(&port->lock, flags); + } else + spin_lock_irqsave(&port->lock, flags); + + /* save and disable interrupt */ + ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE; + uart_out32(0, ULITE_CONTROL, port); + + uart_console_write(port, s, count, ulite_console_putchar); + + ulite_console_wait_tx(port); + + /* restore interrupt state */ + if (ier) + uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +static int ulite_console_setup(struct console *co, char *options) +{ + struct uart_port *port = NULL; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= 0 && co->index < ULITE_NR_UARTS) + port = ulite_ports + co->index; + + /* Has the device been initialized yet? */ + if (!port || !port->mapbase) { + pr_debug("console on ttyUL%i not present\n", co->index); + return -ENODEV; + } + + console_port = port; + + /* not initialized yet? */ + if (!port->membase) { + if (ulite_request_port(port)) + return -ENODEV; + } + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console ulite_console = { + .name = ULITE_NAME, + .write = ulite_console_write, + .device = uart_console_device, + .setup = ulite_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, /* Specified on the cmdline (e.g. console=ttyUL0 ) */ + .data = &ulite_uart_driver, +}; + +static void early_uartlite_putc(struct uart_port *port, unsigned char c) +{ + /* + * Limit how many times we'll spin waiting for TX FIFO status. + * This will prevent lockups if the base address is incorrectly + * set, or any other issue on the UARTLITE. + * This limit is pretty arbitrary, unless we are at about 10 baud + * we'll never timeout on a working UART. 
+ */ + unsigned retries = 1000000; + + while (--retries && + (readl(port->membase + ULITE_STATUS) & ULITE_STATUS_TXFULL)) + ; + + /* Only attempt the iowrite if we didn't timeout */ + if (retries) + writel(c & 0xff, port->membase + ULITE_TX); +} + +static void early_uartlite_write(struct console *console, + const char *s, unsigned n) +{ + struct earlycon_device *device = console->data; + uart_console_write(&device->port, s, n, early_uartlite_putc); +} + +static int __init early_uartlite_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = early_uartlite_write; + return 0; +} +EARLYCON_DECLARE(uartlite, early_uartlite_setup); +OF_EARLYCON_DECLARE(uartlite_b, "xlnx,opb-uartlite-1.00.b", early_uartlite_setup); +OF_EARLYCON_DECLARE(uartlite_a, "xlnx,xps-uartlite-1.00.a", early_uartlite_setup); + +#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */ + +static struct uart_driver ulite_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "uartlite", + .dev_name = ULITE_NAME, + .major = ULITE_MAJOR, + .minor = ULITE_MINOR, + .nr = ULITE_NR_UARTS, +#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE + .cons = &ulite_console, +#endif +}; + +/* --------------------------------------------------------------------- + * Port assignment functions (mapping devices to uart_port structures) + */ + +/** ulite_assign: register a uartlite device with the driver + * + * @dev: pointer to device structure + * @id: requested id number. Pass -1 for automatic port assignment + * @base: base address of uartlite registers + * @irq: irq number for uartlite + * @pdata: private data for uartlite + * + * Returns: 0 on success, <0 otherwise + */ +static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq, + struct uartlite_data *pdata) +{ + struct uart_port *port; + int rc; + + /* if id = -1; then scan for a free id and use that */ + if (id < 0) { + for (id = 0; id < ULITE_NR_UARTS; id++) + if (ulite_ports[id].mapbase == 0) + break; + } + if (id < 0 || id >= ULITE_NR_UARTS) { + dev_err(dev, "%s%i too large\n", ULITE_NAME, id); + return -EINVAL; + } + + if ((ulite_ports[id].mapbase) && (ulite_ports[id].mapbase != base)) { + dev_err(dev, "cannot assign to %s%i; it is already in use\n", + ULITE_NAME, id); + return -EBUSY; + } + + port = &ulite_ports[id]; + + spin_lock_init(&port->lock); + port->fifosize = 16; + port->regshift = 2; + port->iotype = UPIO_MEM; + port->iobase = 1; /* mark port in use */ + port->mapbase = base; + port->membase = NULL; + port->ops = &ulite_ops; + port->irq = irq; + port->flags = UPF_BOOT_AUTOCONF; + port->dev = dev; + port->type = PORT_UNKNOWN; + port->line = id; + port->private_data = pdata; + + dev_set_drvdata(dev, port); + + /* Register the port */ + rc = uart_add_one_port(&ulite_uart_driver, port); + if (rc) { + dev_err(dev, "uart_add_one_port() failed; err=%i\n", rc); + port->mapbase = 0; + dev_set_drvdata(dev, NULL); + return rc; + } + + return 0; +} + +/** ulite_release: register a uartlite device with the driver + * + * @dev: pointer to device structure + */ +static int ulite_release(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + int rc = 0; + + if (port) { + rc = uart_remove_one_port(&ulite_uart_driver, port); + dev_set_drvdata(dev, NULL); + port->mapbase = 0; + } + + return rc; +} + +/** + * ulite_suspend - Stop the device. + * + * @dev: handle to the device structure. + * Return: 0 always. 
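Illustrative aside, not part of the patch: ulite_assign() above either takes the caller's port number or scans for the first free slot, and refuses a slot already bound to a different base address. A compact userspace sketch of that bookkeeping, with demo_* names, array size, and addresses made up for the example:

/* Sketch only: slot selection in the spirit of ulite_assign(). */
#include <stdio.h>
#include <stdint.h>

#define DEMO_NR_UARTS 4

static uint64_t demo_mapbase[DEMO_NR_UARTS];	/* 0 == slot free */

static int demo_assign(int id, uint64_t base)
{
	if (id < 0)					/* scan for a free id */
		for (id = 0; id < DEMO_NR_UARTS; id++)
			if (demo_mapbase[id] == 0)
				break;

	if (id < 0 || id >= DEMO_NR_UARTS)
		return -1;				/* no free slot / id too large */

	if (demo_mapbase[id] && demo_mapbase[id] != base)
		return -2;				/* slot already in use */

	demo_mapbase[id] = base;
	return id;
}

int main(void)
{
	printf("first port  -> slot %d\n", demo_assign(-1, 0x40600000));
	printf("second port -> slot %d\n", demo_assign(-1, 0x40610000));
	printf("conflict    -> %d\n", demo_assign(0, 0x40620000));
	return 0;
}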
+ */ +static int __maybe_unused ulite_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + + if (port) + uart_suspend_port(&ulite_uart_driver, port); + + return 0; +} + +/** + * ulite_resume - Resume the device. + * + * @dev: handle to the device structure. + * Return: 0 on success, errno otherwise. + */ +static int __maybe_unused ulite_resume(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + + if (port) + uart_resume_port(&ulite_uart_driver, port); + + return 0; +} + +static int __maybe_unused ulite_runtime_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct uartlite_data *pdata = port->private_data; + + clk_disable(pdata->clk); + return 0; +}; + +static int __maybe_unused ulite_runtime_resume(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct uartlite_data *pdata = port->private_data; + int ret; + + ret = clk_enable(pdata->clk); + if (ret) { + dev_err(dev, "Cannot enable clock.\n"); + return ret; + } + return 0; +} + +/* --------------------------------------------------------------------- + * Platform bus binding + */ + +static const struct dev_pm_ops ulite_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ulite_suspend, ulite_resume) + SET_RUNTIME_PM_OPS(ulite_runtime_suspend, + ulite_runtime_resume, NULL) +}; + +#if defined(CONFIG_OF) +/* Match table for of_platform binding */ +static const struct of_device_id ulite_of_match[] = { + { .compatible = "xlnx,opb-uartlite-1.00.b", }, + { .compatible = "xlnx,xps-uartlite-1.00.a", }, + {} +}; +MODULE_DEVICE_TABLE(of, ulite_of_match); +#endif /* CONFIG_OF */ + +static int ulite_probe(struct platform_device *pdev) +{ + struct resource *res; + struct uartlite_data *pdata; + int irq, ret; + int id = pdev->id; + + pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + if (IS_ENABLED(CONFIG_OF)) { + const char *prop; + struct device_node *np = pdev->dev.of_node; + u32 val = 0; + + prop = "port-number"; + ret = of_property_read_u32(np, prop, &id); + if (ret && ret != -EINVAL) +of_err: + return dev_err_probe(&pdev->dev, ret, + "could not read %s\n", prop); + + prop = "current-speed"; + ret = of_property_read_u32(np, prop, &pdata->baud); + if (ret) + goto of_err; + + prop = "xlnx,use-parity"; + ret = of_property_read_u32(np, prop, &val); + if (ret && ret != -EINVAL) + goto of_err; + + if (val) { + prop = "xlnx,odd-parity"; + ret = of_property_read_u32(np, prop, &val); + if (ret) + goto of_err; + + if (val) + pdata->cflags |= PARODD; + pdata->cflags |= PARENB; + } + + val = 8; + prop = "xlnx,data-bits"; + ret = of_property_read_u32(np, prop, &val); + if (ret && ret != -EINVAL) + goto of_err; + + switch (val) { + case 5: + pdata->cflags |= CS5; + break; + case 6: + pdata->cflags |= CS6; + break; + case 7: + pdata->cflags |= CS7; + break; + case 8: + pdata->cflags |= CS8; + break; + default: + return dev_err_probe(&pdev->dev, -EINVAL, + "bad data bits %d\n", val); + } + } else { + pdata->baud = 9600; + pdata->cflags = CS8; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + pdata->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); + if (IS_ERR(pdata->clk)) { + if (PTR_ERR(pdata->clk) != -ENOENT) + return PTR_ERR(pdata->clk); + + /* + * Clock framework support is optional, continue on + * anyways if we don't find a matching clock. 
+ */ + pdata->clk = NULL; + } + + ret = clk_prepare_enable(pdata->clk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare clock\n"); + return ret; + } + + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + if (!ulite_uart_driver.state) { + dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n"); + ret = uart_register_driver(&ulite_uart_driver); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register driver\n"); + clk_disable_unprepare(pdata->clk); + return ret; + } + } + + ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return ret; +} + +static int ulite_remove(struct platform_device *pdev) +{ + struct uart_port *port = dev_get_drvdata(&pdev->dev); + struct uartlite_data *pdata = port->private_data; + int rc; + + clk_disable_unprepare(pdata->clk); + rc = ulite_release(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + return rc; +} + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:uartlite"); + +static struct platform_driver ulite_platform_driver = { + .probe = ulite_probe, + .remove = ulite_remove, + .driver = { + .name = "uartlite", + .of_match_table = of_match_ptr(ulite_of_match), + .pm = &ulite_pm_ops, + }, +}; + +/* --------------------------------------------------------------------- + * Module setup/teardown + */ + +static int __init ulite_init(void) +{ + + pr_debug("uartlite: calling platform_driver_register()\n"); + return platform_driver_register(&ulite_platform_driver); +} + +static void __exit ulite_exit(void) +{ + platform_driver_unregister(&ulite_platform_driver); + if (ulite_uart_driver.state) + uart_unregister_driver(&ulite_uart_driver); +} + +module_init(ulite_init); +module_exit(ulite_exit); + +MODULE_AUTHOR("Peter Korsgaard "); +MODULE_DESCRIPTION("Xilinx uartlite serial driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c new file mode 100644 index 000000000..82cf14dd3 --- /dev/null +++ b/drivers/tty/serial/ucc_uart.c @@ -0,0 +1,1539 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Freescale QUICC Engine UART device driver + * + * Author: Timur Tabi + * + * Copyright 2007 Freescale Semiconductor, Inc. + * + * This driver adds support for UART devices via Freescale's QUICC Engine + * found on some Freescale SOCs. + * + * If Soft-UART support is needed but not already present, then this driver + * will request and upload the "Soft-UART" microcode upon probe. The + * filename of the microcode should be fsl_qe_ucode_uart_X_YZ.bin, where "X" + * is the name of the SOC (e.g. 8323), and YZ is the revision of the SOC, + * (e.g. "11" for 1.1). + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#ifdef CONFIG_PPC32 +#include /* mfspr, SPRN_SVR */ +#endif + +/* + * The GUMR flag for Soft UART. This would normally be defined in qe.h, + * but Soft-UART is a hack and we want to keep everything related to it in + * this file. + */ +#define UCC_SLOW_GUMR_H_SUART 0x00004000 /* Soft-UART */ + +/* + * soft_uart is 1 if we need to use Soft-UART mode + */ +static int soft_uart; +/* + * firmware_loaded is 1 if the firmware has been loaded, 0 otherwise. 
+ */ +static int firmware_loaded; + +/* Enable this macro to configure all serial ports in internal loopback + mode */ +/* #define LOOPBACK */ + +/* The major and minor device numbers are defined in + * http://www.lanana.org/docs/device-list/devices-2.6+.txt. For the QE + * UART, we have major number 204 and minor numbers 46 - 49, which are the + * same as for the CPM2. This decision was made because no Freescale part + * has both a CPM and a QE. + */ +#define SERIAL_QE_MAJOR 204 +#define SERIAL_QE_MINOR 46 + +/* Since we only have minor numbers 46 - 49, there is a hard limit of 4 ports */ +#define UCC_MAX_UART 4 + +/* The number of buffer descriptors for receiving characters. */ +#define RX_NUM_FIFO 4 + +/* The number of buffer descriptors for transmitting characters. */ +#define TX_NUM_FIFO 4 + +/* The maximum size of the character buffer for a single RX BD. */ +#define RX_BUF_SIZE 32 + +/* The maximum size of the character buffer for a single TX BD. */ +#define TX_BUF_SIZE 32 + +/* + * The number of jiffies to wait after receiving a close command before the + * device is actually closed. This allows the last few characters to be + * sent over the wire. + */ +#define UCC_WAIT_CLOSING 100 + +struct ucc_uart_pram { + struct ucc_slow_pram common; + u8 res1[8]; /* reserved */ + __be16 maxidl; /* Maximum idle chars */ + __be16 idlc; /* temp idle counter */ + __be16 brkcr; /* Break count register */ + __be16 parec; /* receive parity error counter */ + __be16 frmec; /* receive framing error counter */ + __be16 nosec; /* receive noise counter */ + __be16 brkec; /* receive break condition counter */ + __be16 brkln; /* last received break length */ + __be16 uaddr[2]; /* UART address character 1 & 2 */ + __be16 rtemp; /* Temp storage */ + __be16 toseq; /* Transmit out of sequence char */ + __be16 cchars[8]; /* control characters 1-8 */ + __be16 rccm; /* receive control character mask */ + __be16 rccr; /* receive control character register */ + __be16 rlbc; /* receive last break character */ + __be16 res2; /* reserved */ + __be32 res3; /* reserved, should be cleared */ + u8 res4; /* reserved, should be cleared */ + u8 res5[3]; /* reserved, should be cleared */ + __be32 res6; /* reserved, should be cleared */ + __be32 res7; /* reserved, should be cleared */ + __be32 res8; /* reserved, should be cleared */ + __be32 res9; /* reserved, should be cleared */ + __be32 res10; /* reserved, should be cleared */ + __be32 res11; /* reserved, should be cleared */ + __be32 res12; /* reserved, should be cleared */ + __be32 res13; /* reserved, should be cleared */ +/* The rest is for Soft-UART only */ + __be16 supsmr; /* 0x90, Shadow UPSMR */ + __be16 res92; /* 0x92, reserved, initialize to 0 */ + __be32 rx_state; /* 0x94, RX state, initialize to 0 */ + __be32 rx_cnt; /* 0x98, RX count, initialize to 0 */ + u8 rx_length; /* 0x9C, Char length, set to 1+CL+PEN+1+SL */ + u8 rx_bitmark; /* 0x9D, reserved, initialize to 0 */ + u8 rx_temp_dlst_qe; /* 0x9E, reserved, initialize to 0 */ + u8 res14[0xBC - 0x9F]; /* reserved */ + __be32 dump_ptr; /* 0xBC, Dump pointer */ + __be32 rx_frame_rem; /* 0xC0, reserved, initialize to 0 */ + u8 rx_frame_rem_size; /* 0xC4, reserved, initialize to 0 */ + u8 tx_mode; /* 0xC5, mode, 0=AHDLC, 1=UART */ + __be16 tx_state; /* 0xC6, TX state */ + u8 res15[0xD0 - 0xC8]; /* reserved */ + __be32 resD0; /* 0xD0, reserved, initialize to 0 */ + u8 resD4; /* 0xD4, reserved, initialize to 0 */ + __be16 resD5; /* 0xD5, reserved, initialize to 0 */ +} __attribute__ ((packed)); + +/* SUPSMR 
definitions, for Soft-UART only */ +#define UCC_UART_SUPSMR_SL 0x8000 +#define UCC_UART_SUPSMR_RPM_MASK 0x6000 +#define UCC_UART_SUPSMR_RPM_ODD 0x0000 +#define UCC_UART_SUPSMR_RPM_LOW 0x2000 +#define UCC_UART_SUPSMR_RPM_EVEN 0x4000 +#define UCC_UART_SUPSMR_RPM_HIGH 0x6000 +#define UCC_UART_SUPSMR_PEN 0x1000 +#define UCC_UART_SUPSMR_TPM_MASK 0x0C00 +#define UCC_UART_SUPSMR_TPM_ODD 0x0000 +#define UCC_UART_SUPSMR_TPM_LOW 0x0400 +#define UCC_UART_SUPSMR_TPM_EVEN 0x0800 +#define UCC_UART_SUPSMR_TPM_HIGH 0x0C00 +#define UCC_UART_SUPSMR_FRZ 0x0100 +#define UCC_UART_SUPSMR_UM_MASK 0x00c0 +#define UCC_UART_SUPSMR_UM_NORMAL 0x0000 +#define UCC_UART_SUPSMR_UM_MAN_MULTI 0x0040 +#define UCC_UART_SUPSMR_UM_AUTO_MULTI 0x00c0 +#define UCC_UART_SUPSMR_CL_MASK 0x0030 +#define UCC_UART_SUPSMR_CL_8 0x0030 +#define UCC_UART_SUPSMR_CL_7 0x0020 +#define UCC_UART_SUPSMR_CL_6 0x0010 +#define UCC_UART_SUPSMR_CL_5 0x0000 + +#define UCC_UART_TX_STATE_AHDLC 0x00 +#define UCC_UART_TX_STATE_UART 0x01 +#define UCC_UART_TX_STATE_X1 0x00 +#define UCC_UART_TX_STATE_X16 0x80 + +#define UCC_UART_PRAM_ALIGNMENT 0x100 + +#define UCC_UART_SIZE_OF_BD UCC_SLOW_SIZE_OF_BD +#define NUM_CONTROL_CHARS 8 + +/* Private per-port data structure */ +struct uart_qe_port { + struct uart_port port; + struct ucc_slow __iomem *uccp; + struct ucc_uart_pram __iomem *uccup; + struct ucc_slow_info us_info; + struct ucc_slow_private *us_private; + struct device_node *np; + unsigned int ucc_num; /* First ucc is 0, not 1 */ + + u16 rx_nrfifos; + u16 rx_fifosize; + u16 tx_nrfifos; + u16 tx_fifosize; + int wait_closing; + u32 flags; + struct qe_bd *rx_bd_base; + struct qe_bd *rx_cur; + struct qe_bd *tx_bd_base; + struct qe_bd *tx_cur; + unsigned char *tx_buf; + unsigned char *rx_buf; + void *bd_virt; /* virtual address of the BD buffers */ + dma_addr_t bd_dma_addr; /* bus address of the BD buffers */ + unsigned int bd_size; /* size of BD buffer space */ +}; + +static struct uart_driver ucc_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "ucc_uart", + .dev_name = "ttyQE", + .major = SERIAL_QE_MAJOR, + .minor = SERIAL_QE_MINOR, + .nr = UCC_MAX_UART, +}; + +/* + * Virtual to physical address translation. + * + * Given the virtual address for a character buffer, this function returns + * the physical (DMA) equivalent. + */ +static inline dma_addr_t cpu2qe_addr(void *addr, struct uart_qe_port *qe_port) +{ + if (likely((addr >= qe_port->bd_virt)) && + (addr < (qe_port->bd_virt + qe_port->bd_size))) + return qe_port->bd_dma_addr + (addr - qe_port->bd_virt); + + /* something nasty happened */ + printk(KERN_ERR "%s: addr=%p\n", __func__, addr); + BUG(); + return 0; +} + +/* + * Physical to virtual address translation. + * + * Given the physical (DMA) address for a character buffer, this function + * returns the virtual equivalent. + */ +static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port) +{ + /* sanity check */ + if (likely((addr >= qe_port->bd_dma_addr) && + (addr < (qe_port->bd_dma_addr + qe_port->bd_size)))) + return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); + + /* something nasty happened */ + printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr); + BUG(); + return NULL; +} + +/* + * Return 1 if the QE is done transmitting all buffers for this port + * + * This function scans each BD in sequence. If we find a BD that is not + * ready (READY=1), then we return 0 indicating that the QE is still sending + * data. If we reach the last BD (WRAP=1), then we know we've scanned + * the entire list, and all BDs are done. 
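Illustrative aside, not part of the patch: the comment above describes scanning the TX descriptor ring until a busy (READY) or final (WRAP) descriptor is found, which qe_uart_tx_empty() below implements. The same walk in plain, runnable C, with demo_* names and status bit values chosen only for the example:

/* Sketch only: walk a descriptor ring to decide whether TX is idle. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_BD_READY 0x8000	/* hardware still owns this descriptor */
#define DEMO_BD_WRAP  0x2000	/* last descriptor in the ring */

struct demo_bd {
	uint16_t status;
};

static int demo_tx_empty(const struct demo_bd *bdp)
{
	while (1) {
		if (bdp->status & DEMO_BD_READY)
			return 0;		/* still transmitting */
		if (bdp->status & DEMO_BD_WRAP)
			return 1;		/* scanned the whole ring, all done */
		bdp++;
	}
}

int main(void)
{
	struct demo_bd ring[4] = {
		{ 0 }, { 0 }, { DEMO_BD_READY }, { DEMO_BD_WRAP },
	};

	printf("empty? %d\n", demo_tx_empty(ring));	/* 0: third BD busy */
	ring[2].status = 0;
	printf("empty? %d\n", demo_tx_empty(ring));	/* 1: all done */
	return 0;
}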
+ */ +static unsigned int qe_uart_tx_empty(struct uart_port *port) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + struct qe_bd *bdp = qe_port->tx_bd_base; + + while (1) { + if (ioread16be(&bdp->status) & BD_SC_READY) + /* This BD is not done, so return "not done" */ + return 0; + + if (ioread16be(&bdp->status) & BD_SC_WRAP) + /* + * This BD is done and it's the last one, so return + * "done" + */ + return 1; + + bdp++; + } +} + +/* + * Set the modem control lines + * + * Although the QE can control the modem control lines (e.g. CTS), we + * don't need that support. This function must exist, however, otherwise + * the kernel will panic. + */ +static void qe_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +/* + * Get the current modem control line status + * + * Although the QE can control the modem control lines (e.g. CTS), this + * driver currently doesn't support that, so we always return Carrier + * Detect, Data Set Ready, and Clear To Send. + */ +static unsigned int qe_uart_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +/* + * Disable the transmit interrupt. + * + * Although this function is called "stop_tx", it does not actually stop + * transmission of data. Instead, it tells the QE to not generate an + * interrupt when the UCC is finished sending characters. + */ +static void qe_uart_stop_tx(struct uart_port *port) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + + qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX); +} + +/* + * Transmit as many characters to the HW as possible. + * + * This function will attempt to stuff of all the characters from the + * kernel's transmit buffer into TX BDs. + * + * A return value of non-zero indicates that it successfully stuffed all + * characters from the kernel buffer. + * + * A return value of zero indicates that there are still characters in the + * kernel's buffer that have not been transmitted, but there are no more BDs + * available. This function should be called again after a BD has been made + * available. + */ +static int qe_uart_tx_pump(struct uart_qe_port *qe_port) +{ + struct qe_bd *bdp; + unsigned char *p; + unsigned int count; + struct uart_port *port = &qe_port->port; + struct circ_buf *xmit = &port->state->xmit; + + /* Handle xon/xoff */ + if (port->x_char) { + /* Pick next descriptor and fill from buffer */ + bdp = qe_port->tx_cur; + + p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); + + *p++ = port->x_char; + iowrite16be(1, &bdp->length); + qe_setbits_be16(&bdp->status, BD_SC_READY); + /* Get next BD. */ + if (ioread16be(&bdp->status) & BD_SC_WRAP) + bdp = qe_port->tx_bd_base; + else + bdp++; + qe_port->tx_cur = bdp; + + port->icount.tx++; + port->x_char = 0; + return 1; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + qe_uart_stop_tx(port); + return 0; + } + + /* Pick next descriptor and fill from buffer */ + bdp = qe_port->tx_cur; + + while (!(ioread16be(&bdp->status) & BD_SC_READY) && + (xmit->tail != xmit->head)) { + count = 0; + p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); + while (count < qe_port->tx_fifosize) { + *p++ = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + count++; + if (xmit->head == xmit->tail) + break; + } + + iowrite16be(count, &bdp->length); + qe_setbits_be16(&bdp->status, BD_SC_READY); + + /* Get next BD. 
*/ + if (ioread16be(&bdp->status) & BD_SC_WRAP) + bdp = qe_port->tx_bd_base; + else + bdp++; + } + qe_port->tx_cur = bdp; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) { + /* The kernel buffer is empty, so turn off TX interrupts. We + don't need to be told when the QE is finished transmitting + the data. */ + qe_uart_stop_tx(port); + return 0; + } + + return 1; +} + +/* + * Start transmitting data + * + * This function will start transmitting any available data, if the port + * isn't already transmitting data. + */ +static void qe_uart_start_tx(struct uart_port *port) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + + /* If we currently are transmitting, then just return */ + if (ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX) + return; + + /* Otherwise, pump the port and start transmission */ + if (qe_uart_tx_pump(qe_port)) + qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX); +} + +/* + * Stop receiving data + */ +static void qe_uart_stop_rx(struct uart_port *port) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + + qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX); +} + +/* Start or stop sending break signal + * + * This function controls the sending of a break signal. If break_state=1, + * then we start sending a break signal. If break_state=0, then we stop + * sending the break signal. + */ +static void qe_uart_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + + if (break_state) + ucc_slow_stop_tx(qe_port->us_private); + else + ucc_slow_restart_tx(qe_port->us_private); +} + +/* ISR helper function for receiving characters. + * + * This function is called by the ISR to handle received characters + */ +static void qe_uart_int_rx(struct uart_qe_port *qe_port) +{ + int i; + unsigned char ch, *cp; + struct uart_port *port = &qe_port->port; + struct tty_port *tport = &port->state->port; + struct qe_bd *bdp; + u16 status; + unsigned int flg; + + /* Just loop through the closed BDs and copy the characters into + * the buffer. + */ + bdp = qe_port->rx_cur; + while (1) { + status = ioread16be(&bdp->status); + + /* If this one is empty, then we assume we've read them all */ + if (status & BD_SC_EMPTY) + break; + + /* get number of characters, and check space in RX buffer */ + i = ioread16be(&bdp->length); + + /* If we don't have enough room in RX buffer for the entire BD, + * then we try later, which will be the next RX interrupt. + */ + if (tty_buffer_request_room(tport, i) < i) { + dev_dbg(port->dev, "ucc-uart: no room in RX buffer\n"); + return; + } + + /* get pointer */ + cp = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); + + /* loop through the buffer */ + while (i-- > 0) { + ch = *cp++; + port->icount.rx++; + flg = TTY_NORMAL; + + if (!i && status & + (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV)) + goto handle_error; + if (uart_handle_sysrq_char(port, ch)) + continue; + +error_return: + tty_insert_flip_char(tport, ch, flg); + + } + + /* This BD is ready to be used again. Clear status.
get next */ + qe_clrsetbits_be16(&bdp->status, + BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID, + BD_SC_EMPTY); + if (ioread16be(&bdp->status) & BD_SC_WRAP) + bdp = qe_port->rx_bd_base; + else + bdp++; + + } + + /* Write back buffer pointer */ + qe_port->rx_cur = bdp; + + /* Activate BH processing */ + tty_flip_buffer_push(tport); + + return; + + /* Error processing */ + +handle_error: + /* Statistics */ + if (status & BD_SC_BR) + port->icount.brk++; + if (status & BD_SC_PR) + port->icount.parity++; + if (status & BD_SC_FR) + port->icount.frame++; + if (status & BD_SC_OV) + port->icount.overrun++; + + /* Mask out ignored conditions */ + status &= port->read_status_mask; + + /* Handle the remaining ones */ + if (status & BD_SC_BR) + flg = TTY_BREAK; + else if (status & BD_SC_PR) + flg = TTY_PARITY; + else if (status & BD_SC_FR) + flg = TTY_FRAME; + + /* Overrun does not affect the current character ! */ + if (status & BD_SC_OV) + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + port->sysrq = 0; + goto error_return; +} + +/* Interrupt handler + * + * This interrupt handler is called after a BD is processed. + */ +static irqreturn_t qe_uart_int(int irq, void *data) +{ + struct uart_qe_port *qe_port = (struct uart_qe_port *) data; + struct ucc_slow __iomem *uccp = qe_port->uccp; + u16 events; + + /* Clear the interrupts */ + events = ioread16be(&uccp->ucce); + iowrite16be(events, &uccp->ucce); + + if (events & UCC_UART_UCCE_BRKE) + uart_handle_break(&qe_port->port); + + if (events & UCC_UART_UCCE_RX) + qe_uart_int_rx(qe_port); + + if (events & UCC_UART_UCCE_TX) + qe_uart_tx_pump(qe_port); + + return events ? IRQ_HANDLED : IRQ_NONE; +} + +/* Initialize buffer descriptors + * + * This function initializes all of the RX and TX buffer descriptors. + */ +static void qe_uart_initbd(struct uart_qe_port *qe_port) +{ + int i; + void *bd_virt; + struct qe_bd *bdp; + + /* Set the physical address of the host memory buffers in the buffer + * descriptors, and the virtual address for us to work with. + */ + bd_virt = qe_port->bd_virt; + bdp = qe_port->rx_bd_base; + qe_port->rx_cur = qe_port->rx_bd_base; + for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) { + iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); + iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + iowrite16be(0, &bdp->length); + bd_virt += qe_port->rx_fifosize; + bdp++; + } + + /* */ + iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); + iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + iowrite16be(0, &bdp->length); + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + bd_virt = qe_port->bd_virt + + L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize); + qe_port->tx_cur = qe_port->tx_bd_base; + bdp = qe_port->tx_bd_base; + for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) { + iowrite16be(BD_SC_INTRPT, &bdp->status); + iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + iowrite16be(0, &bdp->length); + bd_virt += qe_port->tx_fifosize; + bdp++; + } + + /* Loopback requires the preamble bit to be set on the first TX BD */ +#ifdef LOOPBACK + qe_setbits_be16(&qe_port->tx_cur->status, BD_SC_P); +#endif + + iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status); + iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + iowrite16be(0, &bdp->length); +} + +/* + * Initialize a UCC for UART. + * + * This function configures a given UCC to be used as a UART device. 
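Illustrative aside, not part of the patch: qe_uart_initbd() above points each descriptor at its slice of one contiguous buffer and sets WRAP only on the last descriptor so the hardware loops back to the start of the ring. A self-contained sketch of that set-up, with demo_* names, sizes, and bit values invented for the example:

/* Sketch only: initialize a descriptor ring over one contiguous buffer. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_BD_EMPTY 0x8000
#define DEMO_BD_INT   0x1000
#define DEMO_BD_WRAP  0x2000
#define DEMO_NUM_BD   4
#define DEMO_BUF_SIZE 32

struct demo_bd {
	uint16_t status;
	uint16_t length;
	uint32_t buf;		/* offset of this descriptor's buffer */
};

static void demo_init_ring(struct demo_bd *ring, int n, uint32_t buf_base)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i].status = DEMO_BD_EMPTY | DEMO_BD_INT;
		ring[i].length = 0;
		ring[i].buf = buf_base + i * DEMO_BUF_SIZE;
	}
	ring[n - 1].status |= DEMO_BD_WRAP;	/* close the ring */
}

int main(void)
{
	struct demo_bd ring[DEMO_NUM_BD];
	int i;

	demo_init_ring(ring, DEMO_NUM_BD, 0x1000);
	for (i = 0; i < DEMO_NUM_BD; i++)
		printf("bd%d: status=0x%04x buf=0x%x\n", i,
		       (unsigned int)ring[i].status, (unsigned int)ring[i].buf);
	return 0;
}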
Basic + * UCC initialization is handled in qe_uart_request_port(). This function + * does all the UART-specific stuff. + */ +static void qe_uart_init_ucc(struct uart_qe_port *qe_port) +{ + u32 cecr_subblock; + struct ucc_slow __iomem *uccp = qe_port->uccp; + struct ucc_uart_pram *uccup = qe_port->uccup; + + unsigned int i; + + /* First, disable TX and RX in the UCC */ + ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX); + + /* Program the UCC UART parameter RAM */ + iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr); + iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr); + iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr); + iowrite16be(0x10, &uccup->maxidl); + iowrite16be(1, &uccup->brkcr); + iowrite16be(0, &uccup->parec); + iowrite16be(0, &uccup->frmec); + iowrite16be(0, &uccup->nosec); + iowrite16be(0, &uccup->brkec); + iowrite16be(0, &uccup->uaddr[0]); + iowrite16be(0, &uccup->uaddr[1]); + iowrite16be(0, &uccup->toseq); + for (i = 0; i < 8; i++) + iowrite16be(0xC000, &uccup->cchars[i]); + iowrite16be(0xc0ff, &uccup->rccm); + + /* Configure the GUMR registers for UART */ + if (soft_uart) { + /* Soft-UART requires a 1X multiplier for TX */ + qe_clrsetbits_be32(&uccp->gumr_l, + UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK, + UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 | UCC_SLOW_GUMR_L_RDCR_16); + + qe_clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW, + UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX); + } else { + qe_clrsetbits_be32(&uccp->gumr_l, + UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK, + UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16); + + qe_clrsetbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX, + UCC_SLOW_GUMR_H_RFW); + } + +#ifdef LOOPBACK + qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK, + UCC_SLOW_GUMR_L_DIAG_LOOP); + qe_clrsetbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN, + UCC_SLOW_GUMR_H_CDS); +#endif + + /* Disable rx interrupts and clear all pending events. */ + iowrite16be(0, &uccp->uccm); + iowrite16be(0xffff, &uccp->ucce); + iowrite16be(0x7e7e, &uccp->udsr); + + /* Initialize UPSMR */ + iowrite16be(0, &uccp->upsmr); + + if (soft_uart) { + iowrite16be(0x30, &uccup->supsmr); + iowrite16be(0, &uccup->res92); + iowrite32be(0, &uccup->rx_state); + iowrite32be(0, &uccup->rx_cnt); + iowrite8(0, &uccup->rx_bitmark); + iowrite8(10, &uccup->rx_length); + iowrite32be(0x4000, &uccup->dump_ptr); + iowrite8(0, &uccup->rx_temp_dlst_qe); + iowrite32be(0, &uccup->rx_frame_rem); + iowrite8(0, &uccup->rx_frame_rem_size); + /* Soft-UART requires TX to be 1X */ + iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1, + &uccup->tx_mode); + iowrite16be(0, &uccup->tx_state); + iowrite8(0, &uccup->resD4); + iowrite16be(0, &uccup->resD5); + + /* Set UART mode. + * Enable receive and transmit. + */ + + /* From the microcode errata: + * 1.GUMR_L register, set mode=0010 (QMC). + * 2.Set GUMR_H[17] bit. (UART/AHDLC mode). + * 3.Set GUMR_H[19:20] (Transparent mode) + * 4.Clear GUMR_H[26] (RFW) + * ... 
+ * 6.Receiver must use 16x over sampling + */ + qe_clrsetbits_be32(&uccp->gumr_l, + UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK, + UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16); + + qe_clrsetbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN, + UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL); + +#ifdef LOOPBACK + qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK, + UCC_SLOW_GUMR_L_DIAG_LOOP); + qe_clrbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_CDS); +#endif + + cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num); + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + } else { + cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num); + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + QE_CR_PROTOCOL_UART, 0); + } +} + +/* + * Initialize the port. + */ +static int qe_uart_startup(struct uart_port *port) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + int ret; + + /* + * If we're using Soft-UART mode, then we need to make sure the + * firmware has been uploaded first. + */ + if (soft_uart && !firmware_loaded) { + dev_err(port->dev, "Soft-UART firmware not uploaded\n"); + return -ENODEV; + } + + qe_uart_initbd(qe_port); + qe_uart_init_ucc(qe_port); + + /* Install interrupt handler. */ + ret = request_irq(port->irq, qe_uart_int, IRQF_SHARED, "ucc-uart", + qe_port); + if (ret) { + dev_err(port->dev, "could not claim IRQ %u\n", port->irq); + return ret; + } + + /* Startup rx-int */ + qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX); + ucc_slow_enable(qe_port->us_private, COMM_DIR_RX_AND_TX); + + return 0; +} + +/* + * Shutdown the port. + */ +static void qe_uart_shutdown(struct uart_port *port) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + struct ucc_slow __iomem *uccp = qe_port->uccp; + unsigned int timeout = 20; + + /* Disable RX and TX */ + + /* Wait for all the BDs marked sent */ + while (!qe_uart_tx_empty(port)) { + if (!--timeout) { + dev_warn(port->dev, "shutdown timeout\n"); + break; + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(2); + } + + if (qe_port->wait_closing) { + /* Wait a bit longer */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(qe_port->wait_closing); + } + + /* Stop uarts */ + ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX); + qe_clrbits_be16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX); + + /* Shut them really down and reinit buffer descriptors */ + ucc_slow_graceful_stop_tx(qe_port->us_private); + qe_uart_initbd(qe_port); + + free_irq(port->irq, qe_port); +} + +/* + * Set the serial port parameters. 
+ */ +static void qe_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + struct ucc_slow __iomem *uccp = qe_port->uccp; + unsigned int baud; + unsigned long flags; + u16 upsmr = ioread16be(&uccp->upsmr); + struct ucc_uart_pram __iomem *uccup = qe_port->uccup; + u16 supsmr = ioread16be(&uccup->supsmr); + + /* byte size */ + upsmr &= UCC_UART_UPSMR_CL_MASK; + supsmr &= UCC_UART_SUPSMR_CL_MASK; + + switch (termios->c_cflag & CSIZE) { + case CS5: + upsmr |= UCC_UART_UPSMR_CL_5; + supsmr |= UCC_UART_SUPSMR_CL_5; + break; + case CS6: + upsmr |= UCC_UART_UPSMR_CL_6; + supsmr |= UCC_UART_SUPSMR_CL_6; + break; + case CS7: + upsmr |= UCC_UART_UPSMR_CL_7; + supsmr |= UCC_UART_SUPSMR_CL_7; + break; + default: /* case CS8 */ + upsmr |= UCC_UART_UPSMR_CL_8; + supsmr |= UCC_UART_SUPSMR_CL_8; + break; + } + + /* If CSTOPB is set, we want two stop bits */ + if (termios->c_cflag & CSTOPB) { + upsmr |= UCC_UART_UPSMR_SL; + supsmr |= UCC_UART_SUPSMR_SL; + } + + if (termios->c_cflag & PARENB) { + upsmr |= UCC_UART_UPSMR_PEN; + supsmr |= UCC_UART_SUPSMR_PEN; + + if (!(termios->c_cflag & PARODD)) { + upsmr &= ~(UCC_UART_UPSMR_RPM_MASK | + UCC_UART_UPSMR_TPM_MASK); + upsmr |= UCC_UART_UPSMR_RPM_EVEN | + UCC_UART_UPSMR_TPM_EVEN; + supsmr &= ~(UCC_UART_SUPSMR_RPM_MASK | + UCC_UART_SUPSMR_TPM_MASK); + supsmr |= UCC_UART_SUPSMR_RPM_EVEN | + UCC_UART_SUPSMR_TPM_EVEN; + } + } + + /* + * Set up parity check flag + */ + port->read_status_mask = BD_SC_EMPTY | BD_SC_OV; + if (termios->c_iflag & INPCK) + port->read_status_mask |= BD_SC_FR | BD_SC_PR; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= BD_SC_BR; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_PR | BD_SC_FR; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= BD_SC_BR; + /* + * If we're ignore parity and break indicators, ignore + * overruns too. (For real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_OV; + } + /* + * !!! ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + port->read_status_mask &= ~BD_SC_EMPTY; + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + + /* Do we really need a spinlock here? */ + spin_lock_irqsave(&port->lock, flags); + + /* Update the per-port timeout. */ + uart_update_timeout(port, termios->c_cflag, baud); + + iowrite16be(upsmr, &uccp->upsmr); + if (soft_uart) { + iowrite16be(supsmr, &uccup->supsmr); + iowrite8(tty_get_frame_size(termios->c_cflag), &uccup->rx_length); + + /* Soft-UART requires a 1X multiplier for TX */ + qe_setbrg(qe_port->us_info.rx_clock, baud, 16); + qe_setbrg(qe_port->us_info.tx_clock, baud, 1); + } else { + qe_setbrg(qe_port->us_info.rx_clock, baud, 16); + qe_setbrg(qe_port->us_info.tx_clock, baud, 16); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* + * Return a pointer to a string that describes what kind of port this is. + */ +static const char *qe_uart_type(struct uart_port *port) +{ + return "QE"; +} + +/* + * Allocate any memory and I/O resources required by the port. 
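Illustrative aside, not part of the patch: qe_uart_set_termios() above folds the termios c_cflag word (character size, stop bits, parity) into the UPSMR/SUPSMR mode registers. A userspace sketch of that kind of mapping, using POSIX termios flags but with register bit values invented for the example:

/* Sketch only: translate c_cflag into a hypothetical mode-register word. */
#include <stdio.h>
#include <termios.h>

#define DEMO_MR_CL_5 0x0000
#define DEMO_MR_CL_6 0x0010
#define DEMO_MR_CL_7 0x0020
#define DEMO_MR_CL_8 0x0030
#define DEMO_MR_SL   0x8000	/* two stop bits */
#define DEMO_MR_PEN  0x1000	/* parity enable */

static unsigned int demo_cflag_to_mr(tcflag_t cflag)
{
	unsigned int mr = 0;

	switch (cflag & CSIZE) {
	case CS5: mr |= DEMO_MR_CL_5; break;
	case CS6: mr |= DEMO_MR_CL_6; break;
	case CS7: mr |= DEMO_MR_CL_7; break;
	default:  mr |= DEMO_MR_CL_8; break;	/* CS8 */
	}
	if (cflag & CSTOPB)
		mr |= DEMO_MR_SL;
	if (cflag & PARENB)
		mr |= DEMO_MR_PEN;
	return mr;
}

int main(void)
{
	printf("8N1 -> 0x%04x\n", demo_cflag_to_mr(CS8));
	printf("7E2 -> 0x%04x\n", demo_cflag_to_mr(CS7 | PARENB | CSTOPB));
	return 0;
}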
+ */ +static int qe_uart_request_port(struct uart_port *port) +{ + int ret; + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + struct ucc_slow_info *us_info = &qe_port->us_info; + struct ucc_slow_private *uccs; + unsigned int rx_size, tx_size; + void *bd_virt; + dma_addr_t bd_dma_addr = 0; + + ret = ucc_slow_init(us_info, &uccs); + if (ret) { + dev_err(port->dev, "could not initialize UCC%u\n", + qe_port->ucc_num); + return ret; + } + + qe_port->us_private = uccs; + qe_port->uccp = uccs->us_regs; + qe_port->uccup = (struct ucc_uart_pram *) uccs->us_pram; + qe_port->rx_bd_base = uccs->rx_bd; + qe_port->tx_bd_base = uccs->tx_bd; + + /* + * Allocate the transmit and receive data buffers. + */ + + rx_size = L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize); + tx_size = L1_CACHE_ALIGN(qe_port->tx_nrfifos * qe_port->tx_fifosize); + + bd_virt = dma_alloc_coherent(port->dev, rx_size + tx_size, &bd_dma_addr, + GFP_KERNEL); + if (!bd_virt) { + dev_err(port->dev, "could not allocate buffer descriptors\n"); + return -ENOMEM; + } + + qe_port->bd_virt = bd_virt; + qe_port->bd_dma_addr = bd_dma_addr; + qe_port->bd_size = rx_size + tx_size; + + qe_port->rx_buf = bd_virt; + qe_port->tx_buf = qe_port->rx_buf + rx_size; + + return 0; +} + +/* + * Configure the port. + * + * We say we're a CPM-type port because that's mostly true. Once the device + * is configured, this driver operates almost identically to the CPM serial + * driver. + */ +static void qe_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_CPM; + qe_uart_request_port(port); + } +} + +/* + * Release any memory and I/O resources that were allocated in + * qe_uart_request_port(). + */ +static void qe_uart_release_port(struct uart_port *port) +{ + struct uart_qe_port *qe_port = + container_of(port, struct uart_qe_port, port); + struct ucc_slow_private *uccs = qe_port->us_private; + + dma_free_coherent(port->dev, qe_port->bd_size, qe_port->bd_virt, + qe_port->bd_dma_addr); + + ucc_slow_free(uccs); +} + +/* + * Verify that the data in serial_struct is suitable for this device. + */ +static int qe_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM) + return -EINVAL; + + if (ser->irq < 0 || ser->irq >= nr_irqs) + return -EINVAL; + + if (ser->baud_base < 9600) + return -EINVAL; + + return 0; +} +/* UART operations + * + * Details on these functions can be found in Documentation/driver-api/serial/driver.rst + */ +static const struct uart_ops qe_uart_pops = { + .tx_empty = qe_uart_tx_empty, + .set_mctrl = qe_uart_set_mctrl, + .get_mctrl = qe_uart_get_mctrl, + .stop_tx = qe_uart_stop_tx, + .start_tx = qe_uart_start_tx, + .stop_rx = qe_uart_stop_rx, + .break_ctl = qe_uart_break_ctl, + .startup = qe_uart_startup, + .shutdown = qe_uart_shutdown, + .set_termios = qe_uart_set_termios, + .type = qe_uart_type, + .release_port = qe_uart_release_port, + .request_port = qe_uart_request_port, + .config_port = qe_uart_config_port, + .verify_port = qe_uart_verify_port, +}; + + +#ifdef CONFIG_PPC32 +/* + * Obtain the SOC model number and revision level + * + * This function parses the device tree to obtain the SOC model. It then + * reads the SVR register to the revision. + * + * The device tree stores the SOC model two different ways. + * + * The new way is: + * + * cpu@0 { + * compatible = "PowerPC,8323"; + * device_type = "cpu"; + * ... 
+ * + * + * The old way is: + * PowerPC,8323@0 { + * device_type = "cpu"; + * ... + * + * This code first checks the new way, and then the old way. + */ +static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l) +{ + struct device_node *np; + const char *soc_string; + unsigned int svr; + unsigned int soc; + + /* Find the CPU node */ + np = of_find_node_by_type(NULL, "cpu"); + if (!np) + return 0; + /* Find the compatible property */ + soc_string = of_get_property(np, "compatible", NULL); + if (!soc_string) + /* No compatible property, so try the name. */ + soc_string = np->name; + + of_node_put(np); + + /* Extract the SOC number from the "PowerPC," string */ + if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc) + return 0; + + /* Get the revision from the SVR */ + svr = mfspr(SPRN_SVR); + *rev_h = (svr >> 4) & 0xf; + *rev_l = svr & 0xf; + + return soc; +} + +/* + * requst_firmware_nowait() callback function + * + * This function is called by the kernel when a firmware is made available, + * or if it times out waiting for the firmware. + */ +static void uart_firmware_cont(const struct firmware *fw, void *context) +{ + struct qe_firmware *firmware; + struct device *dev = context; + int ret; + + if (!fw) { + dev_err(dev, "firmware not found\n"); + return; + } + + firmware = (struct qe_firmware *) fw->data; + + if (firmware->header.length != fw->size) { + dev_err(dev, "invalid firmware\n"); + goto out; + } + + ret = qe_upload_firmware(firmware); + if (ret) { + dev_err(dev, "could not load firmware\n"); + goto out; + } + + firmware_loaded = 1; + out: + release_firmware(fw); +} + +static int soft_uart_init(struct platform_device *ofdev) +{ + struct device_node *np = ofdev->dev.of_node; + struct qe_firmware_info *qe_fw_info; + int ret; + + if (of_find_property(np, "soft-uart", NULL)) { + dev_dbg(&ofdev->dev, "using Soft-UART mode\n"); + soft_uart = 1; + } else { + return 0; + } + + qe_fw_info = qe_get_firmware_info(); + + /* Check if the firmware has been uploaded. */ + if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) { + firmware_loaded = 1; + } else { + char filename[32]; + unsigned int soc; + unsigned int rev_h; + unsigned int rev_l; + + soc = soc_info(&rev_h, &rev_l); + if (!soc) { + dev_err(&ofdev->dev, "unknown CPU model\n"); + return -ENXIO; + } + sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin", + soc, rev_h, rev_l); + + dev_info(&ofdev->dev, "waiting for firmware %s\n", + filename); + + /* + * We call request_firmware_nowait instead of + * request_firmware so that the driver can load and + * initialize the ports without holding up the rest of + * the kernel. If hotplug support is enabled in the + * kernel, then we use it. 
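Illustrative aside, not part of the patch: soc_info() above pulls the SOC number out of a "PowerPC,<n>" compatible string and the revision nibbles out of the SVR, and the probe path combines them into the Soft-UART firmware filename. The same parsing as a runnable userspace sketch, with a made-up SVR value:

/* Sketch only: derive the firmware filename from compatible string and SVR. */
#include <stdio.h>

int main(void)
{
	const char *compatible = "PowerPC,8323";
	unsigned int svr = 0x80620021;	/* pretend SVR readout */
	unsigned int soc, rev_h, rev_l;

	if (sscanf(compatible, "PowerPC,%u", &soc) != 1 || !soc) {
		fprintf(stderr, "not a PowerPC,<n> compatible string\n");
		return 1;
	}

	rev_h = (svr >> 4) & 0xf;	/* major revision nibble */
	rev_l = svr & 0xf;		/* minor revision nibble */

	printf("fsl_qe_ucode_uart_%u_%u%u.bin\n", soc, rev_h, rev_l);
	return 0;
}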
+ */ + ret = request_firmware_nowait(THIS_MODULE, + FW_ACTION_UEVENT, filename, &ofdev->dev, + GFP_KERNEL, &ofdev->dev, uart_firmware_cont); + if (ret) { + dev_err(&ofdev->dev, + "could not load firmware %s\n", + filename); + return ret; + } + } + return 0; +} + +#else /* !CONFIG_PPC32 */ + +static int soft_uart_init(struct platform_device *ofdev) +{ + return 0; +} + +#endif + + +static int ucc_uart_probe(struct platform_device *ofdev) +{ + struct device_node *np = ofdev->dev.of_node; + const char *sprop; /* String OF properties */ + struct uart_qe_port *qe_port = NULL; + struct resource res; + u32 val; + int ret; + + /* + * Determine if we need Soft-UART mode + */ + ret = soft_uart_init(ofdev); + if (ret) + return ret; + + qe_port = kzalloc(sizeof(struct uart_qe_port), GFP_KERNEL); + if (!qe_port) { + dev_err(&ofdev->dev, "can't allocate QE port structure\n"); + return -ENOMEM; + } + + /* Search for IRQ and mapbase */ + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(&ofdev->dev, "missing 'reg' property in device tree\n"); + goto out_free; + } + if (!res.start) { + dev_err(&ofdev->dev, "invalid 'reg' property in device tree\n"); + ret = -EINVAL; + goto out_free; + } + qe_port->port.mapbase = res.start; + + /* Get the UCC number (device ID) */ + /* UCCs are numbered 1-7 */ + if (of_property_read_u32(np, "cell-index", &val)) { + if (of_property_read_u32(np, "device-id", &val)) { + dev_err(&ofdev->dev, "UCC is unspecified in device tree\n"); + ret = -EINVAL; + goto out_free; + } + } + + if (val < 1 || val > UCC_MAX_NUM) { + dev_err(&ofdev->dev, "no support for UCC%u\n", val); + ret = -ENODEV; + goto out_free; + } + qe_port->ucc_num = val - 1; + + /* + * In the future, we should not require the BRG to be specified in the + * device tree. If no clock-source is specified, then just pick a BRG + * to use. This requires a new QE library function that manages BRG + * assignments. 
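+ *
+ * (Illustrative only: with the current binding the device tree must
+ * name a BRG explicitly, e.g. rx-clock-name = "brg1" and
+ * tx-clock-name = "brg1", which qe_clock_source() below maps to
+ * QE_BRG1.)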
+ */ + + sprop = of_get_property(np, "rx-clock-name", NULL); + if (!sprop) { + dev_err(&ofdev->dev, "missing rx-clock-name in device tree\n"); + ret = -ENODEV; + goto out_free; + } + + qe_port->us_info.rx_clock = qe_clock_source(sprop); + if ((qe_port->us_info.rx_clock < QE_BRG1) || + (qe_port->us_info.rx_clock > QE_BRG16)) { + dev_err(&ofdev->dev, "rx-clock-name must be a BRG for UART\n"); + ret = -ENODEV; + goto out_free; + } + +#ifdef LOOPBACK + /* In internal loopback mode, TX and RX must use the same clock */ + qe_port->us_info.tx_clock = qe_port->us_info.rx_clock; +#else + sprop = of_get_property(np, "tx-clock-name", NULL); + if (!sprop) { + dev_err(&ofdev->dev, "missing tx-clock-name in device tree\n"); + ret = -ENODEV; + goto out_free; + } + qe_port->us_info.tx_clock = qe_clock_source(sprop); +#endif + if ((qe_port->us_info.tx_clock < QE_BRG1) || + (qe_port->us_info.tx_clock > QE_BRG16)) { + dev_err(&ofdev->dev, "tx-clock-name must be a BRG for UART\n"); + ret = -ENODEV; + goto out_free; + } + + /* Get the port number, numbered 0-3 */ + if (of_property_read_u32(np, "port-number", &val)) { + dev_err(&ofdev->dev, "missing port-number in device tree\n"); + ret = -EINVAL; + goto out_free; + } + qe_port->port.line = val; + if (qe_port->port.line >= UCC_MAX_UART) { + dev_err(&ofdev->dev, "port-number must be 0-%u\n", + UCC_MAX_UART - 1); + ret = -EINVAL; + goto out_free; + } + + qe_port->port.irq = irq_of_parse_and_map(np, 0); + if (qe_port->port.irq == 0) { + dev_err(&ofdev->dev, "could not map IRQ for UCC%u\n", + qe_port->ucc_num + 1); + ret = -EINVAL; + goto out_free; + } + + /* + * Newer device trees have an "fsl,qe" compatible property for the QE + * node, but we still need to support older device trees. + */ + np = of_find_compatible_node(NULL, NULL, "fsl,qe"); + if (!np) { + np = of_find_node_by_type(NULL, "qe"); + if (!np) { + dev_err(&ofdev->dev, "could not find 'qe' node\n"); + ret = -EINVAL; + goto out_free; + } + } + + if (of_property_read_u32(np, "brg-frequency", &val)) { + dev_err(&ofdev->dev, + "missing brg-frequency in device tree\n"); + ret = -EINVAL; + goto out_np; + } + + if (val) + qe_port->port.uartclk = val; + else { + if (!IS_ENABLED(CONFIG_PPC32)) { + dev_err(&ofdev->dev, + "invalid brg-frequency in device tree\n"); + ret = -EINVAL; + goto out_np; + } + + /* + * Older versions of U-Boot do not initialize the brg-frequency + * property, so in this case we assume the BRG frequency is + * half the QE bus frequency. 
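+ * (For example, a device tree reporting a 400 MHz QE bus-frequency
+ * would result in a 200 MHz uartclk here.)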
+ */ + if (of_property_read_u32(np, "bus-frequency", &val)) { + dev_err(&ofdev->dev, + "missing QE bus-frequency in device tree\n"); + ret = -EINVAL; + goto out_np; + } + if (val) + qe_port->port.uartclk = val / 2; + else { + dev_err(&ofdev->dev, + "invalid QE bus-frequency in device tree\n"); + ret = -EINVAL; + goto out_np; + } + } + + spin_lock_init(&qe_port->port.lock); + qe_port->np = np; + qe_port->port.dev = &ofdev->dev; + qe_port->port.ops = &qe_uart_pops; + qe_port->port.iotype = UPIO_MEM; + + qe_port->tx_nrfifos = TX_NUM_FIFO; + qe_port->tx_fifosize = TX_BUF_SIZE; + qe_port->rx_nrfifos = RX_NUM_FIFO; + qe_port->rx_fifosize = RX_BUF_SIZE; + + qe_port->wait_closing = UCC_WAIT_CLOSING; + qe_port->port.fifosize = 512; + qe_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; + + qe_port->us_info.ucc_num = qe_port->ucc_num; + qe_port->us_info.regs = (phys_addr_t) res.start; + qe_port->us_info.irq = qe_port->port.irq; + + qe_port->us_info.rx_bd_ring_len = qe_port->rx_nrfifos; + qe_port->us_info.tx_bd_ring_len = qe_port->tx_nrfifos; + + /* Make sure ucc_slow_init() initializes both TX and RX */ + qe_port->us_info.init_tx = 1; + qe_port->us_info.init_rx = 1; + + /* Add the port to the uart sub-system. This will cause + * qe_uart_config_port() to be called, so the us_info structure must + * be initialized. + */ + ret = uart_add_one_port(&ucc_uart_driver, &qe_port->port); + if (ret) { + dev_err(&ofdev->dev, "could not add /dev/ttyQE%u\n", + qe_port->port.line); + goto out_np; + } + + platform_set_drvdata(ofdev, qe_port); + + dev_info(&ofdev->dev, "UCC%u assigned to /dev/ttyQE%u\n", + qe_port->ucc_num + 1, qe_port->port.line); + + /* Display the mknod command for this device */ + dev_dbg(&ofdev->dev, "mknod command is 'mknod /dev/ttyQE%u c %u %u'\n", + qe_port->port.line, SERIAL_QE_MAJOR, + SERIAL_QE_MINOR + qe_port->port.line); + + return 0; +out_np: + of_node_put(np); +out_free: + kfree(qe_port); + return ret; +} + +static int ucc_uart_remove(struct platform_device *ofdev) +{ + struct uart_qe_port *qe_port = platform_get_drvdata(ofdev); + + dev_info(&ofdev->dev, "removing /dev/ttyQE%u\n", qe_port->port.line); + + uart_remove_one_port(&ucc_uart_driver, &qe_port->port); + + kfree(qe_port); + + return 0; +} + +static const struct of_device_id ucc_uart_match[] = { + { + .type = "serial", + .compatible = "ucc_uart", + }, + { + .compatible = "fsl,t1040-ucc-uart", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, ucc_uart_match); + +static struct platform_driver ucc_uart_of_driver = { + .driver = { + .name = "ucc_uart", + .of_match_table = ucc_uart_match, + }, + .probe = ucc_uart_probe, + .remove = ucc_uart_remove, +}; + +static int __init ucc_uart_init(void) +{ + int ret; + + printk(KERN_INFO "Freescale QUICC Engine UART device driver\n"); +#ifdef LOOPBACK + printk(KERN_INFO "ucc-uart: Using loopback mode\n"); +#endif + + ret = uart_register_driver(&ucc_uart_driver); + if (ret) { + printk(KERN_ERR "ucc-uart: could not register UART driver\n"); + return ret; + } + + ret = platform_driver_register(&ucc_uart_of_driver); + if (ret) { + printk(KERN_ERR + "ucc-uart: could not register platform driver\n"); + uart_unregister_driver(&ucc_uart_driver); + } + + return ret; +} + +static void __exit ucc_uart_exit(void) +{ + printk(KERN_INFO + "Freescale QUICC Engine UART device driver unloading\n"); + + platform_driver_unregister(&ucc_uart_of_driver); + uart_unregister_driver(&ucc_uart_driver); +} + +module_init(ucc_uart_init); +module_exit(ucc_uart_exit); + +MODULE_DESCRIPTION("Freescale QUICC Engine (QE) UART"); 
+MODULE_AUTHOR("Timur Tabi "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_QE_MAJOR); + diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c new file mode 100644 index 000000000..10fbdb099 --- /dev/null +++ b/drivers/tty/serial/vt8500_serial.c @@ -0,0 +1,745 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2010 Alexey Charkov + * + * Based on msm_serial.c, which is: + * Copyright (C) 2007 Google, Inc. + * Author: Robert Love + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * UART Register offsets + */ + +#define VT8500_URTDR 0x0000 /* Transmit data */ +#define VT8500_URRDR 0x0004 /* Receive data */ +#define VT8500_URDIV 0x0008 /* Clock/Baud rate divisor */ +#define VT8500_URLCR 0x000C /* Line control */ +#define VT8500_URICR 0x0010 /* IrDA control */ +#define VT8500_URIER 0x0014 /* Interrupt enable */ +#define VT8500_URISR 0x0018 /* Interrupt status */ +#define VT8500_URUSR 0x001c /* UART status */ +#define VT8500_URFCR 0x0020 /* FIFO control */ +#define VT8500_URFIDX 0x0024 /* FIFO index */ +#define VT8500_URBKR 0x0028 /* Break signal count */ +#define VT8500_URTOD 0x002c /* Time out divisor */ +#define VT8500_TXFIFO 0x1000 /* Transmit FIFO (16x8) */ +#define VT8500_RXFIFO 0x1020 /* Receive FIFO (16x10) */ + +/* + * Interrupt enable and status bits + */ + +#define TXDE (1 << 0) /* Tx Data empty */ +#define RXDF (1 << 1) /* Rx Data full */ +#define TXFAE (1 << 2) /* Tx FIFO almost empty */ +#define TXFE (1 << 3) /* Tx FIFO empty */ +#define RXFAF (1 << 4) /* Rx FIFO almost full */ +#define RXFF (1 << 5) /* Rx FIFO full */ +#define TXUDR (1 << 6) /* Tx underrun */ +#define RXOVER (1 << 7) /* Rx overrun */ +#define PER (1 << 8) /* Parity error */ +#define FER (1 << 9) /* Frame error */ +#define TCTS (1 << 10) /* Toggle of CTS */ +#define RXTOUT (1 << 11) /* Rx timeout */ +#define BKDONE (1 << 12) /* Break signal done */ +#define ERR (1 << 13) /* AHB error response */ + +#define RX_FIFO_INTS (RXFAF | RXFF | RXOVER | PER | FER | RXTOUT) +#define TX_FIFO_INTS (TXFAE | TXFE | TXUDR) + +/* + * Line control bits + */ + +#define VT8500_TXEN (1 << 0) /* Enable transmit logic */ +#define VT8500_RXEN (1 << 1) /* Enable receive logic */ +#define VT8500_CS8 (1 << 2) /* 8-bit data length (vs. 7-bit) */ +#define VT8500_CSTOPB (1 << 3) /* 2 stop bits (vs. 1) */ +#define VT8500_PARENB (1 << 4) /* Enable parity */ +#define VT8500_PARODD (1 << 5) /* Odd parity (vs. 
even) */ +#define VT8500_RTS (1 << 6) /* Ready to send */ +#define VT8500_LOOPBK (1 << 7) /* Enable internal loopback */ +#define VT8500_DMA (1 << 8) /* Enable DMA mode (needs FIFO) */ +#define VT8500_BREAK (1 << 9) /* Initiate break signal */ +#define VT8500_PSLVERR (1 << 10) /* APB error upon empty RX FIFO read */ +#define VT8500_SWRTSCTS (1 << 11) /* Software-controlled RTS/CTS */ + +/* + * Capability flags (driver-internal) + */ + +#define VT8500_HAS_SWRTSCTS_SWITCH (1 << 1) + +#define VT8500_RECOMMENDED_CLK 12000000 +#define VT8500_OVERSAMPLING_DIVISOR 13 +#define VT8500_MAX_PORTS 6 + +struct vt8500_port { + struct uart_port uart; + char name[16]; + struct clk *clk; + unsigned int clk_predivisor; + unsigned int ier; + unsigned int vt8500_uart_flags; +}; + +/* + * we use this variable to keep track of which ports + * have been allocated as we can't use pdev->id in + * devicetree + */ +static DECLARE_BITMAP(vt8500_ports_in_use, VT8500_MAX_PORTS); + +static inline void vt8500_write(struct uart_port *port, unsigned int val, + unsigned int off) +{ + writel(val, port->membase + off); +} + +static inline unsigned int vt8500_read(struct uart_port *port, unsigned int off) +{ + return readl(port->membase + off); +} + +static void vt8500_stop_tx(struct uart_port *port) +{ + struct vt8500_port *vt8500_port = container_of(port, + struct vt8500_port, + uart); + + vt8500_port->ier &= ~TX_FIFO_INTS; + vt8500_write(port, vt8500_port->ier, VT8500_URIER); +} + +static void vt8500_stop_rx(struct uart_port *port) +{ + struct vt8500_port *vt8500_port = container_of(port, + struct vt8500_port, + uart); + + vt8500_port->ier &= ~RX_FIFO_INTS; + vt8500_write(port, vt8500_port->ier, VT8500_URIER); +} + +static void vt8500_enable_ms(struct uart_port *port) +{ + struct vt8500_port *vt8500_port = container_of(port, + struct vt8500_port, + uart); + + vt8500_port->ier |= TCTS; + vt8500_write(port, vt8500_port->ier, VT8500_URIER); +} + +static void handle_rx(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + + /* + * Handle overrun + */ + if ((vt8500_read(port, VT8500_URISR) & RXOVER)) { + port->icount.overrun++; + tty_insert_flip_char(tport, 0, TTY_OVERRUN); + } + + /* and now the main RX loop */ + while (vt8500_read(port, VT8500_URFIDX) & 0x1f00) { + unsigned int c; + char flag = TTY_NORMAL; + + c = readw(port->membase + VT8500_RXFIFO) & 0x3ff; + + /* Mask conditions we're ignoring. */ + c &= ~port->read_status_mask; + + if (c & FER) { + port->icount.frame++; + flag = TTY_FRAME; + } else if (c & PER) { + port->icount.parity++; + flag = TTY_PARITY; + } + port->icount.rx++; + + if (!uart_handle_sysrq_char(port, c)) + tty_insert_flip_char(tport, c, flag); + } + + tty_flip_buffer_push(tport); +} + +static unsigned int vt8500_tx_empty(struct uart_port *port) +{ + unsigned int idx = vt8500_read(port, VT8500_URFIDX) & 0x1f; + + return idx < 16 ?
TIOCSER_TEMT : 0; +} + +static void handle_tx(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + writeb(port->x_char, port->membase + VT8500_TXFIFO); + port->icount.tx++; + port->x_char = 0; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + vt8500_stop_tx(port); + return; + } + + while (vt8500_tx_empty(port)) { + if (uart_circ_empty(xmit)) + break; + + writeb(xmit->buf[xmit->tail], port->membase + VT8500_TXFIFO); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + vt8500_stop_tx(port); +} + +static void vt8500_start_tx(struct uart_port *port) +{ + struct vt8500_port *vt8500_port = container_of(port, + struct vt8500_port, + uart); + + vt8500_port->ier &= ~TX_FIFO_INTS; + vt8500_write(port, vt8500_port->ier, VT8500_URIER); + handle_tx(port); + vt8500_port->ier |= TX_FIFO_INTS; + vt8500_write(port, vt8500_port->ier, VT8500_URIER); +} + +static void handle_delta_cts(struct uart_port *port) +{ + port->icount.cts++; + wake_up_interruptible(&port->state->port.delta_msr_wait); +} + +static irqreturn_t vt8500_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned long isr; + + spin_lock(&port->lock); + isr = vt8500_read(port, VT8500_URISR); + + /* Acknowledge active status bits */ + vt8500_write(port, isr, VT8500_URISR); + + if (isr & RX_FIFO_INTS) + handle_rx(port); + if (isr & TX_FIFO_INTS) + handle_tx(port); + if (isr & TCTS) + handle_delta_cts(port); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static unsigned int vt8500_get_mctrl(struct uart_port *port) +{ + unsigned int usr; + + usr = vt8500_read(port, VT8500_URUSR); + if (usr & (1 << 4)) + return TIOCM_CTS; + else + return 0; +} + +static void vt8500_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int lcr = vt8500_read(port, VT8500_URLCR); + + if (mctrl & TIOCM_RTS) + lcr |= VT8500_RTS; + else + lcr &= ~VT8500_RTS; + + vt8500_write(port, lcr, VT8500_URLCR); +} + +static void vt8500_break_ctl(struct uart_port *port, int break_ctl) +{ + if (break_ctl) + vt8500_write(port, + vt8500_read(port, VT8500_URLCR) | VT8500_BREAK, + VT8500_URLCR); +} + +static int vt8500_set_baud_rate(struct uart_port *port, unsigned int baud) +{ + struct vt8500_port *vt8500_port = + container_of(port, struct vt8500_port, uart); + unsigned long div; + unsigned int loops = 1000; + + div = ((vt8500_port->clk_predivisor - 1) & 0xf) << 16; + div |= (uart_get_divisor(port, baud) - 1) & 0x3ff; + + /* Effective baud rate */ + baud = port->uartclk / 16 / ((div & 0x3ff) + 1); + + while ((vt8500_read(port, VT8500_URUSR) & (1 << 5)) && --loops) + cpu_relax(); + + vt8500_write(port, div, VT8500_URDIV); + + /* Break signal timing depends on baud rate, update accordingly */ + vt8500_write(port, mult_frac(baud, 4096, 1000000), VT8500_URBKR); + + return baud; +} + +static int vt8500_startup(struct uart_port *port) +{ + struct vt8500_port *vt8500_port = + container_of(port, struct vt8500_port, uart); + int ret; + + snprintf(vt8500_port->name, sizeof(vt8500_port->name), + "vt8500_serial%d", port->line); + + ret = request_irq(port->irq, vt8500_irq, IRQF_TRIGGER_HIGH, + vt8500_port->name, port); + if (unlikely(ret)) + return ret; + + vt8500_write(port, 0x03, VT8500_URLCR); /* enable TX & RX */ + + return 0; +} + +static void vt8500_shutdown(struct uart_port *port) +{ + struct vt8500_port *vt8500_port = + container_of(port, struct 
vt8500_port, uart); + + vt8500_port->ier = 0; + + /* disable interrupts and FIFOs */ + vt8500_write(&vt8500_port->uart, 0, VT8500_URIER); + vt8500_write(&vt8500_port->uart, 0x880, VT8500_URFCR); + free_irq(port->irq, port); +} + +static void vt8500_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + struct vt8500_port *vt8500_port = + container_of(port, struct vt8500_port, uart); + unsigned long flags; + unsigned int baud, lcr; + unsigned int loops = 1000; + + spin_lock_irqsave(&port->lock, flags); + + /* calculate and set baud rate */ + baud = uart_get_baud_rate(port, termios, old, 900, 921600); + baud = vt8500_set_baud_rate(port, baud); + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + /* calculate parity */ + lcr = vt8500_read(&vt8500_port->uart, VT8500_URLCR); + lcr &= ~(VT8500_PARENB | VT8500_PARODD); + if (termios->c_cflag & PARENB) { + lcr |= VT8500_PARENB; + termios->c_cflag &= ~CMSPAR; + if (termios->c_cflag & PARODD) + lcr |= VT8500_PARODD; + } + + /* calculate bits per char */ + lcr &= ~VT8500_CS8; + switch (termios->c_cflag & CSIZE) { + case CS7: + break; + case CS8: + default: + lcr |= VT8500_CS8; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + break; + } + + /* calculate stop bits */ + lcr &= ~VT8500_CSTOPB; + if (termios->c_cflag & CSTOPB) + lcr |= VT8500_CSTOPB; + + lcr &= ~VT8500_SWRTSCTS; + if (vt8500_port->vt8500_uart_flags & VT8500_HAS_SWRTSCTS_SWITCH) + lcr |= VT8500_SWRTSCTS; + + /* set parity, bits per char, and stop bit */ + vt8500_write(&vt8500_port->uart, lcr, VT8500_URLCR); + + /* Configure status bits to ignore based on termio flags. */ + port->read_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->read_status_mask = FER | PER; + + uart_update_timeout(port, termios->c_cflag, baud); + + /* Reset FIFOs */ + vt8500_write(&vt8500_port->uart, 0x88c, VT8500_URFCR); + while ((vt8500_read(&vt8500_port->uart, VT8500_URFCR) & 0xc) + && --loops) + cpu_relax(); + + /* Every possible FIFO-related interrupt */ + vt8500_port->ier = RX_FIFO_INTS | TX_FIFO_INTS; + + /* + * CTS flow control + */ + if (UART_ENABLE_MS(&vt8500_port->uart, termios->c_cflag)) + vt8500_port->ier |= TCTS; + + vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR); + vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *vt8500_type(struct uart_port *port) +{ + struct vt8500_port *vt8500_port = + container_of(port, struct vt8500_port, uart); + return vt8500_port->name; +} + +static void vt8500_release_port(struct uart_port *port) +{ +} + +static int vt8500_request_port(struct uart_port *port) +{ + return 0; +} + +static void vt8500_config_port(struct uart_port *port, int flags) +{ + port->type = PORT_VT8500; +} + +static int vt8500_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_VT8500)) + return -EINVAL; + if (unlikely(port->irq != ser->irq)) + return -EINVAL; + return 0; +} + +static struct vt8500_port *vt8500_uart_ports[VT8500_MAX_PORTS]; +static struct uart_driver vt8500_uart_driver; + +#ifdef CONFIG_SERIAL_VT8500_CONSOLE + +static void wait_for_xmitr(struct uart_port *port) +{ + unsigned int status, tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. 
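+ * (The loop below polls VT8500_URFIDX and exits once bit 4 clears,
+ * i.e. once the 16-entry TX FIFO is no longer full, or after 10000
+ * iterations of udelay(1).)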
*/ + do { + status = vt8500_read(port, VT8500_URFIDX); + + if (--tmout == 0) + break; + udelay(1); + } while (status & 0x10); +} + +static void vt8500_console_putchar(struct uart_port *port, unsigned char c) +{ + wait_for_xmitr(port); + writeb(c, port->membase + VT8500_TXFIFO); +} + +static void vt8500_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct vt8500_port *vt8500_port = vt8500_uart_ports[co->index]; + unsigned long ier; + + BUG_ON(co->index < 0 || co->index >= vt8500_uart_driver.nr); + + ier = vt8500_read(&vt8500_port->uart, VT8500_URIER); + vt8500_write(&vt8500_port->uart, 0, VT8500_URIER); + + uart_console_write(&vt8500_port->uart, s, count, + vt8500_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and switch back to FIFO + */ + wait_for_xmitr(&vt8500_port->uart); + vt8500_write(&vt8500_port->uart, ier, VT8500_URIER); +} + +static int __init vt8500_console_setup(struct console *co, char *options) +{ + struct vt8500_port *vt8500_port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (unlikely(co->index >= vt8500_uart_driver.nr || co->index < 0)) + return -ENXIO; + + vt8500_port = vt8500_uart_ports[co->index]; + + if (!vt8500_port) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&vt8500_port->uart, + co, baud, parity, bits, flow); +} + +static struct console vt8500_console = { + .name = "ttyWMT", + .write = vt8500_console_write, + .device = uart_console_device, + .setup = vt8500_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &vt8500_uart_driver, +}; + +#define VT8500_CONSOLE (&vt8500_console) + +#else +#define VT8500_CONSOLE NULL +#endif + +#ifdef CONFIG_CONSOLE_POLL +static int vt8500_get_poll_char(struct uart_port *port) +{ + unsigned int status = vt8500_read(port, VT8500_URFIDX); + + if (!(status & 0x1f00)) + return NO_POLL_CHAR; + + return vt8500_read(port, VT8500_RXFIFO) & 0xff; +} + +static void vt8500_put_poll_char(struct uart_port *port, unsigned char c) +{ + unsigned int status, tmout = 10000; + + do { + status = vt8500_read(port, VT8500_URFIDX); + + if (--tmout == 0) + break; + udelay(1); + } while (status & 0x10); + + vt8500_write(port, c, VT8500_TXFIFO); +} +#endif + +static const struct uart_ops vt8500_uart_pops = { + .tx_empty = vt8500_tx_empty, + .set_mctrl = vt8500_set_mctrl, + .get_mctrl = vt8500_get_mctrl, + .stop_tx = vt8500_stop_tx, + .start_tx = vt8500_start_tx, + .stop_rx = vt8500_stop_rx, + .enable_ms = vt8500_enable_ms, + .break_ctl = vt8500_break_ctl, + .startup = vt8500_startup, + .shutdown = vt8500_shutdown, + .set_termios = vt8500_set_termios, + .type = vt8500_type, + .release_port = vt8500_release_port, + .request_port = vt8500_request_port, + .config_port = vt8500_config_port, + .verify_port = vt8500_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = vt8500_get_poll_char, + .poll_put_char = vt8500_put_poll_char, +#endif +}; + +static struct uart_driver vt8500_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "vt8500_serial", + .dev_name = "ttyWMT", + .nr = 6, + .cons = VT8500_CONSOLE, +}; + +static unsigned int vt8500_flags; /* none required so far */ +static unsigned int wm8880_flags = VT8500_HAS_SWRTSCTS_SWITCH; + +static const struct of_device_id wmt_dt_ids[] = { + { .compatible = "via,vt8500-uart", .data = &vt8500_flags}, + { .compatible = "wm,wm8880-uart", .data = &wm8880_flags}, + {} +}; + +static int vt8500_serial_probe(struct platform_device
*pdev) +{ + struct vt8500_port *vt8500_port; + struct resource *mmres; + struct device_node *np = pdev->dev.of_node; + const unsigned int *flags; + int ret; + int port; + int irq; + + flags = of_device_get_match_data(&pdev->dev); + if (!flags) + return -EINVAL; + + mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mmres) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + if (np) { + port = of_alias_get_id(np, "serial"); + if (port >= VT8500_MAX_PORTS) + port = -1; + } else { + port = -1; + } + + if (port < 0) { + /* calculate the port id */ + port = find_first_zero_bit(vt8500_ports_in_use, + VT8500_MAX_PORTS); + } + + if (port >= VT8500_MAX_PORTS) + return -ENODEV; + + /* reserve the port id */ + if (test_and_set_bit(port, vt8500_ports_in_use)) { + /* port already in use - shouldn't really happen */ + return -EBUSY; + } + + vt8500_port = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_port), + GFP_KERNEL); + if (!vt8500_port) + return -ENOMEM; + + vt8500_port->uart.membase = devm_ioremap_resource(&pdev->dev, mmres); + if (IS_ERR(vt8500_port->uart.membase)) + return PTR_ERR(vt8500_port->uart.membase); + + vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); + if (IS_ERR(vt8500_port->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return -EINVAL; + } + + ret = clk_prepare_enable(vt8500_port->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return ret; + } + + vt8500_port->vt8500_uart_flags = *flags; + vt8500_port->clk_predivisor = DIV_ROUND_CLOSEST( + clk_get_rate(vt8500_port->clk), + VT8500_RECOMMENDED_CLK + ); + vt8500_port->uart.type = PORT_VT8500; + vt8500_port->uart.iotype = UPIO_MEM; + vt8500_port->uart.mapbase = mmres->start; + vt8500_port->uart.irq = irq; + vt8500_port->uart.fifosize = 16; + vt8500_port->uart.ops = &vt8500_uart_pops; + vt8500_port->uart.line = port; + vt8500_port->uart.dev = &pdev->dev; + vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; + vt8500_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_VT8500_CONSOLE); + + /* Serial core uses the magic "16" everywhere - adjust for it */ + vt8500_port->uart.uartclk = 16 * clk_get_rate(vt8500_port->clk) / + vt8500_port->clk_predivisor / + VT8500_OVERSAMPLING_DIVISOR; + + snprintf(vt8500_port->name, sizeof(vt8500_port->name), + "VT8500 UART%d", pdev->id); + + vt8500_uart_ports[port] = vt8500_port; + + uart_add_one_port(&vt8500_uart_driver, &vt8500_port->uart); + + platform_set_drvdata(pdev, vt8500_port); + + return 0; +} + +static struct platform_driver vt8500_platform_driver = { + .probe = vt8500_serial_probe, + .driver = { + .name = "vt8500_serial", + .of_match_table = wmt_dt_ids, + .suppress_bind_attrs = true, + }, +}; + +static int __init vt8500_serial_init(void) +{ + int ret; + + ret = uart_register_driver(&vt8500_uart_driver); + if (unlikely(ret)) + return ret; + + ret = platform_driver_register(&vt8500_platform_driver); + + if (unlikely(ret)) + uart_unregister_driver(&vt8500_uart_driver); + + return ret; +} +device_initcall(vt8500_serial_init); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c new file mode 100644 index 000000000..2eff7cff5 --- /dev/null +++ b/drivers/tty/serial/xilinx_uartps.c @@ -0,0 +1,1729 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Cadence UART driver (found in Xilinx Zynq) + * + * Copyright (c) 2011 - 2014 Xilinx, Inc. + * + * This driver has originally been pushed by Xilinx using a Zynq-branding. 
This + * still shows in the naming of this file, the kconfig symbols and some symbols + * in the code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CDNS_UART_TTY_NAME "ttyPS" +#define CDNS_UART_NAME "xuartps" +#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */ +#define CDNS_UART_MINOR 0 /* works best with devtmpfs */ +#define CDNS_UART_NR_PORTS 16 +#define CDNS_UART_FIFO_SIZE 64 /* FIFO size */ +#define CDNS_UART_REGISTER_SPACE 0x1000 +#define TX_TIMEOUT 500000 + +/* Rx Trigger level */ +static int rx_trigger_level = 56; +module_param(rx_trigger_level, uint, 0444); +MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes"); + +/* Rx Timeout */ +static int rx_timeout = 10; +module_param(rx_timeout, uint, 0444); +MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255"); + +/* Register offsets for the UART. */ +#define CDNS_UART_CR 0x00 /* Control Register */ +#define CDNS_UART_MR 0x04 /* Mode Register */ +#define CDNS_UART_IER 0x08 /* Interrupt Enable */ +#define CDNS_UART_IDR 0x0C /* Interrupt Disable */ +#define CDNS_UART_IMR 0x10 /* Interrupt Mask */ +#define CDNS_UART_ISR 0x14 /* Interrupt Status */ +#define CDNS_UART_BAUDGEN 0x18 /* Baud Rate Generator */ +#define CDNS_UART_RXTOUT 0x1C /* RX Timeout */ +#define CDNS_UART_RXWM 0x20 /* RX FIFO Trigger Level */ +#define CDNS_UART_MODEMCR 0x24 /* Modem Control */ +#define CDNS_UART_MODEMSR 0x28 /* Modem Status */ +#define CDNS_UART_SR 0x2C /* Channel Status */ +#define CDNS_UART_FIFO 0x30 /* FIFO */ +#define CDNS_UART_BAUDDIV 0x34 /* Baud Rate Divider */ +#define CDNS_UART_FLOWDEL 0x38 /* Flow Delay */ +#define CDNS_UART_IRRX_PWIDTH 0x3C /* IR Min Received Pulse Width */ +#define CDNS_UART_IRTX_PWIDTH 0x40 /* IR Transmitted pulse Width */ +#define CDNS_UART_TXWM 0x44 /* TX FIFO Trigger Level */ +#define CDNS_UART_RXBS 0x48 /* RX FIFO byte status register */ + +/* Control Register Bit Definitions */ +#define CDNS_UART_CR_STOPBRK 0x00000100 /* Stop TX break */ +#define CDNS_UART_CR_STARTBRK 0x00000080 /* Set TX break */ +#define CDNS_UART_CR_TX_DIS 0x00000020 /* TX disabled. */ +#define CDNS_UART_CR_TX_EN 0x00000010 /* TX enabled */ +#define CDNS_UART_CR_RX_DIS 0x00000008 /* RX disabled. */ +#define CDNS_UART_CR_RX_EN 0x00000004 /* RX enabled */ +#define CDNS_UART_CR_TXRST 0x00000002 /* TX logic reset */ +#define CDNS_UART_CR_RXRST 0x00000001 /* RX logic reset */ +#define CDNS_UART_CR_RST_TO 0x00000040 /* Restart Timeout Counter */ +#define CDNS_UART_RXBS_PARITY 0x00000001 /* Parity error status */ +#define CDNS_UART_RXBS_FRAMING 0x00000002 /* Framing error status */ +#define CDNS_UART_RXBS_BRK 0x00000004 /* Overrun error status */ + +/* + * Mode Register: + * The mode register (MR) defines the mode of transfer as well as the data + * format. If this register is modified during transmission or reception, + * data validity cannot be guaranteed. 
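+ * (Accordingly, the callers of cdns_uart_set_baud_rate() in this file,
+ * cdns_uart_set_termios() and the clock-rate-change notifier, disable
+ * the transmitter and receiver before the divisors and the mode
+ * register's clock-select bit are updated.)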
+ */ +#define CDNS_UART_MR_CLKSEL 0x00000001 /* Pre-scalar selection */ +#define CDNS_UART_MR_CHMODE_L_LOOP 0x00000200 /* Local loop back mode */ +#define CDNS_UART_MR_CHMODE_NORM 0x00000000 /* Normal mode */ +#define CDNS_UART_MR_CHMODE_MASK 0x00000300 /* Mask for mode bits */ + +#define CDNS_UART_MR_STOPMODE_2_BIT 0x00000080 /* 2 stop bits */ +#define CDNS_UART_MR_STOPMODE_1_BIT 0x00000000 /* 1 stop bit */ + +#define CDNS_UART_MR_PARITY_NONE 0x00000020 /* No parity mode */ +#define CDNS_UART_MR_PARITY_MARK 0x00000018 /* Mark parity mode */ +#define CDNS_UART_MR_PARITY_SPACE 0x00000010 /* Space parity mode */ +#define CDNS_UART_MR_PARITY_ODD 0x00000008 /* Odd parity mode */ +#define CDNS_UART_MR_PARITY_EVEN 0x00000000 /* Even parity mode */ + +#define CDNS_UART_MR_CHARLEN_6_BIT 0x00000006 /* 6 bits data */ +#define CDNS_UART_MR_CHARLEN_7_BIT 0x00000004 /* 7 bits data */ +#define CDNS_UART_MR_CHARLEN_8_BIT 0x00000000 /* 8 bits data */ + +/* + * Interrupt Registers: + * Interrupt control logic uses the interrupt enable register (IER) and the + * interrupt disable register (IDR) to set the value of the bits in the + * interrupt mask register (IMR). The IMR determines whether to pass an + * interrupt to the interrupt status register (ISR). + * Writing a 1 to IER Enables an interrupt, writing a 1 to IDR disables an + * interrupt. IMR and ISR are read only, and IER and IDR are write only. + * Reading either IER or IDR returns 0x00. + * All four registers have the same bit definitions. + */ +#define CDNS_UART_IXR_TOUT 0x00000100 /* RX Timeout error interrupt */ +#define CDNS_UART_IXR_PARITY 0x00000080 /* Parity error interrupt */ +#define CDNS_UART_IXR_FRAMING 0x00000040 /* Framing error interrupt */ +#define CDNS_UART_IXR_OVERRUN 0x00000020 /* Overrun error interrupt */ +#define CDNS_UART_IXR_TXFULL 0x00000010 /* TX FIFO Full interrupt */ +#define CDNS_UART_IXR_TXEMPTY 0x00000008 /* TX FIFO empty interrupt */ +#define CDNS_UART_ISR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt */ +#define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */ +#define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */ +#define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */ +#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */ + + /* + * Do not enable parity error interrupt for the following + * reason: When parity error interrupt is enabled, each Rx + * parity error always results in 2 events. The first one + * being parity error interrupt and the second one with a + * proper Rx interrupt with the incoming data. Disabling + * parity error interrupt ensures better handling of parity + * error events. With this change, for a parity error case, we + * get a Rx interrupt with parity error set in ISR register + * and we still handle parity errors in the desired way. + */ + +#define CDNS_UART_RX_IRQS (CDNS_UART_IXR_FRAMING | \ + CDNS_UART_IXR_OVERRUN | \ + CDNS_UART_IXR_RXTRIG | \ + CDNS_UART_IXR_TOUT) + +/* Goes in read_status_mask for break detection as the HW doesn't do it*/ +#define CDNS_UART_IXR_BRK 0x00002000 + +#define CDNS_UART_RXBS_SUPPORT BIT(1) +/* + * Modem Control register: + * The read/write Modem Control register controls the interface with the modem + * or data set, or a peripheral device emulating a modem. 
+ */ +#define CDNS_UART_MODEMCR_FCM 0x00000020 /* Automatic flow control mode */ +#define CDNS_UART_MODEMCR_RTS 0x00000002 /* Request to send output control */ +#define CDNS_UART_MODEMCR_DTR 0x00000001 /* Data Terminal Ready */ + +/* + * Modem Status register: + * The read/write Modem Status register reports the interface with the modem + * or data set, or a peripheral device emulating a modem. + */ +#define CDNS_UART_MODEMSR_DCD BIT(7) /* Data Carrier Detect */ +#define CDNS_UART_MODEMSR_RI BIT(6) /* Ring Indicator */ +#define CDNS_UART_MODEMSR_DSR BIT(5) /* Data Set Ready */ +#define CDNS_UART_MODEMSR_CTS BIT(4) /* Clear To Send */ + +/* + * Channel Status Register: + * The channel status register (CSR) is provided to enable the control logic + * to monitor the status of bits in the channel interrupt status register, + * even if these are masked out by the interrupt mask register. + */ +#define CDNS_UART_SR_RXEMPTY 0x00000002 /* RX FIFO empty */ +#define CDNS_UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */ +#define CDNS_UART_SR_TXFULL 0x00000010 /* TX FIFO full */ +#define CDNS_UART_SR_RXTRIG 0x00000001 /* Rx Trigger */ +#define CDNS_UART_SR_TACTIVE 0x00000800 /* TX state machine active */ + +/* baud dividers min/max values */ +#define CDNS_UART_BDIV_MIN 4 +#define CDNS_UART_BDIV_MAX 255 +#define CDNS_UART_CD_MAX 65535 +#define UART_AUTOSUSPEND_TIMEOUT 3000 + +/** + * struct cdns_uart - device data + * @port: Pointer to the UART port + * @uartclk: Reference clock + * @pclk: APB clock + * @cdns_uart_driver: Pointer to UART driver + * @baud: Current baud rate + * @clk_rate_change_nb: Notifier block for clock changes + * @quirks: Flags for RXBS support. + * @cts_override: Modem control state override + */ +struct cdns_uart { + struct uart_port *port; + struct clk *uartclk; + struct clk *pclk; + struct uart_driver *cdns_uart_driver; + unsigned int baud; + struct notifier_block clk_rate_change_nb; + u32 quirks; + bool cts_override; +}; +struct cdns_platform_data { + u32 quirks; +}; +#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \ + clk_rate_change_nb) + +/** + * cdns_uart_handle_rx - Handle the received bytes along with Rx errors. + * @dev_id: Id of the UART port + * @isrstatus: The interrupt status register value as read + * Return: None + */ +static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus) +{ + struct uart_port *port = (struct uart_port *)dev_id; + struct cdns_uart *cdns_uart = port->private_data; + unsigned int data; + unsigned int rxbs_status = 0; + unsigned int status_mask; + unsigned int framerrprocessed = 0; + char status = TTY_NORMAL; + bool is_rxbs_support; + + is_rxbs_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT; + + while ((readl(port->membase + CDNS_UART_SR) & + CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) { + if (is_rxbs_support) + rxbs_status = readl(port->membase + CDNS_UART_RXBS); + data = readl(port->membase + CDNS_UART_FIFO); + port->icount.rx++; + /* + * There is no hardware break detection in Zynq, so we interpret + * framing error with all-zeros data as a break sequence. + * Most of the time, there's another non-zero byte at the + * end of the sequence.
+ */ + if (!is_rxbs_support && (isrstatus & CDNS_UART_IXR_FRAMING)) { + if (!data) { + port->read_status_mask |= CDNS_UART_IXR_BRK; + framerrprocessed = 1; + continue; + } + } + if (is_rxbs_support && (rxbs_status & CDNS_UART_RXBS_BRK)) { + port->icount.brk++; + status = TTY_BREAK; + if (uart_handle_break(port)) + continue; + } + + isrstatus &= port->read_status_mask; + isrstatus &= ~port->ignore_status_mask; + status_mask = port->read_status_mask; + status_mask &= ~port->ignore_status_mask; + + if (data && + (port->read_status_mask & CDNS_UART_IXR_BRK)) { + port->read_status_mask &= ~CDNS_UART_IXR_BRK; + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } + + if (uart_handle_sysrq_char(port, data)) + continue; + + if (is_rxbs_support) { + if ((rxbs_status & CDNS_UART_RXBS_PARITY) + && (status_mask & CDNS_UART_IXR_PARITY)) { + port->icount.parity++; + status = TTY_PARITY; + } + if ((rxbs_status & CDNS_UART_RXBS_FRAMING) + && (status_mask & CDNS_UART_IXR_PARITY)) { + port->icount.frame++; + status = TTY_FRAME; + } + } else { + if (isrstatus & CDNS_UART_IXR_PARITY) { + port->icount.parity++; + status = TTY_PARITY; + } + if ((isrstatus & CDNS_UART_IXR_FRAMING) && + !framerrprocessed) { + port->icount.frame++; + status = TTY_FRAME; + } + } + if (isrstatus & CDNS_UART_IXR_OVERRUN) { + port->icount.overrun++; + tty_insert_flip_char(&port->state->port, 0, + TTY_OVERRUN); + } + tty_insert_flip_char(&port->state->port, data, status); + isrstatus = 0; + } + + tty_flip_buffer_push(&port->state->port); +} + +/** + * cdns_uart_handle_tx - Handle the bytes to be Txed. + * @dev_id: Id of the UART port + * Return: None + */ +static void cdns_uart_handle_tx(void *dev_id) +{ + struct uart_port *port = (struct uart_port *)dev_id; + struct circ_buf *xmit = &port->state->xmit; + unsigned int numbytes; + + if (uart_circ_empty(xmit)) { + writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR); + return; + } + + numbytes = port->fifosize; + while (numbytes && !uart_circ_empty(xmit) && + !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) { + + writel(xmit->buf[xmit->tail], port->membase + CDNS_UART_FIFO); + + port->icount.tx++; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + numbytes--; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +/** + * cdns_uart_isr - Interrupt handler + * @irq: Irq number + * @dev_id: Id of the port + * + * Return: IRQHANDLED + */ +static irqreturn_t cdns_uart_isr(int irq, void *dev_id) +{ + struct uart_port *port = (struct uart_port *)dev_id; + unsigned int isrstatus; + + spin_lock(&port->lock); + + /* Read the interrupt status register to determine which + * interrupt(s) is/are active and clear them. + */ + isrstatus = readl(port->membase + CDNS_UART_ISR); + writel(isrstatus, port->membase + CDNS_UART_ISR); + + if (isrstatus & CDNS_UART_IXR_TXEMPTY) { + cdns_uart_handle_tx(dev_id); + isrstatus &= ~CDNS_UART_IXR_TXEMPTY; + } + + isrstatus &= port->read_status_mask; + isrstatus &= ~port->ignore_status_mask; + /* + * Skip RX processing if RX is disabled as RXEMPTY will never be set + * as read bytes will not be removed from the FIFO. 
+ */ + if (isrstatus & CDNS_UART_IXR_RXMASK && + !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS)) + cdns_uart_handle_rx(dev_id, isrstatus); + + spin_unlock(&port->lock); + return IRQ_HANDLED; +} + +/** + * cdns_uart_calc_baud_divs - Calculate baud rate divisors + * @clk: UART module input clock + * @baud: Desired baud rate + * @rbdiv: BDIV value (return value) + * @rcd: CD value (return value) + * @div8: Value for clk_sel bit in mod (return value) + * Return: baud rate, requested baud when possible, or actual baud when there + * was too much error, zero if no valid divisors are found. + * + * Formula to obtain baud rate is + * baud_tx/rx rate = clk/CD * (BDIV + 1) + * input_clk = (Uart User Defined Clock or Apb Clock) + * depends on UCLKEN in MR Reg + * clk = input_clk or input_clk/8; + * depends on CLKS in MR reg + * CD and BDIV depends on values in + * baud rate generate register + * baud rate clock divisor register + */ +static unsigned int cdns_uart_calc_baud_divs(unsigned int clk, + unsigned int baud, u32 *rbdiv, u32 *rcd, int *div8) +{ + u32 cd, bdiv; + unsigned int calc_baud; + unsigned int bestbaud = 0; + unsigned int bauderror; + unsigned int besterror = ~0; + + if (baud < clk / ((CDNS_UART_BDIV_MAX + 1) * CDNS_UART_CD_MAX)) { + *div8 = 1; + clk /= 8; + } else { + *div8 = 0; + } + + for (bdiv = CDNS_UART_BDIV_MIN; bdiv <= CDNS_UART_BDIV_MAX; bdiv++) { + cd = DIV_ROUND_CLOSEST(clk, baud * (bdiv + 1)); + if (cd < 1 || cd > CDNS_UART_CD_MAX) + continue; + + calc_baud = clk / (cd * (bdiv + 1)); + + if (baud > calc_baud) + bauderror = baud - calc_baud; + else + bauderror = calc_baud - baud; + + if (besterror > bauderror) { + *rbdiv = bdiv; + *rcd = cd; + bestbaud = calc_baud; + besterror = bauderror; + } + } + /* use the values when percent error is acceptable */ + if (((besterror * 100) / baud) < 3) + bestbaud = baud; + + return bestbaud; +} + +/** + * cdns_uart_set_baud_rate - Calculate and set the baud rate + * @port: Handle to the uart port structure + * @baud: Baud rate to set + * Return: baud rate, requested baud when possible, or actual baud when there + * was too much error, zero if no valid divisors are found. + */ +static unsigned int cdns_uart_set_baud_rate(struct uart_port *port, + unsigned int baud) +{ + unsigned int calc_baud; + u32 cd = 0, bdiv = 0; + u32 mreg; + int div8; + struct cdns_uart *cdns_uart = port->private_data; + + calc_baud = cdns_uart_calc_baud_divs(port->uartclk, baud, &bdiv, &cd, + &div8); + + /* Write new divisors to hardware */ + mreg = readl(port->membase + CDNS_UART_MR); + if (div8) + mreg |= CDNS_UART_MR_CLKSEL; + else + mreg &= ~CDNS_UART_MR_CLKSEL; + writel(mreg, port->membase + CDNS_UART_MR); + writel(cd, port->membase + CDNS_UART_BAUDGEN); + writel(bdiv, port->membase + CDNS_UART_BAUDDIV); + cdns_uart->baud = baud; + + return calc_baud; +} + +#ifdef CONFIG_COMMON_CLK +/** + * cdns_uart_clk_notifier_cb - Clock notifier callback + * @nb: Notifier block + * @event: Notify event + * @data: Notifier data + * Return: NOTIFY_OK or NOTIFY_DONE on success, NOTIFY_BAD on error. 
+ */ +static int cdns_uart_clk_notifier_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + u32 ctrl_reg; + struct uart_port *port; + int locked = 0; + struct clk_notifier_data *ndata = data; + struct cdns_uart *cdns_uart = to_cdns_uart(nb); + unsigned long flags; + + port = cdns_uart->port; + if (port->suspended) + return NOTIFY_OK; + + switch (event) { + case PRE_RATE_CHANGE: + { + u32 bdiv, cd; + int div8; + + /* + * Find out if current baud-rate can be achieved with new clock + * frequency. + */ + if (!cdns_uart_calc_baud_divs(ndata->new_rate, cdns_uart->baud, + &bdiv, &cd, &div8)) { + dev_warn(port->dev, "clock rate change rejected\n"); + return NOTIFY_BAD; + } + + spin_lock_irqsave(&cdns_uart->port->lock, flags); + + /* Disable the TX and RX to set baud rate */ + ctrl_reg = readl(port->membase + CDNS_UART_CR); + ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS; + writel(ctrl_reg, port->membase + CDNS_UART_CR); + + spin_unlock_irqrestore(&cdns_uart->port->lock, flags); + + return NOTIFY_OK; + } + case POST_RATE_CHANGE: + /* + * Set clk dividers to generate correct baud with new clock + * frequency. + */ + + spin_lock_irqsave(&cdns_uart->port->lock, flags); + + locked = 1; + port->uartclk = ndata->new_rate; + + cdns_uart->baud = cdns_uart_set_baud_rate(cdns_uart->port, + cdns_uart->baud); + fallthrough; + case ABORT_RATE_CHANGE: + if (!locked) + spin_lock_irqsave(&cdns_uart->port->lock, flags); + + /* Set TX/RX Reset */ + ctrl_reg = readl(port->membase + CDNS_UART_CR); + ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST; + writel(ctrl_reg, port->membase + CDNS_UART_CR); + + while (readl(port->membase + CDNS_UART_CR) & + (CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST)) + cpu_relax(); + + /* + * Clear the RX disable and TX disable bits and then set the TX + * enable bit and RX enable bit to enable the transmitter and + * receiver. + */ + writel(rx_timeout, port->membase + CDNS_UART_RXTOUT); + ctrl_reg = readl(port->membase + CDNS_UART_CR); + ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS); + ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN; + writel(ctrl_reg, port->membase + CDNS_UART_CR); + + spin_unlock_irqrestore(&cdns_uart->port->lock, flags); + + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} +#endif + +/** + * cdns_uart_start_tx - Start transmitting bytes + * @port: Handle to the uart port structure + */ +static void cdns_uart_start_tx(struct uart_port *port) +{ + unsigned int status; + + if (uart_tx_stopped(port)) + return; + + /* + * Set the TX enable bit and clear the TX disable bit to enable the + * transmitter. 
+ */ + status = readl(port->membase + CDNS_UART_CR); + status &= ~CDNS_UART_CR_TX_DIS; + status |= CDNS_UART_CR_TX_EN; + writel(status, port->membase + CDNS_UART_CR); + + if (uart_circ_empty(&port->state->xmit)) + return; + + writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR); + + cdns_uart_handle_tx(port); + + /* Enable the TX Empty interrupt */ + writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER); +} + +/** + * cdns_uart_stop_tx - Stop TX + * @port: Handle to the uart port structure + */ +static void cdns_uart_stop_tx(struct uart_port *port) +{ + unsigned int regval; + + regval = readl(port->membase + CDNS_UART_CR); + regval |= CDNS_UART_CR_TX_DIS; + /* Disable the transmitter */ + writel(regval, port->membase + CDNS_UART_CR); +} + +/** + * cdns_uart_stop_rx - Stop RX + * @port: Handle to the uart port structure + */ +static void cdns_uart_stop_rx(struct uart_port *port) +{ + unsigned int regval; + + /* Disable RX IRQs */ + writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IDR); + + /* Disable the receiver */ + regval = readl(port->membase + CDNS_UART_CR); + regval |= CDNS_UART_CR_RX_DIS; + writel(regval, port->membase + CDNS_UART_CR); +} + +/** + * cdns_uart_tx_empty - Check whether TX is empty + * @port: Handle to the uart port structure + * + * Return: TIOCSER_TEMT on success, 0 otherwise + */ +static unsigned int cdns_uart_tx_empty(struct uart_port *port) +{ + unsigned int status; + + status = readl(port->membase + CDNS_UART_SR) & + (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE); + return (status == CDNS_UART_SR_TXEMPTY) ? TIOCSER_TEMT : 0; +} + +/** + * cdns_uart_break_ctl - Based on the input ctl we have to start or stop + * transmitting char breaks + * @port: Handle to the uart port structure + * @ctl: Value based on which start or stop decision is taken + */ +static void cdns_uart_break_ctl(struct uart_port *port, int ctl) +{ + unsigned int status; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + status = readl(port->membase + CDNS_UART_CR); + + if (ctl == -1) + writel(CDNS_UART_CR_STARTBRK | status, + port->membase + CDNS_UART_CR); + else { + if ((status & CDNS_UART_CR_STOPBRK) == 0) + writel(CDNS_UART_CR_STOPBRK | status, + port->membase + CDNS_UART_CR); + } + spin_unlock_irqrestore(&port->lock, flags); +} + +/** + * cdns_uart_set_termios - termios operations, handling data length, parity, + * stop bits, flow control, baud rate + * @port: Handle to the uart port structure + * @termios: Handle to the input termios structure + * @old: Values of the previously saved termios structure + */ +static void cdns_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + const struct ktermios *old) +{ + u32 cval = 0; + unsigned int baud, minbaud, maxbaud; + unsigned long flags; + unsigned int ctrl_reg, mode_reg; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable the TX and RX to set baud rate */ + ctrl_reg = readl(port->membase + CDNS_UART_CR); + ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS; + writel(ctrl_reg, port->membase + CDNS_UART_CR); + + /* + * Min baud rate = 6bps and Max Baud Rate is 10Mbps for 100Mhz clk + * min and max baud should be calculated here based on port->uartclk. 
+ * this way we get a valid baud and can safely call set_baud() + */ + minbaud = port->uartclk / + ((CDNS_UART_BDIV_MAX + 1) * CDNS_UART_CD_MAX * 8); + maxbaud = port->uartclk / (CDNS_UART_BDIV_MIN + 1); + baud = uart_get_baud_rate(port, termios, old, minbaud, maxbaud); + baud = cdns_uart_set_baud_rate(port, baud); + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + /* Update the per-port timeout. */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* Set TX/RX Reset */ + ctrl_reg = readl(port->membase + CDNS_UART_CR); + ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST; + writel(ctrl_reg, port->membase + CDNS_UART_CR); + + while (readl(port->membase + CDNS_UART_CR) & + (CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST)) + cpu_relax(); + + /* + * Clear the RX disable and TX disable bits and then set the TX enable + * bit and RX enable bit to enable the transmitter and receiver. + */ + ctrl_reg = readl(port->membase + CDNS_UART_CR); + ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS); + ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN; + writel(ctrl_reg, port->membase + CDNS_UART_CR); + + writel(rx_timeout, port->membase + CDNS_UART_RXTOUT); + + port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG | + CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT; + port->ignore_status_mask = 0; + + if (termios->c_iflag & INPCK) + port->read_status_mask |= CDNS_UART_IXR_PARITY | + CDNS_UART_IXR_FRAMING; + + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= CDNS_UART_IXR_PARITY | + CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN; + + /* ignore all characters if CREAD is not set */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= CDNS_UART_IXR_RXTRIG | + CDNS_UART_IXR_TOUT | CDNS_UART_IXR_PARITY | + CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN; + + mode_reg = readl(port->membase + CDNS_UART_MR); + + /* Handling Data Size */ + switch (termios->c_cflag & CSIZE) { + case CS6: + cval |= CDNS_UART_MR_CHARLEN_6_BIT; + break; + case CS7: + cval |= CDNS_UART_MR_CHARLEN_7_BIT; + break; + default: + case CS8: + cval |= CDNS_UART_MR_CHARLEN_8_BIT; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + break; + } + + /* Handling Parity and Stop Bits length */ + if (termios->c_cflag & CSTOPB) + cval |= CDNS_UART_MR_STOPMODE_2_BIT; /* 2 STOP bits */ + else + cval |= CDNS_UART_MR_STOPMODE_1_BIT; /* 1 STOP bit */ + + if (termios->c_cflag & PARENB) { + /* Mark or Space parity */ + if (termios->c_cflag & CMSPAR) { + if (termios->c_cflag & PARODD) + cval |= CDNS_UART_MR_PARITY_MARK; + else + cval |= CDNS_UART_MR_PARITY_SPACE; + } else { + if (termios->c_cflag & PARODD) + cval |= CDNS_UART_MR_PARITY_ODD; + else + cval |= CDNS_UART_MR_PARITY_EVEN; + } + } else { + cval |= CDNS_UART_MR_PARITY_NONE; + } + cval |= mode_reg & 1; + writel(cval, port->membase + CDNS_UART_MR); + + cval = readl(port->membase + CDNS_UART_MODEMCR); + if (termios->c_cflag & CRTSCTS) + cval |= CDNS_UART_MODEMCR_FCM; + else + cval &= ~CDNS_UART_MODEMCR_FCM; + writel(cval, port->membase + CDNS_UART_MODEMCR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/** + * cdns_uart_startup - Called when an application opens a cdns_uart port + * @port: Handle to the uart port structure + * + * Return: 0 on success, negative errno otherwise + */ +static int cdns_uart_startup(struct uart_port *port) +{ + struct cdns_uart *cdns_uart = port->private_data; + bool is_brk_support; + int ret; + unsigned long flags; + unsigned int status = 0; + + is_brk_support = 
cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable the TX and RX */ + writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS, + port->membase + CDNS_UART_CR); + + /* Set the Control Register with TX/RX Enable, TX/RX Reset, + * no break chars. + */ + writel(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST, + port->membase + CDNS_UART_CR); + + while (readl(port->membase + CDNS_UART_CR) & + (CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST)) + cpu_relax(); + + /* + * Clear the RX disable bit and then set the RX enable bit to enable + * the receiver. + */ + status = readl(port->membase + CDNS_UART_CR); + status &= ~CDNS_UART_CR_RX_DIS; + status |= CDNS_UART_CR_RX_EN; + writel(status, port->membase + CDNS_UART_CR); + + /* Set the Mode Register with normal mode,8 data bits,1 stop bit, + * no parity. + */ + writel(CDNS_UART_MR_CHMODE_NORM | CDNS_UART_MR_STOPMODE_1_BIT + | CDNS_UART_MR_PARITY_NONE | CDNS_UART_MR_CHARLEN_8_BIT, + port->membase + CDNS_UART_MR); + + /* + * Set the RX FIFO Trigger level to use most of the FIFO, but it + * can be tuned with a module parameter + */ + writel(rx_trigger_level, port->membase + CDNS_UART_RXWM); + + /* + * Receive Timeout register is enabled but it + * can be tuned with a module parameter + */ + writel(rx_timeout, port->membase + CDNS_UART_RXTOUT); + + /* Clear out any pending interrupts before enabling them */ + writel(readl(port->membase + CDNS_UART_ISR), + port->membase + CDNS_UART_ISR); + + spin_unlock_irqrestore(&port->lock, flags); + + ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port); + if (ret) { + dev_err(port->dev, "request_irq '%d' failed with %d\n", + port->irq, ret); + return ret; + } + + /* Set the Interrupt Registers with desired interrupts */ + if (is_brk_support) + writel(CDNS_UART_RX_IRQS | CDNS_UART_IXR_BRK, + port->membase + CDNS_UART_IER); + else + writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IER); + + return 0; +} + +/** + * cdns_uart_shutdown - Called when an application closes a cdns_uart port + * @port: Handle to the uart port structure + */ +static void cdns_uart_shutdown(struct uart_port *port) +{ + int status; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Disable interrupts */ + status = readl(port->membase + CDNS_UART_IMR); + writel(status, port->membase + CDNS_UART_IDR); + writel(0xffffffff, port->membase + CDNS_UART_ISR); + + /* Disable the TX and RX */ + writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS, + port->membase + CDNS_UART_CR); + + spin_unlock_irqrestore(&port->lock, flags); + + free_irq(port->irq, port); +} + +/** + * cdns_uart_type - Set UART type to cdns_uart port + * @port: Handle to the uart port structure + * + * Return: string on success, NULL otherwise + */ +static const char *cdns_uart_type(struct uart_port *port) +{ + return port->type == PORT_XUARTPS ? CDNS_UART_NAME : NULL; +} + +/** + * cdns_uart_verify_port - Verify the port params + * @port: Handle to the uart port structure + * @ser: Handle to the structure whose members are compared + * + * Return: 0 on success, negative errno otherwise. 
+ */ +static int cdns_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (ser->type != PORT_UNKNOWN && ser->type != PORT_XUARTPS) + return -EINVAL; + if (port->irq != ser->irq) + return -EINVAL; + if (ser->io_type != UPIO_MEM) + return -EINVAL; + if (port->iobase != ser->port) + return -EINVAL; + if (ser->hub6 != 0) + return -EINVAL; + return 0; +} + +/** + * cdns_uart_request_port - Claim the memory region attached to cdns_uart port, + * called when the driver adds a cdns_uart port via + * uart_add_one_port() + * @port: Handle to the uart port structure + * + * Return: 0 on success, negative errno otherwise. + */ +static int cdns_uart_request_port(struct uart_port *port) +{ + if (!request_mem_region(port->mapbase, CDNS_UART_REGISTER_SPACE, + CDNS_UART_NAME)) { + return -ENOMEM; + } + + port->membase = ioremap(port->mapbase, CDNS_UART_REGISTER_SPACE); + if (!port->membase) { + dev_err(port->dev, "Unable to map registers\n"); + release_mem_region(port->mapbase, CDNS_UART_REGISTER_SPACE); + return -ENOMEM; + } + return 0; +} + +/** + * cdns_uart_release_port - Release UART port + * @port: Handle to the uart port structure + * + * Release the memory region attached to a cdns_uart port. Called when the + * driver removes a cdns_uart port via uart_remove_one_port(). + */ +static void cdns_uart_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, CDNS_UART_REGISTER_SPACE); + iounmap(port->membase); + port->membase = NULL; +} + +/** + * cdns_uart_config_port - Configure UART port + * @port: Handle to the uart port structure + * @flags: If any + */ +static void cdns_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE && cdns_uart_request_port(port) == 0) + port->type = PORT_XUARTPS; +} + +/** + * cdns_uart_get_mctrl - Get the modem control state + * @port: Handle to the uart port structure + * + * Return: the modem control state + */ +static unsigned int cdns_uart_get_mctrl(struct uart_port *port) +{ + u32 val; + unsigned int mctrl = 0; + struct cdns_uart *cdns_uart_data = port->private_data; + + if (cdns_uart_data->cts_override) + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; + + val = readl(port->membase + CDNS_UART_MODEMSR); + if (val & CDNS_UART_MODEMSR_CTS) + mctrl |= TIOCM_CTS; + if (val & CDNS_UART_MODEMSR_DSR) + mctrl |= TIOCM_DSR; + if (val & CDNS_UART_MODEMSR_RI) + mctrl |= TIOCM_RNG; + if (val & CDNS_UART_MODEMSR_DCD) + mctrl |= TIOCM_CAR; + + return mctrl; +} + +static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 val; + u32 mode_reg; + struct cdns_uart *cdns_uart_data = port->private_data; + + if (cdns_uart_data->cts_override) + return; + + val = readl(port->membase + CDNS_UART_MODEMCR); + mode_reg = readl(port->membase + CDNS_UART_MR); + + val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR); + mode_reg &= ~CDNS_UART_MR_CHMODE_MASK; + + if (mctrl & TIOCM_RTS) + val |= CDNS_UART_MODEMCR_RTS; + if (mctrl & TIOCM_DTR) + val |= CDNS_UART_MODEMCR_DTR; + if (mctrl & TIOCM_LOOP) + mode_reg |= CDNS_UART_MR_CHMODE_L_LOOP; + else + mode_reg |= CDNS_UART_MR_CHMODE_NORM; + + writel(val, port->membase + CDNS_UART_MODEMCR); + writel(mode_reg, port->membase + CDNS_UART_MR); +} + +#ifdef CONFIG_CONSOLE_POLL +static int cdns_uart_poll_get_char(struct uart_port *port) +{ + int c; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Check if FIFO is empty */ + if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY) + c = NO_POLL_CHAR; + else /* Read a character 
*/ + c = (unsigned char) readl(port->membase + CDNS_UART_FIFO); + + spin_unlock_irqrestore(&port->lock, flags); + + return c; +} + +static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* Wait until FIFO is empty */ + while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY)) + cpu_relax(); + + /* Write a character */ + writel(c, port->membase + CDNS_UART_FIFO); + + /* Wait until FIFO is empty */ + while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY)) + cpu_relax(); + + spin_unlock_irqrestore(&port->lock, flags); +} +#endif + +static void cdns_uart_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + switch (state) { + case UART_PM_STATE_OFF: + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + break; + default: + pm_runtime_get_sync(port->dev); + break; + } +} + +static const struct uart_ops cdns_uart_ops = { + .set_mctrl = cdns_uart_set_mctrl, + .get_mctrl = cdns_uart_get_mctrl, + .start_tx = cdns_uart_start_tx, + .stop_tx = cdns_uart_stop_tx, + .stop_rx = cdns_uart_stop_rx, + .tx_empty = cdns_uart_tx_empty, + .break_ctl = cdns_uart_break_ctl, + .set_termios = cdns_uart_set_termios, + .startup = cdns_uart_startup, + .shutdown = cdns_uart_shutdown, + .pm = cdns_uart_pm, + .type = cdns_uart_type, + .verify_port = cdns_uart_verify_port, + .request_port = cdns_uart_request_port, + .release_port = cdns_uart_release_port, + .config_port = cdns_uart_config_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = cdns_uart_poll_get_char, + .poll_put_char = cdns_uart_poll_put_char, +#endif +}; + +static struct uart_driver cdns_uart_uart_driver; + +#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE +/** + * cdns_uart_console_putchar - write the character to the FIFO buffer + * @port: Handle to the uart port structure + * @ch: Character to be written + */ +static void cdns_uart_console_putchar(struct uart_port *port, unsigned char ch) +{ + unsigned int ctrl_reg; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1000); + while (1) { + ctrl_reg = readl(port->membase + CDNS_UART_CR); + if (!(ctrl_reg & CDNS_UART_CR_TX_DIS)) + break; + if (time_after(jiffies, timeout)) { + dev_warn(port->dev, + "timeout waiting for Enable\n"); + return; + } + cpu_relax(); + } + + timeout = jiffies + msecs_to_jiffies(1000); + while (1) { + ctrl_reg = readl(port->membase + CDNS_UART_SR); + + if (!(ctrl_reg & CDNS_UART_SR_TXFULL)) + break; + if (time_after(jiffies, timeout)) { + dev_warn(port->dev, + "timeout waiting for TX fifo\n"); + return; + } + cpu_relax(); + } + writel(ch, port->membase + CDNS_UART_FIFO); +} + +static void cdns_early_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, cdns_uart_console_putchar); +} + +static int __init cdns_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + struct uart_port *port = &device->port; + + if (!port->membase) + return -ENODEV; + + /* initialise control register */ + writel(CDNS_UART_CR_TX_EN|CDNS_UART_CR_TXRST|CDNS_UART_CR_RXRST, + port->membase + CDNS_UART_CR); + + /* only set baud if specified on command line - otherwise + * assume it has been initialized by a boot loader. 
+ */ + if (port->uartclk && device->baud) { + u32 cd = 0, bdiv = 0; + u32 mr; + int div8; + + cdns_uart_calc_baud_divs(port->uartclk, device->baud, + &bdiv, &cd, &div8); + mr = CDNS_UART_MR_PARITY_NONE; + if (div8) + mr |= CDNS_UART_MR_CLKSEL; + + writel(mr, port->membase + CDNS_UART_MR); + writel(cd, port->membase + CDNS_UART_BAUDGEN); + writel(bdiv, port->membase + CDNS_UART_BAUDDIV); + } + + device->con->write = cdns_early_write; + + return 0; +} +OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup); +OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup); +OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup); +OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup); + + +/* Static pointer to console port */ +static struct uart_port *console_port; + +/** + * cdns_uart_console_write - perform write operation + * @co: Console handle + * @s: Pointer to character array + * @count: No of characters + */ +static void cdns_uart_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port = console_port; + unsigned long flags; + unsigned int imr, ctrl; + int locked = 1; + + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); + + /* save and disable interrupt */ + imr = readl(port->membase + CDNS_UART_IMR); + writel(imr, port->membase + CDNS_UART_IDR); + + /* + * Make sure that the tx part is enabled. Set the TX enable bit and + * clear the TX disable bit to enable the transmitter. + */ + ctrl = readl(port->membase + CDNS_UART_CR); + ctrl &= ~CDNS_UART_CR_TX_DIS; + ctrl |= CDNS_UART_CR_TX_EN; + writel(ctrl, port->membase + CDNS_UART_CR); + + uart_console_write(port, s, count, cdns_uart_console_putchar); + while (cdns_uart_tx_empty(port) != TIOCSER_TEMT) + cpu_relax(); + + /* restore interrupt state */ + writel(imr, port->membase + CDNS_UART_IER); + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +} + +/** + * cdns_uart_console_setup - Initialize the uart to default config + * @co: Console handle + * @options: Initial settings of uart + * + * Return: 0 on success, negative errno otherwise. + */ +static int cdns_uart_console_setup(struct console *co, char *options) +{ + struct uart_port *port = console_port; + + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + unsigned long time_out; + + if (!port->membase) { + pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n", + co->index); + return -ENODEV; + } + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + /* Wait for tx_empty before setting up the console */ + time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT); + + while (time_before(jiffies, time_out) && + cdns_uart_tx_empty(port) != TIOCSER_TEMT) + cpu_relax(); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console cdns_uart_console = { + .name = CDNS_UART_TTY_NAME, + .write = cdns_uart_console_write, + .device = uart_console_device, + .setup = cdns_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, /* Specified on the cmdline (e.g. 
console=ttyPS ) */
+	.data	= &cdns_uart_uart_driver,
+};
+#endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * cdns_uart_suspend - suspend event
+ * @device: Pointer to the device structure
+ *
+ * Return: 0
+ */
+static int cdns_uart_suspend(struct device *device)
+{
+	struct uart_port *port = dev_get_drvdata(device);
+	struct cdns_uart *cdns_uart = port->private_data;
+	int may_wake;
+
+	may_wake = device_may_wakeup(device);
+
+	if (console_suspend_enabled && uart_console(port) && may_wake) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&port->lock, flags);
+		/* Empty the receive FIFO first before making changes */
+		while (!(readl(port->membase + CDNS_UART_SR) &
+			 CDNS_UART_SR_RXEMPTY))
+			readl(port->membase + CDNS_UART_FIFO);
+		/* set RX trigger level to 1 */
+		writel(1, port->membase + CDNS_UART_RXWM);
+		/* disable RX timeout interrupts */
+		writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	/*
+	 * Call the API provided in serial_core.c file which handles
+	 * the suspend.
+	 */
+	return uart_suspend_port(cdns_uart->cdns_uart_driver, port);
+}
+
+/**
+ * cdns_uart_resume - Resume after a previous suspend
+ * @device: Pointer to the device structure
+ *
+ * Return: 0
+ */
+static int cdns_uart_resume(struct device *device)
+{
+	struct uart_port *port = dev_get_drvdata(device);
+	struct cdns_uart *cdns_uart = port->private_data;
+	unsigned long flags;
+	u32 ctrl_reg;
+	int may_wake;
+	int ret;
+
+	may_wake = device_may_wakeup(device);
+
+	if (console_suspend_enabled && uart_console(port) && !may_wake) {
+		ret = clk_enable(cdns_uart->pclk);
+		if (ret)
+			return ret;
+
+		ret = clk_enable(cdns_uart->uartclk);
+		if (ret) {
+			clk_disable(cdns_uart->pclk);
+			return ret;
+		}
+
+		spin_lock_irqsave(&port->lock, flags);
+
+		/* Set TX/RX Reset */
+		ctrl_reg = readl(port->membase + CDNS_UART_CR);
+		ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
+		writel(ctrl_reg, port->membase + CDNS_UART_CR);
+		while (readl(port->membase + CDNS_UART_CR) &
+				(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
+			cpu_relax();
+
+		/* restore rx timeout value */
+		writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
+		/* Enable Tx/Rx */
+		ctrl_reg = readl(port->membase + CDNS_UART_CR);
+		ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
+		ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
+		writel(ctrl_reg, port->membase + CDNS_UART_CR);
+
+		clk_disable(cdns_uart->uartclk);
+		clk_disable(cdns_uart->pclk);
+		spin_unlock_irqrestore(&port->lock, flags);
+	} else {
+		spin_lock_irqsave(&port->lock, flags);
+		/* restore original rx trigger level */
+		writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
+		/* enable RX timeout interrupt */
+		writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	return uart_resume_port(cdns_uart->cdns_uart_driver, port);
+}
+#endif /* ! 
CONFIG_PM_SLEEP */ +static int __maybe_unused cdns_runtime_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct cdns_uart *cdns_uart = port->private_data; + + clk_disable(cdns_uart->uartclk); + clk_disable(cdns_uart->pclk); + return 0; +}; + +static int __maybe_unused cdns_runtime_resume(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct cdns_uart *cdns_uart = port->private_data; + int ret; + + ret = clk_enable(cdns_uart->pclk); + if (ret) + return ret; + + ret = clk_enable(cdns_uart->uartclk); + if (ret) { + clk_disable(cdns_uart->pclk); + return ret; + } + return 0; +}; + +static const struct dev_pm_ops cdns_uart_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cdns_uart_suspend, cdns_uart_resume) + SET_RUNTIME_PM_OPS(cdns_runtime_suspend, + cdns_runtime_resume, NULL) +}; + +static const struct cdns_platform_data zynqmp_uart_def = { + .quirks = CDNS_UART_RXBS_SUPPORT, }; + +/* Match table for of_platform binding */ +static const struct of_device_id cdns_uart_of_match[] = { + { .compatible = "xlnx,xuartps", }, + { .compatible = "cdns,uart-r1p8", }, + { .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def }, + { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def }, + {} +}; +MODULE_DEVICE_TABLE(of, cdns_uart_of_match); + +/* Temporary variable for storing number of instances */ +static int instances; + +/** + * cdns_uart_probe - Platform driver probe + * @pdev: Pointer to the platform device structure + * + * Return: 0 on success, negative errno otherwise + */ +static int cdns_uart_probe(struct platform_device *pdev) +{ + int rc, id, irq; + struct uart_port *port; + struct resource *res; + struct cdns_uart *cdns_uart_data; + const struct of_device_id *match; + + cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), + GFP_KERNEL); + if (!cdns_uart_data) + return -ENOMEM; + port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + /* Look for a serialN alias */ + id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (id < 0) + id = 0; + + if (id >= CDNS_UART_NR_PORTS) { + dev_err(&pdev->dev, "Cannot get uart_port structure\n"); + return -ENODEV; + } + + if (!cdns_uart_uart_driver.state) { + cdns_uart_uart_driver.owner = THIS_MODULE; + cdns_uart_uart_driver.driver_name = CDNS_UART_NAME; + cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME; + cdns_uart_uart_driver.major = CDNS_UART_MAJOR; + cdns_uart_uart_driver.minor = CDNS_UART_MINOR; + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; +#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE + cdns_uart_uart_driver.cons = &cdns_uart_console; +#endif + + rc = uart_register_driver(&cdns_uart_uart_driver); + if (rc < 0) { + dev_err(&pdev->dev, "Failed to register driver\n"); + return rc; + } + } + + cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver; + + match = of_match_node(cdns_uart_of_match, pdev->dev.of_node); + if (match && match->data) { + const struct cdns_platform_data *data = match->data; + + cdns_uart_data->quirks = data->quirks; + } + + cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (PTR_ERR(cdns_uart_data->pclk) == -EPROBE_DEFER) { + rc = PTR_ERR(cdns_uart_data->pclk); + goto err_out_unregister_driver; + } + + if (IS_ERR(cdns_uart_data->pclk)) { + cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "aper_clk"); + if (IS_ERR(cdns_uart_data->pclk)) { + rc = PTR_ERR(cdns_uart_data->pclk); + goto err_out_unregister_driver; + } + dev_err(&pdev->dev, "clock name 'aper_clk' is deprecated.\n"); + } + + 
cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "uart_clk");
+	if (PTR_ERR(cdns_uart_data->uartclk) == -EPROBE_DEFER) {
+		rc = PTR_ERR(cdns_uart_data->uartclk);
+		goto err_out_unregister_driver;
+	}
+
+	if (IS_ERR(cdns_uart_data->uartclk)) {
+		cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "ref_clk");
+		if (IS_ERR(cdns_uart_data->uartclk)) {
+			rc = PTR_ERR(cdns_uart_data->uartclk);
+			goto err_out_unregister_driver;
+		}
+		dev_err(&pdev->dev, "clock name 'ref_clk' is deprecated.\n");
+	}
+
+	rc = clk_prepare_enable(cdns_uart_data->pclk);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
+		goto err_out_unregister_driver;
+	}
+	rc = clk_prepare_enable(cdns_uart_data->uartclk);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to enable device clock.\n");
+		goto err_out_clk_dis_pclk;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		rc = -ENODEV;
+		goto err_out_clk_disable;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		rc = -ENXIO;
+		goto err_out_clk_disable;
+	}
+
+#ifdef CONFIG_COMMON_CLK
+	cdns_uart_data->clk_rate_change_nb.notifier_call =
+			cdns_uart_clk_notifier_cb;
+	if (clk_notifier_register(cdns_uart_data->uartclk,
+				&cdns_uart_data->clk_rate_change_nb))
+		dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
+#endif
+
+	/* At this point, we've got an empty uart_port struct, initialize it */
+	spin_lock_init(&port->lock);
+	port->type = PORT_UNKNOWN;
+	port->iotype = UPIO_MEM32;
+	port->flags = UPF_BOOT_AUTOCONF;
+	port->ops = &cdns_uart_ops;
+	port->fifosize = CDNS_UART_FIFO_SIZE;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE);
+	port->line = id;
+
+	/*
+	 * Register the port.
+	 * This function also registers this device with the tty layer
+	 * and triggers invocation of the config_port() entry point.
+	 */
+	port->mapbase = res->start;
+	port->irq = irq;
+	port->dev = &pdev->dev;
+	port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
+	port->private_data = cdns_uart_data;
+	port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
+			CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
+	cdns_uart_data->port = port;
+	platform_set_drvdata(pdev, port);
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	device_init_wakeup(port->dev, true);
+
+#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
+	/*
+	 * If a console port has not been picked yet, try this one: the console
+	 * setup function requires console_port to be assigned. If
+	 * register_console() does not end up using it, console_port is cleared
+	 * again below.
+ */ + if (!console_port) { + cdns_uart_console.index = id; + console_port = port; + } +#endif + + rc = uart_add_one_port(&cdns_uart_uart_driver, port); + if (rc) { + dev_err(&pdev->dev, + "uart_add_one_port() failed; err=%i\n", rc); + goto err_out_pm_disable; + } + +#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE + /* This is not port which is used for console that's why clean it up */ + if (console_port == port && + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) { + console_port = NULL; + cdns_uart_console.index = -1; + } +#endif + + cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, + "cts-override"); + + instances++; + + return 0; + +err_out_pm_disable: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); +#ifdef CONFIG_COMMON_CLK + clk_notifier_unregister(cdns_uart_data->uartclk, + &cdns_uart_data->clk_rate_change_nb); +#endif +err_out_clk_disable: + clk_disable_unprepare(cdns_uart_data->uartclk); +err_out_clk_dis_pclk: + clk_disable_unprepare(cdns_uart_data->pclk); +err_out_unregister_driver: + if (!instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); + return rc; +} + +/** + * cdns_uart_remove - called when the platform driver is unregistered + * @pdev: Pointer to the platform device structure + * + * Return: 0 on success, negative errno otherwise + */ +static int cdns_uart_remove(struct platform_device *pdev) +{ + struct uart_port *port = platform_get_drvdata(pdev); + struct cdns_uart *cdns_uart_data = port->private_data; + int rc; + + /* Remove the cdns_uart port from the serial core */ +#ifdef CONFIG_COMMON_CLK + clk_notifier_unregister(cdns_uart_data->uartclk, + &cdns_uart_data->clk_rate_change_nb); +#endif + rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port); + port->mapbase = 0; + clk_disable_unprepare(cdns_uart_data->uartclk); + clk_disable_unprepare(cdns_uart_data->pclk); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + device_init_wakeup(&pdev->dev, false); + +#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE + if (console_port == port) + console_port = NULL; +#endif + + if (!--instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); + return rc; +} + +static struct platform_driver cdns_uart_platform_driver = { + .probe = cdns_uart_probe, + .remove = cdns_uart_remove, + .driver = { + .name = CDNS_UART_NAME, + .of_match_table = cdns_uart_of_match, + .pm = &cdns_uart_dev_pm_ops, + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART), + }, +}; + +static int __init cdns_uart_init(void) +{ + /* Register the platform driver */ + return platform_driver_register(&cdns_uart_platform_driver); +} + +static void __exit cdns_uart_exit(void) +{ + /* Unregister the platform driver */ + platform_driver_unregister(&cdns_uart_platform_driver); +} + +arch_initcall(cdns_uart_init); +module_exit(cdns_uart_exit); + +MODULE_DESCRIPTION("Driver for Cadence UART"); +MODULE_AUTHOR("Xilinx Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c new file mode 100644 index 000000000..688db7d8b --- /dev/null +++ b/drivers/tty/serial/zs.c @@ -0,0 +1,1307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zs.c: Serial port driver for IOASIC DECstations. + * + * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras. + * Derived from drivers/macintosh/macserial.c by Harald Koerfgen. 
+ * + * DECstation changes + * Copyright (C) 1998-2000 Harald Koerfgen + * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki + * + * For the rest of the code the original Copyright applies: + * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * + * + * Note: for IOASIC systems the wiring is as follows: + * + * mouse/keyboard: + * DIN-7 MJ-4 signal SCC + * 2 1 TxD <- A.TxD + * 3 4 RxD -> A.RxD + * + * EIA-232/EIA-423: + * DB-25 MMJ-6 signal SCC + * 2 2 TxD <- B.TxD + * 3 5 RxD -> B.RxD + * 4 RTS <- ~A.RTS + * 5 CTS -> ~B.CTS + * 6 6 DSR -> ~A.SYNC + * 8 CD -> ~B.DCD + * 12 DSRS(DCE) -> ~A.CTS (*) + * 15 TxC -> B.TxC + * 17 RxC -> B.RxC + * 20 1 DTR <- ~A.DTR + * 22 RI -> ~A.DCD + * 23 DSRS(DTE) <- ~B.RTS + * + * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE) + * is shared with DSRS(DTE) at pin 23. + * + * As you can immediately notice the wiring of the RTS, DTR and DSR signals + * is a bit odd. This makes the handling of port B unnecessarily + * complicated and prevents the use of some automatic modes of operation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "zs.h" + + +MODULE_AUTHOR("Maciej W. Rozycki "); +MODULE_DESCRIPTION("DECstation Z85C30 serial driver"); +MODULE_LICENSE("GPL"); + + +static char zs_name[] __initdata = "DECstation Z85C30 serial driver version "; +static char zs_version[] __initdata = "0.10"; + +/* + * It would be nice to dynamically allocate everything that + * depends on ZS_NUM_SCCS, so we could support any number of + * Z85C30s, but for now... + */ +#define ZS_NUM_SCCS 2 /* Max # of ZS chips supported. */ +#define ZS_NUM_CHAN 2 /* 2 channels per chip. */ +#define ZS_CHAN_A 0 /* Index of the channel A. */ +#define ZS_CHAN_B 1 /* Index of the channel B. */ +#define ZS_CHAN_IO_SIZE 8 /* IOMEM space size. */ +#define ZS_CHAN_IO_STRIDE 4 /* Register alignment. */ +#define ZS_CHAN_IO_OFFSET 1 /* The SCC resides on the high byte + of the 16-bit IOBUS. */ +#define ZS_CLOCK 7372800 /* Z85C30 PCLK input clock rate. */ + +#define to_zport(uport) container_of(uport, struct zs_port, port) + +struct zs_parms { + resource_size_t scc[ZS_NUM_SCCS]; + int irq[ZS_NUM_SCCS]; +}; + +static struct zs_scc zs_sccs[ZS_NUM_SCCS]; + +static u8 zs_init_regs[ZS_NUM_REGS] __initdata = { + 0, /* write 0 */ + PAR_SPEC, /* write 1 */ + 0, /* write 2 */ + 0, /* write 3 */ + X16CLK | SB1, /* write 4 */ + 0, /* write 5 */ + 0, 0, 0, /* write 6, 7, 8 */ + MIE | DLC | NV, /* write 9 */ + NRZ, /* write 10 */ + TCBR | RCBR, /* write 11 */ + 0, 0, /* BRG time constant, write 12 + 13 */ + BRSRC | BRENABL, /* write 14 */ + 0, /* write 15 */ +}; + +/* + * Debugging. + */ +#undef ZS_DEBUG_REGS + + +/* + * Reading and writing Z85C30 registers. 
+ */ +static void recovery_delay(void) +{ + udelay(2); +} + +static u8 read_zsreg(struct zs_port *zport, int reg) +{ + void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET; + u8 retval; + + if (reg != 0) { + writeb(reg & 0xf, control); + fast_iob(); + recovery_delay(); + } + retval = readb(control); + recovery_delay(); + return retval; +} + +static void write_zsreg(struct zs_port *zport, int reg, u8 value) +{ + void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET; + + if (reg != 0) { + writeb(reg & 0xf, control); + fast_iob(); recovery_delay(); + } + writeb(value, control); + fast_iob(); + recovery_delay(); + return; +} + +static u8 read_zsdata(struct zs_port *zport) +{ + void __iomem *data = zport->port.membase + + ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET; + u8 retval; + + retval = readb(data); + recovery_delay(); + return retval; +} + +static void write_zsdata(struct zs_port *zport, u8 value) +{ + void __iomem *data = zport->port.membase + + ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET; + + writeb(value, data); + fast_iob(); + recovery_delay(); + return; +} + +#ifdef ZS_DEBUG_REGS +void zs_dump(void) +{ + struct zs_port *zport; + int i, j; + + for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) { + zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN]; + + if (!zport->scc) + continue; + + for (j = 0; j < 16; j++) + printk("W%-2d = 0x%02x\t", j, zport->regs[j]); + printk("\n"); + for (j = 0; j < 16; j++) + printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j)); + printk("\n\n"); + } +} +#endif + + +static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq) +{ + if (irq) + spin_lock_irq(lock); + else + spin_lock(lock); +} + +static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq) +{ + if (irq) + spin_unlock_irq(lock); + else + spin_unlock(lock); +} + +static int zs_receive_drain(struct zs_port *zport) +{ + int loops = 10000; + + while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops) + read_zsdata(zport); + return loops; +} + +static int zs_transmit_drain(struct zs_port *zport, int irq) +{ + struct zs_scc *scc = zport->scc; + int loops = 10000; + + while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) { + zs_spin_unlock_cond_irq(&scc->zlock, irq); + udelay(2); + zs_spin_lock_cond_irq(&scc->zlock, irq); + } + return loops; +} + +static int zs_line_drain(struct zs_port *zport, int irq) +{ + struct zs_scc *scc = zport->scc; + int loops = 10000; + + while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) { + zs_spin_unlock_cond_irq(&scc->zlock, irq); + udelay(2); + zs_spin_lock_cond_irq(&scc->zlock, irq); + } + return loops; +} + + +static void load_zsregs(struct zs_port *zport, u8 *regs, int irq) +{ + /* Let the current transmission finish. */ + zs_line_drain(zport, irq); + /* Load 'em up. */ + write_zsreg(zport, R3, regs[3] & ~RxENABLE); + write_zsreg(zport, R5, regs[5] & ~TxENAB); + write_zsreg(zport, R4, regs[4]); + write_zsreg(zport, R9, regs[9]); + write_zsreg(zport, R1, regs[1]); + write_zsreg(zport, R2, regs[2]); + write_zsreg(zport, R10, regs[10]); + write_zsreg(zport, R14, regs[14] & ~BRENABL); + write_zsreg(zport, R11, regs[11]); + write_zsreg(zport, R12, regs[12]); + write_zsreg(zport, R13, regs[13]); + write_zsreg(zport, R14, regs[14]); + write_zsreg(zport, R15, regs[15]); + if (regs[3] & RxENABLE) + write_zsreg(zport, R3, regs[3]); + if (regs[5] & TxENAB) + write_zsreg(zport, R5, regs[5]); + return; +} + + +/* + * Status handling routines. 
+ */ + +/* + * zs_tx_empty() -- get the transmitter empty status + * + * Purpose: Let user call ioctl() to get info when the UART physically + * is emptied. On bus types like RS485, the transmitter must + * release the bus after transmitting. This must be done when + * the transmit shift register is empty, not be done when the + * transmit holding register is empty. This functionality + * allows an RS485 driver to be written in user space. + */ +static unsigned int zs_tx_empty(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + unsigned long flags; + u8 status; + + spin_lock_irqsave(&scc->zlock, flags); + status = read_zsreg(zport, R1); + spin_unlock_irqrestore(&scc->zlock, flags); + + return status & ALL_SNT ? TIOCSER_TEMT : 0; +} + +static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a, + struct zs_port *zport_b) +{ + u8 status_a, status_b; + unsigned int mctrl; + + status_a = read_zsreg(zport_a, R0); + status_b = read_zsreg(zport_b, R0); + + mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) | + ((status_b & DCD) ? TIOCM_CAR : 0) | + ((status_a & DCD) ? TIOCM_RNG : 0) | + ((status_a & SYNC_HUNT) ? TIOCM_DSR : 0); + + return mctrl; +} + +static unsigned int zs_raw_get_mctrl(struct zs_port *zport) +{ + struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A]; + + return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0; +} + +static unsigned int zs_raw_xor_mctrl(struct zs_port *zport) +{ + struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A]; + unsigned int mmask, mctrl, delta; + u8 mask_a, mask_b; + + if (zport == zport_a) + return 0; + + mask_a = zport_a->regs[15]; + mask_b = zport->regs[15]; + + mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) | + ((mask_b & DCDIE) ? TIOCM_CAR : 0) | + ((mask_a & DCDIE) ? TIOCM_RNG : 0) | + ((mask_a & SYNCIE) ? TIOCM_DSR : 0); + + mctrl = zport->mctrl; + if (mmask) { + mctrl &= ~mmask; + mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask; + } + + delta = mctrl ^ zport->mctrl; + if (delta) + zport->mctrl = mctrl; + + return delta; +} + +static unsigned int zs_get_mctrl(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + unsigned int mctrl; + + spin_lock(&scc->zlock); + mctrl = zs_raw_get_mctrl(zport); + spin_unlock(&scc->zlock); + + return mctrl; +} + +static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; + u8 oldloop, newloop; + + spin_lock(&scc->zlock); + if (zport != zport_a) { + if (mctrl & TIOCM_DTR) + zport_a->regs[5] |= DTR; + else + zport_a->regs[5] &= ~DTR; + if (mctrl & TIOCM_RTS) + zport_a->regs[5] |= RTS; + else + zport_a->regs[5] &= ~RTS; + write_zsreg(zport_a, R5, zport_a->regs[5]); + } + + /* Rarely modified, so don't poke at hardware unless necessary. 
*/ + oldloop = zport->regs[14]; + newloop = oldloop; + if (mctrl & TIOCM_LOOP) + newloop |= LOOPBAK; + else + newloop &= ~LOOPBAK; + if (newloop != oldloop) { + zport->regs[14] = newloop; + write_zsreg(zport, R14, zport->regs[14]); + } + spin_unlock(&scc->zlock); +} + +static void zs_raw_stop_tx(struct zs_port *zport) +{ + write_zsreg(zport, R0, RES_Tx_P); + zport->tx_stopped = 1; +} + +static void zs_stop_tx(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + + spin_lock(&scc->zlock); + zs_raw_stop_tx(zport); + spin_unlock(&scc->zlock); +} + +static void zs_raw_transmit_chars(struct zs_port *); + +static void zs_start_tx(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + + spin_lock(&scc->zlock); + if (zport->tx_stopped) { + zs_transmit_drain(zport, 0); + zport->tx_stopped = 0; + zs_raw_transmit_chars(zport); + } + spin_unlock(&scc->zlock); +} + +static void zs_stop_rx(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; + + spin_lock(&scc->zlock); + zport->regs[15] &= ~BRKIE; + zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB); + zport->regs[1] |= RxINT_DISAB; + + if (zport != zport_a) { + /* A-side DCD tracks RI and SYNC tracks DSR. */ + zport_a->regs[15] &= ~(DCDIE | SYNCIE); + write_zsreg(zport_a, R15, zport_a->regs[15]); + if (!(zport_a->regs[15] & BRKIE)) { + zport_a->regs[1] &= ~EXT_INT_ENAB; + write_zsreg(zport_a, R1, zport_a->regs[1]); + } + + /* This-side DCD tracks DCD and CTS tracks CTS. */ + zport->regs[15] &= ~(DCDIE | CTSIE); + zport->regs[1] &= ~EXT_INT_ENAB; + } else { + /* DCD tracks RI and SYNC tracks DSR for the B side. */ + if (!(zport->regs[15] & (DCDIE | SYNCIE))) + zport->regs[1] &= ~EXT_INT_ENAB; + } + + write_zsreg(zport, R15, zport->regs[15]); + write_zsreg(zport, R1, zport->regs[1]); + spin_unlock(&scc->zlock); +} + +static void zs_enable_ms(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; + + if (zport == zport_a) + return; + + spin_lock(&scc->zlock); + + /* Clear Ext interrupts if not being handled already. */ + if (!(zport_a->regs[1] & EXT_INT_ENAB)) + write_zsreg(zport_a, R0, RES_EXT_INT); + + /* A-side DCD tracks RI and SYNC tracks DSR. */ + zport_a->regs[1] |= EXT_INT_ENAB; + zport_a->regs[15] |= DCDIE | SYNCIE; + + /* This-side DCD tracks DCD and CTS tracks CTS. */ + zport->regs[15] |= DCDIE | CTSIE; + + zs_raw_xor_mctrl(zport); + + write_zsreg(zport_a, R1, zport_a->regs[1]); + write_zsreg(zport_a, R15, zport_a->regs[15]); + write_zsreg(zport, R15, zport->regs[15]); + spin_unlock(&scc->zlock); +} + +static void zs_break_ctl(struct uart_port *uport, int break_state) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + unsigned long flags; + + spin_lock_irqsave(&scc->zlock, flags); + if (break_state == -1) + zport->regs[5] |= SND_BRK; + else + zport->regs[5] &= ~SND_BRK; + write_zsreg(zport, R5, zport->regs[5]); + spin_unlock_irqrestore(&scc->zlock, flags); +} + + +/* + * Interrupt handling routines. + */ +#define Rx_BRK 0x0100 /* BREAK event software flag. */ +#define Rx_SYS 0x0200 /* SysRq event software flag. 
*/ + +static void zs_receive_chars(struct zs_port *zport) +{ + struct uart_port *uport = &zport->port; + struct zs_scc *scc = zport->scc; + struct uart_icount *icount; + unsigned int avail, status, ch, flag; + int count; + + for (count = 16; count; count--) { + spin_lock(&scc->zlock); + avail = read_zsreg(zport, R0) & Rx_CH_AV; + spin_unlock(&scc->zlock); + if (!avail) + break; + + spin_lock(&scc->zlock); + status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR); + ch = read_zsdata(zport); + spin_unlock(&scc->zlock); + + flag = TTY_NORMAL; + + icount = &uport->icount; + icount->rx++; + + /* Handle the null char got when BREAK is removed. */ + if (!ch) + status |= zport->tty_break; + if (unlikely(status & + (Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) { + zport->tty_break = 0; + + /* Reset the error indication. */ + if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) { + spin_lock(&scc->zlock); + write_zsreg(zport, R0, ERR_RES); + spin_unlock(&scc->zlock); + } + + if (status & (Rx_SYS | Rx_BRK)) { + icount->brk++; + /* SysRq discards the null char. */ + if (status & Rx_SYS) + continue; + } else if (status & FRM_ERR) + icount->frame++; + else if (status & PAR_ERR) + icount->parity++; + if (status & Rx_OVR) + icount->overrun++; + + status &= uport->read_status_mask; + if (status & Rx_BRK) + flag = TTY_BREAK; + else if (status & FRM_ERR) + flag = TTY_FRAME; + else if (status & PAR_ERR) + flag = TTY_PARITY; + } + + if (uart_handle_sysrq_char(uport, ch)) + continue; + + uart_insert_char(uport, status, Rx_OVR, ch, flag); + } + + tty_flip_buffer_push(&uport->state->port); +} + +static void zs_raw_transmit_chars(struct zs_port *zport) +{ + struct circ_buf *xmit = &zport->port.state->xmit; + + /* XON/XOFF chars. */ + if (zport->port.x_char) { + write_zsdata(zport, zport->port.x_char); + zport->port.icount.tx++; + zport->port.x_char = 0; + return; + } + + /* If nothing to do or stopped or hardware stopped. */ + if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) { + zs_raw_stop_tx(zport); + return; + } + + /* Send char. */ + write_zsdata(zport, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + zport->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&zport->port); + + /* Are we are done? */ + if (uart_circ_empty(xmit)) + zs_raw_stop_tx(zport); +} + +static void zs_transmit_chars(struct zs_port *zport) +{ + struct zs_scc *scc = zport->scc; + + spin_lock(&scc->zlock); + zs_raw_transmit_chars(zport); + spin_unlock(&scc->zlock); +} + +static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a) +{ + struct uart_port *uport = &zport->port; + struct zs_scc *scc = zport->scc; + unsigned int delta; + u8 status, brk; + + spin_lock(&scc->zlock); + + /* Get status from Read Register 0. 
*/ + status = read_zsreg(zport, R0); + + if (zport->regs[15] & BRKIE) { + brk = status & BRK_ABRT; + if (brk && !zport->brk) { + spin_unlock(&scc->zlock); + if (uart_handle_break(uport)) + zport->tty_break = Rx_SYS; + else + zport->tty_break = Rx_BRK; + spin_lock(&scc->zlock); + } + zport->brk = brk; + } + + if (zport != zport_a) { + delta = zs_raw_xor_mctrl(zport); + spin_unlock(&scc->zlock); + + if (delta & TIOCM_CTS) + uart_handle_cts_change(uport, + zport->mctrl & TIOCM_CTS); + if (delta & TIOCM_CAR) + uart_handle_dcd_change(uport, + zport->mctrl & TIOCM_CAR); + if (delta & TIOCM_RNG) + uport->icount.dsr++; + if (delta & TIOCM_DSR) + uport->icount.rng++; + + if (delta) + wake_up_interruptible(&uport->state->port.delta_msr_wait); + + spin_lock(&scc->zlock); + } + + /* Clear the status condition... */ + write_zsreg(zport, R0, RES_EXT_INT); + + spin_unlock(&scc->zlock); +} + +/* + * This is the Z85C30 driver's generic interrupt routine. + */ +static irqreturn_t zs_interrupt(int irq, void *dev_id) +{ + struct zs_scc *scc = dev_id; + struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; + struct zs_port *zport_b = &scc->zport[ZS_CHAN_B]; + irqreturn_t status = IRQ_NONE; + u8 zs_intreg; + int count; + + /* + * NOTE: The read register 3, which holds the irq status, + * does so for both channels on each chip. Although + * the status value itself must be read from the A + * channel and is only valid when read from channel A. + * Yes... broken hardware... + */ + for (count = 16; count; count--) { + spin_lock(&scc->zlock); + zs_intreg = read_zsreg(zport_a, R3); + spin_unlock(&scc->zlock); + if (!zs_intreg) + break; + + /* + * We do not like losing characters, so we prioritise + * interrupt sources a little bit differently than + * the SCC would, was it allowed to. + */ + if (zs_intreg & CHBRxIP) + zs_receive_chars(zport_b); + if (zs_intreg & CHARxIP) + zs_receive_chars(zport_a); + if (zs_intreg & CHBEXT) + zs_status_handle(zport_b, zport_a); + if (zs_intreg & CHAEXT) + zs_status_handle(zport_a, zport_a); + if (zs_intreg & CHBTxIP) + zs_transmit_chars(zport_b); + if (zs_intreg & CHATxIP) + zs_transmit_chars(zport_a); + + status = IRQ_HANDLED; + } + + return status; +} + + +/* + * Finally, routines used to initialize the serial port. + */ +static int zs_startup(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + unsigned long flags; + int irq_guard; + int ret; + + irq_guard = atomic_add_return(1, &scc->irq_guard); + if (irq_guard == 1) { + ret = request_irq(zport->port.irq, zs_interrupt, + IRQF_SHARED, "scc", scc); + if (ret) { + atomic_add(-1, &scc->irq_guard); + printk(KERN_ERR "zs: can't get irq %d\n", + zport->port.irq); + return ret; + } + } + + spin_lock_irqsave(&scc->zlock, flags); + + /* Clear the receive FIFO. */ + zs_receive_drain(zport); + + /* Clear the interrupt registers. */ + write_zsreg(zport, R0, ERR_RES); + write_zsreg(zport, R0, RES_Tx_P); + /* But Ext only if not being handled already. */ + if (!(zport->regs[1] & EXT_INT_ENAB)) + write_zsreg(zport, R0, RES_EXT_INT); + + /* Finally, enable sequencing and interrupts. */ + zport->regs[1] &= ~RxINT_MASK; + zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB; + zport->regs[3] |= RxENABLE; + zport->regs[15] |= BRKIE; + write_zsreg(zport, R1, zport->regs[1]); + write_zsreg(zport, R3, zport->regs[3]); + write_zsreg(zport, R5, zport->regs[5]); + write_zsreg(zport, R15, zport->regs[15]); + + /* Record the current state of RR0. 
*/ + zport->mctrl = zs_raw_get_mctrl(zport); + zport->brk = read_zsreg(zport, R0) & BRK_ABRT; + + zport->tx_stopped = 1; + + spin_unlock_irqrestore(&scc->zlock, flags); + + return 0; +} + +static void zs_shutdown(struct uart_port *uport) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + unsigned long flags; + int irq_guard; + + spin_lock_irqsave(&scc->zlock, flags); + + zport->regs[3] &= ~RxENABLE; + write_zsreg(zport, R5, zport->regs[5]); + write_zsreg(zport, R3, zport->regs[3]); + + spin_unlock_irqrestore(&scc->zlock, flags); + + irq_guard = atomic_add_return(-1, &scc->irq_guard); + if (!irq_guard) + free_irq(zport->port.irq, scc); +} + + +static void zs_reset(struct zs_port *zport) +{ + struct zs_scc *scc = zport->scc; + int irq; + unsigned long flags; + + spin_lock_irqsave(&scc->zlock, flags); + irq = !irqs_disabled_flags(flags); + if (!scc->initialised) { + /* Reset the pointer first, just in case... */ + read_zsreg(zport, R0); + /* And let the current transmission finish. */ + zs_line_drain(zport, irq); + write_zsreg(zport, R9, FHWRES); + udelay(10); + write_zsreg(zport, R9, 0); + scc->initialised = 1; + } + load_zsregs(zport, zport->regs, irq); + spin_unlock_irqrestore(&scc->zlock, flags); +} + +static void zs_set_termios(struct uart_port *uport, struct ktermios *termios, + const struct ktermios *old_termios) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; + int irq; + unsigned int baud, brg; + unsigned long flags; + + spin_lock_irqsave(&scc->zlock, flags); + irq = !irqs_disabled_flags(flags); + + /* Byte size. */ + zport->regs[3] &= ~RxNBITS_MASK; + zport->regs[5] &= ~TxNBITS_MASK; + switch (termios->c_cflag & CSIZE) { + case CS5: + zport->regs[3] |= Rx5; + zport->regs[5] |= Tx5; + break; + case CS6: + zport->regs[3] |= Rx6; + zport->regs[5] |= Tx6; + break; + case CS7: + zport->regs[3] |= Rx7; + zport->regs[5] |= Tx7; + break; + case CS8: + default: + zport->regs[3] |= Rx8; + zport->regs[5] |= Tx8; + break; + } + + /* Parity and stop bits. 
*/ + zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN); + if (termios->c_cflag & CSTOPB) + zport->regs[4] |= SB2; + else + zport->regs[4] |= SB1; + if (termios->c_cflag & PARENB) + zport->regs[4] |= PAR_ENA; + if (!(termios->c_cflag & PARODD)) + zport->regs[4] |= PAR_EVEN; + switch (zport->clk_mode) { + case 64: + zport->regs[4] |= X64CLK; + break; + case 32: + zport->regs[4] |= X32CLK; + break; + case 16: + zport->regs[4] |= X16CLK; + break; + case 1: + zport->regs[4] |= X1CLK; + break; + default: + BUG(); + } + + baud = uart_get_baud_rate(uport, termios, old_termios, 0, + uport->uartclk / zport->clk_mode / 4); + + brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode); + zport->regs[12] = brg & 0xff; + zport->regs[13] = (brg >> 8) & 0xff; + + uart_update_timeout(uport, termios->c_cflag, baud); + + uport->read_status_mask = Rx_OVR; + if (termios->c_iflag & INPCK) + uport->read_status_mask |= FRM_ERR | PAR_ERR; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + uport->read_status_mask |= Rx_BRK; + + uport->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + uport->ignore_status_mask |= FRM_ERR | PAR_ERR; + if (termios->c_iflag & IGNBRK) { + uport->ignore_status_mask |= Rx_BRK; + if (termios->c_iflag & IGNPAR) + uport->ignore_status_mask |= Rx_OVR; + } + + if (termios->c_cflag & CREAD) + zport->regs[3] |= RxENABLE; + else + zport->regs[3] &= ~RxENABLE; + + if (zport != zport_a) { + if (!(termios->c_cflag & CLOCAL)) { + zport->regs[15] |= DCDIE; + } else + zport->regs[15] &= ~DCDIE; + if (termios->c_cflag & CRTSCTS) { + zport->regs[15] |= CTSIE; + } else + zport->regs[15] &= ~CTSIE; + zs_raw_xor_mctrl(zport); + } + + /* Load up the new values. */ + load_zsregs(zport, zport->regs, irq); + + spin_unlock_irqrestore(&scc->zlock, flags); +} + +/* + * Hack alert! + * Required solely so that the initial PROM-based console + * works undisturbed in parallel with this one. 
+ */ +static void zs_pm(struct uart_port *uport, unsigned int state, + unsigned int oldstate) +{ + struct zs_port *zport = to_zport(uport); + + if (state < 3) + zport->regs[5] |= TxENAB; + else + zport->regs[5] &= ~TxENAB; + write_zsreg(zport, R5, zport->regs[5]); +} + + +static const char *zs_type(struct uart_port *uport) +{ + return "Z85C30 SCC"; +} + +static void zs_release_port(struct uart_port *uport) +{ + iounmap(uport->membase); + uport->membase = NULL; + release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); +} + +static int zs_map_port(struct uart_port *uport) +{ + if (!uport->membase) + uport->membase = ioremap(uport->mapbase, + ZS_CHAN_IO_SIZE); + if (!uport->membase) { + printk(KERN_ERR "zs: Cannot map MMIO\n"); + return -ENOMEM; + } + return 0; +} + +static int zs_request_port(struct uart_port *uport) +{ + int ret; + + if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) { + printk(KERN_ERR "zs: Unable to reserve MMIO resource\n"); + return -EBUSY; + } + ret = zs_map_port(uport); + if (ret) { + release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); + return ret; + } + return 0; +} + +static void zs_config_port(struct uart_port *uport, int flags) +{ + struct zs_port *zport = to_zport(uport); + + if (flags & UART_CONFIG_TYPE) { + if (zs_request_port(uport)) + return; + + uport->type = PORT_ZS; + + zs_reset(zport); + } +} + +static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser) +{ + struct zs_port *zport = to_zport(uport); + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS) + ret = -EINVAL; + if (ser->irq != uport->irq) + ret = -EINVAL; + if (ser->baud_base != uport->uartclk / zport->clk_mode / 4) + ret = -EINVAL; + return ret; +} + + +static const struct uart_ops zs_ops = { + .tx_empty = zs_tx_empty, + .set_mctrl = zs_set_mctrl, + .get_mctrl = zs_get_mctrl, + .stop_tx = zs_stop_tx, + .start_tx = zs_start_tx, + .stop_rx = zs_stop_rx, + .enable_ms = zs_enable_ms, + .break_ctl = zs_break_ctl, + .startup = zs_startup, + .shutdown = zs_shutdown, + .set_termios = zs_set_termios, + .pm = zs_pm, + .type = zs_type, + .release_port = zs_release_port, + .request_port = zs_request_port, + .config_port = zs_config_port, + .verify_port = zs_verify_port, +}; + +/* + * Initialize Z85C30 port structures. 
+ */ +static int __init zs_probe_sccs(void) +{ + static int probed; + struct zs_parms zs_parms; + int chip, side, irq; + int n_chips = 0; + int i; + + if (probed) + return 0; + + irq = dec_interrupt[DEC_IRQ_SCC0]; + if (irq >= 0) { + zs_parms.scc[n_chips] = IOASIC_SCC0; + zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0]; + n_chips++; + } + irq = dec_interrupt[DEC_IRQ_SCC1]; + if (irq >= 0) { + zs_parms.scc[n_chips] = IOASIC_SCC1; + zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1]; + n_chips++; + } + if (!n_chips) + return -ENXIO; + + probed = 1; + + for (chip = 0; chip < n_chips; chip++) { + spin_lock_init(&zs_sccs[chip].zlock); + for (side = 0; side < ZS_NUM_CHAN; side++) { + struct zs_port *zport = &zs_sccs[chip].zport[side]; + struct uart_port *uport = &zport->port; + + zport->scc = &zs_sccs[chip]; + zport->clk_mode = 16; + + uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ZS_CONSOLE); + uport->irq = zs_parms.irq[chip]; + uport->uartclk = ZS_CLOCK; + uport->fifosize = 1; + uport->iotype = UPIO_MEM; + uport->flags = UPF_BOOT_AUTOCONF; + uport->ops = &zs_ops; + uport->line = chip * ZS_NUM_CHAN + side; + uport->mapbase = dec_kn_slot_base + + zs_parms.scc[chip] + + (side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE; + + for (i = 0; i < ZS_NUM_REGS; i++) + zport->regs[i] = zs_init_regs[i]; + } + } + + return 0; +} + + +#ifdef CONFIG_SERIAL_ZS_CONSOLE +static void zs_console_putchar(struct uart_port *uport, unsigned char ch) +{ + struct zs_port *zport = to_zport(uport); + struct zs_scc *scc = zport->scc; + int irq; + unsigned long flags; + + spin_lock_irqsave(&scc->zlock, flags); + irq = !irqs_disabled_flags(flags); + if (zs_transmit_drain(zport, irq)) + write_zsdata(zport, ch); + spin_unlock_irqrestore(&scc->zlock, flags); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + */ +static void zs_console_write(struct console *co, const char *s, + unsigned int count) +{ + int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN; + struct zs_port *zport = &zs_sccs[chip].zport[side]; + struct zs_scc *scc = zport->scc; + unsigned long flags; + u8 txint, txenb; + int irq; + + /* Disable transmit interrupts and enable the transmitter. */ + spin_lock_irqsave(&scc->zlock, flags); + txint = zport->regs[1]; + txenb = zport->regs[5]; + if (txint & TxINT_ENAB) { + zport->regs[1] = txint & ~TxINT_ENAB; + write_zsreg(zport, R1, zport->regs[1]); + } + if (!(txenb & TxENAB)) { + zport->regs[5] = txenb | TxENAB; + write_zsreg(zport, R5, zport->regs[5]); + } + spin_unlock_irqrestore(&scc->zlock, flags); + + uart_console_write(&zport->port, s, count, zs_console_putchar); + + /* Restore transmit interrupts and the transmitter enable. */ + spin_lock_irqsave(&scc->zlock, flags); + irq = !irqs_disabled_flags(flags); + zs_line_drain(zport, irq); + if (!(txenb & TxENAB)) { + zport->regs[5] &= ~TxENAB; + write_zsreg(zport, R5, zport->regs[5]); + } + if (txint & TxINT_ENAB) { + zport->regs[1] |= TxINT_ENAB; + write_zsreg(zport, R1, zport->regs[1]); + + /* Resume any transmission as the TxIP bit won't be set. */ + if (!zport->tx_stopped) + zs_raw_transmit_chars(zport); + } + spin_unlock_irqrestore(&scc->zlock, flags); +} + +/* + * Setup serial console baud/bits/parity. We do two things here: + * - construct a cflag setting for the first uart_open() + * - initialise the serial port + * Return non-zero if we didn't find a serial port. 
+ */ +static int __init zs_console_setup(struct console *co, char *options) +{ + int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN; + struct zs_port *zport = &zs_sccs[chip].zport[side]; + struct uart_port *uport = &zport->port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + ret = zs_map_port(uport); + if (ret) + return ret; + + zs_reset(zport); + zs_pm(uport, 0, -1); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + return uart_set_options(uport, co, baud, parity, bits, flow); +} + +static struct uart_driver zs_reg; +static struct console zs_console = { + .name = "ttyS", + .write = zs_console_write, + .device = uart_console_device, + .setup = zs_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &zs_reg, +}; + +/* + * Register console. + */ +static int __init zs_serial_console_init(void) +{ + int ret; + + ret = zs_probe_sccs(); + if (ret) + return ret; + register_console(&zs_console); + + return 0; +} + +console_initcall(zs_serial_console_init); + +#define SERIAL_ZS_CONSOLE &zs_console +#else +#define SERIAL_ZS_CONSOLE NULL +#endif /* CONFIG_SERIAL_ZS_CONSOLE */ + +static struct uart_driver zs_reg = { + .owner = THIS_MODULE, + .driver_name = "serial", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = ZS_NUM_SCCS * ZS_NUM_CHAN, + .cons = SERIAL_ZS_CONSOLE, +}; + +/* zs_init inits the driver. */ +static int __init zs_init(void) +{ + int i, ret; + + pr_info("%s%s\n", zs_name, zs_version); + + /* Find out how many Z85C30 SCCs we have. */ + ret = zs_probe_sccs(); + if (ret) + return ret; + + ret = uart_register_driver(&zs_reg); + if (ret) + return ret; + + for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) { + struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; + struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; + struct uart_port *uport = &zport->port; + + if (zport->scc) + uart_add_one_port(&zs_reg, uport); + } + + return 0; +} + +static void __exit zs_exit(void) +{ + int i; + + for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) { + struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; + struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; + struct uart_port *uport = &zport->port; + + if (zport->scc) + uart_remove_one_port(&zs_reg, uport); + } + + uart_unregister_driver(&zs_reg); +} + +module_init(zs_init); +module_exit(zs_exit); diff --git a/drivers/tty/serial/zs.h b/drivers/tty/serial/zs.h new file mode 100644 index 000000000..26ef8eafa --- /dev/null +++ b/drivers/tty/serial/zs.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * zs.h: Definitions for the DECstation Z85C30 serial driver. + * + * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras. + * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen. + * + * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2004, 2005, 2007 Maciej W. Rozycki + */ +#ifndef _SERIAL_ZS_H +#define _SERIAL_ZS_H + +#ifdef __KERNEL__ + +#define ZS_NUM_REGS 16 + +/* + * This is our internal structure for each serial port's state. + */ +struct zs_port { + struct zs_scc *scc; /* Containing SCC. */ + struct uart_port port; /* Underlying UART. */ + + int clk_mode; /* May be 1, 16, 32, or 64. */ + + unsigned int tty_break; /* Set on BREAK condition. */ + int tx_stopped; /* Output is suspended. */ + + unsigned int mctrl; /* State of modem lines. */ + u8 brk; /* BREAK state from RR0. 
*/ + + u8 regs[ZS_NUM_REGS]; /* Channel write registers. */ +}; + +/* + * Per-SCC state for locking and the interrupt handler. + */ +struct zs_scc { + struct zs_port zport[2]; + spinlock_t zlock; + atomic_t irq_guard; + int initialised; +}; + +#endif /* __KERNEL__ */ + +/* + * Conversion routines to/from brg time constants from/to bits per second. + */ +#define ZS_BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) +#define ZS_BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) + +/* + * The Zilog register set. + */ + +/* Write Register 0 (Command) */ +#define R0 0 /* Register selects */ +#define R1 1 +#define R2 2 +#define R3 3 +#define R4 4 +#define R5 5 +#define R6 6 +#define R7 7 +#define R8 8 +#define R9 9 +#define R10 10 +#define R11 11 +#define R12 12 +#define R13 13 +#define R14 14 +#define R15 15 + +#define NULLCODE 0 /* Null Code */ +#define POINT_HIGH 0x8 /* Select upper half of registers */ +#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */ +#define SEND_ABORT 0x18 /* HDLC Abort */ +#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ +#define RES_Tx_P 0x28 /* Reset TxINT Pending */ +#define ERR_RES 0x30 /* Error Reset */ +#define RES_H_IUS 0x38 /* Reset highest IUS */ + +#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ +#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ +#define RES_EOM_L 0xC0 /* Reset EOM latch */ + +/* Write Register 1 (Tx/Rx/Ext Int Enable and WAIT/DMA Commands) */ +#define EXT_INT_ENAB 0x1 /* Ext Int Enable */ +#define TxINT_ENAB 0x2 /* Tx Int Enable */ +#define PAR_SPEC 0x4 /* Parity is special condition */ + +#define RxINT_DISAB 0 /* Rx Int Disable */ +#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ +#define RxINT_ALL 0x10 /* Int on all Rx Characters or error */ +#define RxINT_ERR 0x18 /* Int on error only */ +#define RxINT_MASK 0x18 + +#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */ +#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */ +#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */ + +/* Write Register 2 (Interrupt Vector) */ + +/* Write Register 3 (Receive Parameters and Control) */ +#define RxENABLE 0x1 /* Rx Enable */ +#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ +#define ADD_SM 0x4 /* Address Search Mode (SDLC) */ +#define RxCRC_ENAB 0x8 /* Rx CRC Enable */ +#define ENT_HM 0x10 /* Enter Hunt Mode */ +#define AUTO_ENAB 0x20 /* Auto Enables */ +#define Rx5 0x0 /* Rx 5 Bits/Character */ +#define Rx7 0x40 /* Rx 7 Bits/Character */ +#define Rx6 0x80 /* Rx 6 Bits/Character */ +#define Rx8 0xc0 /* Rx 8 Bits/Character */ +#define RxNBITS_MASK 0xc0 + +/* Write Register 4 (Transmit/Receive Miscellaneous Parameters and Modes) */ +#define PAR_ENA 0x1 /* Parity Enable */ +#define PAR_EVEN 0x2 /* Parity Even/Odd* */ + +#define SYNC_ENAB 0 /* Sync Modes Enable */ +#define SB1 0x4 /* 1 stop bit/char */ +#define SB15 0x8 /* 1.5 stop bits/char */ +#define SB2 0xc /* 2 stop bits/char */ +#define SB_MASK 0xc + +#define MONSYNC 0 /* 8 Bit Sync character */ +#define BISYNC 0x10 /* 16 bit sync character */ +#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */ +#define EXTSYNC 0x30 /* External Sync Mode */ + +#define X1CLK 0x0 /* x1 clock mode */ +#define X16CLK 0x40 /* x16 clock mode */ +#define X32CLK 0x80 /* x32 clock mode */ +#define X64CLK 0xc0 /* x64 clock mode */ +#define XCLK_MASK 0xc0 + +/* Write Register 5 (Transmit Parameters and Controls) */ +#define TxCRC_ENAB 0x1 /* Tx CRC Enable */ +#define RTS 0x2 /* RTS */ +#define SDLC_CRC 0x4 /* SDLC/CRC-16 */ +#define TxENAB 0x8 /* Tx Enable */ +#define 
SND_BRK 0x10 /* Send Break */ +#define Tx5 0x0 /* Tx 5 bits (or less)/character */ +#define Tx7 0x20 /* Tx 7 bits/character */ +#define Tx6 0x40 /* Tx 6 bits/character */ +#define Tx8 0x60 /* Tx 8 bits/character */ +#define TxNBITS_MASK 0x60 +#define DTR 0x80 /* DTR */ + +/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ + +/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ + +/* Write Register 8 (Transmit Buffer) */ + +/* Write Register 9 (Master Interrupt Control) */ +#define VIS 1 /* Vector Includes Status */ +#define NV 2 /* No Vector */ +#define DLC 4 /* Disable Lower Chain */ +#define MIE 8 /* Master Interrupt Enable */ +#define STATHI 0x10 /* Status high */ +#define SOFTACK 0x20 /* Software Interrupt Acknowledge */ +#define NORESET 0 /* No reset on write to R9 */ +#define CHRB 0x40 /* Reset channel B */ +#define CHRA 0x80 /* Reset channel A */ +#define FHWRES 0xc0 /* Force hardware reset */ + +/* Write Register 10 (Miscellaneous Transmitter/Receiver Control Bits) */ +#define BIT6 1 /* 6 bit/8bit sync */ +#define LOOPMODE 2 /* SDLC Loop mode */ +#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ +#define MARKIDLE 8 /* Mark/flag on idle */ +#define GAOP 0x10 /* Go active on poll */ +#define NRZ 0 /* NRZ mode */ +#define NRZI 0x20 /* NRZI mode */ +#define FM1 0x40 /* FM1 (transition = 1) */ +#define FM0 0x60 /* FM0 (transition = 0) */ +#define CRCPS 0x80 /* CRC Preset I/O */ + +/* Write Register 11 (Clock Mode Control) */ +#define TRxCXT 0 /* TRxC = Xtal output */ +#define TRxCTC 1 /* TRxC = Transmit clock */ +#define TRxCBR 2 /* TRxC = BR Generator Output */ +#define TRxCDP 3 /* TRxC = DPLL output */ +#define TRxCOI 4 /* TRxC O/I */ +#define TCRTxCP 0 /* Transmit clock = RTxC pin */ +#define TCTRxCP 8 /* Transmit clock = TRxC pin */ +#define TCBR 0x10 /* Transmit clock = BR Generator output */ +#define TCDPLL 0x18 /* Transmit clock = DPLL output */ +#define RCRTxCP 0 /* Receive clock = RTxC pin */ +#define RCTRxCP 0x20 /* Receive clock = TRxC pin */ +#define RCBR 0x40 /* Receive clock = BR Generator output */ +#define RCDPLL 0x60 /* Receive clock = DPLL output */ +#define RTxCX 0x80 /* RTxC Xtal/No Xtal */ + +/* Write Register 12 (Lower Byte of Baud Rate Generator Time Constant) */ + +/* Write Register 13 (Upper Byte of Baud Rate Generator Time Constant) */ + +/* Write Register 14 (Miscellaneous Control Bits) */ +#define BRENABL 1 /* Baud rate generator enable */ +#define BRSRC 2 /* Baud rate generator source */ +#define DTRREQ 4 /* DTR/Request function */ +#define AUTOECHO 8 /* Auto Echo */ +#define LOOPBAK 0x10 /* Local loopback */ +#define SEARCH 0x20 /* Enter search mode */ +#define RMC 0x40 /* Reset missing clock */ +#define DISDPLL 0x60 /* Disable DPLL */ +#define SSBR 0x80 /* Set DPLL source = BR generator */ +#define SSRTxC 0xa0 /* Set DPLL source = RTxC */ +#define SFMM 0xc0 /* Set FM mode */ +#define SNRZI 0xe0 /* Set NRZI mode */ + +/* Write Register 15 (External/Status Interrupt Control) */ +#define WR7P_EN 1 /* WR7 Prime SDLC Feature Enable */ +#define ZCIE 2 /* Zero count IE */ +#define DCDIE 8 /* DCD IE */ +#define SYNCIE 0x10 /* Sync/hunt IE */ +#define CTSIE 0x20 /* CTS IE */ +#define TxUIE 0x40 /* Tx Underrun/EOM IE */ +#define BRKIE 0x80 /* Break/Abort IE */ + + +/* Read Register 0 (Transmit/Receive Buffer Status and External Status) */ +#define Rx_CH_AV 0x1 /* Rx Character Available */ +#define ZCOUNT 0x2 /* Zero count */ +#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ +#define DCD 0x8 /* DCD */ +#define SYNC_HUNT 0x10 /* Sync/hunt */ +#define CTS 
0x20 /* CTS */ +#define TxEOM 0x40 /* Tx underrun */ +#define BRK_ABRT 0x80 /* Break/Abort */ + +/* Read Register 1 (Special Receive Condition Status) */ +#define ALL_SNT 0x1 /* All sent */ +/* Residue Data for 8 Rx bits/char programmed */ +#define RES3 0x8 /* 0/3 */ +#define RES4 0x4 /* 0/4 */ +#define RES5 0xc /* 0/5 */ +#define RES6 0x2 /* 0/6 */ +#define RES7 0xa /* 0/7 */ +#define RES8 0x6 /* 0/8 */ +#define RES18 0xe /* 1/8 */ +#define RES28 0x0 /* 2/8 */ +/* Special Rx Condition Interrupts */ +#define PAR_ERR 0x10 /* Parity Error */ +#define Rx_OVR 0x20 /* Rx Overrun Error */ +#define FRM_ERR 0x40 /* CRC/Framing Error */ +#define END_FR 0x80 /* End of Frame (SDLC) */ + +/* Read Register 2 (Interrupt Vector (WR2) -- channel A). */ + +/* Read Register 2 (Modified Interrupt Vector -- channel B). */ + +/* Read Register 3 (Interrupt Pending Bits -- channel A only). */ +#define CHBEXT 0x1 /* Channel B Ext/Stat IP */ +#define CHBTxIP 0x2 /* Channel B Tx IP */ +#define CHBRxIP 0x4 /* Channel B Rx IP */ +#define CHAEXT 0x8 /* Channel A Ext/Stat IP */ +#define CHATxIP 0x10 /* Channel A Tx IP */ +#define CHARxIP 0x20 /* Channel A Rx IP */ + +/* Read Register 6 (SDLC FIFO Status and Byte Count LSB) */ + +/* Read Register 7 (SDLC FIFO Status and Byte Count MSB) */ + +/* Read Register 8 (Receive Data) */ + +/* Read Register 10 (Miscellaneous Status Bits) */ +#define ONLOOP 2 /* On loop */ +#define LOOPSEND 0x10 /* Loop sending */ +#define CLK2MIS 0x40 /* Two clocks missing */ +#define CLK1MIS 0x80 /* One clock missing */ + +/* Read Register 12 (Lower Byte of Baud Rate Generator Constant (WR12)) */ + +/* Read Register 13 (Upper Byte of Baud Rate Generator Constant (WR13) */ + +/* Read Register 15 (External/Status Interrupt Control (WR15)) */ + +#endif /* _SERIAL_ZS_H */ -- cgit v1.2.3