Diffstat (limited to 'drivers/soc/fsl/qe')
-rw-r--r--  drivers/soc/fsl/qe/Kconfig     |  45
-rw-r--r--  drivers/soc/fsl/qe/Makefile    |  12
-rw-r--r--  drivers/soc/fsl/qe/gpio.c      | 342
-rw-r--r--  drivers/soc/fsl/qe/qe.c        | 681
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c | 251
-rw-r--r--  drivers/soc/fsl/qe/qe_ic.c     | 487
-rw-r--r--  drivers/soc/fsl/qe/qe_io.c     | 186
-rw-r--r--  drivers/soc/fsl/qe/qe_tdm.c    | 219
-rw-r--r--  drivers/soc/fsl/qe/ucc.c       | 657
-rw-r--r--  drivers/soc/fsl/qe/ucc_fast.c  | 395
-rw-r--r--  drivers/soc/fsl/qe/ucc_slow.c  | 359
-rw-r--r--  drivers/soc/fsl/qe/usb.c       |  52
12 files changed, 3686 insertions(+), 0 deletions(-)
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
new file mode 100644
index 000000000..7afa796db
--- /dev/null
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# QE Communication options
+#
+
+config QUICC_ENGINE
+ bool "QUICC Engine (QE) framework support"
+ depends on OF && HAS_IOMEM
+ depends on PPC || ARM || ARM64 || COMPILE_TEST
+ select GENERIC_ALLOCATOR
+ select CRC32
+ help
+ The QUICC Engine (QE) is a new generation of communications
+ coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+ Selecting this option means that you wish to build a kernel
+ for a machine with a QE coprocessor.
+
+config UCC_SLOW
+ bool
+ default y if SERIAL_QE
+ help
+ This option provides qe_lib support to UCC slow
+ protocols: UART, BISYNC, QMC
+
+config UCC_FAST
+ bool
+ default y if UCC_GETH || QE_TDM
+ help
+ This option provides qe_lib support to UCC fast
+ protocols: HDLC, Ethernet, ATM, transparent
+
+config UCC
+ bool
+ default y if UCC_FAST || UCC_SLOW
+
+config QE_TDM
+ bool
+ default y if FSL_UCC_HDLC
+
+config QE_USB
+ bool
+ depends on QUICC_ENGINE
+ default y if USB_FSL_QE
+ help
+ QE USB Controller support
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
new file mode 100644
index 000000000..55a555304
--- /dev/null
+++ b/drivers/soc/fsl/qe/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux ppc-specific parts of QE
+#
+obj-$(CONFIG_QUICC_ENGINE) += qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_CPM) += qe_common.o
+obj-$(CONFIG_UCC) += ucc.o
+obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
+obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_TDM) += qe_tdm.o
+obj-$(CONFIG_QE_USB) += usb.o
+obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
new file mode 100644
index 000000000..99f7de43c
--- /dev/null
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * QUICC Engine GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+/* FIXME: needed for gpio_to_chip(); get rid of this */
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <soc/fsl/qe/qe.h>
+
+struct qe_gpio_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ unsigned long pin_flags[QE_PIO_PINS];
+#define QE_PIN_REQUESTED 0
+
+ /* shadowed data register to clear/set bits safely */
+ u32 cpdata;
+
+ /* saved_regs used to restore dedicated functions */
+ struct qe_pio_regs saved_regs;
+};
+
+static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+
+ qe_gc->cpdata = ioread32be(&regs->cpdata);
+ qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+ qe_gc->saved_regs.cpdir1 = ioread32be(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = ioread32be(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = ioread32be(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = ioread32be(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = ioread32be(&regs->cpodr);
+}
+
+static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ return !!(ioread32be(&regs->cpdata) & pin_mask);
+}
+
+static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (val)
+ qe_gc->cpdata |= pin_mask;
+ else
+ qe_gc->cpdata &= ~pin_mask;
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static void qe_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
+ if (__test_and_clear_bit(i, mask)) {
+ if (test_bit(i, bits))
+ qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ else
+ qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ }
+ }
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ qe_gpio_set(gc, gpio, val);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+struct qe_pin {
+ /*
+ * The qe_gpio_chip name is unfortunate, we should change that to
+ * something like qe_pio_controller. Someday.
+ */
+ struct qe_gpio_chip *controller;
+ int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @np: device node to get a pin from
+ * @index: index of a pin in the device tree
+ * Context: non-atomic
+ *
+ * This function returns a qe_pin so that you can use it with the rest of
+ * the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+ struct qe_pin *qe_pin;
+ struct gpio_chip *gc;
+ struct qe_gpio_chip *qe_gc;
+ int err;
+ unsigned long flags;
+
+ qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+ if (!qe_pin) {
+ pr_debug("%s: can't allocate memory\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = of_get_gpio(np, index);
+ if (err < 0)
+ goto err0;
+ gc = gpio_to_chip(err);
+ if (WARN_ON(!gc)) {
+ err = -ENODEV;
+ goto err0;
+ }
+
+ if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
+ pr_debug("%s: tried to get a non-qe pin\n", __func__);
+ err = -EINVAL;
+ goto err0;
+ }
+
+ qe_gc = gpiochip_get_data(gc);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ err -= gc->base;
+ if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
+ qe_pin->controller = qe_gc;
+ qe_pin->num = err;
+ err = 0;
+ } else {
+ err = -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ if (!err)
+ return qe_pin;
+err0:
+ kfree(qe_pin);
+ pr_debug("%s failed with status %d\n", __func__, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ unsigned long flags;
+ const int pin = qe_pin->num;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+ test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+ int pin = qe_pin->num;
+ u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+ u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+ bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (second_reg) {
+ qe_clrsetbits_be32(&regs->cpdir2, mask2,
+ sregs->cpdir2 & mask2);
+ qe_clrsetbits_be32(&regs->cppar2, mask2,
+ sregs->cppar2 & mask2);
+ } else {
+ qe_clrsetbits_be32(&regs->cpdir1, mask2,
+ sregs->cpdir1 & mask2);
+ qe_clrsetbits_be32(&regs->cppar1, mask2,
+ sregs->cppar1 & mask2);
+ }
+
+ if (sregs->cpdata & mask1)
+ qe_gc->cpdata |= mask1;
+ else
+ qe_gc->cpdata &= ~mask1;
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ /* Let's make it input by default, GPIO API is able to change that. */
+ __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
+static int __init qe_add_gpiochips(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
+ int ret;
+ struct qe_gpio_chip *qe_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc;
+
+ qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&qe_gc->lock);
+
+ mm_gc = &qe_gc->mm_gc;
+ gc = &mm_gc->gc;
+
+ mm_gc->save_regs = qe_gpio_save_regs;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
+
+ ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
+ if (ret)
+ goto err;
+ continue;
+err:
+ pr_err("%pOF: registration failed with status %d\n",
+ np, ret);
+ kfree(qe_gc);
+ /* try others anyway */
+ }
+ return 0;
+}
+arch_initcall(qe_add_gpiochips);
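
gpio.c above also exports a small pin-multiplexing API (qe_pin_request(), qe_pin_set_gpio(), qe_pin_set_dedicated(), qe_pin_free()). A minimal consumer sketch, assuming a device node that carries a "gpios" property (qe_pin_request() resolves the pin through of_get_gpio()); the function name is illustrative and not part of this patch:

#include <linux/err.h>
#include <linux/of.h>
#include <soc/fsl/qe/qe.h>

static int example_claim_qe_pin(struct device_node *np)
{
	struct qe_pin *pin;

	/* index 0 of the node's "gpios" property */
	pin = qe_pin_request(np, 0);
	if (IS_ERR(pin))
		return PTR_ERR(pin);

	qe_pin_set_gpio(pin);		/* hand the pin over to the GPIO API */
	/* ... toggle it through the normal GPIO calls here ... */
	qe_pin_set_dedicated(pin);	/* restore the firmware-set function */

	qe_pin_free(pin);
	return 0;
}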
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
new file mode 100644
index 000000000..b3c226eb5
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
+ *
+ * Description:
+ * General Purpose functions for the global management of the
+ * QUICC Engine (QE).
+ */
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+static void qe_snums_init(void);
+static int qe_sdma_init(void);
+
+static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
+
+/* We allocate this here because it is used almost exclusively for
+ * the communication processor devices.
+ */
+struct qe_immap __iomem *qe_immr;
+EXPORT_SYMBOL(qe_immr);
+
+static u8 snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
+static DECLARE_BITMAP(snum_state, QE_NUM_OF_SNUM);
+static unsigned int qe_num_of_snum;
+
+static phys_addr_t qebase = -1;
+
+static struct device_node *qe_get_device_node(void)
+{
+ struct device_node *qe;
+
+ /*
+ * Newer device trees have an "fsl,qe" compatible property for the QE
+ * node, but we still need to support older device trees.
+ */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (qe)
+ return qe;
+ return of_find_node_by_type(NULL, "qe");
+}
+
+static phys_addr_t get_qe_base(void)
+{
+ struct device_node *qe;
+ int ret;
+ struct resource res;
+
+ if (qebase != -1)
+ return qebase;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return qebase;
+
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
+ of_node_put(qe);
+
+ return qebase;
+}
+
+void qe_reset(void)
+{
+ if (qe_immr == NULL)
+ qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
+
+ qe_snums_init();
+
+ qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Reclaim the MURAM memory for our use. */
+ qe_muram_init();
+
+ if (qe_sdma_init())
+ panic("sdma init failed!");
+}
+
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+{
+ unsigned long flags;
+ u8 mcn_shift = 0, dev_shift = 0;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ if (cmd == QE_RESET) {
+ iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
+ } else {
+ if (cmd == QE_ASSIGN_PAGE) {
+ /* Here device is the SNUM, not sub-block */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ } else if (cmd == QE_ASSIGN_RISC) {
+ /* Here device is the SNUM, and mcnProtocol is
+ * e_QeCmdRiscAssignment value */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
+ } else {
+ if (device == QE_CR_SUBBLOCK_USB)
+ mcn_shift = QE_CR_MCN_USB_SHIFT;
+ else
+ mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+ }
+
+ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+ &qe_immr->cp.cecr);
+ }
+
+ /* wait for the QE_CR_FLG to clear */
+ ret = readx_poll_timeout_atomic(ioread32be, &qe_immr->cp.cecr, val,
+ (val & QE_CR_FLG) == 0, 0, 100);
+ /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return ret == 0;
+}
+EXPORT_SYMBOL(qe_issue_cmd);
+
+/* Set a baud rate generator. This needs lots of work. There are
+ * 16 BRGs, which can be connected to the QE channels or output
+ * as clocks. The BRGs are in two different blocks of internal
+ * memory mapped space.
+ * The BRG clock is the QE clock divided by 2.
+ * It was set up long ago during the initial boot phase and is
+ * given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+static unsigned int brg_clk = 0;
+
+#define CLK_GRAN (1000)
+#define CLK_GRAN_LIMIT (5)
+
+unsigned int qe_get_brg_clk(void)
+{
+ struct device_node *qe;
+ u32 brg;
+ unsigned int mod;
+
+ if (brg_clk)
+ return brg_clk;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return brg_clk;
+
+ if (!of_property_read_u32(qe, "brg-frequency", &brg))
+ brg_clk = brg;
+
+ of_node_put(qe);
+
+ /* round this if near to a multiple of CLK_GRAN */
+ mod = brg_clk % CLK_GRAN;
+ if (mod) {
+ if (mod < CLK_GRAN_LIMIT)
+ brg_clk -= mod;
+ else if (mod > (CLK_GRAN - CLK_GRAN_LIMIT))
+ brg_clk += CLK_GRAN - mod;
+ }
+
+ return brg_clk;
+}
+EXPORT_SYMBOL(qe_get_brg_clk);
+
+#define PVR_VER_836x 0x8083
+#define PVR_VER_832x 0x8084
+
+static bool qe_general4_errata(void)
+{
+#ifdef CONFIG_PPC32
+ return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x);
+#endif
+ return false;
+}
+
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
+ */
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+{
+ u32 divisor, tempval;
+ u32 div16 = 0;
+
+ if ((brg < QE_BRG1) || (brg > QE_BRG16))
+ return -EINVAL;
+
+ divisor = qe_get_brg_clk() / (rate * multiplier);
+
+ if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+ div16 = QE_BRGC_DIV16;
+ divisor /= 16;
+ }
+
+ /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+ that the BRG divisor must be even if you're not using divide-by-16
+ mode. */
+ if (qe_general4_errata())
+ if (!div16 && (divisor & 1) && (divisor > 3))
+ divisor++;
+
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
+
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
+*/
+enum qe_clock qe_clock_source(const char *source)
+{
+ unsigned int i;
+
+ if (strcasecmp(source, "none") == 0)
+ return QE_CLK_NONE;
+
+ if (strcmp(source, "tsync_pin") == 0)
+ return QE_TSYNC_PIN;
+
+ if (strcmp(source, "rsync_pin") == 0)
+ return QE_RSYNC_PIN;
+
+ if (strncasecmp(source, "brg", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 16))
+ return (QE_BRG1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ if (strncasecmp(source, "clk", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 24))
+ return (QE_CLK1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ return QE_CLK_DUMMY;
+}
+EXPORT_SYMBOL(qe_clock_source);
+
+/* Initialize SNUMs (thread serial numbers) according to
+ * QE Module Control chapter, SNUM table
+ */
+static void qe_snums_init(void)
+{
+ static const u8 snum_init_76[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+ 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+ 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+ 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+ 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+ 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+ 0xF4, 0xF5, 0xFC, 0xFD,
+ };
+ static const u8 snum_init_46[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
+ 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
+ 0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
+ };
+ struct device_node *qe;
+ const u8 *snum_init;
+ int i;
+
+ bitmap_zero(snum_state, QE_NUM_OF_SNUM);
+ qe_num_of_snum = 28; /* The default number of snum for threads is 28 */
+ qe = qe_get_device_node();
+ if (qe) {
+ i = of_property_read_variable_u8_array(qe, "fsl,qe-snums",
+ snums, 1, QE_NUM_OF_SNUM);
+ if (i > 0) {
+ of_node_put(qe);
+ qe_num_of_snum = i;
+ return;
+ }
+ /*
+ * Fall back to legacy binding of using the value of
+ * fsl,qe-num-snums to choose one of the static arrays
+ * above.
+ */
+ of_property_read_u32(qe, "fsl,qe-num-snums", &qe_num_of_snum);
+ of_node_put(qe);
+ }
+
+ if (qe_num_of_snum == 76) {
+ snum_init = snum_init_76;
+ } else if (qe_num_of_snum == 28 || qe_num_of_snum == 46) {
+ snum_init = snum_init_46;
+ } else {
+ pr_err("QE: unsupported value of fsl,qe-num-snums: %u\n", qe_num_of_snum);
+ return;
+ }
+ memcpy(snums, snum_init, qe_num_of_snum);
+}
+
+int qe_get_snum(void)
+{
+ unsigned long flags;
+ int snum = -EBUSY;
+ int i;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ i = find_first_zero_bit(snum_state, qe_num_of_snum);
+ if (i < qe_num_of_snum) {
+ set_bit(i, snum_state);
+ snum = snums[i];
+ }
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return snum;
+}
+EXPORT_SYMBOL(qe_get_snum);
+
+void qe_put_snum(u8 snum)
+{
+ const u8 *p = memchr(snums, snum, qe_num_of_snum);
+
+ if (p)
+ clear_bit(p - snums, snum_state);
+}
+EXPORT_SYMBOL(qe_put_snum);
+
+static int qe_sdma_init(void)
+{
+ struct sdma __iomem *sdma = &qe_immr->sdma;
+ static s32 sdma_buf_offset = -ENOMEM;
+
+ /* allocate 2 internal temporary buffers (512 bytes each) for
+ * the SDMA */
+ if (sdma_buf_offset < 0) {
+ sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+ if (sdma_buf_offset < 0)
+ return -ENOMEM;
+ }
+
+ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+ &sdma->sdebcr);
+ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+ &sdma->sdmr);
+
+ return 0;
+}
+
+/* The maximum number of RISCs we support */
+#define MAX_QE_RISC 4
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
+/*
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
+ */
+static int qe_firmware_uploaded;
+
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware(). It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+ const struct qe_microcode *ucode)
+{
+ const __be32 *code = base + be32_to_cpu(ucode->code_offset);
+ unsigned int i;
+
+ if (ucode->major || ucode->minor || ucode->revision)
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s' version %u.%u.%u\n",
+ ucode->id, ucode->major, ucode->minor, ucode->revision);
+ else
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s'\n", ucode->id);
+
+ /* Use auto-increment */
+ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+ &qe_immr->iram.iadd);
+
+ for (i = 0; i < be32_to_cpu(ucode->count); i++)
+ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
+
+ /* Set I-RAM Ready Register */
+ iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
+}
+
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See Documentation/powerpc/qe_firmware.rst for information on QE microcode
+ * uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated, they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+ unsigned int i;
+ unsigned int j;
+ u32 crc;
+ size_t calc_size;
+ size_t length;
+ const struct qe_header *hdr;
+
+ if (!firmware) {
+ printk(KERN_ERR "qe-firmware: invalid pointer\n");
+ return -EINVAL;
+ }
+
+ hdr = &firmware->header;
+ length = be32_to_cpu(hdr->length);
+
+ /* Check the magic */
+ if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+ (hdr->magic[2] != 'F')) {
+ printk(KERN_ERR "qe-firmware: not a microcode\n");
+ return -EPERM;
+ }
+
+ /* Check the version */
+ if (hdr->version != 1) {
+ printk(KERN_ERR "qe-firmware: unsupported version\n");
+ return -EPERM;
+ }
+
+ /* Validate some of the fields */
+ if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+ printk(KERN_ERR "qe-firmware: invalid data\n");
+ return -EINVAL;
+ }
+
+ /* Validate the length and check if there's a CRC */
+ calc_size = struct_size(firmware, microcode, firmware->count);
+
+ for (i = 0; i < firmware->count; i++)
+ /*
+ * For situations where the second RISC uses the same microcode
+ * as the first, the 'code_offset' and 'count' fields will be
+ * zero, so it's okay to add those.
+ */
+ calc_size += sizeof(__be32) *
+ be32_to_cpu(firmware->microcode[i].count);
+
+ /* Validate the length */
+ if (length != calc_size + sizeof(__be32)) {
+ printk(KERN_ERR "qe-firmware: invalid length\n");
+ return -EPERM;
+ }
+
+ /* Validate the CRC */
+ crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
+ if (crc != crc32(0, firmware, calc_size)) {
+ printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
+ return -EIO;
+ }
+
+ /*
+ * If the microcode calls for it, split the I-RAM.
+ */
+ if (!firmware->split)
+ qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+ if (firmware->soc.model)
+ printk(KERN_INFO
+ "qe-firmware: firmware '%s' for %u V%u.%u\n",
+ firmware->id, be16_to_cpu(firmware->soc.model),
+ firmware->soc.major, firmware->soc.minor);
+ else
+ printk(KERN_INFO "qe-firmware: firmware '%s'\n",
+ firmware->id);
+
+ /*
+ * The QE only supports one microcode per RISC, so clear out all the
+ * saved microcode information and put in the new.
+ */
+ memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+ strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
+ qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
+ memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+ sizeof(firmware->vtraps));
+
+ /* Loop through each microcode. */
+ for (i = 0; i < firmware->count; i++) {
+ const struct qe_microcode *ucode = &firmware->microcode[i];
+
+ /* Upload a microcode if it's present */
+ if (ucode->code_offset)
+ qe_upload_microcode(firmware, ucode);
+
+ /* Program the traps for this processor */
+ for (j = 0; j < 16; j++) {
+ u32 trap = be32_to_cpu(ucode->traps[j]);
+
+ if (trap)
+ iowrite32be(trap,
+ &qe_immr->rsp[i].tibcr[j]);
+ }
+
+ /* Enable traps */
+ iowrite32be(be32_to_cpu(ucode->eccr),
+ &qe_immr->rsp[i].eccr);
+ }
+
+ qe_firmware_uploaded = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_upload_firmware);
+
+/*
+ * Get info on the currently-loaded firmware
+ *
+ * This function also checks the device tree to see if the boot loader has
+ * uploaded a firmware already.
+ */
+struct qe_firmware_info *qe_get_firmware_info(void)
+{
+ static int initialized;
+ struct device_node *qe;
+ struct device_node *fw = NULL;
+ const char *sprop;
+
+ /*
+ * If we haven't checked yet, and a driver hasn't uploaded a firmware
+ * yet, then check the device tree for information.
+ */
+ if (qe_firmware_uploaded)
+ return &qe_firmware_info;
+
+ if (initialized)
+ return NULL;
+
+ initialized = 1;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return NULL;
+
+ /* Find the 'firmware' child node */
+ fw = of_get_child_by_name(qe, "firmware");
+ of_node_put(qe);
+
+ /* Did we find the 'firmware' node? */
+ if (!fw)
+ return NULL;
+
+ qe_firmware_uploaded = 1;
+
+ /* Copy the data into qe_firmware_info */
+ sprop = of_get_property(fw, "id", NULL);
+ if (sprop)
+ strlcpy(qe_firmware_info.id, sprop,
+ sizeof(qe_firmware_info.id));
+
+ of_property_read_u64(fw, "extended-modes",
+ &qe_firmware_info.extended_modes);
+
+ of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps,
+ ARRAY_SIZE(qe_firmware_info.vtraps));
+
+ of_node_put(fw);
+
+ return &qe_firmware_info;
+}
+EXPORT_SYMBOL(qe_get_firmware_info);
+
+unsigned int qe_get_num_of_risc(void)
+{
+ struct device_node *qe;
+ unsigned int num_of_risc = 0;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return num_of_risc;
+
+ of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc);
+
+ of_node_put(qe);
+
+ return num_of_risc;
+}
+EXPORT_SYMBOL(qe_get_num_of_risc);
+
+unsigned int qe_get_num_of_snums(void)
+{
+ return qe_num_of_snum;
+}
+EXPORT_SYMBOL(qe_get_num_of_snums);
+
+static int __init qe_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np)
+ return -ENODEV;
+ qe_reset();
+ of_node_put(np);
+ return 0;
+}
+subsys_initcall(qe_init);
+
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct platform_device *ofdev)
+{
+ if (!qe_alive_during_sleep())
+ qe_reset();
+ return 0;
+}
+
+static int qe_probe(struct platform_device *ofdev)
+{
+ return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+ { .compatible = "fsl,qe", },
+ { },
+};
+
+static struct platform_driver qe_driver = {
+ .driver = {
+ .name = "fsl-qe",
+ .of_match_table = qe_ids,
+ },
+ .probe = qe_probe,
+ .resume = qe_resume,
+};
+
+builtin_platform_driver(qe_driver);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
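
qe.c exports qe_clock_source() and qe_setbrg() for baud-rate generator setup. A sketch of the typical pairing follows; the "fsl,rx-clock-name" property and the 16x multiplier are assumptions for illustration (the multiplier must match the consuming UCC's GUMR_L[RDCR]/[TDCR] setting), not something this patch mandates:

#include <linux/of.h>
#include <soc/fsl/qe/qe.h>

static int example_setup_brg(struct device_node *np, unsigned int baud)
{
	const char *clk_name;
	enum qe_clock clk;

	/* property name is an assumption borrowed from typical UCC bindings */
	if (of_property_read_string(np, "fsl,rx-clock-name", &clk_name))
		return -EINVAL;

	clk = qe_clock_source(clk_name);
	if (clk < QE_BRG1 || clk > QE_BRG16)
		return -EINVAL;	/* only the 16 BRGs can be programmed here */

	/* 16x oversampling assumed, i.e. GUMR_L[RDCR] set accordingly */
	return qe_setbrg(clk, baud, 16);
}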
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
new file mode 100644
index 000000000..a0cb8e746
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ */
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/qe.h>
+
+static struct gen_pool *muram_pool;
+static DEFINE_SPINLOCK(cpm_muram_lock);
+static void __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+struct muram_block {
+ struct list_head head;
+ s32 start;
+ int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define GENPOOL_OFFSET (4096 * 8)
+
+int cpm_muram_init(void)
+{
+ struct device_node *np;
+ struct resource r;
+ __be32 zero[OF_MAX_ADDR_CELLS] = {};
+ resource_size_t max = 0;
+ int i = 0;
+ int ret = 0;
+
+ if (muram_pbase)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+ if (!np) {
+ /* try legacy bindings */
+ np = of_find_node_by_name(NULL, "data-only");
+ if (!np) {
+ pr_err("Cannot find CPM muram data node");
+ ret = -ENODEV;
+ goto out_muram;
+ }
+ }
+
+ muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
+ muram_pbase = of_translate_address(np, zero);
+ if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+ pr_err("Cannot translate zero through CPM muram node");
+ ret = -ENODEV;
+ goto out_pool;
+ }
+
+ while (of_address_to_resource(np, i++, &r) == 0) {
+ if (r.end > max)
+ max = r.end;
+ ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+ GENPOOL_OFFSET, resource_size(&r), -1);
+ if (ret) {
+ pr_err("QE: couldn't add muram to pool!\n");
+ goto out_pool;
+ }
+ }
+
+ muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+ if (!muram_vbase) {
+ pr_err("Cannot map QE muram");
+ ret = -ENOMEM;
+ goto out_pool;
+ }
+ goto out_muram;
+out_pool:
+ gen_pool_destroy(muram_pool);
+out_muram:
+ of_node_put(np);
+ return ret;
+}
+
+/*
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
+ */
+static s32 cpm_muram_alloc_common(unsigned long size,
+ genpool_algo_t algo, void *data)
+{
+ struct muram_block *entry;
+ s32 start;
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+ if (!start) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ start = start - GENPOOL_OFFSET;
+ memset_io(cpm_muram_addr(start), 0, size);
+ entry->start = start;
+ entry->size = size;
+ list_add(&entry->head, &muram_block_list);
+
+ return start;
+}
+
+/*
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+s32 cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+ s32 start;
+ unsigned long flags;
+ struct genpool_data_align muram_pool_data;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data.align = align;
+ start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+ &muram_pool_data);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
+
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+void cpm_muram_free(s32 offset)
+{
+ unsigned long flags;
+ int size;
+ struct muram_block *tmp;
+
+ if (offset < 0)
+ return;
+
+ size = 0;
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ list_for_each_entry(tmp, &muram_block_list, head) {
+ if (tmp->start == offset) {
+ size = tmp->size;
+ list_del(&tmp->head);
+ kfree(tmp);
+ break;
+ }
+ }
+ gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/*
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ * This function returns @offset if the area was available, a negative
+ * errno otherwise.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+ s32 start;
+ unsigned long flags;
+ struct genpool_data_fixed muram_pool_data_fixed;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
+ start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
+ &muram_pool_data_fixed);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+ return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+unsigned long cpm_muram_offset(const void __iomem *addr)
+{
+ return addr - muram_vbase;
+}
+EXPORT_SYMBOL(cpm_muram_offset);
+
+/**
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
+ * @addr: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+ return muram_pbase + (addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
+
+/*
+ * As cpm_muram_free, but takes the virtual address rather than the
+ * muram offset.
+ */
+void cpm_muram_free_addr(const void __iomem *addr)
+{
+ if (!addr)
+ return;
+ cpm_muram_free(cpm_muram_offset(addr));
+}
+EXPORT_SYMBOL(cpm_muram_free_addr);
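
The MURAM allocator above hands out offsets rather than pointers; cpm_muram_addr() and cpm_muram_dma() convert an offset into the CPU and DMA views respectively. A short sketch of that flow (the descriptor count and alignment are arbitrary example values; the qe_muram_* names used elsewhere in this patch are aliases of these cpm_muram_* helpers in soc/fsl/qe/qe.h):

#include <soc/fsl/qe/qe.h>

/* Allocate room for a small ring of 8-byte buffer descriptors in MURAM. */
static s32 example_alloc_bd_ring(void __iomem **va, dma_addr_t *da)
{
	s32 offset;

	offset = cpm_muram_alloc(64 * 8, 8);	/* 64 BDs, 8-byte aligned */
	if (offset < 0)
		return offset;			/* negative errno */

	*va = cpm_muram_addr(offset);		/* CPU (ioremapped) view */
	*da = cpm_muram_dma(*va);		/* address the QE/SDMA engine sees */
	return offset;				/* keep it for cpm_muram_free() */
}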
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
new file mode 100644
index 000000000..bbae3d39c
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_ic.c
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * QUICC ENGINE Interrupt Controller
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+
+#define NR_QE_IC_INTS 64
+
+/* QE IC registers offset */
+#define QEIC_CICR 0x00
+#define QEIC_CIVEC 0x04
+#define QEIC_CIPXCC 0x10
+#define QEIC_CIPYCC 0x14
+#define QEIC_CIPWCC 0x18
+#define QEIC_CIPZCC 0x1c
+#define QEIC_CIMR 0x20
+#define QEIC_CRIMR 0x24
+#define QEIC_CIPRTA 0x30
+#define QEIC_CIPRTB 0x34
+#define QEIC_CHIVEC 0x60
+
+struct qe_ic {
+ /* Control registers offset */
+ __be32 __iomem *regs;
+
+ /* The remapper for this QEIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* VIRQ numbers of QE high/low irqs */
+ int virq_high;
+ int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+ /* Location of this source at the QIMR register */
+ u32 mask;
+
+ /* Mask register offset */
+ u32 mask_reg;
+
+ /*
+ * For grouped interrupts sources - the interrupt code as
+ * appears at the group priority register
+ */
+ u8 pri_code;
+
+ /* Group priority register offset */
+ u32 pri_reg;
+};
+
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
+
+static struct qe_ic_info qe_ic_info[] = {
+ [1] = {
+ .mask = 0x00008000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [2] = {
+ .mask = 0x00004000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [3] = {
+ .mask = 0x00002000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [10] = {
+ .mask = 0x00000040,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [11] = {
+ .mask = 0x00000020,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [12] = {
+ .mask = 0x00000010,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [13] = {
+ .mask = 0x00000008,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [14] = {
+ .mask = 0x00000004,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 5,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [15] = {
+ .mask = 0x00000002,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 6,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [20] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTA,
+ },
+ [25] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [26] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [27] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [28] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [32] = {
+ .mask = 0x80000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [33] = {
+ .mask = 0x40000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [34] = {
+ .mask = 0x20000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [35] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [36] = {
+ .mask = 0x08000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [40] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [41] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [42] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [43] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPYCC,
+ },
+};
+
+static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
+{
+ return ioread32be(base + (reg >> 2));
+}
+
+static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
+ u32 value)
+{
+ iowrite32be(value, base + (reg >> 2));
+}
+
+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+{
+ return irq_get_chip_data(virq);
+}
+
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
+}
+
+static void qe_ic_unmask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp | qe_ic_info[src].mask);
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static void qe_ic_mask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp & ~qe_ic_info[src].mask);
+
+ /* Flush the above write before enabling interrupts; otherwise,
+ * spurious interrupts will sometimes happen. To be 100% sure
+ * that the write has reached the device before interrupts are
+ * enabled, the mask register would have to be read back; however,
+ * this is not required for correctness, only to avoid wasting
+ * time on a large number of spurious interrupts. In testing,
+ * a sync reduced the observed spurious interrupts to zero.
+ */
+ mb();
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static struct irq_chip qe_ic_irq_chip = {
+ .name = "QEIC",
+ .irq_unmask = qe_ic_unmask_irq,
+ .irq_mask = qe_ic_mask_irq,
+ .irq_mask_ack = qe_ic_mask_irq,
+};
+
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ /* Exact match, unless qe_ic node is NULL */
+ struct device_node *of_node = irq_domain_get_of_node(h);
+ return of_node == NULL || of_node == node;
+}
+
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct qe_ic *qe_ic = h->host_data;
+ struct irq_chip *chip;
+
+ if (hw >= ARRAY_SIZE(qe_ic_info)) {
+ pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qe_ic_info[hw].mask == 0) {
+ printk(KERN_ERR "Can't map reserved IRQ\n");
+ return -EINVAL;
+ }
+ /* Default chip */
+ chip = &qe_ic->hc_irq;
+
+ irq_set_chip_data(virq, qe_ic);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops qe_ic_host_ops = {
+ .match = qe_ic_host_match,
+ .map = qe_ic_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+ /* get the interrupt source vector. */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
+
+ if (irq == 0)
+ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+ /* get the interrupt source vector. */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
+
+ if (irq == 0)
+ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+static void qe_ic_cascade_low(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_high(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ cascade_irq = qe_ic_get_high_irq(qe_ic);
+ if (cascade_irq == 0)
+ cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static int qe_ic_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void (*low_handler)(struct irq_desc *desc);
+ void (*high_handler)(struct irq_desc *desc);
+ struct qe_ic *qe_ic;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
+ if (qe_ic == NULL)
+ return -ENOMEM;
+
+ qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (qe_ic->regs == NULL) {
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
+ }
+
+ qe_ic->hc_irq = qe_ic_irq_chip;
+
+ qe_ic->virq_high = platform_get_irq(pdev, 0);
+ qe_ic->virq_low = platform_get_irq(pdev, 1);
+
+ if (qe_ic->virq_low <= 0)
+ return -ENODEV;
+
+ if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
+ low_handler = qe_ic_cascade_low;
+ high_handler = qe_ic_cascade_high;
+ } else {
+ low_handler = qe_ic_cascade_muxed_mpic;
+ high_handler = NULL;
+ }
+
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
+ if (qe_ic->irqhost == NULL) {
+ dev_err(dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
+ qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
+
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_low, low_handler);
+
+ if (high_handler) {
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_high, high_handler);
+ }
+ return 0;
+}
+static const struct of_device_id qe_ic_ids[] = {
+ { .compatible = "fsl,qe-ic"},
+ { .type = "qeic"},
+ {},
+};
+
+static struct platform_driver qe_ic_driver =
+{
+ .driver = {
+ .name = "qe-ic",
+ .of_match_table = qe_ic_ids,
+ },
+ .probe = qe_ic_init,
+};
+
+static int __init qe_ic_of_init(void)
+{
+ platform_driver_register(&qe_ic_driver);
+ return 0;
+}
+subsys_initcall(qe_ic_of_init);
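
From a consumer's point of view the QEIC is an ordinary irq_domain: a UCC node that lists the QEIC as its interrupt parent has its hardware interrupt translated to a Linux virq by the domain registered above. An illustrative sketch using the standard <linux/of_irq.h> and <linux/interrupt.h> helpers (the function and request name are hypothetical):

#include <linux/interrupt.h>
#include <linux/of_irq.h>

static int example_request_ucc_irq(struct device_node *np,
				   irq_handler_t handler, void *dev_id)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (!virq)
		return -EINVAL;

	/* "ucc-example" is just an illustrative name */
	return request_irq(virq, handler, 0, "ucc-example", dev_id);
}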
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
new file mode 100644
index 000000000..a5e2d0e5a
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_io.c
+ *
+ * QE Parallel I/O ports configuration routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+
+#undef DEBUG
+
+static struct qe_pio_regs __iomem *par_io;
+static int num_par_io_ports = 0;
+
+int par_io_init(struct device_node *np)
+{
+ struct resource res;
+ int ret;
+ u32 num_ports;
+
+ /* Map Parallel I/O ports registers */
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+ par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(np, "num-ports", &num_ports))
+ num_par_io_ports = num_ports;
+
+ return 0;
+}
+
+void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+ int open_drain, int assignment, int has_irq)
+{
+ u32 pin_mask1bit;
+ u32 pin_mask2bits;
+ u32 new_mask2bits;
+ u32 tmp_val;
+
+ /* calculate pin location for single and 2 bits information */
+ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+ /* Set open drain, if required */
+ tmp_val = ioread32be(&par_io->cpodr);
+ if (open_drain)
+ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
+ else
+ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
+
+ /* define direction */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ ioread32be(&par_io->cpdir2) :
+ ioread32be(&par_io->cpdir1);
+
+ /* get all bits mask for 2 bit per port */
+ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+ /* Get the final mask we need for the right definition */
+ new_mask2bits = (u32) (dir << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
+ } else {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
+ }
+ /* define pin assignment */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ ioread32be(&par_io->cppar2) :
+ ioread32be(&par_io->cppar1);
+
+ new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
+ } else {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
+ }
+}
+EXPORT_SYMBOL(__par_io_config_pin);
+
+int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+ int assignment, int has_irq)
+{
+ if (!par_io || port >= num_par_io_ports)
+ return -EINVAL;
+
+ __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
+ has_irq);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_config_pin);
+
+int par_io_data_set(u8 port, u8 pin, u8 val)
+{
+ u32 pin_mask, tmp_val;
+
+ if (port >= num_par_io_ports)
+ return -EINVAL;
+ if (pin >= QE_PIO_PINS)
+ return -EINVAL;
+ /* calculate pin location */
+ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+ tmp_val = ioread32be(&par_io[port].cpdata);
+
+ if (val == 0) /* clear */
+ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
+ else /* set */
+ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
+
+ return 0;
+}
+EXPORT_SYMBOL(par_io_data_set);
+
+int par_io_of_config(struct device_node *np)
+{
+ struct device_node *pio;
+ int pio_map_len;
+ const __be32 *pio_map;
+
+ if (par_io == NULL) {
+ printk(KERN_ERR "par_io not initialized\n");
+ return -1;
+ }
+
+ pio = of_parse_phandle(np, "pio-handle", 0);
+ if (pio == NULL) {
+ printk(KERN_ERR "pio-handle not available\n");
+ return -1;
+ }
+
+ pio_map = of_get_property(pio, "pio-map", &pio_map_len);
+ if (pio_map == NULL) {
+ printk(KERN_ERR "pio-map is not set!\n");
+ return -1;
+ }
+ pio_map_len /= sizeof(unsigned int);
+ if ((pio_map_len % 6) != 0) {
+ printk(KERN_ERR "pio-map format wrong!\n");
+ return -1;
+ }
+
+ while (pio_map_len > 0) {
+ u8 port = be32_to_cpu(pio_map[0]);
+ u8 pin = be32_to_cpu(pio_map[1]);
+ int dir = be32_to_cpu(pio_map[2]);
+ int open_drain = be32_to_cpu(pio_map[3]);
+ int assignment = be32_to_cpu(pio_map[4]);
+ int has_irq = be32_to_cpu(pio_map[5]);
+
+ par_io_config_pin(port, pin, dir, open_drain,
+ assignment, has_irq);
+ pio_map += 6;
+ pio_map_len -= 6;
+ }
+ of_node_put(pio);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_of_config);
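
par_io_config_pin() and par_io_data_set() can also be driven directly from C rather than through a device-tree "pio-map" property (six cells per pin: port, pin, dir, open_drain, assignment, has_irq). A sketch with made-up port/pin numbers:

#include <soc/fsl/qe/qe.h>

static int example_par_io_led(void)
{
	int ret;

	/* port 1, pin 27: output, not open-drain, GPIO (assignment 0), no irq */
	ret = par_io_config_pin(1, 27, QE_PIO_DIR_OUT, 0, 0, 0);
	if (ret)
		return ret;

	return par_io_data_set(1, 27, 1);	/* drive the pin high */
}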
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 000000000..7d7d78d3e
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * Description:
+ * QE TDM API Set - TDM specific routines implementations.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <soc/fsl/qe/qe_tdm.h>
+
+static int set_tdm_framer(const char *tdm_framer_type)
+{
+ if (strcmp(tdm_framer_type, "e1") == 0)
+ return TDM_FRAMER_E1;
+ else if (strcmp(tdm_framer_type, "t1") == 0)
+ return TDM_FRAMER_T1;
+ else
+ return -EINVAL;
+}
+
+static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si_mode_info *si_info = &ut_info->si_info;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
+ si_info->simr_crt = 1;
+ si_info->simr_rfsd = 0;
+ }
+}
+
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+ struct ucc_tdm_info *ut_info)
+{
+ const char *sprop;
+ int ret = 0;
+ u32 val;
+
+ sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
+ if (ret) {
+ pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
+ return -EINVAL;
+ }
+ utdm->tx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
+ return ret;
+ }
+ utdm->rx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
+ return ret;
+ }
+ utdm->tdm_port = val;
+ ut_info->uf_info.tdm_num = utdm->tdm_port;
+
+ if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
+ utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
+ else
+ utdm->tdm_mode = TDM_NORMAL;
+
+ sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
+ if (!sprop) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
+ return ret;
+ }
+ ret = set_tdm_framer(sprop);
+ if (ret < 0)
+ return -EINVAL;
+ utdm->tdm_framer_type = ret;
+
+ ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No siram entry id for UCC\n");
+ return ret;
+ }
+ utdm->siram_entry_id = val;
+
+ set_si_param(utdm, ut_info);
+ return ret;
+}
+EXPORT_SYMBOL(ucc_of_parse_tdm);
+
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si1 __iomem *si_regs;
+ u16 __iomem *siram;
+ u16 siram_entry_valid;
+ u16 siram_entry_closed;
+ u16 ucc_num;
+ u8 csel;
+ u16 sixmr;
+ u16 tdm_port;
+ u32 siram_entry_id;
+ u32 mask;
+ int i;
+
+ si_regs = utdm->si_regs;
+ siram = utdm->siram;
+ ucc_num = ut_info->uf_info.ucc_num;
+ tdm_port = utdm->tdm_port;
+ siram_entry_id = utdm->siram_entry_id;
+
+ if (utdm->tdm_framer_type == TDM_FRAMER_T1)
+ utdm->num_of_ts = 24;
+ if (utdm->tdm_framer_type == TDM_FRAMER_E1)
+ utdm->num_of_ts = 32;
+
+ /* set siram table */
+ csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
+
+ siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
+ siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
+
+ for (i = 0; i < utdm->num_of_ts; i++) {
+ mask = 0x01 << i;
+
+ if (utdm->tx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + i]);
+
+ if (utdm->rx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+
+ sixmr &= ~SIMR_SDM_MASK;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
+ sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
+ else
+ sixmr |= SIMR_SDM_NORMAL;
+
+ sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
+ SIMR_TFSD(ut_info->si_info.simr_tfsd);
+
+ if (ut_info->si_info.simr_crt)
+ sixmr |= SIMR_CRT;
+ if (ut_info->si_info.simr_sl)
+ sixmr |= SIMR_SL;
+ if (ut_info->si_info.simr_ce)
+ sixmr |= SIMR_CE;
+ if (ut_info->si_info.simr_fe)
+ sixmr |= SIMR_FE;
+ if (ut_info->si_info.simr_gm)
+ sixmr |= SIMR_GM;
+
+ if (tdm_port < 4)
+ iowrite16be(sixmr, &si_regs->sixmr1[tdm_port]);
+ else
+ pr_err("QE-TDM: cannot find TDM SIxMR register\n");
+}
+EXPORT_SYMBOL(ucc_tdm_init);
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
new file mode 100644
index 000000000..21dbcd787
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/ucc.c
+ *
+ * QE UCC API Set - UCC specific routines implementations.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+
+#define UCC_TDM_NUM 8
+#define RX_SYNC_SHIFT_BASE 30
+#define TX_SYNC_SHIFT_BASE 14
+#define RX_CLK_SHIFT_BASE 28
+#define TX_CLK_SHIFT_BASE 12
+
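+/*
+ * Select which UCC drives the shared MII management (MDIO) interface;
+ * the choice is encoded in the MII_ENET_MNG field of CMXGCR.
+ */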
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+{
+ unsigned long flags;
+
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+ qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
+
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be configured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 to 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+{
+ u8 __iomem *guemr;
+
+ /* The GUEMR register is at the same location for both slow and fast
+ devices, so we just use uccX.slow.guemr. */
+ switch (ucc_num) {
+ case 0: guemr = &qe_immr->ucc1.slow.guemr;
+ break;
+ case 1: guemr = &qe_immr->ucc2.slow.guemr;
+ break;
+ case 2: guemr = &qe_immr->ucc3.slow.guemr;
+ break;
+ case 3: guemr = &qe_immr->ucc4.slow.guemr;
+ break;
+ case 4: guemr = &qe_immr->ucc5.slow.guemr;
+ break;
+ case 5: guemr = &qe_immr->ucc6.slow.guemr;
+ break;
+ case 6: guemr = &qe_immr->ucc7.slow.guemr;
+ break;
+ case 7: guemr = &qe_immr->ucc8.slow.guemr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
+}
+
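+/*
+ * Each CMXUCR register carries the clock-routing fields for a pair of UCCs:
+ * cmxucr[0] serves UCC1/UCC3, cmxucr[1] UCC5/UCC7, cmxucr[2] UCC2/UCC4 and
+ * cmxucr[3] UCC6/UCC8; the lower-numbered UCC of each pair occupies the
+ * upper 16 bits of the register.
+ */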
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+ unsigned int *reg_num, unsigned int *shift)
+{
+ unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+ *reg_num = cmx + 1;
+ *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+ *shift = 16 - 8 * (ucc_num & 2);
+}
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ if (set)
+ qe_setbits_be32(cmxucr, mask << shift);
+ else
+ qe_clrbits_be32(cmxucr, mask << shift);
+
+ return 0;
+}
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+ u32 clock_bits = 0;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ switch (reg_num) {
+ case 1:
+ switch (clock) {
+ case QE_BRG1: clock_bits = 1; break;
+ case QE_BRG2: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK9: clock_bits = 5; break;
+ case QE_CLK10: clock_bits = 6; break;
+ case QE_CLK11: clock_bits = 7; break;
+ case QE_CLK12: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_BRG5: clock_bits = 1; break;
+ case QE_BRG6: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK13: clock_bits = 5; break;
+ case QE_CLK14: clock_bits = 6; break;
+ case QE_CLK19: clock_bits = 7; break;
+ case QE_CLK20: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_BRG9: clock_bits = 1; break;
+ case QE_BRG10: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK3: clock_bits = 5; break;
+ case QE_CLK4: clock_bits = 6; break;
+ case QE_CLK17: clock_bits = 7; break;
+ case QE_CLK18: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_BRG13: clock_bits = 1; break;
+ case QE_BRG14: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK5: clock_bits = 5; break;
+ case QE_CLK6: clock_bits = 6; break;
+ case QE_CLK21: clock_bits = 7; break;
+ case QE_CLK22: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ default: break;
+ }
+
+ /* Check for invalid combination of clock and UCC number */
+ if (!clock_bits)
+ return -ENOENT;
+
+ if (mode == COMM_DIR_RX)
+ shift += 4;
+
+ qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
+
+static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ /*
+ * for TDM[0, 1, 2, 3], TX and RX use common
+ * clock source BRG3,4 and CLK1,2
+ * for TDM[4, 5, 6, 7], TX and RX use common
+ * clock source BRG12,13 and CLK23,24
+ */
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG3:
+ clock_bits = 1;
+ break;
+ case QE_BRG4:
+ clock_bits = 2;
+ break;
+ case QE_CLK1:
+ clock_bits = 4;
+ break;
+ case QE_CLK2:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG12:
+ clock_bits = 1;
+ break;
+ case QE_BRG13:
+ clock_bits = 2;
+ break;
+ case QE_CLK23:
+ clock_bits = 4;
+ break;
+ case QE_CLK24:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK3:
+ clock_bits = 6;
+ break;
+ case QE_CLK8:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK5:
+ clock_bits = 6;
+ break;
+ case QE_CLK10:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK7:
+ clock_bits = 6;
+ break;
+ case QE_CLK12:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK9:
+ clock_bits = 6;
+ break;
+ case QE_CLK14:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK11:
+ clock_bits = 6;
+ break;
+ case QE_CLK16:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK13:
+ clock_bits = 6;
+ break;
+ case QE_CLK18:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK15:
+ clock_bits = 6;
+ break;
+ case QE_CLK20:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK17:
+ clock_bits = 6;
+ break;
+ case QE_CLK22:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK4:
+ clock_bits = 6;
+ break;
+ case QE_CLK9:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK6:
+ clock_bits = 6;
+ break;
+ case QE_CLK11:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK8:
+ clock_bits = 6;
+ break;
+ case QE_CLK13:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK10:
+ clock_bits = 6;
+ break;
+ case QE_CLK15:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK12:
+ clock_bits = 6;
+ break;
+ case QE_CLK17:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK14:
+ clock_bits = 6;
+ break;
+ case QE_CLK19:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK16:
+ clock_bits = 6;
+ break;
+ case QE_CLK21:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK18:
+ clock_bits = 6;
+ break;
+ case QE_CLK3:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+/* tdm_num: TDM A-H port num is 0-7 */
+static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
+ enum qe_clock clock)
+{
+ int clock_bits;
+
+ clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
+ if (clock_bits > 0)
+ return clock_bits;
+ if (mode == COMM_DIR_RX)
+ clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
+ if (mode == COMM_DIR_TX)
+ clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
+ return clock_bits;
+}
+
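+/*
+ * Clock-source fields are packed four bits apart: RX fields start at bit 28
+ * and TX fields at bit 12, with TDM ports 0-3 mapped to CMXSI1CR_L and
+ * ports 4-7 to CMXSI1CR_H.
+ */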
+static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
+ if (tdm_num < 4)
+ shift -= tdm_num * 4;
+ else
+ shift -= (tdm_num - 4) * 4;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int clock_bits;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+ __be32 __iomem *cmxs1cr;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num > 7)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
+ if (clock_bits < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_clk_shift(mode, tdm_num);
+
+ cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
+ &qe_mux_reg->cmxsi1cr_h;
+
+ qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
+
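+/*
+ * Sync-source encoding: 0 selects the external RSYNC/TSYNC pin, while 1 and
+ * 2 select one of the two BRGs available to that TDM pair.
+ */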
+static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source = -EINVAL;
+
+ if (mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+ if (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG10:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG11:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG14:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG15:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return source;
+}
+
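+/*
+ * Sync-source fields in CMXSI1SYR are packed two bits apart: RX fields start
+ * at bit 30 and TX fields at bit 14, one field per TDM port.
+ */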
+static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+ shift -= tdm_num * 2;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
+ if (source < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_sync_shift(mode, tdm_num);
+
+ qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
new file mode 100644
index 000000000..53d8aafc9
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
+{
+ printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+ printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+ printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
+ printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
+ printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
+ printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
+ printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
+ printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
+ printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
+ printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
+ printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
+ printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
+ printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfset,
+ ioread16be(&uccf->uf_regs->urfset));
+ printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
+ printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
+ printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
+ printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
+ printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
+ printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
+ printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
+}
+EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
+{
+ switch (uccf_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+ case 1: return QE_CR_SUBBLOCK_UCCFAST2;
+ case 2: return QE_CR_SUBBLOCK_UCCFAST3;
+ case 3: return QE_CR_SUBBLOCK_UCCFAST4;
+ case 4: return QE_CR_SUBBLOCK_UCCFAST5;
+ case 5: return QE_CR_SUBBLOCK_UCCFAST6;
+ case 6: return QE_CR_SUBBLOCK_UCCFAST7;
+ case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
+
+void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
+{
+ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
+}
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr |= UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr |= UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 1;
+ }
+ iowrite32be(gumr, &uf_regs->gumr);
+}
+EXPORT_SYMBOL(ucc_fast_enable);
+
+void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr &= ~UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr &= ~UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 0;
+ }
+ iowrite32be(gumr, &uf_regs->gumr);
+}
+EXPORT_SYMBOL(ucc_fast_disable);
+
+int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
+{
+ struct ucc_fast_private *uccf;
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+ int ret;
+
+ if (!uf_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check that 'max_rx_buf_length' is properly aligned (4). */
+ if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Validate Virtual Fifo register values */
+ if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
+ printk(KERN_ERR "%s: urfs is too small\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+ if (!uccf) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
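+ /* Mark the virtual FIFO offsets as unallocated so that the error
+ * paths below can call ucc_fast_free() safely. */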
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
+
+ /* Fill fast UCC structure */
+ uccf->uf_info = uf_info;
+ /* Map the physical address of the UCC registers */
+ uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
+ if (uccf->uf_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccf);
+ return -ENOMEM;
+ }
+
+ uccf->enabled_tx = 0;
+ uccf->enabled_rx = 0;
+ uccf->stopped_tx = 0;
+ uccf->stopped_rx = 0;
+ uf_regs = uccf->uf_regs;
+ uccf->p_ucce = &uf_regs->ucce;
+ uccf->p_uccm = &uf_regs->uccm;
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf->p_utodr = &uf_regs->utodr;
+#endif
+#ifdef STATISTICS
+ uccf->tx_frames = 0;
+ uccf->rx_frames = 0;
+ uccf->rx_discarded = 0;
+#endif /* STATISTICS */
+
+ /* Set UCC to fast type */
+ ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
+ ucc_fast_free(uccf);
+ return ret;
+ }
+
+ uccf->mrblr = uf_info->max_rx_buf_length;
+
+ /* Set GUMR */
+ /* For more details see the hardware spec. */
+ gumr = uf_info->ttx_trx;
+ if (uf_info->tci)
+ gumr |= UCC_FAST_GUMR_TCI;
+ if (uf_info->cdp)
+ gumr |= UCC_FAST_GUMR_CDP;
+ if (uf_info->ctsp)
+ gumr |= UCC_FAST_GUMR_CTSP;
+ if (uf_info->cds)
+ gumr |= UCC_FAST_GUMR_CDS;
+ if (uf_info->ctss)
+ gumr |= UCC_FAST_GUMR_CTSS;
+ if (uf_info->txsy)
+ gumr |= UCC_FAST_GUMR_TXSY;
+ if (uf_info->rsyn)
+ gumr |= UCC_FAST_GUMR_RSYN;
+ gumr |= uf_info->synl;
+ if (uf_info->rtsm)
+ gumr |= UCC_FAST_GUMR_RTSM;
+ gumr |= uf_info->renc;
+ if (uf_info->revd)
+ gumr |= UCC_FAST_GUMR_REVD;
+ gumr |= uf_info->tenc;
+ gumr |= uf_info->tcrc;
+ gumr |= uf_info->mode;
+ iowrite32be(gumr, &uf_regs->gumr);
+
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for Rx Virtual Fifo */
+ uccf->ucc_fast_rx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->urfs +
+ UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Set Virtual Fifo registers */
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+ &uf_regs->utfb);
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+ &uf_regs->urfb);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
+ /* Set Tsa or NMSI mode. */
+ ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
+ /* If NMSI (not Tsa), set Tx and Rx clock. */
+ if (!uf_info->tsa) {
+ /* Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ } else {
+ /* tdm Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Rx sync clock routing */
+ if ((uf_info->rx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx sync clock routing */
+ if ((uf_info->tx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+
+ /* First, clear anything pending at UCC level,
+ * otherwise, old garbage may come through
+ * as soon as the dam is opened. */
+
+ /* Writing '1' clears */
+ iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ *uccf_ret = uccf;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_fast_init);
+
+void ucc_fast_free(struct ucc_fast_private * uccf)
+{
+ if (!uccf)
+ return;
+
+ qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+ if (uccf->uf_regs)
+ iounmap(uccf->uf_regs);
+
+ kfree(uccf);
+}
+EXPORT_SYMBOL(ucc_fast_free);
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
new file mode 100644
index 000000000..d5ac1ac0e
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Slow API Set - UCC Slow specific routines implementations.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_slow.h>
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
+{
+ switch (uccs_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
+ case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
+ case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
+ case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
+ case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
+ case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
+ case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
+ case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
+
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
+
+void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_stop_tx);
+
+void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_restart_tx);
+
+void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
+{
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr_l = ioread32be(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 1;
+ }
+ iowrite32be(gumr_l, &us_regs->gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_enable);
+
+void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
+{
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr_l = ioread32be(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 0;
+ }
+ iowrite32be(gumr_l, &us_regs->gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_disable);
+
+/* Initialize the UCC for slow operation.
+ *
+ * The caller must fill in the relevant fields of us_info before calling
+ * this function.
+ */
+int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
+{
+ struct ucc_slow_private *uccs;
+ u32 i;
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr;
+ struct qe_bd __iomem *bd;
+ u32 id;
+ u32 command;
+ int ret = 0;
+
+ if (!us_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Set mrblr
+ * Check that 'max_rx_buf_length' is properly aligned (4), unless
+ * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
+ * case when QE accepts 32 bits at a time.
+ */
+ if ((!us_info->rfw) &&
+ (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
+ printk(KERN_ERR "max_rx_buf_length not aligned.\n");
+ return -EINVAL;
+ }
+
+ uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+ if (!uccs) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
+ uccs->rx_base_offset = -1;
+ uccs->tx_base_offset = -1;
+ uccs->us_pram_offset = -1;
+
+ /* Fill slow UCC structure */
+ uccs->us_info = us_info;
+ /* Map the physical address of the UCC registers */
+ uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
+ if (uccs->us_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccs);
+ return -ENOMEM;
+ }
+
+ us_regs = uccs->us_regs;
+ uccs->p_ucce = &us_regs->ucce;
+ uccs->p_uccm = &us_regs->uccm;
+
+ /* Get PRAM base */
+ uccs->us_pram_offset =
+ qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
+ if (uccs->us_pram_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
+ uccs->us_pram_offset);
+
+ uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
+
+ /* Set UCC to slow type */
+ ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type", __func__);
+ ucc_slow_free(uccs);
+ return ret;
+ }
+
+ iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
+
+ INIT_LIST_HEAD(&uccs->confQ);
+
+ /* Allocate BDs. */
+ uccs->rx_base_offset =
+ qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (uccs->rx_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
+ us_info->rx_bd_ring_len);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+ uccs->tx_base_offset =
+ qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (uccs->tx_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+ /* Init Tx bds */
+ bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
+ for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
+ /* clear bd buffer */
+ iowrite32be(0, &bd->buf);
+ /* set bd status and length */
+ iowrite32be(0, (u32 __iomem *)bd);
+ bd++;
+ }
+ /* for last BD set Wrap bit */
+ iowrite32be(0, &bd->buf);
+ iowrite32be(T_W, (u32 __iomem *)bd);
+
+ /* Init Rx bds */
+ bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
+ for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
+ /* set bd status and length */
+ iowrite32be(0, (u32 __iomem *)bd);
+ /* clear bd buffer */
+ iowrite32be(0, &bd->buf);
+ bd++;
+ }
+ /* for last BD set Wrap bit */
+ iowrite32be(R_W, (u32 __iomem *)bd);
+ iowrite32be(0, &bd->buf);
+
+ /* Set GUMR (For more details see the hardware spec.). */
+ /* gumr_h */
+ gumr = us_info->tcrc;
+ if (us_info->cdp)
+ gumr |= UCC_SLOW_GUMR_H_CDP;
+ if (us_info->ctsp)
+ gumr |= UCC_SLOW_GUMR_H_CTSP;
+ if (us_info->cds)
+ gumr |= UCC_SLOW_GUMR_H_CDS;
+ if (us_info->ctss)
+ gumr |= UCC_SLOW_GUMR_H_CTSS;
+ if (us_info->tfl)
+ gumr |= UCC_SLOW_GUMR_H_TFL;
+ if (us_info->rfw)
+ gumr |= UCC_SLOW_GUMR_H_RFW;
+ if (us_info->txsy)
+ gumr |= UCC_SLOW_GUMR_H_TXSY;
+ if (us_info->rtsm)
+ gumr |= UCC_SLOW_GUMR_H_RTSM;
+ iowrite32be(gumr, &us_regs->gumr_h);
+
+ /* gumr_l */
+ gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
+ (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
+ if (us_info->tci)
+ gumr |= UCC_SLOW_GUMR_L_TCI;
+ if (us_info->rinv)
+ gumr |= UCC_SLOW_GUMR_L_RINV;
+ if (us_info->tinv)
+ gumr |= UCC_SLOW_GUMR_L_TINV;
+ if (us_info->tend)
+ gumr |= UCC_SLOW_GUMR_L_TEND;
+ iowrite32be(gumr, &us_regs->gumr_l);
+
+ /* Function code registers */
+
+ /* if the data is in cachable memory, the 'global' */
+ /* in the function code should be set. */
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
+
+ /* rbase, tbase are offsets from MURAM base */
+ iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+ iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
+ /* Set Tsa or NMSI mode. */
+ ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
+ /* If NMSI (not Tsa), set Tx and Rx clock. */
+ if (!us_info->tsa) {
+ /* Rx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ iowrite16be(us_info->uccm_mask, &us_regs->uccm);
+
+ /* First, clear anything pending at UCC level,
+ * otherwise, old garbage may come through
+ * as soon as the dam is opened. */
+
+ /* Writing '1' clears */
+ iowrite16be(0xffff, &us_regs->ucce);
+
+ /* Issue QE Init command */
+ if (us_info->init_tx && us_info->init_rx)
+ command = QE_INIT_TX_RX;
+ else if (us_info->init_tx)
+ command = QE_INIT_TX;
+ else
+ command = QE_INIT_RX; /* We know at least one is TRUE */
+
+ qe_issue_cmd(command, id, us_info->protocol, 0);
+
+ *uccs_ret = uccs;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_slow_init);
+
+void ucc_slow_free(struct ucc_slow_private * uccs)
+{
+ if (!uccs)
+ return;
+
+ qe_muram_free(uccs->rx_base_offset);
+ qe_muram_free(uccs->tx_base_offset);
+ qe_muram_free(uccs->us_pram_offset);
+
+ if (uccs->us_regs)
+ iounmap(uccs->us_regs);
+
+ kfree(uccs);
+}
+EXPORT_SYMBOL(ucc_slow_free);
+
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
new file mode 100644
index 000000000..890f236ea
--- /dev/null
+++ b/drivers/soc/fsl/qe/usb.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * QE USB routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ * Shlomi Gridish <gridish@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+int qe_usb_clock_set(enum qe_clock clk, int rate)
+{
+ struct qe_mux __iomem *mux = &qe_immr->qmx;
+ unsigned long flags;
+ u32 val;
+
+ switch (clk) {
+ case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break;
+ case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break;
+ case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break;
+ case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break;
+ case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
+ case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
+ case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
+ case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
+ case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break;
+ case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
+ default:
+ pr_err("%s: requested unknown clock %d\n", __func__, clk);
+ return -EINVAL;
+ }
+
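+ /* If the selected source is a BRG rather than a CLK pin, program it
+ * to the requested rate before switching the mux. */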
+ if (qe_clock_is_brg(clk))
+ qe_setbrg(clk, rate, 1);
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+
+ qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_usb_clock_set);